Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r-- drivers/net/ethernet/8390/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/8390/mac8390.c | 32
-rw-r--r-- drivers/net/ethernet/aeroflex/greth.c | 2
-rw-r--r-- drivers/net/ethernet/amd/7990.c | 1
-rw-r--r-- drivers/net/ethernet/amd/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/amd/au1000_eth.c | 1
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 83
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 10
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 107
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-main.c | 2
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 16
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe.h | 18
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 11
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_hw.h | 1
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 65
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 7
-rw-r--r-- drivers/net/ethernet/apple/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/atheros/atlx/atl1.c | 1
-rw-r--r-- drivers/net/ethernet/atheros/atlx/atl2.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/Kconfig | 19
-rw-r--r-- drivers/net/ethernet/broadcom/Makefile | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2.c | 81
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/Makefile | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 5728
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1086
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 1149
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 17
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h | 104
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 4046
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h | 59
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 816
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h | 23
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 154
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.h | 10
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmmii.c | 18
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 21
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 206
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 39
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 36
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 1470
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.h | 5
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 2
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic.h | 28
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_main.c | 122
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_dev.c | 4
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_dev.h | 2
-rw-r--r-- drivers/net/ethernet/dec/tulip/de2104x.c | 1
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_ethtool.c | 3
-rw-r--r-- drivers/net/ethernet/ethoc.c | 14
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 5
-rw-r--r-- drivers/net/ethernet/freescale/fec_ptp.c | 6
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 157
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.h | 12
-rw-r--r-- drivers/net/ethernet/freescale/gianfar_ethtool.c | 45
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/Kconfig | 35
-rw-r--r-- drivers/net/ethernet/hisilicon/Makefile | 4
-rw-r--r-- drivers/net/ethernet/hisilicon/hip04_mdio.c | 185
-rw-r--r-- drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/Makefile | 12
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hnae.c | 457
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hnae.h | 585
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 783
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | 704
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.h | 45
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 902
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 456
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 2474
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | 428
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 310
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h | 43
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 583
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h | 105
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 1021
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h | 139
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 972
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c | 837
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h | 15
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 1642
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_enet.h | 84
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 1214
-rw-r--r-- drivers/net/ethernet/hisilicon/hns_mdio.c | 521
-rw-r--r-- drivers/net/ethernet/ibm/emac/core.c | 1
-rw-r--r-- drivers/net/ethernet/ibm/emac/core.h | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_hw.c | 8
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_main.c | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 3
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k.h | 19
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c | 11
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 220
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 8
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 50
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_mbx.c | 48
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 39
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 198
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 105
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_type.h | 7
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_vf.c | 14
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e.h | 70
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq.c | 79
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq.h | 9
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 27
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_common.c | 119
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_dcb.c | 292
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_dcb.h | 39
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | 9
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 168
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_devids.h | 55
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 406
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_fcoe.c | 13
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c | 3
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 1159
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_nvm.c | 430
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_prototype.h | 9
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ptp.c | 7
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 426
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 57
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_type.h | 124
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl.h | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 91
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 5
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 57
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_adminq.h | 9
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 4
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_common.c | 41
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_devids.h | 55
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_prototype.h | 6
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 293
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 41
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_type.h | 116
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h | 2
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf.h | 12
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 195
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 64
-rw-r--r-- drivers/net/ethernet/intel/igb/igb.h | 4
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 35
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ptp.c | 8
-rw-r--r-- drivers/net/ethernet/intel/igbvf/ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/intel/igbvf/netdev.c | 3
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c | 3
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 13
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 202
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 226
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 5
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c | 8
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 5
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 136
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 291
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 35
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 96
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 13
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 102
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 503
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 6
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 23
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/mbx.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/vf.c | 41
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/vf.h | 1
-rw-r--r-- drivers/net/ethernet/marvell/mv643xx_eth.c | 44
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 433
-rw-r--r-- drivers/net/ethernet/marvell/sky2.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 167
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/fw.c | 16
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mr.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/qp.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 192
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cq.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 24
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 26
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/health.c | 224
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 571
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 26
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mr.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 76
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/port.c | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/qp.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/srq.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/Kconfig | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/Makefile | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/cmd.h | 51
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/item.h | 50
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci.c | 63
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/reg.h | 1263
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 1949
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 122
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 422
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 903
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 41
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/txheader.h | 1
-rw-r--r-- drivers/net/ethernet/microchip/Kconfig | 9
-rw-r--r-- drivers/net/ethernet/microchip/Makefile | 1
-rw-r--r-- drivers/net/ethernet/microchip/encx24j600-regmap.c | 513
-rw-r--r-- drivers/net/ethernet/microchip/encx24j600.c | 1129
-rw-r--r-- drivers/net/ethernet/microchip/encx24j600_hw.h | 437
-rw-r--r-- drivers/net/ethernet/neterion/s2io.c | 2
-rw-r--r-- drivers/net/ethernet/neterion/vxge/vxge-ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/octeon/octeon_mgmt.c | 4
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c | 1
-rw-r--r-- drivers/net/ethernet/qlogic/Kconfig | 11
-rw-r--r-- drivers/net/ethernet/qlogic/Makefile | 2
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qed/Makefile | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed.h | 496
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 847
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_cxt.h | 139
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev.c | 1797
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 283
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 5291
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hw.c | 776
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hw.h | 263
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 798
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_init_ops.c | 531
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_init_ops.h | 110
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_int.c | 1134
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_int.h | 391
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_l2.c | 1704
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_main.c | 1169
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 860
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 369
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 366
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sp.h | 360
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 170
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_spq.c | 860
-rw-r--r-- drivers/net/ethernet/qlogic/qede/Makefile | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede.h | 285
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 385
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_main.c | 2584
-rw-r--r-- drivers/net/ethernet/qlogic/qla3xxx.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c | 7
-rw-r--r-- drivers/net/ethernet/realtek/8139cp.c | 24
-rw-r--r-- drivers/net/ethernet/realtek/8139too.c | 1
-rw-r--r-- drivers/net/ethernet/renesas/ravb.h | 7
-rw-r--r-- drivers/net/ethernet/renesas/ravb_main.c | 105
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 22
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.h | 2
-rw-r--r-- drivers/net/ethernet/rocker/rocker.c | 467
-rw-r--r-- drivers/net/ethernet/sfc/ef10.c | 34
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 4
-rw-r--r-- drivers/net/ethernet/sfc/farch.c | 4
-rw-r--r-- drivers/net/ethernet/sfc/mcdi.c | 13
-rw-r--r-- drivers/net/ethernet/sfc/net_driver.h | 132
-rw-r--r-- drivers/net/ethernet/sfc/tx.c | 30
-rw-r--r-- drivers/net/ethernet/smsc/smc91x.c | 29
-rw-r--r-- drivers/net/ethernet/smsc/smc91x.h | 99
-rw-r--r-- drivers/net/ethernet/smsc/smsc911x.c | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 7
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 10
-rw-r--r-- drivers/net/ethernet/sun/cassini.c | 3
-rw-r--r-- drivers/net/ethernet/tehuti/tehuti.c | 5
-rw-r--r-- drivers/net/ethernet/ti/cpmac.c | 1
-rw-r--r-- drivers/net/ethernet/ti/cpsw-common.c | 64
-rw-r--r-- drivers/net/ethernet/ti/cpsw-phy-sel.c | 10
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 33
-rw-r--r-- drivers/net/ethernet/ti/cpsw.h | 3
-rw-r--r-- drivers/net/ethernet/ti/davinci_emac.c | 52
-rw-r--r-- drivers/net/ethernet/ti/tlan.c | 1
-rw-r--r-- drivers/net/ethernet/xilinx/ll_temac_mdio.c | 9
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 1
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c | 5
286 files changed, 65316 insertions, 4201 deletions
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index edf72258ab1d..29c3075bfb05 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -64,7 +64,7 @@ config ARM_ETHERH
should say Y to this option if you wish to use it with Linux.
config MAC8390
- bool "Macintosh NS 8390 based ethernet cards"
+ tristate "Macintosh NS 8390 based ethernet cards"
depends on MAC
select CRC32
---help---
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index 65cf60f6718c..b9283901136e 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -454,34 +454,22 @@ MODULE_AUTHOR("David Huggins-Daines <dhd@debian.org> and others");
MODULE_DESCRIPTION("Macintosh NS8390-based Nubus Ethernet driver");
MODULE_LICENSE("GPL");
-/* overkill, of course */
-static struct net_device *dev_mac8390[15];
-int init_module(void)
+static struct net_device *dev_mac8390;
+
+int __init init_module(void)
{
- int i;
- for (i = 0; i < 15; i++) {
- struct net_device *dev = mac8390_probe(-1);
- if (IS_ERR(dev))
- break;
- dev_mac890[i] = dev;
- }
- if (!i) {
- pr_notice("No useable cards found, driver NOT installed.\n");
- return -ENODEV;
+ dev_mac8390 = mac8390_probe(-1);
+ if (IS_ERR(dev_mac8390)) {
+ pr_warn("mac8390: No card found\n");
+ return PTR_ERR(dev_mac8390);
}
return 0;
}
-void cleanup_module(void)
+void __exit cleanup_module(void)
{
- int i;
- for (i = 0; i < 15; i++) {
- struct net_device *dev = dev_mac890[i];
- if (dev) {
- unregister_netdev(dev);
- free_netdev(dev);
- }
- }
+ unregister_netdev(dev_mac8390);
+ free_netdev(dev_mac8390);
}
#endif /* MODULE */
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index ae89de7deb13..20bf55dbd76f 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1141,8 +1141,6 @@ static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *in
strlcpy(info->version, "revision: 1.0", sizeof(info->version));
strlcpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info));
strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
- info->eedump_len = 0;
- info->regdump_len = sizeof(struct greth_regs);
}
static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index 98a10d555b79..66d0b73c39c0 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -661,6 +661,7 @@ void lance_poll(struct net_device *dev)
spin_unlock(&lp->devlock);
lance_interrupt(dev->irq, dev);
}
+EXPORT_SYMBOL_GPL(lance_poll);
#endif
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index afc62ea804fc..0038709fd317 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -100,7 +100,7 @@ config DECLANCE
DEPCA series. (This chipset is better known via the NE2100 cards.)
config HPLANCE
- bool "HP on-board LANCE support"
+ tristate "HP on-board LANCE support"
depends on DIO
select CRC32
---help---
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index cb367cc59e0b..5330bcb8a944 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -714,7 +714,6 @@ au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
snprintf(info->bus_info, sizeof(info->bus_info), "%s %d", DRV_NAME,
aup->mac_id);
- info->regdump_len = 0;
}
static void au1000_set_msglevel(struct net_device *dev, u32 value)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index f672dba345f7..970781a9e677 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1940,84 +1940,31 @@ static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size,
unsigned int queue_count)
{
- unsigned int q_fifo_size = 0;
- enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256;
+ unsigned int q_fifo_size;
+ unsigned int p_fifo;
- /* Calculate Tx/Rx fifo share per queue */
- switch (fifo_size) {
- case 0:
- q_fifo_size = XGBE_FIFO_SIZE_B(128);
- break;
- case 1:
- q_fifo_size = XGBE_FIFO_SIZE_B(256);
- break;
- case 2:
- q_fifo_size = XGBE_FIFO_SIZE_B(512);
- break;
- case 3:
- q_fifo_size = XGBE_FIFO_SIZE_KB(1);
- break;
- case 4:
- q_fifo_size = XGBE_FIFO_SIZE_KB(2);
- break;
- case 5:
- q_fifo_size = XGBE_FIFO_SIZE_KB(4);
- break;
- case 6:
- q_fifo_size = XGBE_FIFO_SIZE_KB(8);
- break;
- case 7:
- q_fifo_size = XGBE_FIFO_SIZE_KB(16);
- break;
- case 8:
- q_fifo_size = XGBE_FIFO_SIZE_KB(32);
- break;
- case 9:
- q_fifo_size = XGBE_FIFO_SIZE_KB(64);
- break;
- case 10:
- q_fifo_size = XGBE_FIFO_SIZE_KB(128);
- break;
- case 11:
- q_fifo_size = XGBE_FIFO_SIZE_KB(256);
- break;
- }
+ /* Calculate the configured fifo size */
+ q_fifo_size = 1 << (fifo_size + 7);
- /* The configured value is not the actual amount of fifo RAM */
+ /* The configured value may not be the actual amount of fifo RAM */
q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size);
q_fifo_size = q_fifo_size / queue_count;
- /* Set the queue fifo size programmable value */
- if (q_fifo_size >= XGBE_FIFO_SIZE_KB(256))
- p_fifo = XGMAC_MTL_FIFO_SIZE_256K;
- else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(128))
- p_fifo = XGMAC_MTL_FIFO_SIZE_128K;
- else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(64))
- p_fifo = XGMAC_MTL_FIFO_SIZE_64K;
- else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(32))
- p_fifo = XGMAC_MTL_FIFO_SIZE_32K;
- else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(16))
- p_fifo = XGMAC_MTL_FIFO_SIZE_16K;
- else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(8))
- p_fifo = XGMAC_MTL_FIFO_SIZE_8K;
- else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(4))
- p_fifo = XGMAC_MTL_FIFO_SIZE_4K;
- else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(2))
- p_fifo = XGMAC_MTL_FIFO_SIZE_2K;
- else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(1))
- p_fifo = XGMAC_MTL_FIFO_SIZE_1K;
- else if (q_fifo_size >= XGBE_FIFO_SIZE_B(512))
- p_fifo = XGMAC_MTL_FIFO_SIZE_512;
- else if (q_fifo_size >= XGBE_FIFO_SIZE_B(256))
- p_fifo = XGMAC_MTL_FIFO_SIZE_256;
+ /* Each increment in the queue fifo size represents 256 bytes of
+ * fifo, with 0 representing 256 bytes. Distribute the fifo equally
+ * between the queues.
+ */
+ p_fifo = q_fifo_size / 256;
+ if (p_fifo)
+ p_fifo--;
return p_fifo;
}
static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
{
- enum xgbe_mtl_fifo_size fifo_size;
+ unsigned int fifo_size;
unsigned int i;
fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
@@ -2033,7 +1980,7 @@ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
{
- enum xgbe_mtl_fifo_size fifo_size;
+ unsigned int fifo_size;
unsigned int i;
fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
@@ -2224,7 +2171,7 @@ static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
default:
read_hi = false;
- };
+ }
val = XGMAC_IOREAD(pdata, reg_lo);
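
The rewritten xgbe_calculate_per_queue_fifo() above replaces the two lookup tables with straight arithmetic: the hardware-reported fifo_size is an exponent (0 means 128 bytes, each step doubles it), the total is capped at XGBE_FIFO_MAX (81920 bytes, per xgbe.h), split evenly across the queues, and the per-queue result is encoded in 256-byte increments where 0 means 256 bytes. A minimal standalone C sketch of that arithmetic, with two worked values (illustrative only, not part of the patch):

#include <stdio.h>

#define XGBE_FIFO_MAX	81920

/* Mirrors the arithmetic of the new xgbe_calculate_per_queue_fifo(). */
static unsigned int per_queue_fifo(unsigned int fifo_size, unsigned int queue_count)
{
	/* fifo_size is an exponent: 0 -> 128 bytes, 1 -> 256 bytes, ... */
	unsigned int q_fifo_size = 1 << (fifo_size + 7);
	unsigned int p_fifo;

	/* The configured value may not be the actual amount of fifo RAM. */
	if (q_fifo_size > XGBE_FIFO_MAX)
		q_fifo_size = XGBE_FIFO_MAX;
	q_fifo_size /= queue_count;

	/* Register encoding: one unit per 256 bytes, with 0 meaning 256 bytes. */
	p_fifo = q_fifo_size / 256;
	if (p_fifo)
		p_fifo--;

	return p_fifo;
}

int main(void)
{
	/* fifo_size 9 -> 64 KB total, 4 queues -> 16 KB per queue -> encoding 63 */
	printf("%u\n", per_queue_fifo(9, 4));
	/* fifo_size 11 -> 256 KB requested, capped at 80 KB, 8 queues -> 10240 bytes -> encoding 39 */
	printf("%u\n", per_queue_fifo(11, 8));
	return 0;
}
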
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index dde0486667e0..53ce1222b11d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -360,6 +360,9 @@ static irqreturn_t xgbe_isr(int irq, void *data)
}
}
+ if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
+ pdata->ext_stats.rx_buffer_unavailable++;
+
/* Restart the device on a Fatal Bus Error */
if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
schedule_work(&pdata->restart_work);
@@ -384,7 +387,8 @@ static irqreturn_t xgbe_isr(int irq, void *data)
/* Read Tx Timestamp to clear interrupt */
pdata->tx_tstamp =
hw_if->get_tx_tstamp(pdata);
- schedule_work(&pdata->tx_tstamp_work);
+ queue_work(pdata->dev_workqueue,
+ &pdata->tx_tstamp_work);
}
}
}
@@ -450,7 +454,7 @@ static void xgbe_service_timer(unsigned long data)
{
struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
- schedule_work(&pdata->service_work);
+ queue_work(pdata->dev_workqueue, &pdata->service_work);
mod_timer(&pdata->service_timer, jiffies + HZ);
}
@@ -891,7 +895,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
netif_tx_start_all_queues(netdev);
xgbe_start_timers(pdata);
- schedule_work(&pdata->service_work);
+ queue_work(pdata->dev_workqueue, &pdata->service_work);
DBGPR("<--xgbe_start\n");
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 59e090e95c0e..6040293db9c1 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -179,6 +179,7 @@ static const struct xgbe_stats xgbe_gstring_stats[] = {
XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
XGMAC_EXT_STAT("rx_split_header_packets", rx_split_header_packets),
+ XGMAC_EXT_STAT("rx_buffer_unavailable", rx_buffer_unavailable),
};
#define XGBE_STATS_COUNT ARRAY_SIZE(xgbe_gstring_stats)
@@ -187,8 +188,6 @@ static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
int i;
- DBGPR("-->%s\n", __func__);
-
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < XGBE_STATS_COUNT; i++) {
@@ -198,8 +197,6 @@ static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
break;
}
-
- DBGPR("<--%s\n", __func__);
}
static void xgbe_get_ethtool_stats(struct net_device *netdev,
@@ -209,23 +206,17 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev,
u8 *stat;
int i;
- DBGPR("-->%s\n", __func__);
-
pdata->hw_if.read_mmc_stats(pdata);
for (i = 0; i < XGBE_STATS_COUNT; i++) {
stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
*data++ = *(u64 *)stat;
}
-
- DBGPR("<--%s\n", __func__);
}
static int xgbe_get_sset_count(struct net_device *netdev, int stringset)
{
int ret;
- DBGPR("-->%s\n", __func__);
-
switch (stringset) {
case ETH_SS_STATS:
ret = XGBE_STATS_COUNT;
@@ -235,8 +226,6 @@ static int xgbe_get_sset_count(struct net_device *netdev, int stringset)
ret = -EOPNOTSUPP;
}
- DBGPR("<--%s\n", __func__);
-
return ret;
}
@@ -245,13 +234,9 @@ static void xgbe_get_pauseparam(struct net_device *netdev,
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- DBGPR("-->xgbe_get_pauseparam\n");
-
pause->autoneg = pdata->phy.pause_autoneg;
pause->tx_pause = pdata->phy.tx_pause;
pause->rx_pause = pdata->phy.rx_pause;
-
- DBGPR("<--xgbe_get_pauseparam\n");
}
static int xgbe_set_pauseparam(struct net_device *netdev,
@@ -260,13 +245,11 @@ static int xgbe_set_pauseparam(struct net_device *netdev,
struct xgbe_prv_data *pdata = netdev_priv(netdev);
int ret = 0;
- DBGPR("-->xgbe_set_pauseparam\n");
-
- DBGPR(" autoneg = %d, tx_pause = %d, rx_pause = %d\n",
- pause->autoneg, pause->tx_pause, pause->rx_pause);
-
- if (pause->autoneg && (pdata->phy.autoneg != AUTONEG_ENABLE))
+ if (pause->autoneg && (pdata->phy.autoneg != AUTONEG_ENABLE)) {
+ netdev_err(netdev,
+ "autoneg disabled, pause autoneg not avialable\n");
return -EINVAL;
+ }
pdata->phy.pause_autoneg = pause->autoneg;
pdata->phy.tx_pause = pause->tx_pause;
@@ -286,8 +269,6 @@ static int xgbe_set_pauseparam(struct net_device *netdev,
if (netif_running(netdev))
ret = pdata->phy_if.phy_config_aneg(pdata);
- DBGPR("<--xgbe_set_pauseparam\n");
-
return ret;
}
@@ -296,8 +277,6 @@ static int xgbe_get_settings(struct net_device *netdev,
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- DBGPR("-->xgbe_get_settings\n");
-
cmd->phy_address = pdata->phy.address;
cmd->supported = pdata->phy.supported;
@@ -311,8 +290,6 @@ static int xgbe_get_settings(struct net_device *netdev,
cmd->port = PORT_NONE;
cmd->transceiver = XCVR_INTERNAL;
- DBGPR("<--xgbe_get_settings\n");
-
return 0;
}
@@ -323,16 +300,20 @@ static int xgbe_set_settings(struct net_device *netdev,
u32 speed;
int ret;
- DBGPR("-->xgbe_set_settings\n");
-
speed = ethtool_cmd_speed(cmd);
- if (cmd->phy_address != pdata->phy.address)
+ if (cmd->phy_address != pdata->phy.address) {
+ netdev_err(netdev, "invalid phy address %hhu\n",
+ cmd->phy_address);
return -EINVAL;
+ }
if ((cmd->autoneg != AUTONEG_ENABLE) &&
- (cmd->autoneg != AUTONEG_DISABLE))
+ (cmd->autoneg != AUTONEG_DISABLE)) {
+ netdev_err(netdev, "unsupported autoneg %hhu\n",
+ cmd->autoneg);
return -EINVAL;
+ }
if (cmd->autoneg == AUTONEG_DISABLE) {
switch (speed) {
@@ -341,16 +322,27 @@ static int xgbe_set_settings(struct net_device *netdev,
case SPEED_1000:
break;
default:
+ netdev_err(netdev, "unsupported speed %u\n", speed);
return -EINVAL;
}
- if (cmd->duplex != DUPLEX_FULL)
+ if (cmd->duplex != DUPLEX_FULL) {
+ netdev_err(netdev, "unsupported duplex %hhu\n",
+ cmd->duplex);
return -EINVAL;
+ }
}
+ netif_dbg(pdata, link, netdev,
+ "requested advertisement %#x, phy supported %#x\n",
+ cmd->advertising, pdata->phy.supported);
+
cmd->advertising &= pdata->phy.supported;
- if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising)
+ if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising) {
+ netdev_err(netdev,
+ "unsupported requested advertisement\n");
return -EINVAL;
+ }
ret = 0;
pdata->phy.autoneg = cmd->autoneg;
@@ -366,8 +358,6 @@ static int xgbe_set_settings(struct net_device *netdev,
if (netif_running(netdev))
ret = pdata->phy_if.phy_config_aneg(pdata);
- DBGPR("<--xgbe_set_settings\n");
-
return ret;
}
@@ -385,7 +375,20 @@ static void xgbe_get_drvinfo(struct net_device *netdev,
XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
XGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
XGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
- drvinfo->n_stats = XGBE_STATS_COUNT;
+}
+
+static u32 xgbe_get_msglevel(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ return pdata->msg_enable;
+}
+
+static void xgbe_set_msglevel(struct net_device *netdev, u32 msglevel)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ pdata->msg_enable = msglevel;
}
static int xgbe_get_coalesce(struct net_device *netdev,
@@ -393,8 +396,6 @@ static int xgbe_get_coalesce(struct net_device *netdev,
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- DBGPR("-->xgbe_get_coalesce\n");
-
memset(ec, 0, sizeof(struct ethtool_coalesce));
ec->rx_coalesce_usecs = pdata->rx_usecs;
@@ -402,8 +403,6 @@ static int xgbe_get_coalesce(struct net_device *netdev,
ec->tx_max_coalesced_frames = pdata->tx_frames;
- DBGPR("<--xgbe_get_coalesce\n");
-
return 0;
}
@@ -415,8 +414,6 @@ static int xgbe_set_coalesce(struct net_device *netdev,
unsigned int rx_frames, rx_riwt, rx_usecs;
unsigned int tx_frames;
- DBGPR("-->xgbe_set_coalesce\n");
-
/* Check for not supported parameters */
if ((ec->rx_coalesce_usecs_irq) ||
(ec->rx_max_coalesced_frames_irq) ||
@@ -436,8 +433,10 @@ static int xgbe_set_coalesce(struct net_device *netdev,
(ec->rx_max_coalesced_frames_high) ||
(ec->tx_coalesce_usecs_high) ||
(ec->tx_max_coalesced_frames_high) ||
- (ec->rate_sample_interval))
+ (ec->rate_sample_interval)) {
+ netdev_err(netdev, "unsupported coalescing parameter\n");
return -EOPNOTSUPP;
+ }
rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
rx_usecs = ec->rx_coalesce_usecs;
@@ -449,13 +448,13 @@ static int xgbe_set_coalesce(struct net_device *netdev,
/* Check the bounds of values for Rx */
if (rx_riwt > XGMAC_MAX_DMA_RIWT) {
- netdev_alert(netdev, "rx-usec is limited to %d usecs\n",
- hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT));
+ netdev_err(netdev, "rx-usec is limited to %d usecs\n",
+ hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT));
return -EINVAL;
}
if (rx_frames > pdata->rx_desc_count) {
- netdev_alert(netdev, "rx-frames is limited to %d frames\n",
- pdata->rx_desc_count);
+ netdev_err(netdev, "rx-frames is limited to %d frames\n",
+ pdata->rx_desc_count);
return -EINVAL;
}
@@ -463,8 +462,8 @@ static int xgbe_set_coalesce(struct net_device *netdev,
/* Check the bounds of values for Tx */
if (tx_frames > pdata->tx_desc_count) {
- netdev_alert(netdev, "tx-frames is limited to %d frames\n",
- pdata->tx_desc_count);
+ netdev_err(netdev, "tx-frames is limited to %d frames\n",
+ pdata->tx_desc_count);
return -EINVAL;
}
@@ -476,8 +475,6 @@ static int xgbe_set_coalesce(struct net_device *netdev,
pdata->tx_frames = tx_frames;
hw_if->config_tx_coalesce(pdata);
- DBGPR("<--xgbe_set_coalesce\n");
-
return 0;
}
@@ -539,8 +536,10 @@ static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
struct xgbe_hw_if *hw_if = &pdata->hw_if;
unsigned int ret;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) {
+ netdev_err(netdev, "unsupported hash function\n");
return -EOPNOTSUPP;
+ }
if (indir) {
ret = hw_if->set_rss_lookup_table(pdata, indir);
@@ -594,6 +593,8 @@ static const struct ethtool_ops xgbe_ethtool_ops = {
.get_settings = xgbe_get_settings,
.set_settings = xgbe_set_settings,
.get_drvinfo = xgbe_get_drvinfo,
+ .get_msglevel = xgbe_get_msglevel,
+ .set_msglevel = xgbe_set_msglevel,
.get_link = ethtool_op_get_link,
.get_coalesce = xgbe_get_coalesce,
.set_coalesce = xgbe_set_coalesce,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index e83bd76abce6..7dd893331785 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -371,7 +371,7 @@ static int xgbe_probe(struct platform_device *pdev)
set_bit(XGBE_DOWN, &pdata->dev_state);
/* Check if we should use ACPI or DT */
- pdata->use_acpi = (!pdata->adev || acpi_disabled) ? 0 : 1;
+ pdata->use_acpi = dev->of_node ? 0 : 1;
phy_pdev = xgbe_get_phy_pdev(pdata);
if (!phy_pdev) {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 9088c3a35a20..446058081866 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -1115,8 +1115,7 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
unsigned int reg, link_aneg;
if (test_bit(XGBE_LINK_ERR, &pdata->dev_state)) {
- if (test_and_clear_bit(XGBE_LINK, &pdata->dev_state))
- netif_carrier_off(pdata->netdev);
+ netif_carrier_off(pdata->netdev);
pdata->phy.link = 0;
goto adjust_link;
@@ -1142,10 +1141,7 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
if (test_bit(XGBE_LINK_INIT, &pdata->dev_state))
clear_bit(XGBE_LINK_INIT, &pdata->dev_state);
- if (!test_bit(XGBE_LINK, &pdata->dev_state)) {
- set_bit(XGBE_LINK, &pdata->dev_state);
- netif_carrier_on(pdata->netdev);
- }
+ netif_carrier_on(pdata->netdev);
} else {
if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) {
xgbe_check_link_timeout(pdata);
@@ -1156,10 +1152,7 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
xgbe_phy_status_aneg(pdata);
- if (test_bit(XGBE_LINK, &pdata->dev_state)) {
- clear_bit(XGBE_LINK, &pdata->dev_state);
- netif_carrier_off(pdata->netdev);
- }
+ netif_carrier_off(pdata->netdev);
}
adjust_link:
@@ -1179,8 +1172,7 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
devm_free_irq(pdata->dev, pdata->an_irq, pdata);
pdata->phy.link = 0;
- if (test_and_clear_bit(XGBE_LINK, &pdata->dev_state))
- netif_carrier_off(pdata->netdev);
+ netif_carrier_off(pdata->netdev);
xgbe_phy_adjust_link(pdata);
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 8c9d01ef730d..e234b9970318 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -209,8 +209,6 @@
#define XGMAC_IOCTL_CONTEXT 2
#define XGBE_FIFO_MAX 81920
-#define XGBE_FIFO_SIZE_B(x) (x)
-#define XGBE_FIFO_SIZE_KB(x) (x * 1024)
#define XGBE_TC_MIN_QUANTUM 10
@@ -461,7 +459,6 @@ struct xgbe_channel {
enum xgbe_state {
XGBE_DOWN,
- XGBE_LINK,
XGBE_LINK_INIT,
XGBE_LINK_ERR,
};
@@ -483,20 +480,6 @@ enum xgbe_int_state {
XGMAC_INT_STATE_RESTORE,
};
-enum xgbe_mtl_fifo_size {
- XGMAC_MTL_FIFO_SIZE_256 = 0x00,
- XGMAC_MTL_FIFO_SIZE_512 = 0x01,
- XGMAC_MTL_FIFO_SIZE_1K = 0x03,
- XGMAC_MTL_FIFO_SIZE_2K = 0x07,
- XGMAC_MTL_FIFO_SIZE_4K = 0x0f,
- XGMAC_MTL_FIFO_SIZE_8K = 0x1f,
- XGMAC_MTL_FIFO_SIZE_16K = 0x3f,
- XGMAC_MTL_FIFO_SIZE_32K = 0x7f,
- XGMAC_MTL_FIFO_SIZE_64K = 0xff,
- XGMAC_MTL_FIFO_SIZE_128K = 0x1ff,
- XGMAC_MTL_FIFO_SIZE_256K = 0x3ff,
-};
-
enum xgbe_speed {
XGBE_SPEED_1000 = 0,
XGBE_SPEED_2500,
@@ -598,6 +581,7 @@ struct xgbe_mmc_stats {
struct xgbe_ext_stats {
u64 tx_tso_packets;
u64 rx_split_header_packets;
+ u64 rx_buffer_unavailable;
};
struct xgbe_hw_if {
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index c4bb8027b3fb..33850a0f7e82 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -107,7 +107,8 @@ static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
{
xgene_enet_ring_set_type(ring);
- if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0)
+ if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0 ||
+ xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH1)
xgene_enet_ring_set_recombbuf(ring);
xgene_enet_ring_init(ring);
@@ -460,6 +461,7 @@ static void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
{
+ struct device *dev = &pdata->pdev->dev;
u32 value, mc2;
u32 intf_ctl, rgmii;
u32 icm0, icm2;
@@ -489,7 +491,12 @@ static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
default:
ENET_INTERFACE_MODE2_SET(&mc2, 2);
intf_ctl |= ENET_GHD_MODE;
- CFG_TXCLK_MUXSEL0_SET(&rgmii, 4);
+
+ if (dev->of_node) {
+ CFG_TXCLK_MUXSEL0_SET(&rgmii, pdata->tx_delay);
+ CFG_RXCLK_MUXSEL0_SET(&rgmii, pdata->rx_delay);
+ }
+
xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value);
value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
xgene_enet_wr_csr(pdata, DEBUG_REG_ADDR, value);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index ff05bbcff26d..6dee73c3c1e1 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -144,6 +144,7 @@ enum xgene_enet_rm {
#define CFG_BYPASS_UNISEC_RX BIT(1)
#define CFG_CLE_BYPASS_EN0 BIT(31)
#define CFG_TXCLK_MUXSEL0_SET(dst, val) xgene_set_bits(dst, val, 29, 3)
+#define CFG_RXCLK_MUXSEL0_SET(dst, val) xgene_set_bits(dst, val, 26, 3)
#define CFG_CLE_IP_PROTOCOL0_SET(dst, val) xgene_set_bits(dst, val, 16, 2)
#define CFG_CLE_DSTQID0_SET(dst, val) xgene_set_bits(dst, val, 0, 12)
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index e47298faf78d..ce1068771b32 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1118,6 +1118,47 @@ static int xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pda
return ret;
}
+static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
+{
+ struct device *dev = &pdata->pdev->dev;
+ int delay, ret;
+
+ ret = of_property_read_u32(dev->of_node, "tx-delay", &delay);
+ if (ret) {
+ pdata->tx_delay = 4;
+ return 0;
+ }
+
+ if (delay < 0 || delay > 7) {
+ dev_err(dev, "Invalid tx-delay specified\n");
+ return -EINVAL;
+ }
+
+ pdata->tx_delay = delay;
+
+ return 0;
+}
+
+static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
+{
+ struct device *dev = &pdata->pdev->dev;
+ int delay, ret;
+
+ ret = of_property_read_u32(dev->of_node, "rx-delay", &delay);
+ if (ret) {
+ pdata->rx_delay = 2;
+ return 0;
+ }
+
+ if (delay < 0 || delay > 7) {
+ dev_err(dev, "Invalid rx-delay specified\n");
+ return -EINVAL;
+ }
+
+ pdata->rx_delay = delay;
+
+ return 0;
+}
static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
@@ -1194,6 +1235,14 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
return -ENODEV;
}
+ ret = xgene_get_tx_delay(pdata);
+ if (ret)
+ return ret;
+
+ ret = xgene_get_rx_delay(pdata);
+ if (ret)
+ return ret;
+
ret = platform_get_irq(pdev, 0);
if (ret <= 0) {
dev_err(dev, "Unable to get ENET Rx IRQ\n");
@@ -1305,10 +1354,17 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
pdata->ring_num = START_RING_NUM_0;
break;
case 1:
- pdata->cpu_bufnum = START_CPU_BUFNUM_1;
- pdata->eth_bufnum = START_ETH_BUFNUM_1;
- pdata->bp_bufnum = START_BP_BUFNUM_1;
- pdata->ring_num = START_RING_NUM_1;
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+ pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
+ pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
+ pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
+ pdata->ring_num = XG_START_RING_NUM_1;
+ } else {
+ pdata->cpu_bufnum = START_CPU_BUFNUM_1;
+ pdata->eth_bufnum = START_ETH_BUFNUM_1;
+ pdata->bp_bufnum = START_BP_BUFNUM_1;
+ pdata->ring_num = START_RING_NUM_1;
+ }
break;
default:
break;
@@ -1478,6 +1534,7 @@ static const struct acpi_device_id xgene_enet_acpi_match[] = {
{ "APMC0D05", XGENE_ENET1},
{ "APMC0D30", XGENE_ENET1},
{ "APMC0D31", XGENE_ENET1},
+ { "APMC0D3F", XGENE_ENET1},
{ "APMC0D26", XGENE_ENET2},
{ "APMC0D25", XGENE_ENET2},
{ }
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 50f92c39ed2a..a6e56b88c0a0 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -56,6 +56,11 @@
#define START_BP_BUFNUM_1 0x2A
#define START_RING_NUM_1 264
+#define XG_START_CPU_BUFNUM_1 12
+#define XG_START_ETH_BUFNUM_1 2
+#define XG_START_BP_BUFNUM_1 0x22
+#define XG_START_RING_NUM_1 264
+
#define X2_START_CPU_BUFNUM_0 0
#define X2_START_ETH_BUFNUM_0 0
#define X2_START_BP_BUFNUM_0 0x20
@@ -179,6 +184,8 @@ struct xgene_enet_pdata {
u8 bp_bufnum;
u16 ring_num;
u32 mss;
+ u8 tx_delay;
+ u8 rx_delay;
};
struct xgene_indirect_ctl {
diff --git a/drivers/net/ethernet/apple/Kconfig b/drivers/net/ethernet/apple/Kconfig
index d19a41b0c6d2..31071297896c 100644
--- a/drivers/net/ethernet/apple/Kconfig
+++ b/drivers/net/ethernet/apple/Kconfig
@@ -51,7 +51,7 @@ config BMAC
will be called bmac.
config MACMACE
- bool "Macintosh (AV) onboard MACE ethernet"
+ tristate "Macintosh (AV) onboard MACE ethernet"
depends on MAC
select CRC32
---help---
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
index 48694c239d5c..872b7abb0196 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
@@ -233,10 +233,6 @@ static void atl1c_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->n_stats = 0;
- drvinfo->testinfo_len = 0;
- drvinfo->regdump_len = atl1c_get_regs_len(netdev);
- drvinfo->eedump_len = atl1c_get_eeprom_len(netdev);
}
static void atl1c_get_wol(struct net_device *netdev,
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
index 1be072f4afc2..8e3dbd4d9f79 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
@@ -316,10 +316,6 @@ static void atl1e_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->n_stats = 0;
- drvinfo->testinfo_len = 0;
- drvinfo->regdump_len = atl1e_get_regs_len(netdev);
- drvinfo->eedump_len = atl1e_get_eeprom_len(netdev);
}
static void atl1e_get_wol(struct net_device *netdev,
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index eca1d113fee1..529bca718334 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -3388,7 +3388,6 @@ static void atl1_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->eedump_len = ATL1_EEDUMP_LEN;
}
static void atl1_get_wol(struct net_device *netdev,
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 46a535318c7a..8f76f4558a88 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -2030,10 +2030,6 @@ static void atl2_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->n_stats = 0;
- drvinfo->testinfo_len = 0;
- drvinfo->regdump_len = atl2_get_regs_len(netdev);
- drvinfo->eedump_len = atl2_get_eeprom_len(netdev);
}
static void atl2_get_wol(struct net_device *netdev,
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index e930aa9a3cfb..67a7d520d9f5 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -170,4 +170,23 @@ config SYSTEMPORT
Broadcom BCM7xxx Set Top Box family chipset using an internal
Ethernet switch.
+config BNXT
+ tristate "Broadcom NetXtreme-C/E support"
+ depends on PCI
+ select FW_LOADER
+ select LIBCRC32C
+ ---help---
+ This driver supports Broadcom NetXtreme-C/E 10/25/40/50 gigabit
+ Ethernet cards. To compile this driver as a module, choose M here:
+ the module will be called bnxt_en. This is recommended.
+
+config BNXT_SRIOV
+ bool "Broadcom NetXtreme-C/E SR-IOV support"
+ depends on BNXT && PCI_IOV
+ default y
+ ---help---
+ This configuration parameter enables Single Root Input Output
+ Virtualization support in the NetXtreme-C/E products. This
+ allows for virtual function acceleration in virtual environments.
+
endif # NET_VENDOR_BROADCOM
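
The two new options depend on PCI (and, for SR-IOV, on PCI_IOV) and sit under the existing NET_VENDOR_BROADCOM menu. As an illustration only (not part of the patch), a .config fragment that builds the new driver as a module with SR-IOV support might look like:

CONFIG_PCI=y
CONFIG_PCI_IOV=y
CONFIG_NET_VENDOR_BROADCOM=y
CONFIG_BNXT=m
CONFIG_BNXT_SRIOV=y

With CONFIG_BNXT=m, the new bnxt/ Makefile below links bnxt.o, bnxt_sriov.o and bnxt_ethtool.o into a single bnxt_en.ko module; FW_LOADER and LIBCRC32C are pulled in automatically via the select statements.
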
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index e2a958a657e0..00584d78b3e0 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
obj-$(CONFIG_TIGON3) += tg3.o
obj-$(CONFIG_BGMAC) += bgmac.o
obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
+obj-$(CONFIG_BNXT) += bnxt/
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 4183c2abeeeb..8b1929e9f698 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1333,7 +1333,6 @@ static void bcm_enet_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->version));
strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
- drvinfo->n_stats = BCM_ENET_STATS_LEN;
}
static int bcm_enet_get_sset_count(struct net_device *netdev,
@@ -2602,7 +2601,6 @@ static void bcm_enetsw_get_drvinfo(struct net_device *netdev,
strncpy(drvinfo->version, bcm_enet_driver_version, 32);
strncpy(drvinfo->fw_version, "N/A", 32);
strncpy(drvinfo->bus_info, "bcm63xx", 32);
- drvinfo->n_stats = BCM_ENETSW_STATS_LEN;
}
static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index f1b5364f3521..858106352ce9 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -287,7 +287,6 @@ static void bcm_sysport_get_drvinfo(struct net_device *dev,
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
strlcpy(info->version, "0.1", sizeof(info->version));
strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
- info->n_stats = BCM_SYSPORT_STATS_LEN;
}
static u32 bcm_sysport_get_msglvl(struct net_device *dev)
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 2b66ef3d8217..8fc3f3c137f8 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -813,6 +813,46 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
}
static void
+bnx2_free_stats_blk(struct net_device *dev)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+
+ if (bp->status_blk) {
+ dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
+ bp->status_blk,
+ bp->status_blk_mapping);
+ bp->status_blk = NULL;
+ bp->stats_blk = NULL;
+ }
+}
+
+static int
+bnx2_alloc_stats_blk(struct net_device *dev)
+{
+ int status_blk_size;
+ void *status_blk;
+ struct bnx2 *bp = netdev_priv(dev);
+
+ /* Combine status and statistics blocks into one allocation. */
+ status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
+ if (bp->flags & BNX2_FLAG_MSIX_CAP)
+ status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
+ BNX2_SBLK_MSIX_ALIGN_SIZE);
+ bp->status_stats_size = status_blk_size +
+ sizeof(struct statistics_block);
+ status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
+ &bp->status_blk_mapping, GFP_KERNEL);
+ if (status_blk == NULL)
+ return -ENOMEM;
+
+ bp->status_blk = status_blk;
+ bp->stats_blk = status_blk + status_blk_size;
+ bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
+
+ return 0;
+}
+
+static void
bnx2_free_mem(struct bnx2 *bp)
{
int i;
@@ -829,37 +869,19 @@ bnx2_free_mem(struct bnx2 *bp)
bp->ctx_blk[i] = NULL;
}
}
- if (bnapi->status_blk.msi) {
- dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
- bnapi->status_blk.msi,
- bp->status_blk_mapping);
+
+ if (bnapi->status_blk.msi)
bnapi->status_blk.msi = NULL;
- bp->stats_blk = NULL;
- }
}
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
- int i, status_blk_size, err;
+ int i, err;
struct bnx2_napi *bnapi;
- void *status_blk;
-
- /* Combine status and statistics blocks into one allocation. */
- status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
- if (bp->flags & BNX2_FLAG_MSIX_CAP)
- status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
- BNX2_SBLK_MSIX_ALIGN_SIZE);
- bp->status_stats_size = status_blk_size +
- sizeof(struct statistics_block);
-
- status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
- &bp->status_blk_mapping, GFP_KERNEL);
- if (status_blk == NULL)
- goto alloc_mem_err;
bnapi = &bp->bnx2_napi[0];
- bnapi->status_blk.msi = status_blk;
+ bnapi->status_blk.msi = bp->status_blk;
bnapi->hw_tx_cons_ptr =
&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
bnapi->hw_rx_cons_ptr =
@@ -870,7 +892,7 @@ bnx2_alloc_mem(struct bnx2 *bp)
bnapi = &bp->bnx2_napi[i];
- sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
+ sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
bnapi->status_blk.msix = sblk;
bnapi->hw_tx_cons_ptr =
&sblk->status_tx_quick_consumer_index;
@@ -880,10 +902,6 @@ bnx2_alloc_mem(struct bnx2 *bp)
}
}
- bp->stats_blk = status_blk + status_blk_size;
-
- bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
-
if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
if (bp->ctx_pages == 0)
@@ -8330,6 +8348,11 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->phy_addr = 1;
+ /* allocate stats_blk */
+ rc = bnx2_alloc_stats_blk(dev);
+ if (rc)
+ goto err_out_unmap;
+
/* Disable WOL support if we are running on a SERDES chip. */
if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
bnx2_get_5709_media(bp);
@@ -8453,6 +8476,8 @@ err_out_disable:
pci_disable_device(pdev);
err_out:
+ kfree(bp->temp_stats_blk);
+
return rc;
}
@@ -8586,6 +8611,7 @@ error:
pci_release_regions(pdev);
pci_disable_device(pdev);
err_free:
+ bnx2_free_stats_blk(dev);
free_netdev(dev);
return rc;
}
@@ -8603,6 +8629,7 @@ bnx2_remove_one(struct pci_dev *pdev)
pci_iounmap(bp->pdev, bp->regview);
+ bnx2_free_stats_blk(dev);
kfree(bp->temp_stats_blk);
if (bp->flags & BNX2_FLAG_AER_ENABLED) {
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index f92f76c44756..380234d72b95 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -6928,6 +6928,7 @@ struct bnx2 {
dma_addr_t status_blk_mapping;
+ void *status_blk;
struct statistics_block *stats_blk;
struct statistics_block *temp_stats_blk;
dma_addr_t stats_blk_mapping;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index be628bd9fb18..d84efcd34fac 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1090,10 +1090,6 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
bnx2x_fill_fw_str(bp, info->fw_version, sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
- info->n_stats = BNX2X_NUM_STATS;
- info->testinfo_len = BNX2X_NUM_TESTS(bp);
- info->eedump_len = bp->common.flash_size;
- info->regdump_len = bnx2x_get_regs_len(dev);
}
static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
new file mode 100644
index 000000000000..97e78e217928
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_BNXT) += bnxt_en.o
+
+bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
new file mode 100644
index 000000000000..6c2e0c622831
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -0,0 +1,5728 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+
+#include <linux/stringify.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <asm/page.h>
+#include <linux/time.h>
+#include <linux/mii.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
+#include <net/vxlan.h>
+#endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#include <net/busy_poll.h>
+#endif
+#include <linux/workqueue.h>
+#include <linux/prefetch.h>
+#include <linux/cache.h>
+#include <linux/log2.h>
+#include <linux/aer.h>
+#include <linux/bitmap.h>
+#include <linux/cpu_rmap.h>
+
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_sriov.h"
+#include "bnxt_ethtool.h"
+
+#define BNXT_TX_TIMEOUT (5 * HZ)
+
+static const char version[] =
+ "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
+#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
+#define BNXT_RX_COPY_THRESH 256
+
+#define BNXT_TX_PUSH_THRESH 92
+
+enum board_idx {
+ BCM57302,
+ BCM57304,
+ BCM57404,
+ BCM57406,
+ BCM57304_VF,
+ BCM57404_VF,
+};
+
+/* indexed by enum above */
+static const struct {
+ char *name;
+} board_info[] = {
+ { "Broadcom BCM57302 NetXtreme-C Single-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ { "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ { "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
+ { "Broadcom BCM57406 NetXtreme-E Dual-port 10Gb Ethernet" },
+ { "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
+ { "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
+};
+
+static const struct pci_device_id bnxt_pci_tbl[] = {
+ { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
+ { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
+ { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
+ { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
+#ifdef CONFIG_BNXT_SRIOV
+ { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF },
+#endif
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
+
+static const u16 bnxt_vf_req_snif[] = {
+ HWRM_FUNC_CFG,
+ HWRM_PORT_PHY_QCFG,
+ HWRM_CFA_L2_FILTER_ALLOC,
+};
+
+static bool bnxt_vf_pciid(enum board_idx idx)
+{
+ return (idx == BCM57304_VF || idx == BCM57404_VF);
+}
+
+#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
+#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
+#define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
+
+#define BNXT_CP_DB_REARM(db, raw_cons) \
+ writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)
+
+#define BNXT_CP_DB(db, raw_cons) \
+ writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)
+
+#define BNXT_CP_DB_IRQ_DIS(db) \
+ writel(DB_CP_IRQ_DIS_FLAGS, db)
+
+static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
+{
+ /* Tell compiler to fetch tx indices from memory. */
+ barrier();
+
+ return bp->tx_ring_size -
+ ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
+}
+
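+/* TX length hint lookup table, indexed by the packet length in 512-byte
+ * units (see the length >>= 9 in bnxt_start_xmit()).
+ */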
+static const u16 bnxt_lhint_arr[] = {
+ TX_BD_FLAGS_LHINT_512_AND_SMALLER,
+ TX_BD_FLAGS_LHINT_512_TO_1023,
+ TX_BD_FLAGS_LHINT_1024_TO_2047,
+ TX_BD_FLAGS_LHINT_1024_TO_2047,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+};
+
+static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct tx_bd *txbd;
+ struct tx_bd_ext *txbd1;
+ struct netdev_queue *txq;
+ int i;
+ dma_addr_t mapping;
+ unsigned int length, pad = 0;
+ u32 len, free_size, vlan_tag_flags, cfa_action, flags;
+ u16 prod, last_frag;
+ struct pci_dev *pdev = bp->pdev;
+ struct bnxt_napi *bnapi;
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_sw_tx_bd *tx_buf;
+
+ i = skb_get_queue_mapping(skb);
+ if (unlikely(i >= bp->tx_nr_rings)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ bnapi = bp->bnapi[i];
+ txr = &bnapi->tx_ring;
+ txq = netdev_get_tx_queue(dev, i);
+ prod = txr->tx_prod;
+
+ free_size = bnxt_tx_avail(bp, txr);
+ if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
+ netif_tx_stop_queue(txq);
+ return NETDEV_TX_BUSY;
+ }
+
+ length = skb->len;
+ len = skb_headlen(skb);
+ last_frag = skb_shinfo(skb)->nr_frags;
+
+ txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+ txbd->tx_bd_opaque = prod;
+
+ tx_buf = &txr->tx_buf_ring[prod];
+ tx_buf->skb = skb;
+ tx_buf->nr_frags = last_frag;
+
+ vlan_tag_flags = 0;
+ cfa_action = 0;
+ if (skb_vlan_tag_present(skb)) {
+ vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
+ skb_vlan_tag_get(skb);
+		/* Currently supports 802.1Q and 802.1AD VLAN offloads;
+		 * the QINQ1, QINQ2 and QINQ3 VLAN headers are deprecated.
+		 */
+ if (skb->vlan_proto == htons(ETH_P_8021Q))
+ vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
+ }
+
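+	/* If the TX ring is empty and the packet fits within the push
+	 * threshold, write the BDs and packet data directly through the
+	 * push doorbell instead of DMA-mapping the skb.
+	 */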
+ if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
+ struct tx_push_bd *push = txr->tx_push;
+ struct tx_bd *tx_push = &push->txbd1;
+ struct tx_bd_ext *tx_push1 = &push->txbd2;
+ void *pdata = tx_push1 + 1;
+ int j;
+
+ /* Set COAL_NOW to be ready quickly for the next push */
+ tx_push->tx_bd_len_flags_type =
+ cpu_to_le32((length << TX_BD_LEN_SHIFT) |
+ TX_BD_TYPE_LONG_TX_BD |
+ TX_BD_FLAGS_LHINT_512_AND_SMALLER |
+ TX_BD_FLAGS_COAL_NOW |
+ TX_BD_FLAGS_PACKET_END |
+ (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ tx_push1->tx_bd_hsize_lflags =
+ cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
+ else
+ tx_push1->tx_bd_hsize_lflags = 0;
+
+ tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
+ tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
+
+ skb_copy_from_linear_data(skb, pdata, len);
+ pdata += len;
+ for (j = 0; j < last_frag; j++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
+ void *fptr;
+
+ fptr = skb_frag_address_safe(frag);
+ if (!fptr)
+ goto normal_tx;
+
+ memcpy(pdata, fptr, skb_frag_size(frag));
+ pdata += skb_frag_size(frag);
+ }
+
+ memcpy(txbd, tx_push, sizeof(*txbd));
+ prod = NEXT_TX(prod);
+ txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+ memcpy(txbd, tx_push1, sizeof(*txbd));
+ prod = NEXT_TX(prod);
+ push->doorbell =
+ cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
+ txr->tx_prod = prod;
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ __iowrite64_copy(txr->tx_doorbell, push,
+ (length + sizeof(*push) + 8) / 8);
+
+ tx_buf->is_push = 1;
+
+ goto tx_done;
+ }
+
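+	/* Normal TX path: DMA-map the skb head and each fragment and
+	 * chain them with long TX BDs.
+	 */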
+normal_tx:
+ if (length < BNXT_MIN_PKT_SIZE) {
+ pad = BNXT_MIN_PKT_SIZE - length;
+ if (skb_pad(skb, pad)) {
+ /* SKB already freed. */
+ tx_buf->skb = NULL;
+ return NETDEV_TX_OK;
+ }
+ length = BNXT_MIN_PKT_SIZE;
+ }
+
+ mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
+ dev_kfree_skb_any(skb);
+ tx_buf->skb = NULL;
+ return NETDEV_TX_OK;
+ }
+
+ dma_unmap_addr_set(tx_buf, mapping, mapping);
+ flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
+ ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
+
+ txbd->tx_bd_haddr = cpu_to_le64(mapping);
+
+ prod = NEXT_TX(prod);
+ txbd1 = (struct tx_bd_ext *)
+ &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+ txbd1->tx_bd_hsize_lflags = 0;
+ if (skb_is_gso(skb)) {
+ u32 hdr_len;
+
+ if (skb->encapsulation)
+ hdr_len = skb_inner_network_offset(skb) +
+ skb_inner_network_header_len(skb) +
+ inner_tcp_hdrlen(skb);
+ else
+ hdr_len = skb_transport_offset(skb) +
+ tcp_hdrlen(skb);
+
+ txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
+ TX_BD_FLAGS_T_IPID |
+ (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
+ length = skb_shinfo(skb)->gso_size;
+ txbd1->tx_bd_mss = cpu_to_le32(length);
+ length += hdr_len;
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ txbd1->tx_bd_hsize_lflags =
+ cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
+ txbd1->tx_bd_mss = 0;
+ }
+
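+	/* Pick the length hint based on the packet length (MSS plus
+	 * headers for GSO) in 512-byte units.
+	 */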
+ length >>= 9;
+ flags |= bnxt_lhint_arr[length];
+ txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+
+ txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
+ txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
+ for (i = 0; i < last_frag; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ prod = NEXT_TX(prod);
+ txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+ len = skb_frag_size(frag);
+ mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
+ goto tx_dma_error;
+
+ tx_buf = &txr->tx_buf_ring[prod];
+ dma_unmap_addr_set(tx_buf, mapping, mapping);
+
+ txbd->tx_bd_haddr = cpu_to_le64(mapping);
+
+ flags = len << TX_BD_LEN_SHIFT;
+ txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+ }
+
+ flags &= ~TX_BD_LEN;
+ txbd->tx_bd_len_flags_type =
+ cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
+ TX_BD_FLAGS_PACKET_END);
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ /* Sync BD data before updating doorbell */
+ wmb();
+
+ prod = NEXT_TX(prod);
+ txr->tx_prod = prod;
+
+ writel(DB_KEY_TX | prod, txr->tx_doorbell);
+ writel(DB_KEY_TX | prod, txr->tx_doorbell);
+
+tx_done:
+
+ mmiowb();
+
+ if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
+ netif_tx_stop_queue(txq);
+
+ /* netif_tx_stop_queue() must be done before checking
+ * tx index in bnxt_tx_avail() below, because in
+ * bnxt_tx_int(), we update tx index before checking for
+ * netif_tx_queue_stopped().
+ */
+ smp_mb();
+ if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
+ netif_tx_wake_queue(txq);
+ }
+ return NETDEV_TX_OK;
+
+tx_dma_error:
+ last_frag = i;
+
+ /* start back at beginning and unmap skb */
+ prod = txr->tx_prod;
+ tx_buf = &txr->tx_buf_ring[prod];
+ tx_buf->skb = NULL;
+ dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+ prod = NEXT_TX(prod);
+
+ /* unmap remaining mapped pages */
+ for (i = 0; i < last_frag; i++) {
+ prod = NEXT_TX(prod);
+ tx_buf = &txr->tx_buf_ring[prod];
+ dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
+ PCI_DMA_TODEVICE);
+ }
+
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
+{
+ struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+ int index = bnapi->index;
+ struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index);
+ u16 cons = txr->tx_cons;
+ struct pci_dev *pdev = bp->pdev;
+ int i;
+ unsigned int tx_bytes = 0;
+
+ for (i = 0; i < nr_pkts; i++) {
+ struct bnxt_sw_tx_bd *tx_buf;
+ struct sk_buff *skb;
+ int j, last;
+
+ tx_buf = &txr->tx_buf_ring[cons];
+ cons = NEXT_TX(cons);
+ skb = tx_buf->skb;
+ tx_buf->skb = NULL;
+
+ if (tx_buf->is_push) {
+ tx_buf->is_push = 0;
+ goto next_tx_int;
+ }
+
+ dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+ last = tx_buf->nr_frags;
+
+ for (j = 0; j < last; j++) {
+ cons = NEXT_TX(cons);
+ tx_buf = &txr->tx_buf_ring[cons];
+ dma_unmap_page(
+ &pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_frag_size(&skb_shinfo(skb)->frags[j]),
+ PCI_DMA_TODEVICE);
+ }
+
+next_tx_int:
+ cons = NEXT_TX(cons);
+
+ tx_bytes += skb->len;
+ dev_kfree_skb_any(skb);
+ }
+
+ netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
+ txr->tx_cons = cons;
+
+ /* Need to make the tx_cons update visible to bnxt_start_xmit()
+ * before checking for netif_tx_queue_stopped(). Without the
+ * memory barrier, there is a small possibility that bnxt_start_xmit()
+ * will miss it and cause the queue to be stopped forever.
+ */
+ smp_mb();
+
+ if (unlikely(netif_tx_queue_stopped(txq)) &&
+ (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
+ __netif_tx_lock(txq, smp_processor_id());
+ if (netif_tx_queue_stopped(txq) &&
+ bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
+ txr->dev_state != BNXT_DEV_STATE_CLOSING)
+ netif_tx_wake_queue(txq);
+ __netif_tx_unlock(txq);
+ }
+}
+
+static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
+ gfp_t gfp)
+{
+ u8 *data;
+ struct pci_dev *pdev = bp->pdev;
+
+ data = kmalloc(bp->rx_buf_size, gfp);
+ if (!data)
+ return NULL;
+
+ *mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET,
+ bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
+
+ if (dma_mapping_error(&pdev->dev, *mapping)) {
+ kfree(data);
+ data = NULL;
+ }
+ return data;
+}
+
+static inline int bnxt_alloc_rx_data(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
+{
+ struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
+ u8 *data;
+ dma_addr_t mapping;
+
+ data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
+ if (!data)
+ return -ENOMEM;
+
+ rx_buf->data = data;
+ dma_unmap_addr_set(rx_buf, mapping, mapping);
+
+ rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+
+ return 0;
+}
+
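+/* Recycle the rx buffer at the consumer index back into the current
+ * producer slot, reusing its existing DMA mapping.
+ */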
+static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
+ u8 *data)
+{
+ u16 prod = rxr->rx_prod;
+ struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
+ struct rx_bd *cons_bd, *prod_bd;
+
+ prod_rx_buf = &rxr->rx_buf_ring[prod];
+ cons_rx_buf = &rxr->rx_buf_ring[cons];
+
+ prod_rx_buf->data = data;
+
+ dma_unmap_addr_set(prod_rx_buf, mapping,
+ dma_unmap_addr(cons_rx_buf, mapping));
+
+ prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
+
+ prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
+}
+
+static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
+{
+ u16 next, max = rxr->rx_agg_bmap_size;
+
+ next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
+ if (next >= max)
+ next = find_first_zero_bit(rxr->rx_agg_bmap, max);
+ return next;
+}
+
+static inline int bnxt_alloc_rx_page(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
+{
+ struct rx_bd *rxbd =
+ &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ struct bnxt_sw_rx_agg_bd *rx_agg_buf;
+ struct pci_dev *pdev = bp->pdev;
+ struct page *page;
+ dma_addr_t mapping;
+ u16 sw_prod = rxr->rx_sw_agg_prod;
+
+ page = alloc_page(gfp);
+ if (!page)
+ return -ENOMEM;
+
+ mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&pdev->dev, mapping)) {
+ __free_page(page);
+ return -EIO;
+ }
+
+ if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
+ sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
+
+ __set_bit(sw_prod, rxr->rx_agg_bmap);
+ rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
+ rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
+
+ rx_agg_buf->page = page;
+ rx_agg_buf->mapping = mapping;
+ rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+ rxbd->rx_bd_opaque = sw_prod;
+ return 0;
+}
+
+static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
+ u32 agg_bufs)
+{
+ struct bnxt *bp = bnapi->bp;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ u16 prod = rxr->rx_agg_prod;
+ u16 sw_prod = rxr->rx_sw_agg_prod;
+ u32 i;
+
+ for (i = 0; i < agg_bufs; i++) {
+ u16 cons;
+ struct rx_agg_cmp *agg;
+ struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
+ struct rx_bd *prod_bd;
+ struct page *page;
+
+ agg = (struct rx_agg_cmp *)
+ &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+ cons = agg->rx_agg_cmp_opaque;
+ __clear_bit(cons, rxr->rx_agg_bmap);
+
+ if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
+ sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
+
+ __set_bit(sw_prod, rxr->rx_agg_bmap);
+ prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
+ cons_rx_buf = &rxr->rx_agg_ring[cons];
+
+ /* It is possible for sw_prod to be equal to cons, so
+ * set cons_rx_buf->page to NULL first.
+ */
+ page = cons_rx_buf->page;
+ cons_rx_buf->page = NULL;
+ prod_rx_buf->page = page;
+
+ prod_rx_buf->mapping = cons_rx_buf->mapping;
+
+ prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+
+ prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
+ prod_bd->rx_bd_opaque = sw_prod;
+
+ prod = NEXT_RX_AGG(prod);
+ sw_prod = NEXT_RX_AGG(sw_prod);
+ cp_cons = NEXT_CMP(cp_cons);
+ }
+ rxr->rx_agg_prod = prod;
+ rxr->rx_sw_agg_prod = sw_prod;
+}
+
+static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr, u16 cons,
+ u16 prod, u8 *data, dma_addr_t dma_addr,
+ unsigned int len)
+{
+ int err;
+ struct sk_buff *skb;
+
+ err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
+ if (unlikely(err)) {
+ bnxt_reuse_rx_data(rxr, cons, data);
+ return NULL;
+ }
+
+ skb = build_skb(data, 0);
+ dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
+ PCI_DMA_FROMDEVICE);
+ if (!skb) {
+ kfree(data);
+ return NULL;
+ }
+
+ skb_reserve(skb, BNXT_RX_OFFSET);
+ skb_put(skb, len);
+ return skb;
+}
+
+static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
+ struct sk_buff *skb, u16 cp_cons,
+ u32 agg_bufs)
+{
+ struct pci_dev *pdev = bp->pdev;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ u16 prod = rxr->rx_agg_prod;
+ u32 i;
+
+ for (i = 0; i < agg_bufs; i++) {
+ u16 cons, frag_len;
+ struct rx_agg_cmp *agg;
+ struct bnxt_sw_rx_agg_bd *cons_rx_buf;
+ struct page *page;
+ dma_addr_t mapping;
+
+ agg = (struct rx_agg_cmp *)
+ &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+ cons = agg->rx_agg_cmp_opaque;
+ frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
+ RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
+
+ cons_rx_buf = &rxr->rx_agg_ring[cons];
+ skb_fill_page_desc(skb, i, cons_rx_buf->page, 0, frag_len);
+ __clear_bit(cons, rxr->rx_agg_bmap);
+
+ /* It is possible for bnxt_alloc_rx_page() to allocate
+ * a sw_prod index that equals the cons index, so we
+ * need to clear the cons entry now.
+ */
+ mapping = dma_unmap_addr(cons_rx_buf, mapping);
+ page = cons_rx_buf->page;
+ cons_rx_buf->page = NULL;
+
+ if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
+ struct skb_shared_info *shinfo;
+ unsigned int nr_frags;
+
+ shinfo = skb_shinfo(skb);
+ nr_frags = --shinfo->nr_frags;
+ __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
+
+ dev_kfree_skb(skb);
+
+ cons_rx_buf->page = page;
+
+			/* Update prod since some pages may already have
+			 * been allocated.
+			 */
+ rxr->rx_agg_prod = prod;
+ bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
+ return NULL;
+ }
+
+ dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+
+ skb->data_len += frag_len;
+ skb->len += frag_len;
+ skb->truesize += PAGE_SIZE;
+
+ prod = NEXT_RX_AGG(prod);
+ cp_cons = NEXT_CMP(cp_cons);
+ }
+ rxr->rx_agg_prod = prod;
+ return skb;
+}
+
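+/* Advance the raw completion index past the aggregation completions and
+ * check that the last one has been posted, i.e. all agg buffers for this
+ * packet are present in the completion ring.
+ */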
+static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ u8 agg_bufs, u32 *raw_cons)
+{
+ u16 last;
+ struct rx_agg_cmp *agg;
+
+ *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
+ last = RING_CMP(*raw_cons);
+ agg = (struct rx_agg_cmp *)
+ &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
+ return RX_AGG_CMP_VALID(agg, *raw_cons);
+}
+
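+/* Copy a small received packet into a freshly allocated skb so that the
+ * original rx buffer and its DMA mapping can be reused.
+ */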
+static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
+ unsigned int len,
+ dma_addr_t mapping)
+{
+ struct bnxt *bp = bnapi->bp;
+ struct pci_dev *pdev = bp->pdev;
+ struct sk_buff *skb;
+
+ skb = napi_alloc_skb(&bnapi->napi, len);
+ if (!skb)
+ return NULL;
+
+ dma_sync_single_for_cpu(&pdev->dev, mapping,
+ bp->rx_copy_thresh, PCI_DMA_FROMDEVICE);
+
+ memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET);
+
+ dma_sync_single_for_device(&pdev->dev, mapping,
+ bp->rx_copy_thresh,
+ PCI_DMA_FROMDEVICE);
+
+ skb_put(skb, len);
+ return skb;
+}
+
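+/* Handle a TPA_START completion: swap the rx buffer that received the
+ * start of the aggregation into the TPA slot for this agg_id, put the
+ * slot's spare buffer back on the rx ring, and record the length, hash
+ * and metadata from the completion.
+ */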
+static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ struct rx_tpa_start_cmp *tpa_start,
+ struct rx_tpa_start_cmp_ext *tpa_start1)
+{
+ u8 agg_id = TPA_START_AGG_ID(tpa_start);
+ u16 cons, prod;
+ struct bnxt_tpa_info *tpa_info;
+ struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
+ struct rx_bd *prod_bd;
+ dma_addr_t mapping;
+
+ cons = tpa_start->rx_tpa_start_cmp_opaque;
+ prod = rxr->rx_prod;
+ cons_rx_buf = &rxr->rx_buf_ring[cons];
+ prod_rx_buf = &rxr->rx_buf_ring[prod];
+ tpa_info = &rxr->rx_tpa[agg_id];
+
+ prod_rx_buf->data = tpa_info->data;
+
+ mapping = tpa_info->mapping;
+ dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
+
+ prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+
+ prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
+
+ tpa_info->data = cons_rx_buf->data;
+ cons_rx_buf->data = NULL;
+ tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping);
+
+ tpa_info->len =
+ le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
+ RX_TPA_START_CMP_LEN_SHIFT;
+ if (likely(TPA_START_HASH_VALID(tpa_start))) {
+ u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
+
+ tpa_info->hash_type = PKT_HASH_TYPE_L4;
+ tpa_info->gso_type = SKB_GSO_TCPV4;
+ /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
+ if (hash_type == 3)
+ tpa_info->gso_type = SKB_GSO_TCPV6;
+ tpa_info->rss_hash =
+ le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
+ } else {
+ tpa_info->hash_type = PKT_HASH_TYPE_NONE;
+ tpa_info->gso_type = 0;
+ if (netif_msg_rx_err(bp))
+ netdev_warn(bp->dev, "TPA packet without valid hash\n");
+ }
+ tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
+ tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
+
+ rxr->rx_prod = NEXT_RX(prod);
+ cons = NEXT_RX(cons);
+ cons_rx_buf = &rxr->rx_buf_ring[cons];
+
+ bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
+ rxr->rx_prod = NEXT_RX(rxr->rx_prod);
+ cons_rx_buf->data = NULL;
+}
+
+static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
+ u16 cp_cons, u32 agg_bufs)
+{
+ if (agg_bufs)
+ bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
+}
+
+#define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
+#define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
+
+static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info,
+ struct rx_tpa_end_cmp *tpa_end,
+ struct rx_tpa_end_cmp_ext *tpa_end1,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_INET
+ struct tcphdr *th;
+ int payload_off, tcp_opt_len = 0;
+ int len, nw_off;
+
+ NAPI_GRO_CB(skb)->count = TPA_END_TPA_SEGS(tpa_end);
+ skb_shinfo(skb)->gso_size =
+ le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
+ skb_shinfo(skb)->gso_type = tpa_info->gso_type;
+ payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
+ RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
+ RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
+ if (TPA_END_GRO_TS(tpa_end))
+ tcp_opt_len = 12;
+
+ if (tpa_info->gso_type == SKB_GSO_TCPV4) {
+ struct iphdr *iph;
+
+ nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
+ ETH_HLEN;
+ skb_set_network_header(skb, nw_off);
+ iph = ip_hdr(skb);
+ skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
+ len = skb->len - skb_transport_offset(skb);
+ th = tcp_hdr(skb);
+ th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
+ } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
+ struct ipv6hdr *iph;
+
+ nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
+ ETH_HLEN;
+ skb_set_network_header(skb, nw_off);
+ iph = ipv6_hdr(skb);
+ skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
+ len = skb->len - skb_transport_offset(skb);
+ th = tcp_hdr(skb);
+ th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
+ } else {
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+ tcp_gro_complete(skb);
+
+ if (nw_off) { /* tunnel */
+ struct udphdr *uh = NULL;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)skb->data;
+
+ if (iph->protocol == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ } else {
+ struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+
+ if (iph->nexthdr == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ }
+ if (uh) {
+ if (uh->check)
+ skb_shinfo(skb)->gso_type |=
+ SKB_GSO_UDP_TUNNEL_CSUM;
+ else
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+ }
+ }
+#endif
+ return skb;
+}
+
+static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
+ struct bnxt_napi *bnapi,
+ u32 *raw_cons,
+ struct rx_tpa_end_cmp *tpa_end,
+ struct rx_tpa_end_cmp_ext *tpa_end1,
+ bool *agg_event)
+{
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ u8 agg_id = TPA_END_AGG_ID(tpa_end);
+ u8 *data, agg_bufs;
+ u16 cp_cons = RING_CMP(*raw_cons);
+ unsigned int len;
+ struct bnxt_tpa_info *tpa_info;
+ dma_addr_t mapping;
+ struct sk_buff *skb;
+
+ tpa_info = &rxr->rx_tpa[agg_id];
+ data = tpa_info->data;
+ prefetch(data);
+ len = tpa_info->len;
+ mapping = tpa_info->mapping;
+
+ agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
+ RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
+
+ if (agg_bufs) {
+ if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
+ return ERR_PTR(-EBUSY);
+
+ *agg_event = true;
+ cp_cons = NEXT_CMP(cp_cons);
+ }
+
+ if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
+ bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+ netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
+ agg_bufs, (int)MAX_SKB_FRAGS);
+ return NULL;
+ }
+
+ if (len <= bp->rx_copy_thresh) {
+ skb = bnxt_copy_skb(bnapi, data, len, mapping);
+ if (!skb) {
+ bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+ return NULL;
+ }
+ } else {
+ u8 *new_data;
+ dma_addr_t new_mapping;
+
+ new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
+ if (!new_data) {
+ bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+ return NULL;
+ }
+
+ tpa_info->data = new_data;
+ tpa_info->mapping = new_mapping;
+
+ skb = build_skb(data, 0);
+ dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
+ PCI_DMA_FROMDEVICE);
+
+ if (!skb) {
+ kfree(data);
+ bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+ return NULL;
+ }
+ skb_reserve(skb, BNXT_RX_OFFSET);
+ skb_put(skb, len);
+ }
+
+ if (agg_bufs) {
+ skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
+ if (!skb) {
+ /* Page reuse already handled by bnxt_rx_pages(). */
+ return NULL;
+ }
+ }
+ skb->protocol = eth_type_trans(skb, bp->dev);
+
+ if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
+ skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
+
+ if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
+ netdev_features_t features = skb->dev->features;
+ u16 vlan_proto = tpa_info->metadata >>
+ RX_CMP_FLAGS2_METADATA_TPID_SFT;
+
+ if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ vlan_proto == ETH_P_8021Q) ||
+ ((features & NETIF_F_HW_VLAN_STAG_RX) &&
+ vlan_proto == ETH_P_8021AD)) {
+ __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
+ tpa_info->metadata &
+ RX_CMP_FLAGS2_METADATA_VID_MASK);
+ }
+ }
+
+ skb_checksum_none_assert(skb);
+ if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level =
+ (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
+ }
+
+ if (TPA_END_GRO(tpa_end))
+ skb = bnxt_gro_skb(tpa_info, tpa_end, tpa_end1, skb);
+
+ return skb;
+}
+
+/* returns the following:
+ * 1 - 1 packet successfully received
+ * 0 - successful TPA_START, packet not completed yet
+ * -EBUSY - completion ring does not have all the agg buffers yet
+ * -ENOMEM - packet aborted due to out of memory
+ * -EIO - packet aborted due to hw error indicated in BD
+ */
+static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
+ bool *agg_event)
+{
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ struct net_device *dev = bp->dev;
+ struct rx_cmp *rxcmp;
+ struct rx_cmp_ext *rxcmp1;
+ u32 tmp_raw_cons = *raw_cons;
+ u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
+ struct bnxt_sw_rx_bd *rx_buf;
+ unsigned int len;
+ u8 *data, agg_bufs, cmp_type;
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+ int rc = 0;
+
+ rxcmp = (struct rx_cmp *)
+ &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+ tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
+ cp_cons = RING_CMP(tmp_raw_cons);
+ rxcmp1 = (struct rx_cmp_ext *)
+ &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+ if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
+ return -EBUSY;
+
+ cmp_type = RX_CMP_TYPE(rxcmp);
+
+ prod = rxr->rx_prod;
+
+ if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
+ bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
+ (struct rx_tpa_start_cmp_ext *)rxcmp1);
+
+ goto next_rx_no_prod;
+
+ } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
+ skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
+ (struct rx_tpa_end_cmp *)rxcmp,
+ (struct rx_tpa_end_cmp_ext *)rxcmp1,
+ agg_event);
+
+ if (unlikely(IS_ERR(skb)))
+ return -EBUSY;
+
+ rc = -ENOMEM;
+ if (likely(skb)) {
+ skb_record_rx_queue(skb, bnapi->index);
+ skb_mark_napi_id(skb, &bnapi->napi);
+ if (bnxt_busy_polling(bnapi))
+ netif_receive_skb(skb);
+ else
+ napi_gro_receive(&bnapi->napi, skb);
+ rc = 1;
+ }
+ goto next_rx_no_prod;
+ }
+
+ cons = rxcmp->rx_cmp_opaque;
+ rx_buf = &rxr->rx_buf_ring[cons];
+ data = rx_buf->data;
+ prefetch(data);
+
+ agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
+ RX_CMP_AGG_BUFS_SHIFT;
+
+ if (agg_bufs) {
+ if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
+ return -EBUSY;
+
+ cp_cons = NEXT_CMP(cp_cons);
+ *agg_event = true;
+ }
+
+ rx_buf->data = NULL;
+ if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
+ bnxt_reuse_rx_data(rxr, cons, data);
+ if (agg_bufs)
+ bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
+
+ rc = -EIO;
+ goto next_rx;
+ }
+
+ len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
+ dma_addr = dma_unmap_addr(rx_buf, mapping);
+
+ if (len <= bp->rx_copy_thresh) {
+ skb = bnxt_copy_skb(bnapi, data, len, dma_addr);
+ bnxt_reuse_rx_data(rxr, cons, data);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto next_rx;
+ }
+ } else {
+ skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto next_rx;
+ }
+ }
+
+ if (agg_bufs) {
+ skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto next_rx;
+ }
+ }
+
+ if (RX_CMP_HASH_VALID(rxcmp)) {
+ u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
+ enum pkt_hash_types type = PKT_HASH_TYPE_L4;
+
+ /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
+ if (hash_type != 1 && hash_type != 3)
+ type = PKT_HASH_TYPE_L3;
+ skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ if (rxcmp1->rx_cmp_flags2 &
+ cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) {
+ netdev_features_t features = skb->dev->features;
+ u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
+ u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
+
+ if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ vlan_proto == ETH_P_8021Q) ||
+ ((features & NETIF_F_HW_VLAN_STAG_RX) &&
+ vlan_proto == ETH_P_8021AD))
+ __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
+ meta_data &
+ RX_CMP_FLAGS2_METADATA_VID_MASK);
+ }
+
+ skb_checksum_none_assert(skb);
+ if (RX_CMP_L4_CS_OK(rxcmp1)) {
+ if (dev->features & NETIF_F_RXCSUM) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = RX_CMP_ENCAP(rxcmp1);
+ }
+ } else {
+ if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS)
+ cpr->rx_l4_csum_errors++;
+ }
+
+ skb_record_rx_queue(skb, bnapi->index);
+ skb_mark_napi_id(skb, &bnapi->napi);
+ if (bnxt_busy_polling(bnapi))
+ netif_receive_skb(skb);
+ else
+ napi_gro_receive(&bnapi->napi, skb);
+ rc = 1;
+
+next_rx:
+ rxr->rx_prod = NEXT_RX(prod);
+
+next_rx_no_prod:
+ *raw_cons = tmp_raw_cons;
+
+ return rc;
+}
+
+static int bnxt_async_event_process(struct bnxt *bp,
+ struct hwrm_async_event_cmpl *cmpl)
+{
+ u16 event_id = le16_to_cpu(cmpl->event_id);
+
+	/* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
+ switch (event_id) {
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
+ set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+ break;
+ default:
+ netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
+ event_id);
+ break;
+ }
+ return 0;
+}
+
+static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
+{
+ u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
+ struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
+ struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
+ (struct hwrm_fwd_req_cmpl *)txcmp;
+
+ switch (cmpl_type) {
+ case CMPL_BASE_TYPE_HWRM_DONE:
+ seq_id = le16_to_cpu(h_cmpl->sequence_id);
+ if (seq_id == bp->hwrm_intr_seq_id)
+ bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
+ else
+ netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
+ break;
+
+ case CMPL_BASE_TYPE_HWRM_FWD_REQ:
+ vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
+
+ if ((vf_id < bp->pf.first_vf_id) ||
+ (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
+ netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
+ vf_id);
+ return -EINVAL;
+ }
+
+ set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
+ set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+ break;
+
+ case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
+ bnxt_async_event_process(bp,
+ (struct hwrm_async_event_cmpl *)txcmp);
+
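+		/* fall through */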
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static irqreturn_t bnxt_msix(int irq, void *dev_instance)
+{
+ struct bnxt_napi *bnapi = dev_instance;
+ struct bnxt *bp = bnapi->bp;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ u32 cons = RING_CMP(cpr->cp_raw_cons);
+
+ prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
+ napi_schedule(&bnapi->napi);
+ return IRQ_HANDLED;
+}
+
+static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+{
+ u32 raw_cons = cpr->cp_raw_cons;
+ u16 cons = RING_CMP(raw_cons);
+ struct tx_cmp *txcmp;
+
+ txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
+
+ return TX_CMP_VALID(txcmp, raw_cons);
+}
+
+#define CAG_LEGACY_INT_STATUS 0x2014
+
+static irqreturn_t bnxt_inta(int irq, void *dev_instance)
+{
+ struct bnxt_napi *bnapi = dev_instance;
+ struct bnxt *bp = bnapi->bp;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ u32 cons = RING_CMP(cpr->cp_raw_cons);
+ u32 int_status;
+
+ prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
+
+ if (!bnxt_has_work(bp, cpr)) {
+ int_status = readl(bp->bar0 + CAG_LEGACY_INT_STATUS);
+ /* return if erroneous interrupt */
+ if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
+ return IRQ_NONE;
+ }
+
+ /* disable ring IRQ */
+ BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);
+
+ /* Return here if interrupt is shared and is disabled. */
+ if (unlikely(atomic_read(&bp->intr_sem) != 0))
+ return IRQ_HANDLED;
+
+ napi_schedule(&bnapi->napi);
+ return IRQ_HANDLED;
+}
+
+static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
+{
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ u32 raw_cons = cpr->cp_raw_cons;
+ u32 cons;
+ int tx_pkts = 0;
+ int rx_pkts = 0;
+ bool rx_event = false;
+ bool agg_event = false;
+ struct tx_cmp *txcmp;
+
+ while (1) {
+ int rc;
+
+ cons = RING_CMP(raw_cons);
+ txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
+
+ if (!TX_CMP_VALID(txcmp, raw_cons))
+ break;
+
+ if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
+ tx_pkts++;
+			/* Return full budget so NAPI stays scheduled and
+			 * the remaining TX work is done in the next poll.
+			 */
+ if (unlikely(tx_pkts > bp->tx_wake_thresh))
+ rx_pkts = budget;
+ } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
+ rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
+ if (likely(rc >= 0))
+ rx_pkts += rc;
+ else if (rc == -EBUSY) /* partial completion */
+ break;
+ rx_event = true;
+ } else if (unlikely((TX_CMP_TYPE(txcmp) ==
+ CMPL_BASE_TYPE_HWRM_DONE) ||
+ (TX_CMP_TYPE(txcmp) ==
+ CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
+ (TX_CMP_TYPE(txcmp) ==
+ CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
+ bnxt_hwrm_handler(bp, txcmp);
+ }
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+
+ if (rx_pkts == budget)
+ break;
+ }
+
+ cpr->cp_raw_cons = raw_cons;
+ /* ACK completion ring before freeing tx ring and producing new
+ * buffers in rx/agg rings to prevent overflowing the completion
+ * ring.
+ */
+ BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+
+ if (tx_pkts)
+ bnxt_tx_int(bp, bnapi, tx_pkts);
+
+ if (rx_event) {
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+
+ writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+ writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+ if (agg_event) {
+ writel(DB_KEY_RX | rxr->rx_agg_prod,
+ rxr->rx_agg_doorbell);
+ writel(DB_KEY_RX | rxr->rx_agg_prod,
+ rxr->rx_agg_doorbell);
+ }
+ }
+ return rx_pkts;
+}
+
+static int bnxt_poll(struct napi_struct *napi, int budget)
+{
+ struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
+ struct bnxt *bp = bnapi->bp;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ int work_done = 0;
+
+ if (!bnxt_lock_napi(bnapi))
+ return budget;
+
+ while (1) {
+ work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
+
+ if (work_done >= budget)
+ break;
+
+ if (!bnxt_has_work(bp, cpr)) {
+ napi_complete(napi);
+ BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+ break;
+ }
+ }
+ mmiowb();
+ bnxt_unlock_napi(bnapi);
+ return work_done;
+}
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static int bnxt_busy_poll(struct napi_struct *napi)
+{
+ struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
+ struct bnxt *bp = bnapi->bp;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ int rx_work, budget = 4;
+
+ if (atomic_read(&bp->intr_sem) != 0)
+ return LL_FLUSH_FAILED;
+
+ if (!bnxt_lock_poll(bnapi))
+ return LL_FLUSH_BUSY;
+
+ rx_work = bnxt_poll_work(bp, bnapi, budget);
+
+ BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+
+ bnxt_unlock_poll(bnapi);
+ return rx_work;
+}
+#endif
+
+static void bnxt_free_tx_skbs(struct bnxt *bp)
+{
+ int i, max_idx;
+ struct pci_dev *pdev = bp->pdev;
+
+ if (!bp->bnapi)
+ return;
+
+ max_idx = bp->tx_nr_pages * TX_DESC_CNT;
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_tx_ring_info *txr;
+ int j;
+
+ if (!bnapi)
+ continue;
+
+ txr = &bnapi->tx_ring;
+ for (j = 0; j < max_idx;) {
+ struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
+ struct sk_buff *skb = tx_buf->skb;
+ int k, last;
+
+ if (!skb) {
+ j++;
+ continue;
+ }
+
+ tx_buf->skb = NULL;
+
+ if (tx_buf->is_push) {
+ dev_kfree_skb(skb);
+ j += 2;
+ continue;
+ }
+
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_headlen(skb),
+ PCI_DMA_TODEVICE);
+
+ last = tx_buf->nr_frags;
+ j += 2;
+ for (k = 0; k < last; k++, j = NEXT_TX(j)) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
+
+ tx_buf = &txr->tx_buf_ring[j];
+ dma_unmap_page(
+ &pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_frag_size(frag), PCI_DMA_TODEVICE);
+ }
+ dev_kfree_skb(skb);
+ }
+ netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
+ }
+}
+
+static void bnxt_free_rx_skbs(struct bnxt *bp)
+{
+ int i, max_idx, max_agg_idx;
+ struct pci_dev *pdev = bp->pdev;
+
+ if (!bp->bnapi)
+ return;
+
+ max_idx = bp->rx_nr_pages * RX_DESC_CNT;
+ max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr;
+ int j;
+
+ if (!bnapi)
+ continue;
+
+ rxr = &bnapi->rx_ring;
+
+ if (rxr->rx_tpa) {
+ for (j = 0; j < MAX_TPA; j++) {
+ struct bnxt_tpa_info *tpa_info =
+ &rxr->rx_tpa[j];
+ u8 *data = tpa_info->data;
+
+ if (!data)
+ continue;
+
+ dma_unmap_single(
+ &pdev->dev,
+ dma_unmap_addr(tpa_info, mapping),
+ bp->rx_buf_use_size,
+ PCI_DMA_FROMDEVICE);
+
+ tpa_info->data = NULL;
+
+ kfree(data);
+ }
+ }
+
+ for (j = 0; j < max_idx; j++) {
+ struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
+ u8 *data = rx_buf->data;
+
+ if (!data)
+ continue;
+
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ bp->rx_buf_use_size,
+ PCI_DMA_FROMDEVICE);
+
+ rx_buf->data = NULL;
+
+ kfree(data);
+ }
+
+ for (j = 0; j < max_agg_idx; j++) {
+ struct bnxt_sw_rx_agg_bd *rx_agg_buf =
+ &rxr->rx_agg_ring[j];
+ struct page *page = rx_agg_buf->page;
+
+ if (!page)
+ continue;
+
+ dma_unmap_page(&pdev->dev,
+ dma_unmap_addr(rx_agg_buf, mapping),
+ PAGE_SIZE, PCI_DMA_FROMDEVICE);
+
+ rx_agg_buf->page = NULL;
+ __clear_bit(j, rxr->rx_agg_bmap);
+
+ __free_page(page);
+ }
+ }
+}
+
+static void bnxt_free_skbs(struct bnxt *bp)
+{
+ bnxt_free_tx_skbs(bp);
+ bnxt_free_rx_skbs(bp);
+}
+
+static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
+{
+ struct pci_dev *pdev = bp->pdev;
+ int i;
+
+ for (i = 0; i < ring->nr_pages; i++) {
+ if (!ring->pg_arr[i])
+ continue;
+
+ dma_free_coherent(&pdev->dev, ring->page_size,
+ ring->pg_arr[i], ring->dma_arr[i]);
+
+ ring->pg_arr[i] = NULL;
+ }
+ if (ring->pg_tbl) {
+ dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
+ ring->pg_tbl, ring->pg_tbl_map);
+ ring->pg_tbl = NULL;
+ }
+ if (ring->vmem_size && *ring->vmem) {
+ vfree(*ring->vmem);
+ *ring->vmem = NULL;
+ }
+}
+
+static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
+{
+ int i;
+ struct pci_dev *pdev = bp->pdev;
+
+ if (ring->nr_pages > 1) {
+ ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
+ ring->nr_pages * 8,
+ &ring->pg_tbl_map,
+ GFP_KERNEL);
+ if (!ring->pg_tbl)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ring->nr_pages; i++) {
+ ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
+ ring->page_size,
+ &ring->dma_arr[i],
+ GFP_KERNEL);
+ if (!ring->pg_arr[i])
+ return -ENOMEM;
+
+ if (ring->nr_pages > 1)
+ ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
+ }
+
+ if (ring->vmem_size) {
+ *ring->vmem = vzalloc(ring->vmem_size);
+ if (!(*ring->vmem))
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void bnxt_free_rx_rings(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_ring_struct *ring;
+
+ if (!bnapi)
+ continue;
+
+ rxr = &bnapi->rx_ring;
+
+ kfree(rxr->rx_tpa);
+ rxr->rx_tpa = NULL;
+
+ kfree(rxr->rx_agg_bmap);
+ rxr->rx_agg_bmap = NULL;
+
+ ring = &rxr->rx_ring_struct;
+ bnxt_free_ring(bp, ring);
+
+ ring = &rxr->rx_agg_ring_struct;
+ bnxt_free_ring(bp, ring);
+ }
+}
+
+static int bnxt_alloc_rx_rings(struct bnxt *bp)
+{
+ int i, rc, agg_rings = 0, tpa_rings = 0;
+
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ agg_rings = 1;
+
+ if (bp->flags & BNXT_FLAG_TPA)
+ tpa_rings = 1;
+
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_ring_struct *ring;
+
+ if (!bnapi)
+ continue;
+
+ rxr = &bnapi->rx_ring;
+ ring = &rxr->rx_ring_struct;
+
+ rc = bnxt_alloc_ring(bp, ring);
+ if (rc)
+ return rc;
+
+ if (agg_rings) {
+ u16 mem_size;
+
+ ring = &rxr->rx_agg_ring_struct;
+ rc = bnxt_alloc_ring(bp, ring);
+ if (rc)
+ return rc;
+
+ rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
+ mem_size = rxr->rx_agg_bmap_size / 8;
+ rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
+ if (!rxr->rx_agg_bmap)
+ return -ENOMEM;
+
+ if (tpa_rings) {
+ rxr->rx_tpa = kcalloc(MAX_TPA,
+ sizeof(struct bnxt_tpa_info),
+ GFP_KERNEL);
+ if (!rxr->rx_tpa)
+ return -ENOMEM;
+ }
+ }
+ }
+ return 0;
+}
+
+static void bnxt_free_tx_rings(struct bnxt *bp)
+{
+ int i;
+ struct pci_dev *pdev = bp->pdev;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_ring_struct *ring;
+
+ if (!bnapi)
+ continue;
+
+ txr = &bnapi->tx_ring;
+
+ if (txr->tx_push) {
+ dma_free_coherent(&pdev->dev, bp->tx_push_size,
+ txr->tx_push, txr->tx_push_mapping);
+ txr->tx_push = NULL;
+ }
+
+ ring = &txr->tx_ring_struct;
+
+ bnxt_free_ring(bp, ring);
+ }
+}
+
+static int bnxt_alloc_tx_rings(struct bnxt *bp)
+{
+ int i, j, rc;
+ struct pci_dev *pdev = bp->pdev;
+
+ bp->tx_push_size = 0;
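+	/* Size the pre-allocated push buffer to hold the push header
+	 * (doorbell plus two BDs) and up to tx_push_thresh bytes of
+	 * packet data; disable TX push if that exceeds 128 bytes.
+	 */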
+ if (bp->tx_push_thresh) {
+ int push_size;
+
+ push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
+ bp->tx_push_thresh);
+
+ if (push_size > 128) {
+ push_size = 0;
+ bp->tx_push_thresh = 0;
+ }
+
+ bp->tx_push_size = push_size;
+ }
+
+ for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_ring_struct *ring;
+
+ if (!bnapi)
+ continue;
+
+ txr = &bnapi->tx_ring;
+ ring = &txr->tx_ring_struct;
+
+ rc = bnxt_alloc_ring(bp, ring);
+ if (rc)
+ return rc;
+
+ if (bp->tx_push_size) {
+ struct tx_bd *txbd;
+ dma_addr_t mapping;
+
+			/* One pre-allocated DMA buffer to back up the
+			 * TX push operation
+			 */
+ txr->tx_push = dma_alloc_coherent(&pdev->dev,
+ bp->tx_push_size,
+ &txr->tx_push_mapping,
+ GFP_KERNEL);
+
+ if (!txr->tx_push)
+ return -ENOMEM;
+
+ txbd = &txr->tx_push->txbd1;
+
+ mapping = txr->tx_push_mapping +
+ sizeof(struct tx_push_bd);
+ txbd->tx_bd_haddr = cpu_to_le64(mapping);
+
+ memset(txbd + 1, 0, sizeof(struct tx_bd_ext));
+ }
+ ring->queue_id = bp->q_info[j].queue_id;
+ if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
+ j++;
+ }
+ return 0;
+}
+
+static void bnxt_free_cp_rings(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_ring_struct *ring;
+
+ if (!bnapi)
+ continue;
+
+ cpr = &bnapi->cp_ring;
+ ring = &cpr->cp_ring_struct;
+
+ bnxt_free_ring(bp, ring);
+ }
+}
+
+static int bnxt_alloc_cp_rings(struct bnxt *bp)
+{
+ int i, rc;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_ring_struct *ring;
+
+ if (!bnapi)
+ continue;
+
+ cpr = &bnapi->cp_ring;
+ ring = &cpr->cp_ring_struct;
+
+ rc = bnxt_alloc_ring(bp, ring);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+static void bnxt_init_ring_struct(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_ring_struct *ring;
+
+ if (!bnapi)
+ continue;
+
+ cpr = &bnapi->cp_ring;
+ ring = &cpr->cp_ring_struct;
+ ring->nr_pages = bp->cp_nr_pages;
+ ring->page_size = HW_CMPD_RING_SIZE;
+ ring->pg_arr = (void **)cpr->cp_desc_ring;
+ ring->dma_arr = cpr->cp_desc_mapping;
+ ring->vmem_size = 0;
+
+ rxr = &bnapi->rx_ring;
+ ring = &rxr->rx_ring_struct;
+ ring->nr_pages = bp->rx_nr_pages;
+ ring->page_size = HW_RXBD_RING_SIZE;
+ ring->pg_arr = (void **)rxr->rx_desc_ring;
+ ring->dma_arr = rxr->rx_desc_mapping;
+ ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
+ ring->vmem = (void **)&rxr->rx_buf_ring;
+
+ ring = &rxr->rx_agg_ring_struct;
+ ring->nr_pages = bp->rx_agg_nr_pages;
+ ring->page_size = HW_RXBD_RING_SIZE;
+ ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
+ ring->dma_arr = rxr->rx_agg_desc_mapping;
+ ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
+ ring->vmem = (void **)&rxr->rx_agg_ring;
+
+ txr = &bnapi->tx_ring;
+ ring = &txr->tx_ring_struct;
+ ring->nr_pages = bp->tx_nr_pages;
+ ring->page_size = HW_RXBD_RING_SIZE;
+ ring->pg_arr = (void **)txr->tx_desc_ring;
+ ring->dma_arr = txr->tx_desc_mapping;
+ ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
+ ring->vmem = (void **)&txr->tx_buf_ring;
+ }
+}
+
+static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
+{
+ int i;
+ u32 prod;
+ struct rx_bd **rx_buf_ring;
+
+ rx_buf_ring = (struct rx_bd **)ring->pg_arr;
+ for (i = 0, prod = 0; i < ring->nr_pages; i++) {
+ int j;
+ struct rx_bd *rxbd;
+
+ rxbd = rx_buf_ring[i];
+ if (!rxbd)
+ continue;
+
+ for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
+ rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
+ rxbd->rx_bd_opaque = prod;
+ }
+ }
+}
+
+static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+{
+ struct net_device *dev = bp->dev;
+ struct bnxt_napi *bnapi = bp->bnapi[ring_nr];
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_ring_struct *ring;
+ u32 prod, type;
+ int i;
+
+ if (!bnapi)
+ return -EINVAL;
+
+ type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
+ RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
+
+ if (NET_IP_ALIGN == 2)
+ type |= RX_BD_FLAGS_SOP;
+
+ rxr = &bnapi->rx_ring;
+ ring = &rxr->rx_ring_struct;
+ bnxt_init_rxbd_pages(ring, type);
+
+ prod = rxr->rx_prod;
+ for (i = 0; i < bp->rx_ring_size; i++) {
+ if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
+ netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
+ ring_nr, i, bp->rx_ring_size);
+ break;
+ }
+ prod = NEXT_RX(prod);
+ }
+ rxr->rx_prod = prod;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+
+ if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
+ return 0;
+
+ ring = &rxr->rx_agg_ring_struct;
+
+ type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) |
+ RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
+
+ bnxt_init_rxbd_pages(ring, type);
+
+ prod = rxr->rx_agg_prod;
+ for (i = 0; i < bp->rx_agg_ring_size; i++) {
+ if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
+			netdev_warn(dev, "init'ed rx agg ring %d with %d/%d pages only\n",
+				    ring_nr, i, bp->rx_agg_ring_size);
+ break;
+ }
+ prod = NEXT_RX_AGG(prod);
+ }
+ rxr->rx_agg_prod = prod;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+
+ if (bp->flags & BNXT_FLAG_TPA) {
+ if (rxr->rx_tpa) {
+ u8 *data;
+ dma_addr_t mapping;
+
+ for (i = 0; i < MAX_TPA; i++) {
+ data = __bnxt_alloc_rx_data(bp, &mapping,
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ rxr->rx_tpa[i].data = data;
+ rxr->rx_tpa[i].mapping = mapping;
+ }
+ } else {
+ netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static int bnxt_init_rx_rings(struct bnxt *bp)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ rc = bnxt_init_one_rx_ring(bp, i);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+static int bnxt_init_tx_rings(struct bnxt *bp)
+{
+ u16 i;
+
+ bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
+ MAX_SKB_FRAGS + 1);
+
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ }
+
+ return 0;
+}
+
+static void bnxt_free_ring_grps(struct bnxt *bp)
+{
+ kfree(bp->grp_info);
+ bp->grp_info = NULL;
+}
+
+static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
+{
+ int i;
+
+ if (irq_re_init) {
+ bp->grp_info = kcalloc(bp->cp_nr_rings,
+ sizeof(struct bnxt_ring_grp_info),
+ GFP_KERNEL);
+ if (!bp->grp_info)
+ return -ENOMEM;
+ }
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ if (irq_re_init)
+ bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
+ bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
+ bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
+ }
+ return 0;
+}
+
+static void bnxt_free_vnics(struct bnxt *bp)
+{
+ kfree(bp->vnic_info);
+ bp->vnic_info = NULL;
+ bp->nr_vnics = 0;
+}
+
+static int bnxt_alloc_vnics(struct bnxt *bp)
+{
+ int num_vnics = 1;
+
+#ifdef CONFIG_RFS_ACCEL
+ if (bp->flags & BNXT_FLAG_RFS)
+ num_vnics += bp->rx_nr_rings;
+#endif
+
+ bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
+ GFP_KERNEL);
+ if (!bp->vnic_info)
+ return -ENOMEM;
+
+ bp->nr_vnics = num_vnics;
+ return 0;
+}
+
+static void bnxt_init_vnics(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+ vnic->fw_vnic_id = INVALID_HW_RING_ID;
+ vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+ vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
+
+ if (bp->vnic_info[i].rss_hash_key) {
+ if (i == 0)
+ prandom_bytes(vnic->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+ else
+ memcpy(vnic->rss_hash_key,
+ bp->vnic_info[0].rss_hash_key,
+ HW_HASH_KEY_SIZE);
+ }
+ }
+}
+
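+/* Return the number of ring pages needed for ring_size descriptors,
+ * always including one extra page and rounded up to a power of two.
+ */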
+static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
+{
+ int pages;
+
+ pages = ring_size / desc_per_pg;
+
+ if (!pages)
+ return 1;
+
+ pages++;
+
+ while (pages & (pages - 1))
+ pages++;
+
+ return pages;
+}
+
+static void bnxt_set_tpa_flags(struct bnxt *bp)
+{
+ bp->flags &= ~BNXT_FLAG_TPA;
+ if (bp->dev->features & NETIF_F_LRO)
+ bp->flags |= BNXT_FLAG_LRO;
+ if ((bp->dev->features & NETIF_F_GRO) && (bp->pdev->revision > 0))
+ bp->flags |= BNXT_FLAG_GRO;
+}
+
+/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
+ * be set on entry.
+ */
+void bnxt_set_ring_params(struct bnxt *bp)
+{
+ u32 ring_size, rx_size, rx_space;
+ u32 agg_factor = 0, agg_ring_size = 0;
+
+ /* 8 for CRC and VLAN */
+ rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
+
+ rx_space = rx_size + NET_SKB_PAD +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
+ ring_size = bp->rx_ring_size;
+ bp->rx_agg_ring_size = 0;
+ bp->rx_agg_nr_pages = 0;
+
+ if (bp->flags & BNXT_FLAG_TPA)
+ agg_factor = 4;
+
+ bp->flags &= ~BNXT_FLAG_JUMBO;
+ if (rx_space > PAGE_SIZE) {
+ u32 jumbo_factor;
+
+ bp->flags |= BNXT_FLAG_JUMBO;
+ jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
+ if (jumbo_factor > agg_factor)
+ agg_factor = jumbo_factor;
+ }
+ agg_ring_size = ring_size * agg_factor;
+
+ if (agg_ring_size) {
+ bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
+ RX_DESC_CNT);
+ if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
+ u32 tmp = agg_ring_size;
+
+ bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
+ agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
+ netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
+ tmp, agg_ring_size);
+ }
+ bp->rx_agg_ring_size = agg_ring_size;
+ bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
+ rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
+ rx_space = rx_size + NET_SKB_PAD +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ }
+
+ bp->rx_buf_use_size = rx_size;
+ bp->rx_buf_size = rx_space;
+
+ bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
+ bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
+
+ ring_size = bp->tx_ring_size;
+ bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
+ bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
+
+ ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
+ bp->cp_ring_size = ring_size;
+
+ bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
+ if (bp->cp_nr_pages > MAX_CP_PAGES) {
+ bp->cp_nr_pages = MAX_CP_PAGES;
+ bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
+ netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
+ ring_size, bp->cp_ring_size);
+ }
+ bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
+ bp->cp_ring_mask = bp->cp_bit - 1;
+}
+
+static void bnxt_free_vnic_attributes(struct bnxt *bp)
+{
+ int i;
+ struct bnxt_vnic_info *vnic;
+ struct pci_dev *pdev = bp->pdev;
+
+ if (!bp->vnic_info)
+ return;
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+
+ kfree(vnic->fw_grp_ids);
+ vnic->fw_grp_ids = NULL;
+
+ kfree(vnic->uc_list);
+ vnic->uc_list = NULL;
+
+ if (vnic->mc_list) {
+ dma_free_coherent(&pdev->dev, vnic->mc_list_size,
+ vnic->mc_list, vnic->mc_list_mapping);
+ vnic->mc_list = NULL;
+ }
+
+ if (vnic->rss_table) {
+ dma_free_coherent(&pdev->dev, PAGE_SIZE,
+ vnic->rss_table,
+ vnic->rss_table_dma_addr);
+ vnic->rss_table = NULL;
+ }
+
+ vnic->rss_hash_key = NULL;
+ vnic->flags = 0;
+ }
+}
+
+static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
+{
+ int i, rc = 0, size;
+ struct bnxt_vnic_info *vnic;
+ struct pci_dev *pdev = bp->pdev;
+ int max_rings;
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+
+ if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
+ int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
+
+ if (mem_size > 0) {
+ vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
+ if (!vnic->uc_list) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ }
+ }
+
+ if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
+ vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
+ vnic->mc_list =
+ dma_alloc_coherent(&pdev->dev,
+ vnic->mc_list_size,
+ &vnic->mc_list_mapping,
+ GFP_KERNEL);
+ if (!vnic->mc_list) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ }
+
+ if (vnic->flags & BNXT_VNIC_RSS_FLAG)
+ max_rings = bp->rx_nr_rings;
+ else
+ max_rings = 1;
+
+ vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
+ if (!vnic->fw_grp_ids) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Allocate rss table and hash key */
+ vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ &vnic->rss_table_dma_addr,
+ GFP_KERNEL);
+ if (!vnic->rss_table) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
+
+ vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
+ vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
+ }
+ return 0;
+
+out:
+ return rc;
+}
+
+static void bnxt_free_hwrm_resources(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
+ bp->hwrm_cmd_resp_dma_addr);
+
+ bp->hwrm_cmd_resp_addr = NULL;
+ if (bp->hwrm_dbg_resp_addr) {
+ dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
+ bp->hwrm_dbg_resp_addr,
+ bp->hwrm_dbg_resp_dma_addr);
+
+ bp->hwrm_dbg_resp_addr = NULL;
+ }
+}
+
+static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+
+ bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ &bp->hwrm_cmd_resp_dma_addr,
+ GFP_KERNEL);
+ if (!bp->hwrm_cmd_resp_addr)
+ return -ENOMEM;
+ bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
+ HWRM_DBG_REG_BUF_SIZE,
+ &bp->hwrm_dbg_resp_dma_addr,
+ GFP_KERNEL);
+ if (!bp->hwrm_dbg_resp_addr)
+		netdev_warn(bp->dev, "failed to allocate debug register DMA memory\n");
+
+ return 0;
+}
+
+static void bnxt_free_stats(struct bnxt *bp)
+{
+ u32 size, i;
+ struct pci_dev *pdev = bp->pdev;
+
+ if (!bp->bnapi)
+ return;
+
+ size = sizeof(struct ctx_hw_stats);
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+ if (cpr->hw_stats) {
+ dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
+ cpr->hw_stats_map);
+ cpr->hw_stats = NULL;
+ }
+ }
+}
+
+static int bnxt_alloc_stats(struct bnxt *bp)
+{
+ u32 size, i;
+ struct pci_dev *pdev = bp->pdev;
+
+ size = sizeof(struct ctx_hw_stats);
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+ cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
+ &cpr->hw_stats_map,
+ GFP_KERNEL);
+ if (!cpr->hw_stats)
+ return -ENOMEM;
+
+ cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
+ }
+ return 0;
+}
+
+static void bnxt_clear_ring_indices(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_tx_ring_info *txr;
+
+ if (!bnapi)
+ continue;
+
+ cpr = &bnapi->cp_ring;
+ cpr->cp_raw_cons = 0;
+
+ txr = &bnapi->tx_ring;
+ txr->tx_prod = 0;
+ txr->tx_cons = 0;
+
+ rxr = &bnapi->rx_ring;
+ rxr->rx_prod = 0;
+ rxr->rx_agg_prod = 0;
+ rxr->rx_sw_agg_prod = 0;
+ }
+}
+
+static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
+{
+#ifdef CONFIG_RFS_ACCEL
+ int i;
+
+ /* We are under rtnl_lock, and all our NAPIs have been disabled.
+ * It is safe to delete the hash table.
+ */
+ for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+ struct hlist_head *head;
+ struct hlist_node *tmp;
+ struct bnxt_ntuple_filter *fltr;
+
+ head = &bp->ntp_fltr_hash_tbl[i];
+ hlist_for_each_entry_safe(fltr, tmp, head, hash) {
+ hlist_del(&fltr->hash);
+ kfree(fltr);
+ }
+ }
+ if (irq_reinit) {
+ kfree(bp->ntp_fltr_bmap);
+ bp->ntp_fltr_bmap = NULL;
+ }
+ bp->ntp_fltr_count = 0;
+#endif
+}
+
+static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
+{
+#ifdef CONFIG_RFS_ACCEL
+ int i, rc = 0;
+
+ if (!(bp->flags & BNXT_FLAG_RFS))
+ return 0;
+
+ for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
+
+ bp->ntp_fltr_count = 0;
+ bp->ntp_fltr_bmap = kzalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR) *
+ sizeof(long), GFP_KERNEL);
+
+ if (!bp->ntp_fltr_bmap)
+ rc = -ENOMEM;
+
+ return rc;
+#else
+ return 0;
+#endif
+}
+
+static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
+{
+ bnxt_free_vnic_attributes(bp);
+ bnxt_free_tx_rings(bp);
+ bnxt_free_rx_rings(bp);
+ bnxt_free_cp_rings(bp);
+ bnxt_free_ntp_fltrs(bp, irq_re_init);
+ if (irq_re_init) {
+ bnxt_free_stats(bp);
+ bnxt_free_ring_grps(bp);
+ bnxt_free_vnics(bp);
+ kfree(bp->bnapi);
+ bp->bnapi = NULL;
+ } else {
+ bnxt_clear_ring_indices(bp);
+ }
+}
+
+static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
+{
+ int i, rc, size, arr_size;
+ void *bnapi;
+
+ if (irq_re_init) {
+ /* Allocate bnapi mem pointer array and mem block for
+ * all queues
+ */
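+ /* Layout of the single allocation below (descriptive only, derived
+ * from the code that follows): the kzalloc'ed block starts with the
+ * bnxt_napi pointer array (arr_size bytes), followed by one
+ * L1-cache-aligned struct bnxt_napi per completion ring.  bp->bnapi
+ * is the pointer array; bp->bnapi[i] points into the trailing blocks.
+ */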
+ arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
+ bp->cp_nr_rings);
+ size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
+ bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
+ if (!bnapi)
+ return -ENOMEM;
+
+ bp->bnapi = bnapi;
+ bnapi += arr_size;
+ for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
+ bp->bnapi[i] = bnapi;
+ bp->bnapi[i]->index = i;
+ bp->bnapi[i]->bp = bp;
+ }
+
+ rc = bnxt_alloc_stats(bp);
+ if (rc)
+ goto alloc_mem_err;
+
+ rc = bnxt_alloc_ntp_fltrs(bp);
+ if (rc)
+ goto alloc_mem_err;
+
+ rc = bnxt_alloc_vnics(bp);
+ if (rc)
+ goto alloc_mem_err;
+ }
+
+ bnxt_init_ring_struct(bp);
+
+ rc = bnxt_alloc_rx_rings(bp);
+ if (rc)
+ goto alloc_mem_err;
+
+ rc = bnxt_alloc_tx_rings(bp);
+ if (rc)
+ goto alloc_mem_err;
+
+ rc = bnxt_alloc_cp_rings(bp);
+ if (rc)
+ goto alloc_mem_err;
+
+ bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
+ BNXT_VNIC_UCAST_FLAG;
+ rc = bnxt_alloc_vnic_attributes(bp);
+ if (rc)
+ goto alloc_mem_err;
+ return 0;
+
+alloc_mem_err:
+ bnxt_free_mem(bp, true);
+ return rc;
+}
+
+void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
+ u16 cmpl_ring, u16 target_id)
+{
+ struct hwrm_cmd_req_hdr *req = request;
+
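+ /* The header packs the request type together with the completion
+ * ring id (shifted by HWRM_CMPL_RING_SFT) in one word, and the
+ * target function id (shifted by HWRM_TARGET_FID_SFT) in another;
+ * the sequence id is OR'ed into target_id_seq_id later by
+ * _hwrm_send_message().
+ */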
+ req->cmpl_ring_req_type =
+ cpu_to_le32(req_type | (cmpl_ring << HWRM_CMPL_RING_SFT));
+ req->target_id_seq_id = cpu_to_le32(target_id << HWRM_TARGET_FID_SFT);
+ req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
+}
+
+int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
+{
+ int i, intr_process, rc;
+ struct hwrm_cmd_req_hdr *req = msg;
+ u32 *data = msg;
+ __le32 *resp_len, *valid;
+ u16 cp_ring_id, len = 0;
+ struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
+
+ req->target_id_seq_id |= cpu_to_le32(bp->hwrm_cmd_seq++);
+ memset(resp, 0, PAGE_SIZE);
+ cp_ring_id = (le32_to_cpu(req->cmpl_ring_req_type) &
+ HWRM_CMPL_RING_MASK) >>
+ HWRM_CMPL_RING_SFT;
+ intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
+
+ /* Write request msg to hwrm channel */
+ __iowrite32_copy(bp->bar0, data, msg_len / 4);
+
+ /* currently supports only one outstanding message */
+ if (intr_process)
+ bp->hwrm_intr_seq_id = le32_to_cpu(req->target_id_seq_id) &
+ HWRM_SEQ_ID_MASK;
+
+ /* Ring channel doorbell */
+ writel(1, bp->bar0 + 0x100);
+
+ i = 0;
+ if (intr_process) {
+ /* Wait until hwrm response cmpl interrupt is processed */
+ while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
+ i++ < timeout) {
+ usleep_range(600, 800);
+ }
+
+ if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
+ netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
+ req->cmpl_ring_req_type);
+ return -1;
+ }
+ } else {
+ /* Check if response len is updated */
+ resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
+ for (i = 0; i < timeout; i++) {
+ len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
+ HWRM_RESP_LEN_SFT;
+ if (len)
+ break;
+ usleep_range(600, 800);
+ }
+
+ if (i >= timeout) {
+ netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
+ timeout, req->cmpl_ring_req_type,
+ req->target_id_seq_id, *resp_len);
+ return -1;
+ }
+
+ /* Last word of resp contains valid bit */
+ valid = bp->hwrm_cmd_resp_addr + len - 4;
+ for (i = 0; i < timeout; i++) {
+ if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
+ break;
+ usleep_range(600, 800);
+ }
+
+ if (i >= timeout) {
+ netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
+ timeout, req->cmpl_ring_req_type,
+ req->target_id_seq_id, len, *valid);
+ return -1;
+ }
+ }
+
+ rc = le16_to_cpu(resp->error_code);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
+ le16_to_cpu(resp->req_type),
+ le16_to_cpu(resp->seq_id), rc);
+ return rc;
+ }
+ return 0;
+}
+
+int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
+{
+ int rc;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, msg, msg_len, timeout);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
+{
+ struct hwrm_func_drv_rgtr_input req = {0};
+ int i;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
+
+ req.enables =
+ cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
+ FUNC_DRV_RGTR_REQ_ENABLES_VER |
+ FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
+
+ /* TODO: the async event fwd bits are not defined yet and the firmware
+ * only checks whether the field is non-zero to enable async event
+ * forwarding
+ */
+ req.async_event_fwd[0] |= cpu_to_le32(1);
+ req.os_type = cpu_to_le16(1);
+ req.ver_maj = DRV_VER_MAJ;
+ req.ver_min = DRV_VER_MIN;
+ req.ver_upd = DRV_VER_UPD;
+
+ if (BNXT_PF(bp)) {
+ unsigned long vf_req_snif_bmap[4];
+ u32 *data = (u32 *)vf_req_snif_bmap;
+
+ memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
+ for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
+ __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);
+
+ for (i = 0; i < 8; i++) {
+ req.vf_req_fwd[i] = cpu_to_le32(*data);
+ data++;
+ }
+ req.enables |=
+ cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
+ }
+
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
+{
+ u32 rc = 0;
+ struct hwrm_tunnel_dst_port_free_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
+ req.tunnel_type = tunnel_type;
+
+ switch (tunnel_type) {
+ case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
+ req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
+ break;
+ case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
+ req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
+ break;
+ default:
+ break;
+ }
+
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
+ rc);
+ return rc;
+}
+
+static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
+ u8 tunnel_type)
+{
+ u32 rc = 0;
+ struct hwrm_tunnel_dst_port_alloc_input req = {0};
+ struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
+
+ req.tunnel_type = tunnel_type;
+ req.tunnel_dst_port_val = port;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
+ rc);
+ goto err_out;
+ }
+
+ if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN)
+ bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
+
+ else if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE)
+ bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
+err_out:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
+{
+ struct hwrm_cfa_l2_set_rx_mask_input req = {0};
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
+ req.dflt_vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+
+ req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
+ req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
+ req.mask = cpu_to_le32(vnic->rx_mask);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+#ifdef CONFIG_RFS_ACCEL
+static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr)
+{
+ struct hwrm_cfa_ntuple_filter_free_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
+ req.ntuple_filter_id = fltr->filter_id;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+#define BNXT_NTP_FLTR_FLAGS \
+ (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID)
+
+static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr)
+{
+ int rc = 0;
+ struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
+ struct hwrm_cfa_ntuple_filter_alloc_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ struct flow_keys *keys = &fltr->fkeys;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
+ req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[0];
+
+ req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
+
+ req.ethertype = htons(ETH_P_IP);
+ memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
+ req.ipaddr_type = 4;
+ req.ip_protocol = keys->basic.ip_proto;
+
+ req.src_ipaddr[0] = keys->addrs.v4addrs.src;
+ req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+ req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
+ req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+
+ req.src_port = keys->ports.src;
+ req.src_port_mask = cpu_to_be16(0xffff);
+ req.dst_port = keys->ports.dst;
+ req.dst_port_mask = cpu_to_be16(0xffff);
+
+ req.dst_vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ fltr->filter_id = resp->ntuple_filter_id;
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+#endif
+
+static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
+ u8 *mac_addr)
+{
+ u32 rc = 0;
+ struct hwrm_cfa_l2_filter_alloc_input req = {0};
+ struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
+ req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX |
+ CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
+ req.dst_vnic_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
+ req.enables =
+ cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
+ memcpy(req.l2_addr, mac_addr, ETH_ALEN);
+ req.l2_addr_mask[0] = 0xff;
+ req.l2_addr_mask[1] = 0xff;
+ req.l2_addr_mask[2] = 0xff;
+ req.l2_addr_mask[3] = 0xff;
+ req.l2_addr_mask[4] = 0xff;
+ req.l2_addr_mask[5] = 0xff;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
+ resp->l2_filter_id;
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
+{
+ u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
+ int rc = 0;
+
+ /* Any associated ntuple filters will also be cleared by firmware. */
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < num_of_vnics; i++) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+ for (j = 0; j < vnic->uc_filter_count; j++) {
+ struct hwrm_cfa_l2_filter_free_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req,
+ HWRM_CFA_L2_FILTER_FREE, -1, -1);
+
+ req.l2_filter_id = vnic->fw_l2_filter_id[j];
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ }
+ vnic->uc_filter_count = 0;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ return rc;
+}
+
+static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
+{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+ struct hwrm_vnic_tpa_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
+
+ if (tpa_flags) {
+ u16 mss = bp->dev->mtu - 40;
+ u32 nsegs, n, segs = 0, flags;
+
+ flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
+ VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
+ VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
+ VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
+ VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
+ if (tpa_flags & BNXT_FLAG_GRO)
+ flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
+
+ req.flags = cpu_to_le32(flags);
+
+ req.enables =
+ cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
+ VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS);
+
+ /* The number of segs is in log2 units, and the first packet is not
+ * included as part of these units.
+ */
+ if (mss <= PAGE_SIZE) {
+ n = PAGE_SIZE / mss;
+ nsegs = (MAX_SKB_FRAGS - 1) * n;
+ } else {
+ n = mss / PAGE_SIZE;
+ if (mss & (PAGE_SIZE - 1))
+ n++;
+ nsegs = (MAX_SKB_FRAGS - n) / n;
+ }
+
+ segs = ilog2(nsegs);
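+ /* Illustrative example (assumed values, not from the spec): with a
+ * 1500-byte MTU (mss = 1460), 4K pages and MAX_SKB_FRAGS == 17,
+ * n = 4096 / 1460 = 2, nsegs = (17 - 1) * 2 = 32, so
+ * max_agg_segs = ilog2(32) = 5.
+ */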
+ req.max_agg_segs = cpu_to_le16(segs);
+ req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
+ }
+ req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
+{
+ u32 i, j, max_rings;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+ struct hwrm_vnic_rss_cfg_input req = {0};
+
+ if (vnic->fw_rss_cos_lb_ctx == INVALID_HW_RING_ID)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
+ if (set_rss) {
+ vnic->hash_type = BNXT_RSS_HASH_TYPE_FLAG_IPV4 |
+ BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 |
+ BNXT_RSS_HASH_TYPE_FLAG_IPV6 |
+ BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6;
+
+ req.hash_type = cpu_to_le32(vnic->hash_type);
+
+ if (vnic->flags & BNXT_VNIC_RSS_FLAG)
+ max_rings = bp->rx_nr_rings;
+ else
+ max_rings = 1;
+
+ /* Fill the RSS indirection table with ring group ids */
+ for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
+ if (j == max_rings)
+ j = 0;
+ vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
+ }
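+ /* e.g. with max_rings == 4 the indirection table simply repeats
+ * fw_grp_ids[0..3] (0, 1, 2, 3, 0, 1, ...) across all
+ * HW_HASH_INDEX_SIZE entries (illustrative, assuming 4 rx rings).
+ */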
+
+ req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
+ req.hash_key_tbl_addr =
+ cpu_to_le64(vnic->rss_hash_key_dma_addr);
+ }
+ req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
+{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+ struct hwrm_vnic_plcmodes_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
+ req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
+ VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
+ VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
+ req.enables =
+ cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
+ VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
+ /* thresholds not implemented in firmware yet */
+ req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
+ req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
+ req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id)
+{
+ struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
+ req.rss_cos_lb_ctx_id =
+ cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx);
+
+ hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+}
+
+static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+ if (vnic->fw_rss_cos_lb_ctx != INVALID_HW_RING_ID)
+ bnxt_hwrm_vnic_ctx_free_one(bp, i);
+ }
+ bp->rsscos_nr_ctxs = 0;
+}
+
+static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id)
+{
+ int rc;
+ struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
+ struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
+ bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
+ -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx =
+ le16_to_cpu(resp->rss_cos_lb_ctx_id);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ return rc;
+}
+
+static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
+{
+ int grp_idx = 0;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+ struct hwrm_vnic_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
+ /* Only RSS is supported for now.  TBD: COS & LB */
+ req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP |
+ VNIC_CFG_REQ_ENABLES_RSS_RULE);
+ req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
+ req.cos_rule = cpu_to_le16(0xffff);
+ if (vnic->flags & BNXT_VNIC_RSS_FLAG)
+ grp_idx = 0;
+ else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
+ grp_idx = vnic_id - 1;
+
+ req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+ req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
+
+ req.lb_rule = cpu_to_le16(0xffff);
+ req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
+ VLAN_HLEN);
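+ /* e.g. for a standard 1500-byte MTU this yields an mru of
+ * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522
+ * (illustrative arithmetic only).
+ */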
+
+ if (bp->flags & BNXT_FLAG_STRIP_VLAN)
+ req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
+
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
+{
+ u32 rc = 0;
+
+ if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
+ struct hwrm_vnic_free_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
+ req.vnic_id =
+ cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
+
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return rc;
+ bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
+ }
+ return rc;
+}
+
+static void bnxt_hwrm_vnic_free(struct bnxt *bp)
+{
+ u16 i;
+
+ for (i = 0; i < bp->nr_vnics; i++)
+ bnxt_hwrm_vnic_free_one(bp, i);
+}
+
+static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, u16 start_grp_id,
+ u16 end_grp_id)
+{
+ u32 rc = 0, i, j;
+ struct hwrm_vnic_alloc_input req = {0};
+ struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+ /* map ring groups to this vnic */
+ for (i = start_grp_id, j = 0; i < end_grp_id; i++, j++) {
+ if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) {
+ netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
+ j, (end_grp_id - start_grp_id));
+ break;
+ }
+ bp->vnic_info[vnic_id].fw_grp_ids[j] =
+ bp->grp_info[i].fw_grp_id;
+ }
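+ /* The default vnic (vnic 0) is given the full range of ring groups,
+ * while RFS vnics map a single ring group each; see the callers
+ * bnxt_init_chip() and bnxt_alloc_rfs_vnics().
+ */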
+
+ bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+ if (vnic_id == 0)
+ req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
+{
+ u16 i;
+ u32 rc = 0;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct hwrm_ring_grp_alloc_input req = {0};
+ struct hwrm_ring_grp_alloc_output *resp =
+ bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
+
+ req.cr = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
+ req.rr = cpu_to_le16(bp->grp_info[i].rx_fw_ring_id);
+ req.ar = cpu_to_le16(bp->grp_info[i].agg_fw_ring_id);
+ req.sc = cpu_to_le16(bp->grp_info[i].fw_stats_ctx);
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+
+ bp->grp_info[i].fw_grp_id = le32_to_cpu(resp->ring_group_id);
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
+{
+ u16 i;
+ u32 rc = 0;
+ struct hwrm_ring_grp_free_input req = {0};
+
+ if (!bp->grp_info)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
+ continue;
+ req.ring_group_id =
+ cpu_to_le32(bp->grp_info[i].fw_grp_id);
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+ bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
+ struct bnxt_ring_struct *ring,
+ u32 ring_type, u32 map_index,
+ u32 stats_ctx_id)
+{
+ int rc = 0, err = 0;
+ struct hwrm_ring_alloc_input req = {0};
+ struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ u16 ring_id;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
+
+ req.enables = 0;
+ if (ring->nr_pages > 1) {
+ req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
+ /* Page size is in log2 units */
+ req.page_size = BNXT_PAGE_SHIFT;
+ req.page_tbl_depth = 1;
+ } else {
+ req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]);
+ }
+ req.fbo = 0;
+ /* Association of ring index with doorbell index and MSIX number */
+ req.logical_id = cpu_to_le16(map_index);
+
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_TX:
+ req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
+ /* Association of transmit ring with completion ring */
+ req.cmpl_ring_id =
+ cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
+ req.length = cpu_to_le32(bp->tx_ring_mask + 1);
+ req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
+ req.queue_id = cpu_to_le16(ring->queue_id);
+ break;
+ case HWRM_RING_ALLOC_RX:
+ req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+ req.length = cpu_to_le32(bp->rx_ring_mask + 1);
+ break;
+ case HWRM_RING_ALLOC_AGG:
+ req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+ req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
+ break;
+ case HWRM_RING_ALLOC_CMPL:
+ req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL;
+ req.length = cpu_to_le32(bp->cp_ring_mask + 1);
+ if (bp->flags & BNXT_FLAG_USING_MSIX)
+ req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ break;
+ default:
+ netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
+ ring_type);
+ return -1;
+ }
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ err = le16_to_cpu(resp->error_code);
+ ring_id = le16_to_cpu(resp->ring_id);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ if (rc || err) {
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_CMPL:
+ netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
+ rc, err);
+ return -1;
+
+ case HWRM_RING_ALLOC_RX:
+ netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
+ rc, err);
+ return -1;
+
+ case HWRM_RING_ALLOC_TX:
+ netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
+ rc, err);
+ return -1;
+
+ default:
+ netdev_err(bp->dev, "Invalid ring\n");
+ return -1;
+ }
+ }
+ ring->fw_ring_id = ring_id;
+ return rc;
+}
+
+static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
+{
+ int i, rc = 0;
+
+ if (bp->cp_nr_rings) {
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring,
+ HWRM_RING_ALLOC_CMPL, i,
+ INVALID_STATS_CTX_ID);
+ if (rc)
+ goto err_out;
+ cpr->cp_doorbell = bp->bar1 + i * 0x80;
+ BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+ bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
+ }
+ }
+
+ if (bp->tx_nr_rings) {
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+ u16 fw_stats_ctx = bp->grp_info[i].fw_stats_ctx;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring,
+ HWRM_RING_ALLOC_TX, i,
+ fw_stats_ctx);
+ if (rc)
+ goto err_out;
+ txr->tx_doorbell = bp->bar1 + i * 0x80;
+ }
+ }
+
+ if (bp->rx_nr_rings) {
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring,
+ HWRM_RING_ALLOC_RX, i,
+ INVALID_STATS_CTX_ID);
+ if (rc)
+ goto err_out;
+ rxr->rx_doorbell = bp->bar1 + i * 0x80;
+ writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+ bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id;
+ }
+ }
+
+ if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ struct bnxt_ring_struct *ring =
+ &rxr->rx_agg_ring_struct;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring,
+ HWRM_RING_ALLOC_AGG,
+ bp->rx_nr_rings + i,
+ INVALID_STATS_CTX_ID);
+ if (rc)
+ goto err_out;
+
+ rxr->rx_agg_doorbell =
+ bp->bar1 + (bp->rx_nr_rings + i) * 0x80;
+ writel(DB_KEY_RX | rxr->rx_agg_prod,
+ rxr->rx_agg_doorbell);
+ bp->grp_info[i].agg_fw_ring_id = ring->fw_ring_id;
+ }
+ }
+err_out:
+ return rc;
+}
+
+static int hwrm_ring_free_send_msg(struct bnxt *bp,
+ struct bnxt_ring_struct *ring,
+ u32 ring_type, int cmpl_ring_id)
+{
+ int rc;
+ struct hwrm_ring_free_input req = {0};
+ struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
+ u16 error_code;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, -1, -1);
+ req.ring_type = ring_type;
+ req.ring_id = cpu_to_le16(ring->fw_ring_id);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ error_code = le16_to_cpu(resp->error_code);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ if (rc || error_code) {
+ switch (ring_type) {
+ case RING_FREE_REQ_RING_TYPE_CMPL:
+ netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
+ rc);
+ return rc;
+ case RING_FREE_REQ_RING_TYPE_RX:
+ netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
+ rc);
+ return rc;
+ case RING_FREE_REQ_RING_TYPE_TX:
+ netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
+ rc);
+ return rc;
+ default:
+ netdev_err(bp->dev, "Invalid ring\n");
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
+{
+ int i, rc = 0;
+
+ if (!bp->bnapi)
+ return 0;
+
+ if (bp->tx_nr_rings) {
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+ u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ hwrm_ring_free_send_msg(
+ bp, ring,
+ RING_FREE_REQ_RING_TYPE_TX,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ }
+ }
+ }
+
+ if (bp->rx_nr_rings) {
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+ u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ hwrm_ring_free_send_msg(
+ bp, ring,
+ RING_FREE_REQ_RING_TYPE_RX,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[i].rx_fw_ring_id =
+ INVALID_HW_RING_ID;
+ }
+ }
+ }
+
+ if (bp->rx_agg_nr_pages) {
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ struct bnxt_ring_struct *ring =
+ &rxr->rx_agg_ring_struct;
+ u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ hwrm_ring_free_send_msg(
+ bp, ring,
+ RING_FREE_REQ_RING_TYPE_RX,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[i].agg_fw_ring_id =
+ INVALID_HW_RING_ID;
+ }
+ }
+ }
+
+ if (bp->cp_nr_rings) {
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ hwrm_ring_free_send_msg(
+ bp, ring,
+ RING_FREE_REQ_RING_TYPE_CMPL,
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[i].cp_fw_ring_id =
+ INVALID_HW_RING_ID;
+ }
+ }
+ }
+
+ return rc;
+}
+
+int bnxt_hwrm_set_coal(struct bnxt *bp)
+{
+ int i, rc = 0;
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
+ u16 max_buf, max_buf_irq;
+ u16 buf_tmr, buf_tmr_irq;
+ u32 flags;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
+ -1, -1);
+
+ /* Each rx completion (2 records) should be DMAed immediately */
+ max_buf = min_t(u16, bp->coal_bufs / 4, 2);
+ /* max_buf must not be zero */
+ max_buf = clamp_t(u16, max_buf, 1, 63);
+ max_buf_irq = clamp_t(u16, bp->coal_bufs_irq, 1, 63);
+ buf_tmr = max_t(u16, bp->coal_ticks / 4, 1);
+ buf_tmr_irq = max_t(u16, bp->coal_ticks_irq, 1);
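+ /* Illustrative example (hypothetical values): with coal_bufs == 20
+ * and coal_ticks == 12, max_buf = clamp(min(20 / 4, 2), 1, 63) = 2
+ * and buf_tmr = max(12 / 4, 1) = 3.
+ */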
+
+ flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
+
+ /* RING_IDLE generates more IRQs for lower latency. Enable it only
+ * if coal_ticks is less than 25 us.
+ */
+ if (BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks) < 25)
+ flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
+
+ req.flags = cpu_to_le16(flags);
+ req.num_cmpl_dma_aggr = cpu_to_le16(max_buf);
+ req.num_cmpl_dma_aggr_during_int = cpu_to_le16(max_buf_irq);
+ req.cmpl_aggr_dma_tmr = cpu_to_le16(buf_tmr);
+ req.cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmr_irq);
+ req.int_lat_tmr_min = cpu_to_le16(buf_tmr);
+ req.int_lat_tmr_max = cpu_to_le16(bp->coal_ticks);
+ req.num_cmpl_aggr_int = cpu_to_le16(bp->coal_bufs);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ req.ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
+{
+ int rc = 0, i;
+ struct hwrm_stat_ctx_free_input req = {0};
+
+ if (!bp->bnapi)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+ if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
+ req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+
+ cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
+ }
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
+{
+ int rc = 0, i;
+ struct hwrm_stat_ctx_alloc_input req = {0};
+ struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
+
+ req.update_period_ms = cpu_to_le32(1000);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+ req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+
+ cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
+
+ bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+{
+ int rc = 0;
+ struct hwrm_func_qcaps_input req = {0};
+ struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto hwrm_func_qcaps_exit;
+
+ if (BNXT_PF(bp)) {
+ struct bnxt_pf_info *pf = &bp->pf;
+
+ pf->fw_fid = le16_to_cpu(resp->fid);
+ pf->port_id = le16_to_cpu(resp->port_id);
+ memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN);
+ pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+ pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+ pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+ pf->max_pf_tx_rings = pf->max_tx_rings;
+ pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+ pf->max_pf_rx_rings = pf->max_rx_rings;
+ pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+ pf->max_vnics = le16_to_cpu(resp->max_vnics);
+ pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+ pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
+ pf->max_vfs = le16_to_cpu(resp->max_vfs);
+ pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
+ pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
+ pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
+ pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
+ pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
+ pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
+ } else {
+#ifdef CONFIG_BNXT_SRIOV
+ struct bnxt_vf_info *vf = &bp->vf;
+
+ vf->fw_fid = le16_to_cpu(resp->fid);
+ memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN);
+ if (!is_valid_ether_addr(vf->mac_addr))
+ random_ether_addr(vf->mac_addr);
+
+ vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+ vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+ vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+ vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+ vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+ vf->max_vnics = le16_to_cpu(resp->max_vnics);
+ vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+#endif
+ }
+
+ bp->tx_push_thresh = 0;
+ if (resp->flags &
+ cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
+ bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
+
+hwrm_func_qcaps_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_func_reset(struct bnxt *bp)
+{
+ struct hwrm_func_reset_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
+ req.enables = 0;
+
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
+}
+
+static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
+{
+ int rc = 0;
+ struct hwrm_queue_qportcfg_input req = {0};
+ struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ u8 i, *qptr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto qportcfg_exit;
+
+ if (!resp->max_configurable_queues) {
+ rc = -EINVAL;
+ goto qportcfg_exit;
+ }
+ bp->max_tc = resp->max_configurable_queues;
+ if (bp->max_tc > BNXT_MAX_QUEUE)
+ bp->max_tc = BNXT_MAX_QUEUE;
+
+ qptr = &resp->queue_id0;
+ for (i = 0; i < bp->max_tc; i++) {
+ bp->q_info[i].queue_id = *qptr++;
+ bp->q_info[i].queue_profile = *qptr++;
+ }
+
+qportcfg_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_ver_get(struct bnxt *bp)
+{
+ int rc;
+ struct hwrm_ver_get_input req = {0};
+ struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
+ req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
+ req.hwrm_intf_min = HWRM_VERSION_MINOR;
+ req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto hwrm_ver_get_exit;
+
+ memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
+
+ if (req.hwrm_intf_maj != resp->hwrm_intf_maj ||
+ req.hwrm_intf_min != resp->hwrm_intf_min ||
+ req.hwrm_intf_upd != resp->hwrm_intf_upd) {
+ netdev_warn(bp->dev, "HWRM interface %d.%d.%d does not match driver interface %d.%d.%d.\n",
+ resp->hwrm_intf_maj, resp->hwrm_intf_min,
+ resp->hwrm_intf_upd, req.hwrm_intf_maj,
+ req.hwrm_intf_min, req.hwrm_intf_upd);
+ netdev_warn(bp->dev, "Please update driver or firmware with matching interface versions.\n");
+ }
+ snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "bc %d.%d.%d rm %d.%d.%d",
+ resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
+ resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
+
+hwrm_ver_get_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
+{
+ if (bp->vxlan_port_cnt) {
+ bnxt_hwrm_tunnel_dst_port_free(
+ bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
+ }
+ bp->vxlan_port_cnt = 0;
+ if (bp->nge_port_cnt) {
+ bnxt_hwrm_tunnel_dst_port_free(
+ bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
+ }
+ bp->nge_port_cnt = 0;
+}
+
+static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
+{
+ int rc, i;
+ u32 tpa_flags = 0;
+
+ if (set_tpa)
+ tpa_flags = bp->flags & BNXT_FLAG_TPA;
+ for (i = 0; i < bp->nr_vnics; i++) {
+ rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
+ rc, i);
+ return rc;
+ }
+ }
+ return 0;
+}
+
+static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->nr_vnics; i++)
+ bnxt_hwrm_vnic_set_rss(bp, i, false);
+}
+
+static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
+ bool irq_re_init)
+{
+ if (bp->vnic_info) {
+ bnxt_hwrm_clear_vnic_filter(bp);
+ /* clear all RSS settings before freeing the vnic ctx */
+ bnxt_hwrm_clear_vnic_rss(bp);
+ bnxt_hwrm_vnic_ctx_free(bp);
+ /* before freeing the vnic, undo the vnic tpa settings */
+ if (bp->flags & BNXT_FLAG_TPA)
+ bnxt_set_tpa(bp, false);
+ bnxt_hwrm_vnic_free(bp);
+ }
+ bnxt_hwrm_ring_free(bp, close_path);
+ bnxt_hwrm_ring_grp_free(bp);
+ if (irq_re_init) {
+ bnxt_hwrm_stat_ctx_free(bp);
+ bnxt_hwrm_free_tunnel_ports(bp);
+ }
+}
+
+static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
+{
+ int rc;
+
+ /* allocate context for vnic */
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
+ vnic_id, rc);
+ goto vnic_setup_err;
+ }
+ bp->rsscos_nr_ctxs++;
+
+ /* configure default vnic, ring grp */
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
+ vnic_id, rc);
+ goto vnic_setup_err;
+ }
+
+ /* Enable RSS hashing on vnic */
+ rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
+ vnic_id, rc);
+ goto vnic_setup_err;
+ }
+
+ if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
+ vnic_id, rc);
+ }
+ }
+
+vnic_setup_err:
+ return rc;
+}
+
+static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
+{
+#ifdef CONFIG_RFS_ACCEL
+ int i, rc = 0;
+
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ u16 vnic_id = i + 1;
+ u16 ring_id = i;
+
+ if (vnic_id >= bp->nr_vnics)
+ break;
+
+ bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG;
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, ring_id + 1);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
+ vnic_id, rc);
+ break;
+ }
+ rc = bnxt_setup_vnic(bp, vnic_id);
+ if (rc)
+ break;
+ }
+ return rc;
+#else
+ return 0;
+#endif
+}
+
+static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
+{
+ int rc = 0;
+
+ if (irq_re_init) {
+ rc = bnxt_hwrm_stat_ctx_alloc(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
+ rc);
+ goto err_out;
+ }
+ }
+
+ rc = bnxt_hwrm_ring_alloc(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
+ goto err_out;
+ }
+
+ rc = bnxt_hwrm_ring_grp_alloc(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
+ goto err_out;
+ }
+
+ /* default vnic 0 */
+ rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, bp->rx_nr_rings);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
+ goto err_out;
+ }
+
+ rc = bnxt_setup_vnic(bp, 0);
+ if (rc)
+ goto err_out;
+
+ if (bp->flags & BNXT_FLAG_RFS) {
+ rc = bnxt_alloc_rfs_vnics(bp);
+ if (rc)
+ goto err_out;
+ }
+
+ if (bp->flags & BNXT_FLAG_TPA) {
+ rc = bnxt_set_tpa(bp, true);
+ if (rc)
+ goto err_out;
+ }
+
+ if (BNXT_VF(bp))
+ bnxt_update_vf_mac(bp);
+
+ /* Filter for default vnic 0 */
+ rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
+ if (rc) {
+ netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
+ goto err_out;
+ }
+ bp->vnic_info[0].uc_filter_count = 1;
+
+ bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST |
+ CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
+
+ if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp))
+ bp->vnic_info[0].rx_mask |=
+ CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+
+ rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+ if (rc) {
+ netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", rc);
+ goto err_out;
+ }
+
+ rc = bnxt_hwrm_set_coal(bp);
+ if (rc)
+ netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
+ rc);
+
+ return 0;
+
+err_out:
+ bnxt_hwrm_resource_free(bp, 0, true);
+
+ return rc;
+}
+
+static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
+{
+ bnxt_hwrm_resource_free(bp, 1, irq_re_init);
+ return 0;
+}
+
+static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
+{
+ bnxt_init_rx_rings(bp);
+ bnxt_init_tx_rings(bp);
+ bnxt_init_ring_grps(bp, irq_re_init);
+ bnxt_init_vnics(bp);
+
+ return bnxt_init_chip(bp, irq_re_init);
+}
+
+static void bnxt_disable_int(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+ BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+ }
+}
+
+static void bnxt_enable_int(struct bnxt *bp)
+{
+ int i;
+
+ atomic_set(&bp->intr_sem, 0);
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+ BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+ }
+}
+
+static int bnxt_set_real_num_queues(struct bnxt *bp)
+{
+ int rc;
+ struct net_device *dev = bp->dev;
+
+ rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings);
+ if (rc)
+ return rc;
+
+ rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
+ if (rc)
+ return rc;
+
+#ifdef CONFIG_RFS_ACCEL
+ if (bp->rx_nr_rings) {
+ dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
+ if (!dev->rx_cpu_rmap)
+ rc = -ENOMEM;
+ }
+#endif
+
+ return rc;
+}
+
+static int bnxt_setup_msix(struct bnxt *bp)
+{
+ struct msix_entry *msix_ent;
+ struct net_device *dev = bp->dev;
+ int i, total_vecs, rc = 0;
+ const int len = sizeof(bp->irq_tbl[0].name);
+
+ bp->flags &= ~BNXT_FLAG_USING_MSIX;
+ total_vecs = bp->cp_nr_rings;
+
+ msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
+ if (!msix_ent)
+ return -ENOMEM;
+
+ for (i = 0; i < total_vecs; i++) {
+ msix_ent[i].entry = i;
+ msix_ent[i].vector = 0;
+ }
+
+ total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, 1, total_vecs);
+ if (total_vecs < 0) {
+ rc = -ENODEV;
+ goto msix_setup_exit;
+ }
+
+ bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
+ if (bp->irq_tbl) {
+ int tcs;
+
+ /* Trim rings based upon num of vectors allocated */
+ bp->rx_nr_rings = min_t(int, total_vecs, bp->rx_nr_rings);
+ bp->tx_nr_rings = min_t(int, total_vecs, bp->tx_nr_rings);
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ tcs = netdev_get_num_tc(dev);
+ if (tcs > 1) {
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
+ if (bp->tx_nr_rings_per_tc == 0) {
+ netdev_reset_tc(dev);
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ } else {
+ int i, off, count;
+
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
+ for (i = 0; i < tcs; i++) {
+ count = bp->tx_nr_rings_per_tc;
+ off = i * count;
+ netdev_set_tc_queue(dev, i, count, off);
+ }
+ }
+ }
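+ /* e.g. with tcs == 2 and tx_nr_rings_per_tc == 4, tx_nr_rings
+ * becomes 8 and tc 0 maps to queues 0-3, tc 1 to queues 4-7
+ * (illustrative values only).
+ */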
+ bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings);
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ bp->irq_tbl[i].vector = msix_ent[i].vector;
+ snprintf(bp->irq_tbl[i].name, len,
+ "%s-%s-%d", dev->name, "TxRx", i);
+ bp->irq_tbl[i].handler = bnxt_msix;
+ }
+ rc = bnxt_set_real_num_queues(bp);
+ if (rc)
+ goto msix_setup_exit;
+ } else {
+ rc = -ENOMEM;
+ goto msix_setup_exit;
+ }
+ bp->flags |= BNXT_FLAG_USING_MSIX;
+ kfree(msix_ent);
+ return 0;
+
+msix_setup_exit:
+ netdev_err(bp->dev, "bnxt_setup_msix err: %x\n", rc);
+ pci_disable_msix(bp->pdev);
+ kfree(msix_ent);
+ return rc;
+}
+
+static int bnxt_setup_inta(struct bnxt *bp)
+{
+ int rc;
+ const int len = sizeof(bp->irq_tbl[0].name);
+
+ if (netdev_get_num_tc(bp->dev))
+ netdev_reset_tc(bp->dev);
+
+ bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
+ if (!bp->irq_tbl) {
+ rc = -ENOMEM;
+ return rc;
+ }
+ bp->rx_nr_rings = 1;
+ bp->tx_nr_rings = 1;
+ bp->cp_nr_rings = 1;
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ bp->irq_tbl[0].vector = bp->pdev->irq;
+ snprintf(bp->irq_tbl[0].name, len,
+ "%s-%s-%d", bp->dev->name, "TxRx", 0);
+ bp->irq_tbl[0].handler = bnxt_inta;
+ rc = bnxt_set_real_num_queues(bp);
+ return rc;
+}
+
+static int bnxt_setup_int_mode(struct bnxt *bp)
+{
+ int rc = 0;
+
+ if (bp->flags & BNXT_FLAG_MSIX_CAP)
+ rc = bnxt_setup_msix(bp);
+
+ if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
+ /* fallback to INTA */
+ rc = bnxt_setup_inta(bp);
+ }
+ return rc;
+}
+
+static void bnxt_free_irq(struct bnxt *bp)
+{
+ struct bnxt_irq *irq;
+ int i;
+
+#ifdef CONFIG_RFS_ACCEL
+ free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
+ bp->dev->rx_cpu_rmap = NULL;
+#endif
+ if (!bp->irq_tbl)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ irq = &bp->irq_tbl[i];
+ if (irq->requested)
+ free_irq(irq->vector, bp->bnapi[i]);
+ irq->requested = 0;
+ }
+ if (bp->flags & BNXT_FLAG_USING_MSIX)
+ pci_disable_msix(bp->pdev);
+ kfree(bp->irq_tbl);
+ bp->irq_tbl = NULL;
+}
+
+static int bnxt_request_irq(struct bnxt *bp)
+{
+ int i, rc = 0;
+ unsigned long flags = 0;
+#ifdef CONFIG_RFS_ACCEL
+ struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
+#endif
+
+ if (!(bp->flags & BNXT_FLAG_USING_MSIX))
+ flags = IRQF_SHARED;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_irq *irq = &bp->irq_tbl[i];
+#ifdef CONFIG_RFS_ACCEL
+ if (rmap && (i < bp->rx_nr_rings)) {
+ rc = irq_cpu_rmap_add(rmap, irq->vector);
+ if (rc)
+ netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
+ i);
+ }
+#endif
+ rc = request_irq(irq->vector, irq->handler, flags, irq->name,
+ bp->bnapi[i]);
+ if (rc)
+ break;
+
+ irq->requested = 1;
+ }
+ return rc;
+}
+
+static void bnxt_del_napi(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+
+ napi_hash_del(&bnapi->napi);
+ netif_napi_del(&bnapi->napi);
+ }
+}
+
+static void bnxt_init_napi(struct bnxt *bp)
+{
+ int i;
+ struct bnxt_napi *bnapi;
+
+ if (bp->flags & BNXT_FLAG_USING_MSIX) {
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ bnapi = bp->bnapi[i];
+ netif_napi_add(bp->dev, &bnapi->napi,
+ bnxt_poll, 64);
+ napi_hash_add(&bnapi->napi);
+ }
+ } else {
+ bnapi = bp->bnapi[0];
+ netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
+ napi_hash_add(&bnapi->napi);
+ }
+}
+
+static void bnxt_disable_napi(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ napi_disable(&bp->bnapi[i]->napi);
+ bnxt_disable_poll(bp->bnapi[i]);
+ }
+}
+
+static void bnxt_enable_napi(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ bnxt_enable_poll(bp->bnapi[i]);
+ napi_enable(&bp->bnapi[i]->napi);
+ }
+}
+
+static void bnxt_tx_disable(struct bnxt *bp)
+{
+ int i;
+ struct bnxt_napi *bnapi;
+ struct bnxt_tx_ring_info *txr;
+ struct netdev_queue *txq;
+
+ if (bp->bnapi) {
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ bnapi = bp->bnapi[i];
+ txr = &bnapi->tx_ring;
+ txq = netdev_get_tx_queue(bp->dev, i);
+ __netif_tx_lock(txq, smp_processor_id());
+ txr->dev_state = BNXT_DEV_STATE_CLOSING;
+ __netif_tx_unlock(txq);
+ }
+ }
+ /* Stop all TX queues */
+ netif_tx_disable(bp->dev);
+ netif_carrier_off(bp->dev);
+}
+
+static void bnxt_tx_enable(struct bnxt *bp)
+{
+ int i;
+ struct bnxt_napi *bnapi;
+ struct bnxt_tx_ring_info *txr;
+ struct netdev_queue *txq;
+
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ bnapi = bp->bnapi[i];
+ txr = &bnapi->tx_ring;
+ txq = netdev_get_tx_queue(bp->dev, i);
+ txr->dev_state = 0;
+ }
+ netif_tx_wake_all_queues(bp->dev);
+ if (bp->link_info.link_up)
+ netif_carrier_on(bp->dev);
+}
+
+static void bnxt_report_link(struct bnxt *bp)
+{
+ if (bp->link_info.link_up) {
+ const char *duplex;
+ const char *flow_ctrl;
+ u16 speed;
+
+ netif_carrier_on(bp->dev);
+ if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
+ duplex = "full";
+ else
+ duplex = "half";
+ if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
+ flow_ctrl = "ON - receive & transmit";
+ else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
+ flow_ctrl = "ON - transmit";
+ else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
+ flow_ctrl = "ON - receive";
+ else
+ flow_ctrl = "none";
+ speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
+ netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
+ speed, duplex, flow_ctrl);
+ } else {
+ netif_carrier_off(bp->dev);
+ netdev_err(bp->dev, "NIC Link is Down\n");
+ }
+}
+
+static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
+{
+ int rc = 0;
+ struct bnxt_link_info *link_info = &bp->link_info;
+ struct hwrm_port_phy_qcfg_input req = {0};
+ struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ u8 link_up = link_info->link_up;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc) {
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+ }
+
+ memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
+ link_info->phy_link_status = resp->link;
+ link_info->duplex = resp->duplex;
+ link_info->pause = resp->pause;
+ link_info->auto_mode = resp->auto_mode;
+ link_info->auto_pause_setting = resp->auto_pause;
+ link_info->force_pause_setting = resp->force_pause;
+ link_info->duplex_setting = resp->duplex_setting;
+ if (link_info->phy_link_status == BNXT_LINK_LINK)
+ link_info->link_speed = le16_to_cpu(resp->link_speed);
+ else
+ link_info->link_speed = 0;
+ link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
+ link_info->auto_link_speed = le16_to_cpu(resp->auto_link_speed);
+ link_info->support_speeds = le16_to_cpu(resp->support_speeds);
+ link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
+ link_info->preemphasis = le32_to_cpu(resp->preemphasis);
+ link_info->phy_ver[0] = resp->phy_maj;
+ link_info->phy_ver[1] = resp->phy_min;
+ link_info->phy_ver[2] = resp->phy_bld;
+ link_info->media_type = resp->media_type;
+ link_info->transceiver = resp->transceiver_type;
+ link_info->phy_addr = resp->phy_addr;
+
+ /* TODO: need to add more logic to report VF link */
+ if (chng_link_state) {
+ if (link_info->phy_link_status == BNXT_LINK_LINK)
+ link_info->link_up = 1;
+ else
+ link_info->link_up = 0;
+ if (link_up != link_info->link_up)
+ bnxt_report_link(bp);
+ } else {
+ /* always link down if not required to update link state */
+ link_info->link_up = 0;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return 0;
+}
+
+static void
+bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
+{
+ if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
+ if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
+ req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
+ if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
+ req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
+ req->enables |=
+ cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
+ } else {
+ if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
+ req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
+ if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
+ req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
+ req->enables |=
+ cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
+ }
+}
+
+static void bnxt_hwrm_set_link_common(struct bnxt *bp,
+ struct hwrm_port_phy_cfg_input *req)
+{
+ u8 autoneg = bp->link_info.autoneg;
+ u16 fw_link_speed = bp->link_info.req_link_speed;
+ u32 advertising = bp->link_info.advertising;
+
+ if (autoneg & BNXT_AUTONEG_SPEED) {
+ req->auto_mode |=
+ PORT_PHY_CFG_REQ_AUTO_MODE_MASK;
+
+ req->enables |= cpu_to_le32(
+ PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
+ req->auto_link_speed_mask = cpu_to_le16(advertising);
+
+ req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
+ req->flags |=
+ cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
+ } else {
+ req->force_link_speed = cpu_to_le16(fw_link_speed);
+ req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
+ }
+
+ /* currently don't support half duplex */
+ req->auto_duplex = PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL;
+ req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX);
+ /* tell chimp that the setting takes effect immediately */
+ req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
+}
+
+int bnxt_hwrm_set_pause(struct bnxt *bp)
+{
+ struct hwrm_port_phy_cfg_input req = {0};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+ bnxt_hwrm_set_pause_common(bp, &req);
+
+ if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
+ bp->link_info.force_link_chng)
+ bnxt_hwrm_set_link_common(bp, &req);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
+ /* since changing the pause setting doesn't trigger any link
+ * change event, the driver needs to update the current pause
+ * result upon successful return of the phy_cfg command
+ */
+ bp->link_info.pause =
+ bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
+ bp->link_info.auto_pause_setting = 0;
+ if (!bp->link_info.force_link_chng)
+ bnxt_report_link(bp);
+ }
+ bp->link_info.force_link_chng = false;
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause)
+{
+ struct hwrm_port_phy_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+ if (set_pause)
+ bnxt_hwrm_set_pause_common(bp, &req);
+
+ bnxt_hwrm_set_link_common(bp, &req);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_update_phy_setting(struct bnxt *bp)
+{
+ int rc;
+ bool update_link = false;
+ bool update_pause = false;
+ struct bnxt_link_info *link_info = &bp->link_info;
+
+ rc = bnxt_update_link(bp, true);
+ if (rc) {
+ netdev_err(bp->dev, "failed to update link (rc: %x)\n",
+ rc);
+ return rc;
+ }
+ if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
+ link_info->auto_pause_setting != link_info->req_flow_ctrl)
+ update_pause = true;
+ if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
+ link_info->force_pause_setting != link_info->req_flow_ctrl)
+ update_pause = true;
+ if (link_info->req_duplex != link_info->duplex_setting)
+ update_link = true;
+ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
+ if (BNXT_AUTO_MODE(link_info->auto_mode))
+ update_link = true;
+ if (link_info->req_link_speed != link_info->force_link_speed)
+ update_link = true;
+ } else {
+ if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
+ update_link = true;
+ if (link_info->advertising != link_info->auto_link_speeds)
+ update_link = true;
+ if (link_info->req_link_speed != link_info->auto_link_speed)
+ update_link = true;
+ }
+
+ if (update_link)
+ rc = bnxt_hwrm_set_link_setting(bp, update_pause);
+ else if (update_pause)
+ rc = bnxt_hwrm_set_pause(bp);
+ if (rc) {
+ netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
+ rc);
+ return rc;
+ }
+
+ return rc;
+}
+
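+/* Bring the NIC up: set up interrupts, allocate rings and buffers,
+ * initialize the hardware and optionally re-apply the PHY settings.
+ */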
+static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+{
+ int rc = 0;
+
+ netif_carrier_off(bp->dev);
+ if (irq_re_init) {
+ rc = bnxt_setup_int_mode(bp);
+ if (rc) {
+ netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
+ rc);
+ return rc;
+ }
+ }
+ if ((bp->flags & BNXT_FLAG_RFS) &&
+ !(bp->flags & BNXT_FLAG_USING_MSIX)) {
+ /* disable RFS if falling back to INTA */
+ bp->dev->hw_features &= ~NETIF_F_NTUPLE;
+ bp->flags &= ~BNXT_FLAG_RFS;
+ }
+
+ rc = bnxt_alloc_mem(bp, irq_re_init);
+ if (rc) {
+ netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
+ goto open_err_free_mem;
+ }
+
+ if (irq_re_init) {
+ bnxt_init_napi(bp);
+ rc = bnxt_request_irq(bp);
+ if (rc) {
+ netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
+ goto open_err;
+ }
+ }
+
+ bnxt_enable_napi(bp);
+
+ rc = bnxt_init_nic(bp, irq_re_init);
+ if (rc) {
+ netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
+ goto open_err;
+ }
+
+ if (link_re_init) {
+ rc = bnxt_update_phy_setting(bp);
+ if (rc)
+ goto open_err;
+ }
+
+ if (irq_re_init) {
+#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
+ vxlan_get_rx_port(bp->dev);
+#endif
+ if (!bnxt_hwrm_tunnel_dst_port_alloc(
+ bp, htons(0x17c1),
+ TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE))
+ bp->nge_port_cnt = 1;
+ }
+
+ bp->state = BNXT_STATE_OPEN;
+ bnxt_enable_int(bp);
+ /* Enable TX queues */
+ bnxt_tx_enable(bp);
+ mod_timer(&bp->timer, jiffies + bp->current_interval);
+
+ return 0;
+
+open_err:
+ bnxt_disable_napi(bp);
+ bnxt_del_napi(bp);
+
+open_err_free_mem:
+ bnxt_free_skbs(bp);
+ bnxt_free_irq(bp);
+ bnxt_free_mem(bp, true);
+ return rc;
+}
+
+/* rtnl_lock held */
+int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+{
+ int rc = 0;
+
+ rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
+ if (rc) {
+ netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
+ dev_close(bp->dev);
+ }
+ return rc;
+}
+
+static int bnxt_open(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc = 0;
+
+ rc = bnxt_hwrm_func_reset(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
+ rc);
+ rc = -1;
+ return rc;
+ }
+ return __bnxt_open_nic(bp, true, true);
+}
+
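+/* Disable device interrupts and wait for any in-flight handlers to finish */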
+static void bnxt_disable_int_sync(struct bnxt *bp)
+{
+ int i;
+
+ atomic_inc(&bp->intr_sem);
+ if (!netif_running(bp->dev))
+ return;
+
+ bnxt_disable_int(bp);
+ for (i = 0; i < bp->cp_nr_rings; i++)
+ synchronize_irq(bp->irq_tbl[i].vector);
+}
+
+int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+{
+ int rc = 0;
+
+#ifdef CONFIG_BNXT_SRIOV
+ if (bp->sriov_cfg) {
+ rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
+ !bp->sriov_cfg,
+ BNXT_SRIOV_CFG_WAIT_TMO);
+ if (rc == 0)
+ netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
+ rc = 0;
+ }
+#endif
+ /* Change device state to avoid TX queue wake-ups */
+ bnxt_tx_disable(bp);
+
+ bp->state = BNXT_STATE_CLOSED;
+ cancel_work_sync(&bp->sp_task);
+
+ /* Flush rings before disabling interrupts */
+ bnxt_shutdown_nic(bp, irq_re_init);
+
+ /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
+
+ bnxt_disable_napi(bp);
+ bnxt_disable_int_sync(bp);
+ del_timer_sync(&bp->timer);
+ bnxt_free_skbs(bp);
+
+ if (irq_re_init) {
+ bnxt_free_irq(bp);
+ bnxt_del_napi(bp);
+ }
+ bnxt_free_mem(bp, irq_re_init);
+ return rc;
+}
+
+static int bnxt_close(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ bnxt_close_nic(bp, true, true);
+ return 0;
+}
+
+/* rtnl_lock held */
+static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ /* fallthru */
+ case SIOCGMIIREG: {
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ return 0;
+ }
+
+ case SIOCSMIIREG:
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ return 0;
+
+ default:
+ /* do nothing */
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static struct rtnl_link_stats64 *
+bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ u32 i;
+ struct bnxt *bp = netdev_priv(dev);
+
+ memset(stats, 0, sizeof(struct rtnl_link_stats64));
+
+ if (!bp->bnapi)
+ return stats;
+
+ /* TODO check if we need to synchronize with bnxt_close path */
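+ /* sum the per-completion-ring hardware counters into the netdev stats */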
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct ctx_hw_stats *hw_stats = cpr->hw_stats;
+
+ stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
+ stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
+ stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
+
+ stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
+ stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
+ stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
+
+ stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
+ stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
+ stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
+
+ stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
+ stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
+ stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
+
+ stats->rx_missed_errors +=
+ le64_to_cpu(hw_stats->rx_discard_pkts);
+
+ stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
+
+ stats->rx_dropped += le64_to_cpu(hw_stats->rx_drop_pkts);
+
+ stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
+ }
+
+ return stats;
+}
+
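+/* Copy the device multicast list into the vnic and report whether it has
+ * changed.  Fall back to receiving all multicast if the list is too long.
+ */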
+static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
+{
+ struct net_device *dev = bp->dev;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct netdev_hw_addr *ha;
+ u8 *haddr;
+ int mc_count = 0;
+ bool update = false;
+ int off = 0;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ if (mc_count >= BNXT_MAX_MC_ADDRS) {
+ *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+ vnic->mc_list_count = 0;
+ return false;
+ }
+ haddr = ha->addr;
+ if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
+ memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
+ update = true;
+ }
+ off += ETH_ALEN;
+ mc_count++;
+ }
+ if (mc_count)
+ *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
+
+ if (mc_count != vnic->mc_list_count) {
+ vnic->mc_list_count = mc_count;
+ update = true;
+ }
+ return update;
+}
+
+static bool bnxt_uc_list_updated(struct bnxt *bp)
+{
+ struct net_device *dev = bp->dev;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct netdev_hw_addr *ha;
+ int off = 0;
+
+ if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
+ return true;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
+ return true;
+
+ off += ETH_ALEN;
+ }
+ return false;
+}
+
+static void bnxt_set_rx_mode(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ u32 mask = vnic->rx_mask;
+ bool mc_update = false;
+ bool uc_update;
+
+ if (!netif_running(dev))
+ return;
+
+ mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
+ CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
+ CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
+
+ /* Only allow PF to be in promiscuous mode */
+ if ((dev->flags & IFF_PROMISC) && BNXT_PF(bp))
+ mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+
+ uc_update = bnxt_uc_list_updated(bp);
+
+ if (dev->flags & IFF_ALLMULTI) {
+ mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+ vnic->mc_list_count = 0;
+ } else {
+ mc_update = bnxt_mc_list_updated(bp, &mask);
+ }
+
+ if (mask != vnic->rx_mask || uc_update || mc_update) {
+ vnic->rx_mask = mask;
+
+ set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+ }
+}
+
+static void bnxt_cfg_rx_mode(struct bnxt *bp)
+{
+ struct net_device *dev = bp->dev;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct netdev_hw_addr *ha;
+ int i, off = 0, rc;
+ bool uc_update;
+
+ netif_addr_lock_bh(dev);
+ uc_update = bnxt_uc_list_updated(bp);
+ netif_addr_unlock_bh(dev);
+
+ if (!uc_update)
+ goto skip_uc;
+
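+ /* free all L2 filters except filter 0, which is the default MAC address */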
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 1; i < vnic->uc_filter_count; i++) {
+ struct hwrm_cfa_l2_filter_free_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
+ -1);
+
+ req.l2_filter_id = vnic->fw_l2_filter_id[i];
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ vnic->uc_filter_count = 1;
+
+ netif_addr_lock_bh(dev);
+ if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+ } else {
+ netdev_for_each_uc_addr(ha, dev) {
+ memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
+ off += ETH_ALEN;
+ vnic->uc_filter_count++;
+ }
+ }
+ netif_addr_unlock_bh(dev);
+
+ for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
+ rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
+ if (rc) {
+ netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
+ rc);
+ vnic->uc_filter_count = i;
+ }
+ }
+
+skip_uc:
+ rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+ if (rc)
+ netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
+ rc);
+}
+
+static netdev_features_t bnxt_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ return features;
+}
+
+static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u32 flags = bp->flags;
+ u32 changes;
+ int rc = 0;
+ bool re_init = false;
+ bool update_tpa = false;
+
+ flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
+ if ((features & NETIF_F_GRO) && (bp->pdev->revision > 0))
+ flags |= BNXT_FLAG_GRO;
+ if (features & NETIF_F_LRO)
+ flags |= BNXT_FLAG_LRO;
+
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ flags |= BNXT_FLAG_STRIP_VLAN;
+
+ if (features & NETIF_F_NTUPLE)
+ flags |= BNXT_FLAG_RFS;
+
+ changes = flags ^ bp->flags;
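+ /* rings must be reinitialized when TPA goes from fully off to on
+ * or from on to fully off
+ */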
+ if (changes & BNXT_FLAG_TPA) {
+ update_tpa = true;
+ if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
+ (flags & BNXT_FLAG_TPA) == 0)
+ re_init = true;
+ }
+
+ if (changes & ~BNXT_FLAG_TPA)
+ re_init = true;
+
+ if (flags != bp->flags) {
+ u32 old_flags = bp->flags;
+
+ bp->flags = flags;
+
+ if (!netif_running(dev)) {
+ if (update_tpa)
+ bnxt_set_ring_params(bp);
+ return rc;
+ }
+
+ if (re_init) {
+ bnxt_close_nic(bp, false, false);
+ if (update_tpa)
+ bnxt_set_ring_params(bp);
+
+ return bnxt_open_nic(bp, false, false);
+ }
+ if (update_tpa) {
+ rc = bnxt_set_tpa(bp,
+ (flags & BNXT_FLAG_TPA) ?
+ true : false);
+ if (rc)
+ bp->flags = old_flags;
+ }
+ }
+ return rc;
+}
+
+static void bnxt_dbg_dump_states(struct bnxt *bp)
+{
+ int i;
+ struct bnxt_napi *bnapi;
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_cp_ring_info *cpr;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ bnapi = bp->bnapi[i];
+ txr = &bnapi->tx_ring;
+ rxr = &bnapi->rx_ring;
+ cpr = &bnapi->cp_ring;
+ if (netif_msg_drv(bp)) {
+ netdev_info(bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
+ i, txr->tx_ring_struct.fw_ring_id,
+ txr->tx_prod, txr->tx_cons);
+ netdev_info(bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
+ i, rxr->rx_ring_struct.fw_ring_id,
+ rxr->rx_prod,
+ rxr->rx_agg_ring_struct.fw_ring_id,
+ rxr->rx_agg_prod, rxr->rx_sw_agg_prod);
+ netdev_info(bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
+ i, cpr->cp_ring_struct.fw_ring_id,
+ cpr->cp_raw_cons);
+ }
+ }
+}
+
+static void bnxt_reset_task(struct bnxt *bp)
+{
+ bnxt_dbg_dump_states(bp);
+ if (netif_running(bp->dev))
+ bnxt_tx_disable(bp); /* prevent tx timeout again */
+}
+
+static void bnxt_tx_timeout(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
+ set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bnxt_poll_controller(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_irq *irq = &bp->irq_tbl[i];
+
+ disable_irq(irq->vector);
+ irq->handler(irq->vector, bp->bnapi[i]);
+ enable_irq(irq->vector);
+ }
+}
+#endif
+
+static void bnxt_timer(unsigned long data)
+{
+ struct bnxt *bp = (struct bnxt *)data;
+ struct net_device *dev = bp->dev;
+
+ if (!netif_running(dev))
+ return;
+
+ if (atomic_read(&bp->intr_sem) != 0)
+ goto bnxt_restart_timer;
+
+bnxt_restart_timer:
+ mod_timer(&bp->timer, jiffies + bp->current_interval);
+}
+
+static void bnxt_cfg_ntp_filters(struct bnxt *);
+
+static void bnxt_sp_task(struct work_struct *work)
+{
+ struct bnxt *bp = container_of(work, struct bnxt, sp_task);
+ int rc;
+
+ if (bp->state != BNXT_STATE_OPEN)
+ return;
+
+ if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
+ bnxt_cfg_rx_mode(bp);
+
+ if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
+ bnxt_cfg_ntp_filters(bp);
+ if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
+ rc = bnxt_update_link(bp, true);
+ if (rc)
+ netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
+ rc);
+ }
+ if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
+ bnxt_hwrm_exec_fwd_req(bp);
+ if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
+ bnxt_hwrm_tunnel_dst_port_alloc(
+ bp, bp->vxlan_port,
+ TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
+ }
+ if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
+ bnxt_hwrm_tunnel_dst_port_free(
+ bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
+ }
+ if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
+ bnxt_reset_task(bp);
+}
+
+static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
+{
+ int rc;
+ struct bnxt *bp = netdev_priv(dev);
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ /* enable device (incl. PCI PM wakeup), and bus-mastering */
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+ goto init_err;
+ }
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ dev_err(&pdev->dev,
+ "Cannot find PCI device base address, aborting\n");
+ rc = -ENODEV;
+ goto init_err_disable;
+ }
+
+ rc = pci_request_regions(pdev, DRV_MODULE_NAME);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
+ goto init_err_disable;
+ }
+
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
+ dev_err(&pdev->dev, "System does not support DMA, aborting\n");
+ rc = -EIO;
+ goto init_err_disable;
+ }
+
+ pci_set_master(pdev);
+
+ bp->dev = dev;
+ bp->pdev = pdev;
+
+ bp->bar0 = pci_ioremap_bar(pdev, 0);
+ if (!bp->bar0) {
+ dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
+ rc = -ENOMEM;
+ goto init_err_release;
+ }
+
+ bp->bar1 = pci_ioremap_bar(pdev, 2);
+ if (!bp->bar1) {
+ dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
+ rc = -ENOMEM;
+ goto init_err_release;
+ }
+
+ bp->bar2 = pci_ioremap_bar(pdev, 4);
+ if (!bp->bar2) {
+ dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
+ rc = -ENOMEM;
+ goto init_err_release;
+ }
+
+ INIT_WORK(&bp->sp_task, bnxt_sp_task);
+
+ spin_lock_init(&bp->ntp_fltr_lock);
+
+ bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
+ bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
+
+ bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(4);
+ bp->coal_bufs = 20;
+ bp->coal_ticks_irq = BNXT_USEC_TO_COAL_TIMER(1);
+ bp->coal_bufs_irq = 2;
+
+ init_timer(&bp->timer);
+ bp->timer.data = (unsigned long)bp;
+ bp->timer.function = bnxt_timer;
+ bp->current_interval = BNXT_TIMER_INTERVAL;
+
+ bp->state = BNXT_STATE_CLOSED;
+
+ return 0;
+
+init_err_release:
+ if (bp->bar2) {
+ pci_iounmap(pdev, bp->bar2);
+ bp->bar2 = NULL;
+ }
+
+ if (bp->bar1) {
+ pci_iounmap(pdev, bp->bar1);
+ bp->bar1 = NULL;
+ }
+
+ if (bp->bar0) {
+ pci_iounmap(pdev, bp->bar0);
+ bp->bar0 = NULL;
+ }
+
+ pci_release_regions(pdev);
+
+init_err_disable:
+ pci_disable_device(pdev);
+
+init_err:
+ return rc;
+}
+
+/* rtnl_lock held */
+static int bnxt_change_mac_addr(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ return 0;
+}
+
+/* rtnl_lock held */
+static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (new_mtu < 60 || new_mtu > 9000)
+ return -EINVAL;
+
+ if (netif_running(dev))
+ bnxt_close_nic(bp, false, false);
+
+ dev->mtu = new_mtu;
+ bnxt_set_ring_params(bp);
+
+ if (netif_running(dev))
+ return bnxt_open_nic(bp, false, false);
+
+ return 0;
+}
+
+static int bnxt_setup_tc(struct net_device *dev, u8 tc)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (tc > bp->max_tc) {
+ netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n",
+ tc, bp->max_tc);
+ return -EINVAL;
+ }
+
+ if (netdev_get_num_tc(dev) == tc)
+ return 0;
+
+ if (tc) {
+ int max_rx_rings, max_tx_rings;
+
+ bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
+ if (bp->tx_nr_rings_per_tc * tc > max_tx_rings)
+ return -ENOMEM;
+ }
+
+ /* Need to close the device and redo hw resource allocations */
+ if (netif_running(bp->dev))
+ bnxt_close_nic(bp, true, false);
+
+ if (tc) {
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
+ netdev_set_num_tc(dev, tc);
+ } else {
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+ netdev_reset_tc(dev);
+ }
+ bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
+ bp->num_stat_ctxs = bp->cp_nr_rings;
+
+ if (netif_running(bp->dev))
+ return bnxt_open_nic(bp, true, false);
+
+ return 0;
+}
+
+#ifdef CONFIG_RFS_ACCEL
+static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
+ struct bnxt_ntuple_filter *f2)
+{
+ struct flow_keys *keys1 = &f1->fkeys;
+ struct flow_keys *keys2 = &f2->fkeys;
+
+ if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
+ keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
+ keys1->ports.ports == keys2->ports.ports &&
+ keys1->basic.ip_proto == keys2->basic.ip_proto &&
+ keys1->basic.n_proto == keys2->basic.n_proto &&
+ ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr))
+ return true;
+
+ return false;
+}
+
+static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ntuple_filter *fltr, *new_fltr;
+ struct flow_keys *fkeys;
+ struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
+ int rc = 0, idx;
+ struct hlist_head *head;
+
+ if (skb->encapsulation)
+ return -EPROTONOSUPPORT;
+
+ new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
+ if (!new_fltr)
+ return -ENOMEM;
+
+ fkeys = &new_fltr->fkeys;
+ if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
+ rc = -EPROTONOSUPPORT;
+ goto err_free;
+ }
+
+ if ((fkeys->basic.n_proto != htons(ETH_P_IP)) ||
+ ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
+ (fkeys->basic.ip_proto != IPPROTO_UDP))) {
+ rc = -EPROTONOSUPPORT;
+ goto err_free;
+ }
+
+ memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
+
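+ /* check whether an identical filter is already installed */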
+ idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
+ head = &bp->ntp_fltr_hash_tbl[idx];
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(fltr, head, hash) {
+ if (bnxt_fltr_match(fltr, new_fltr)) {
+ rcu_read_unlock();
+ rc = 0;
+ goto err_free;
+ }
+ }
+ rcu_read_unlock();
+
+ spin_lock_bh(&bp->ntp_fltr_lock);
+ rc = bitmap_find_free_region(bp->ntp_fltr_bmap,
+ BNXT_NTP_FLTR_MAX_FLTR, 0);
+ if (rc < 0) {
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ rc = -ENOMEM;
+ goto err_free;
+ }
+ new_fltr->sw_id = (u16)rc;
+
+ new_fltr->flow_id = flow_id;
+ new_fltr->rxq = rxq_index;
+ hlist_add_head_rcu(&new_fltr->hash, head);
+ bp->ntp_fltr_count++;
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+
+ set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+
+ return new_fltr->sw_id;
+
+err_free:
+ kfree(new_fltr);
+ return rc;
+}
+
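+/* Install newly added ntuple filters in hardware and free the ones that
+ * the RPS stack reports as expired.
+ */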
+static void bnxt_cfg_ntp_filters(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+ struct hlist_head *head;
+ struct hlist_node *tmp;
+ struct bnxt_ntuple_filter *fltr;
+ int rc;
+
+ head = &bp->ntp_fltr_hash_tbl[i];
+ hlist_for_each_entry_safe(fltr, tmp, head, hash) {
+ bool del = false;
+
+ if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
+ if (rps_may_expire_flow(bp->dev, fltr->rxq,
+ fltr->flow_id,
+ fltr->sw_id)) {
+ bnxt_hwrm_cfa_ntuple_filter_free(bp,
+ fltr);
+ del = true;
+ }
+ } else {
+ rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
+ fltr);
+ if (rc)
+ del = true;
+ else
+ set_bit(BNXT_FLTR_VALID, &fltr->state);
+ }
+
+ if (del) {
+ spin_lock_bh(&bp->ntp_fltr_lock);
+ hlist_del_rcu(&fltr->hash);
+ bp->ntp_fltr_count--;
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ synchronize_rcu();
+ clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
+ kfree(fltr);
+ }
+ }
+ }
+}
+
+#else
+
+static void bnxt_cfg_ntp_filters(struct bnxt *bp)
+{
+}
+
+#endif /* CONFIG_RFS_ACCEL */
+
+static void bnxt_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
+ __be16 port)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return;
+
+ if (sa_family != AF_INET6 && sa_family != AF_INET)
+ return;
+
+ if (bp->vxlan_port_cnt && bp->vxlan_port != port)
+ return;
+
+ bp->vxlan_port_cnt++;
+ if (bp->vxlan_port_cnt == 1) {
+ bp->vxlan_port = port;
+ set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+ }
+}
+
+static void bnxt_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
+ __be16 port)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return;
+
+ if (sa_family != AF_INET6 && sa_family != AF_INET)
+ return;
+
+ if (bp->vxlan_port_cnt && bp->vxlan_port == port) {
+ bp->vxlan_port_cnt--;
+
+ if (bp->vxlan_port_cnt == 0) {
+ set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+ }
+ }
+}
+
+static const struct net_device_ops bnxt_netdev_ops = {
+ .ndo_open = bnxt_open,
+ .ndo_start_xmit = bnxt_start_xmit,
+ .ndo_stop = bnxt_close,
+ .ndo_get_stats64 = bnxt_get_stats64,
+ .ndo_set_rx_mode = bnxt_set_rx_mode,
+ .ndo_do_ioctl = bnxt_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = bnxt_change_mac_addr,
+ .ndo_change_mtu = bnxt_change_mtu,
+ .ndo_fix_features = bnxt_fix_features,
+ .ndo_set_features = bnxt_set_features,
+ .ndo_tx_timeout = bnxt_tx_timeout,
+#ifdef CONFIG_BNXT_SRIOV
+ .ndo_get_vf_config = bnxt_get_vf_config,
+ .ndo_set_vf_mac = bnxt_set_vf_mac,
+ .ndo_set_vf_vlan = bnxt_set_vf_vlan,
+ .ndo_set_vf_rate = bnxt_set_vf_bw,
+ .ndo_set_vf_link_state = bnxt_set_vf_link_state,
+ .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = bnxt_poll_controller,
+#endif
+ .ndo_setup_tc = bnxt_setup_tc,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = bnxt_rx_flow_steer,
+#endif
+ .ndo_add_vxlan_port = bnxt_add_vxlan_port,
+ .ndo_del_vxlan_port = bnxt_del_vxlan_port,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ .ndo_busy_poll = bnxt_busy_poll,
+#endif
+};
+
+static void bnxt_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (BNXT_PF(bp))
+ bnxt_sriov_disable(bp);
+
+ unregister_netdev(dev);
+ cancel_work_sync(&bp->sp_task);
+ bp->sp_event = 0;
+
+ bnxt_free_hwrm_resources(bp);
+ pci_iounmap(pdev, bp->bar2);
+ pci_iounmap(pdev, bp->bar1);
+ pci_iounmap(pdev, bp->bar0);
+ free_netdev(dev);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static int bnxt_probe_phy(struct bnxt *bp)
+{
+ int rc = 0;
+ struct bnxt_link_info *link_info = &bp->link_info;
+ char phy_ver[PHY_VER_STR_LEN];
+
+ rc = bnxt_update_link(bp, false);
+ if (rc) {
+ netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
+ rc);
+ return rc;
+ }
+
+ /* initialize the ethtool setting copy with NVM settings */
+ if (BNXT_AUTO_MODE(link_info->auto_mode))
+ link_info->autoneg |= BNXT_AUTONEG_SPEED;
+
+ if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+ if (link_info->auto_pause_setting == BNXT_LINK_PAUSE_BOTH)
+ link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+ link_info->req_flow_ctrl = link_info->auto_pause_setting;
+ } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+ link_info->req_flow_ctrl = link_info->force_pause_setting;
+ }
+ link_info->req_duplex = link_info->duplex_setting;
+ if (link_info->autoneg & BNXT_AUTONEG_SPEED)
+ link_info->req_link_speed = link_info->auto_link_speed;
+ else
+ link_info->req_link_speed = link_info->force_link_speed;
+ link_info->advertising = link_info->auto_link_speeds;
+ snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d",
+ link_info->phy_ver[0],
+ link_info->phy_ver[1],
+ link_info->phy_ver[2]);
+ strcat(bp->fw_ver_str, phy_ver);
+ return rc;
+}
+
+static int bnxt_get_max_irq(struct pci_dev *pdev)
+{
+ u16 ctrl;
+
+ if (!pdev->msix_cap)
+ return 1;
+
+ pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+ return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
+}
+
+void bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx)
+{
+ int max_rings = 0;
+
+ if (BNXT_PF(bp)) {
+ *max_tx = bp->pf.max_pf_tx_rings;
+ *max_rx = bp->pf.max_pf_rx_rings;
+ max_rings = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
+ max_rings = min_t(int, max_rings, bp->pf.max_stat_ctxs);
+ } else {
+#ifdef CONFIG_BNXT_SRIOV
+ *max_tx = bp->vf.max_tx_rings;
+ *max_rx = bp->vf.max_rx_rings;
+ max_rings = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
+ max_rings = min_t(int, max_rings, bp->vf.max_stat_ctxs);
+#endif
+ }
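+ /* with aggregation rings, each RX queue uses two hardware rings */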
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ *max_rx >>= 1;
+
+ *max_rx = min_t(int, *max_rx, max_rings);
+ *max_tx = min_t(int, *max_tx, max_rings);
+}
+
+static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int version_printed;
+ struct net_device *dev;
+ struct bnxt *bp;
+ int rc, max_rx_rings, max_tx_rings, max_irqs, dflt_rings;
+
+ if (version_printed++ == 0)
+ pr_info("%s", version);
+
+ max_irqs = bnxt_get_max_irq(pdev);
+ dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
+ if (!dev)
+ return -ENOMEM;
+
+ bp = netdev_priv(dev);
+
+ if (bnxt_vf_pciid(ent->driver_data))
+ bp->flags |= BNXT_FLAG_VF;
+
+ if (pdev->msix_cap) {
+ bp->flags |= BNXT_FLAG_MSIX_CAP;
+ if (BNXT_PF(bp))
+ bp->flags |= BNXT_FLAG_RFS;
+ }
+
+ rc = bnxt_init_board(pdev, dev);
+ if (rc < 0)
+ goto init_err_free;
+
+ dev->netdev_ops = &bnxt_netdev_ops;
+ dev->watchdog_timeo = BNXT_TX_TIMEOUT;
+ dev->ethtool_ops = &bnxt_ethtool_ops;
+
+ pci_set_drvdata(pdev, dev);
+
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
+ NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO;
+
+ if (bp->flags & BNXT_FLAG_RFS)
+ dev->hw_features |= NETIF_F_NTUPLE;
+
+ dev->hw_enc_features =
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
+ NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
+ dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
+ dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
+#ifdef CONFIG_BNXT_SRIOV
+ init_waitqueue_head(&bp->sriov_cfg_wait);
+#endif
+ rc = bnxt_alloc_hwrm_resources(bp);
+ if (rc)
+ goto init_err;
+
+ mutex_init(&bp->hwrm_cmd_lock);
+ bnxt_hwrm_ver_get(bp);
+
+ rc = bnxt_hwrm_func_drv_rgtr(bp);
+ if (rc)
+ goto init_err;
+
+ /* Get the MAX capabilities for this function */
+ rc = bnxt_hwrm_func_qcaps(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
+ rc);
+ rc = -1;
+ goto init_err;
+ }
+
+ rc = bnxt_hwrm_queue_qportcfg(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
+ rc);
+ rc = -1;
+ goto init_err;
+ }
+
+ bnxt_set_tpa_flags(bp);
+ bnxt_set_ring_params(bp);
+ dflt_rings = netif_get_num_default_rss_queues();
+ if (BNXT_PF(bp)) {
+ memcpy(dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
+ bp->pf.max_irqs = max_irqs;
+ } else {
+#if defined(CONFIG_BNXT_SRIOV)
+ memcpy(dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
+ bp->vf.max_irqs = max_irqs;
+#endif
+ }
+ bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
+ bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
+ bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+ bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings);
+ bp->num_stat_ctxs = bp->cp_nr_rings;
+
+ if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
+ bp->flags |= BNXT_FLAG_STRIP_VLAN;
+
+ rc = bnxt_probe_phy(bp);
+ if (rc)
+ goto init_err;
+
+ rc = register_netdev(dev);
+ if (rc)
+ goto init_err;
+
+ netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
+ board_info[ent->driver_data].name,
+ (long)pci_resource_start(pdev, 0), dev->dev_addr);
+
+ return 0;
+
+init_err:
+ pci_iounmap(pdev, bp->bar0);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+init_err_free:
+ free_netdev(dev);
+ return rc;
+}
+
+static struct pci_driver bnxt_pci_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = bnxt_pci_tbl,
+ .probe = bnxt_init_one,
+ .remove = bnxt_remove_one,
+#if defined(CONFIG_BNXT_SRIOV)
+ .sriov_configure = bnxt_sriov_configure,
+#endif
+};
+
+module_pci_driver(bnxt_pci_driver);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
new file mode 100644
index 000000000000..4f2267ca482d
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -0,0 +1,1086 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_H
+#define BNXT_H
+
+#define DRV_MODULE_NAME "bnxt_en"
+#define DRV_MODULE_VERSION "0.1.24"
+
+#define DRV_VER_MAJ 0
+#define DRV_VER_MIN 1
+#define DRV_VER_UPD 24
+
+struct tx_bd {
+ __le32 tx_bd_len_flags_type;
+ #define TX_BD_TYPE (0x3f << 0)
+ #define TX_BD_TYPE_SHORT_TX_BD (0x00 << 0)
+ #define TX_BD_TYPE_LONG_TX_BD (0x10 << 0)
+ #define TX_BD_FLAGS_PACKET_END (1 << 6)
+ #define TX_BD_FLAGS_NO_CMPL (1 << 7)
+ #define TX_BD_FLAGS_BD_CNT (0x1f << 8)
+ #define TX_BD_FLAGS_BD_CNT_SHIFT 8
+ #define TX_BD_FLAGS_LHINT (3 << 13)
+ #define TX_BD_FLAGS_LHINT_SHIFT 13
+ #define TX_BD_FLAGS_LHINT_512_AND_SMALLER (0 << 13)
+ #define TX_BD_FLAGS_LHINT_512_TO_1023 (1 << 13)
+ #define TX_BD_FLAGS_LHINT_1024_TO_2047 (2 << 13)
+ #define TX_BD_FLAGS_LHINT_2048_AND_LARGER (3 << 13)
+ #define TX_BD_FLAGS_COAL_NOW (1 << 15)
+ #define TX_BD_LEN (0xffff << 16)
+ #define TX_BD_LEN_SHIFT 16
+
+ u32 tx_bd_opaque;
+ __le64 tx_bd_haddr;
+} __packed;
+
+struct tx_bd_ext {
+ __le32 tx_bd_hsize_lflags;
+ #define TX_BD_FLAGS_TCP_UDP_CHKSUM (1 << 0)
+ #define TX_BD_FLAGS_IP_CKSUM (1 << 1)
+ #define TX_BD_FLAGS_NO_CRC (1 << 2)
+ #define TX_BD_FLAGS_STAMP (1 << 3)
+ #define TX_BD_FLAGS_T_IP_CHKSUM (1 << 4)
+ #define TX_BD_FLAGS_LSO (1 << 5)
+ #define TX_BD_FLAGS_IPID_FMT (1 << 6)
+ #define TX_BD_FLAGS_T_IPID (1 << 7)
+ #define TX_BD_HSIZE (0xff << 16)
+ #define TX_BD_HSIZE_SHIFT 16
+
+ __le32 tx_bd_mss;
+ __le32 tx_bd_cfa_action;
+ #define TX_BD_CFA_ACTION (0xffff << 16)
+ #define TX_BD_CFA_ACTION_SHIFT 16
+
+ __le32 tx_bd_cfa_meta;
+ #define TX_BD_CFA_META_MASK 0xfffffff
+ #define TX_BD_CFA_META_VID_MASK 0xfff
+ #define TX_BD_CFA_META_PRI_MASK (0xf << 12)
+ #define TX_BD_CFA_META_PRI_SHIFT 12
+ #define TX_BD_CFA_META_TPID_MASK (3 << 16)
+ #define TX_BD_CFA_META_TPID_SHIFT 16
+ #define TX_BD_CFA_META_KEY (0xf << 28)
+ #define TX_BD_CFA_META_KEY_SHIFT 28
+ #define TX_BD_CFA_META_KEY_VLAN (1 << 28)
+};
+
+struct rx_bd {
+ __le32 rx_bd_len_flags_type;
+ #define RX_BD_TYPE (0x3f << 0)
+ #define RX_BD_TYPE_RX_PACKET_BD 0x4
+ #define RX_BD_TYPE_RX_BUFFER_BD 0x5
+ #define RX_BD_TYPE_RX_AGG_BD 0x6
+ #define RX_BD_TYPE_16B_BD_SIZE (0 << 4)
+ #define RX_BD_TYPE_32B_BD_SIZE (1 << 4)
+ #define RX_BD_TYPE_48B_BD_SIZE (2 << 4)
+ #define RX_BD_TYPE_64B_BD_SIZE (3 << 4)
+ #define RX_BD_FLAGS_SOP (1 << 6)
+ #define RX_BD_FLAGS_EOP (1 << 7)
+ #define RX_BD_FLAGS_BUFFERS (3 << 8)
+ #define RX_BD_FLAGS_1_BUFFER_PACKET (0 << 8)
+ #define RX_BD_FLAGS_2_BUFFER_PACKET (1 << 8)
+ #define RX_BD_FLAGS_3_BUFFER_PACKET (2 << 8)
+ #define RX_BD_FLAGS_4_BUFFER_PACKET (3 << 8)
+ #define RX_BD_LEN (0xffff << 16)
+ #define RX_BD_LEN_SHIFT 16
+
+ u32 rx_bd_opaque;
+ __le64 rx_bd_haddr;
+};
+
+struct tx_cmp {
+ __le32 tx_cmp_flags_type;
+ #define CMP_TYPE (0x3f << 0)
+ #define CMP_TYPE_TX_L2_CMP 0
+ #define CMP_TYPE_RX_L2_CMP 17
+ #define CMP_TYPE_RX_AGG_CMP 18
+ #define CMP_TYPE_RX_L2_TPA_START_CMP 19
+ #define CMP_TYPE_RX_L2_TPA_END_CMP 21
+ #define CMP_TYPE_STATUS_CMP 32
+ #define CMP_TYPE_REMOTE_DRIVER_REQ 34
+ #define CMP_TYPE_REMOTE_DRIVER_RESP 36
+ #define CMP_TYPE_ERROR_STATUS 48
+ #define CMPL_BASE_TYPE_STAT_EJECT (0x1aUL << 0)
+ #define CMPL_BASE_TYPE_HWRM_DONE (0x20UL << 0)
+ #define CMPL_BASE_TYPE_HWRM_FWD_REQ (0x22UL << 0)
+ #define CMPL_BASE_TYPE_HWRM_FWD_RESP (0x24UL << 0)
+ #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+
+ #define TX_CMP_FLAGS_ERROR (1 << 6)
+ #define TX_CMP_FLAGS_PUSH (1 << 7)
+
+ u32 tx_cmp_opaque;
+ __le32 tx_cmp_errors_v;
+ #define TX_CMP_V (1 << 0)
+ #define TX_CMP_ERRORS_BUFFER_ERROR (7 << 1)
+ #define TX_CMP_ERRORS_BUFFER_ERROR_NO_ERROR 0
+ #define TX_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT 2
+ #define TX_CMP_ERRORS_BUFFER_ERROR_INVALID_STAG 4
+ #define TX_CMP_ERRORS_BUFFER_ERROR_STAG_BOUNDS 5
+ #define TX_CMP_ERRORS_ZERO_LENGTH_PKT (1 << 4)
+ #define TX_CMP_ERRORS_EXCESSIVE_BD_LEN (1 << 5)
+ #define TX_CMP_ERRORS_DMA_ERROR (1 << 6)
+ #define TX_CMP_ERRORS_HINT_TOO_SHORT (1 << 7)
+
+ __le32 tx_cmp_unsed_3;
+};
+
+struct rx_cmp {
+ __le32 rx_cmp_len_flags_type;
+ #define RX_CMP_CMP_TYPE (0x3f << 0)
+ #define RX_CMP_FLAGS_ERROR (1 << 6)
+ #define RX_CMP_FLAGS_PLACEMENT (7 << 7)
+ #define RX_CMP_FLAGS_RSS_VALID (1 << 10)
+ #define RX_CMP_FLAGS_UNUSED (1 << 11)
+ #define RX_CMP_FLAGS_ITYPES_SHIFT 12
+ #define RX_CMP_FLAGS_ITYPE_UNKNOWN (0 << 12)
+ #define RX_CMP_FLAGS_ITYPE_IP (1 << 12)
+ #define RX_CMP_FLAGS_ITYPE_TCP (2 << 12)
+ #define RX_CMP_FLAGS_ITYPE_UDP (3 << 12)
+ #define RX_CMP_FLAGS_ITYPE_FCOE (4 << 12)
+ #define RX_CMP_FLAGS_ITYPE_ROCE (5 << 12)
+ #define RX_CMP_FLAGS_ITYPE_PTP_WO_TS (8 << 12)
+ #define RX_CMP_FLAGS_ITYPE_PTP_W_TS (9 << 12)
+ #define RX_CMP_LEN (0xffff << 16)
+ #define RX_CMP_LEN_SHIFT 16
+
+ u32 rx_cmp_opaque;
+ __le32 rx_cmp_misc_v1;
+ #define RX_CMP_V1 (1 << 0)
+ #define RX_CMP_AGG_BUFS (0x1f << 1)
+ #define RX_CMP_AGG_BUFS_SHIFT 1
+ #define RX_CMP_RSS_HASH_TYPE (0x7f << 9)
+ #define RX_CMP_RSS_HASH_TYPE_SHIFT 9
+ #define RX_CMP_PAYLOAD_OFFSET (0xff << 16)
+ #define RX_CMP_PAYLOAD_OFFSET_SHIFT 16
+
+ __le32 rx_cmp_rss_hash;
+};
+
+#define RX_CMP_HASH_VALID(rxcmp) \
+ ((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID))
+
+#define RX_CMP_HASH_TYPE(rxcmp) \
+ ((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\
+ RX_CMP_RSS_HASH_TYPE_SHIFT)
+
+struct rx_cmp_ext {
+ __le32 rx_cmp_flags2;
+ #define RX_CMP_FLAGS2_IP_CS_CALC 0x1
+ #define RX_CMP_FLAGS2_L4_CS_CALC (0x1 << 1)
+ #define RX_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2)
+ #define RX_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3)
+ #define RX_CMP_FLAGS2_META_FORMAT_VLAN (0x1 << 4)
+ __le32 rx_cmp_meta_data;
+ #define RX_CMP_FLAGS2_METADATA_VID_MASK 0xfff
+ #define RX_CMP_FLAGS2_METADATA_TPID_MASK 0xffff0000
+ #define RX_CMP_FLAGS2_METADATA_TPID_SFT 16
+ __le32 rx_cmp_cfa_code_errors_v2;
+ #define RX_CMP_V (1 << 0)
+ #define RX_CMPL_ERRORS_MASK (0x7fff << 1)
+ #define RX_CMPL_ERRORS_SFT 1
+ #define RX_CMPL_ERRORS_BUFFER_ERROR_MASK (0x7 << 1)
+ #define RX_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1)
+ #define RX_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1 << 1)
+ #define RX_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (0x2 << 1)
+ #define RX_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1)
+ #define RX_CMPL_ERRORS_IP_CS_ERROR (0x1 << 4)
+ #define RX_CMPL_ERRORS_L4_CS_ERROR (0x1 << 5)
+ #define RX_CMPL_ERRORS_T_IP_CS_ERROR (0x1 << 6)
+ #define RX_CMPL_ERRORS_T_L4_CS_ERROR (0x1 << 7)
+ #define RX_CMPL_ERRORS_CRC_ERROR (0x1 << 8)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_MASK (0x7 << 9)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR (0x0 << 9)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION (0x1 << 9)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN (0x2 << 9)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR (0x3 << 9)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR (0x4 << 9)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR (0x5 << 9)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL (0x6 << 9)
+ #define RX_CMPL_ERRORS_PKT_ERROR_MASK (0xf << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_NO_ERROR (0x0 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION (0x1 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN (0x2 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL (0x3 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR (0x4 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR (0x5 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN (0x6 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (0x7 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN (0x8 << 12)
+
+ #define RX_CMPL_CFA_CODE_MASK (0xffff << 16)
+ #define RX_CMPL_CFA_CODE_SFT 16
+
+ __le32 rx_cmp_unused3;
+};
+
+#define RX_CMP_L2_ERRORS \
+ cpu_to_le32(RX_CMPL_ERRORS_BUFFER_ERROR_MASK | RX_CMPL_ERRORS_CRC_ERROR)
+
+#define RX_CMP_L4_CS_BITS \
+ (cpu_to_le32(RX_CMP_FLAGS2_L4_CS_CALC | RX_CMP_FLAGS2_T_L4_CS_CALC))
+
+#define RX_CMP_L4_CS_ERR_BITS \
+ (cpu_to_le32(RX_CMPL_ERRORS_L4_CS_ERROR | RX_CMPL_ERRORS_T_L4_CS_ERROR))
+
+#define RX_CMP_L4_CS_OK(rxcmp1) \
+ (((rxcmp1)->rx_cmp_flags2 & RX_CMP_L4_CS_BITS) && \
+ !((rxcmp1)->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS))
+
+#define RX_CMP_ENCAP(rxcmp1) \
+ ((le32_to_cpu((rxcmp1)->rx_cmp_flags2) & \
+ RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3)
+
+struct rx_agg_cmp {
+ __le32 rx_agg_cmp_len_flags_type;
+ #define RX_AGG_CMP_TYPE (0x3f << 0)
+ #define RX_AGG_CMP_LEN (0xffff << 16)
+ #define RX_AGG_CMP_LEN_SHIFT 16
+ u32 rx_agg_cmp_opaque;
+ __le32 rx_agg_cmp_v;
+ #define RX_AGG_CMP_V (1 << 0)
+ __le32 rx_agg_cmp_unused;
+};
+
+struct rx_tpa_start_cmp {
+ __le32 rx_tpa_start_cmp_len_flags_type;
+ #define RX_TPA_START_CMP_TYPE (0x3f << 0)
+ #define RX_TPA_START_CMP_FLAGS (0x3ff << 6)
+ #define RX_TPA_START_CMP_FLAGS_SHIFT 6
+ #define RX_TPA_START_CMP_FLAGS_PLACEMENT (0x7 << 7)
+ #define RX_TPA_START_CMP_FLAGS_PLACEMENT_SHIFT 7
+ #define RX_TPA_START_CMP_FLAGS_PLACEMENT_JUMBO (0x1 << 7)
+ #define RX_TPA_START_CMP_FLAGS_PLACEMENT_HDS (0x2 << 7)
+ #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_JUMBO (0x5 << 7)
+ #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_HDS (0x6 << 7)
+ #define RX_TPA_START_CMP_FLAGS_RSS_VALID (0x1 << 10)
+ #define RX_TPA_START_CMP_FLAGS_ITYPES (0xf << 12)
+ #define RX_TPA_START_CMP_FLAGS_ITYPES_SHIFT 12
+ #define RX_TPA_START_CMP_FLAGS_ITYPE_TCP (0x2 << 12)
+ #define RX_TPA_START_CMP_LEN (0xffff << 16)
+ #define RX_TPA_START_CMP_LEN_SHIFT 16
+
+ u32 rx_tpa_start_cmp_opaque;
+ __le32 rx_tpa_start_cmp_misc_v1;
+ #define RX_TPA_START_CMP_V1 (0x1 << 0)
+ #define RX_TPA_START_CMP_RSS_HASH_TYPE (0x7f << 9)
+ #define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT 9
+ #define RX_TPA_START_CMP_AGG_ID (0x7f << 25)
+ #define RX_TPA_START_CMP_AGG_ID_SHIFT 25
+
+ __le32 rx_tpa_start_cmp_rss_hash;
+};
+
+#define TPA_START_HASH_VALID(rx_tpa_start) \
+ ((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type & \
+ cpu_to_le32(RX_TPA_START_CMP_FLAGS_RSS_VALID))
+
+#define TPA_START_HASH_TYPE(rx_tpa_start) \
+ ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
+ RX_TPA_START_CMP_RSS_HASH_TYPE) >> \
+ RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT)
+
+#define TPA_START_AGG_ID(rx_tpa_start) \
+ ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
+ RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT)
+
+struct rx_tpa_start_cmp_ext {
+ __le32 rx_tpa_start_cmp_flags2;
+ #define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC (0x1 << 0)
+ #define RX_TPA_START_CMP_FLAGS2_L4_CS_CALC (0x1 << 1)
+ #define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2)
+ #define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3)
+
+ __le32 rx_tpa_start_cmp_metadata;
+ __le32 rx_tpa_start_cmp_cfa_code_v2;
+ #define RX_TPA_START_CMP_V2 (0x1 << 0)
+ #define RX_TPA_START_CMP_CFA_CODE (0xffff << 16)
+ #define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16
+ __le32 rx_tpa_start_cmp_unused5;
+};
+
+struct rx_tpa_end_cmp {
+ __le32 rx_tpa_end_cmp_len_flags_type;
+ #define RX_TPA_END_CMP_TYPE (0x3f << 0)
+ #define RX_TPA_END_CMP_FLAGS (0x3ff << 6)
+ #define RX_TPA_END_CMP_FLAGS_SHIFT 6
+ #define RX_TPA_END_CMP_FLAGS_PLACEMENT (0x7 << 7)
+ #define RX_TPA_END_CMP_FLAGS_PLACEMENT_SHIFT 7
+ #define RX_TPA_END_CMP_FLAGS_PLACEMENT_JUMBO (0x1 << 7)
+ #define RX_TPA_END_CMP_FLAGS_PLACEMENT_HDS (0x2 << 7)
+ #define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO (0x5 << 7)
+ #define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS (0x6 << 7)
+ #define RX_TPA_END_CMP_FLAGS_RSS_VALID (0x1 << 10)
+ #define RX_TPA_END_CMP_FLAGS_ITYPES (0xf << 12)
+ #define RX_TPA_END_CMP_FLAGS_ITYPES_SHIFT 12
+ #define RX_TPA_END_CMP_FLAGS_ITYPE_TCP (0x2 << 12)
+ #define RX_TPA_END_CMP_LEN (0xffff << 16)
+ #define RX_TPA_END_CMP_LEN_SHIFT 16
+
+ u32 rx_tpa_end_cmp_opaque;
+ __le32 rx_tpa_end_cmp_misc_v1;
+ #define RX_TPA_END_CMP_V1 (0x1 << 0)
+ #define RX_TPA_END_CMP_AGG_BUFS (0x3f << 1)
+ #define RX_TPA_END_CMP_AGG_BUFS_SHIFT 1
+ #define RX_TPA_END_CMP_TPA_SEGS (0xff << 8)
+ #define RX_TPA_END_CMP_TPA_SEGS_SHIFT 8
+ #define RX_TPA_END_CMP_PAYLOAD_OFFSET (0xff << 16)
+ #define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT 16
+ #define RX_TPA_END_CMP_AGG_ID (0x7f << 25)
+ #define RX_TPA_END_CMP_AGG_ID_SHIFT 25
+
+ __le32 rx_tpa_end_cmp_tsdelta;
+ #define RX_TPA_END_GRO_TS (0x1 << 31)
+};
+
+#define TPA_END_AGG_ID(rx_tpa_end) \
+ ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
+ RX_TPA_END_CMP_AGG_ID) >> RX_TPA_END_CMP_AGG_ID_SHIFT)
+
+#define TPA_END_TPA_SEGS(rx_tpa_end) \
+ ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
+ RX_TPA_END_CMP_TPA_SEGS) >> RX_TPA_END_CMP_TPA_SEGS_SHIFT)
+
+#define RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO \
+ cpu_to_le32(RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO & \
+ RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS)
+
+#define TPA_END_GRO(rx_tpa_end) \
+ ((rx_tpa_end)->rx_tpa_end_cmp_len_flags_type & \
+ RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO)
+
+#define TPA_END_GRO_TS(rx_tpa_end) \
+ ((rx_tpa_end)->rx_tpa_end_cmp_tsdelta & cpu_to_le32(RX_TPA_END_GRO_TS))
+
+struct rx_tpa_end_cmp_ext {
+ __le32 rx_tpa_end_cmp_dup_acks;
+ #define RX_TPA_END_CMP_TPA_DUP_ACKS (0xf << 0)
+
+ __le32 rx_tpa_end_cmp_seg_len;
+ #define RX_TPA_END_CMP_TPA_SEG_LEN (0xffff << 0)
+
+ __le32 rx_tpa_end_cmp_errors_v2;
+ #define RX_TPA_END_CMP_V2 (0x1 << 0)
+ #define RX_TPA_END_CMP_ERRORS (0x7fff << 1)
+ #define RX_TPA_END_CMPL_ERRORS_SHIFT 1
+
+ u32 rx_tpa_end_cmp_start_opaque;
+};
+
+#define DB_IDX_MASK 0xffffff
+#define DB_IDX_VALID (0x1 << 26)
+#define DB_IRQ_DIS (0x1 << 27)
+#define DB_KEY_TX (0x0 << 28)
+#define DB_KEY_RX (0x1 << 28)
+#define DB_KEY_CP (0x2 << 28)
+#define DB_KEY_ST (0x3 << 28)
+#define DB_KEY_TX_PUSH (0x4 << 28)
+#define DB_LONG_TX_PUSH (0x2 << 24)
+
+#define INVALID_HW_RING_ID ((u16)-1)
+
+#define BNXT_RSS_HASH_TYPE_FLAG_IPV4 0x01
+#define BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 0x02
+#define BNXT_RSS_HASH_TYPE_FLAG_IPV6 0x04
+#define BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6 0x08
+
+/* The hardware supports certain page sizes. Use the supported page sizes
+ * to allocate the rings.
+ */
+#if (PAGE_SHIFT < 12)
+#define BNXT_PAGE_SHIFT 12
+#elif (PAGE_SHIFT <= 13)
+#define BNXT_PAGE_SHIFT PAGE_SHIFT
+#elif (PAGE_SHIFT < 16)
+#define BNXT_PAGE_SHIFT 13
+#else
+#define BNXT_PAGE_SHIFT 16
+#endif
+
+#define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHIFT)
+
+#define BNXT_MIN_PKT_SIZE 45
+
+#define BNXT_NUM_TESTS(bp) 0
+
+#define BNXT_DEFAULT_RX_RING_SIZE 1023
+#define BNXT_DEFAULT_TX_RING_SIZE 512
+
+#define MAX_TPA 64
+
+#define MAX_RX_PAGES 8
+#define MAX_RX_AGG_PAGES 32
+#define MAX_TX_PAGES 8
+#define MAX_CP_PAGES 64
+
+#define RX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct rx_bd))
+#define TX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_bd))
+#define CP_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_cmp))
+
+#define SW_RXBD_RING_SIZE (sizeof(struct bnxt_sw_rx_bd) * RX_DESC_CNT)
+#define HW_RXBD_RING_SIZE (sizeof(struct rx_bd) * RX_DESC_CNT)
+
+#define SW_RXBD_AGG_RING_SIZE (sizeof(struct bnxt_sw_rx_agg_bd) * RX_DESC_CNT)
+
+#define SW_TXBD_RING_SIZE (sizeof(struct bnxt_sw_tx_bd) * TX_DESC_CNT)
+#define HW_TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT)
+
+#define HW_CMPD_RING_SIZE (sizeof(struct tx_cmp) * CP_DESC_CNT)
+
+#define BNXT_MAX_RX_DESC_CNT (RX_DESC_CNT * MAX_RX_PAGES - 1)
+#define BNXT_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
+#define BNXT_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1)
+
+#define RX_RING(x) (((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define RX_IDX(x) ((x) & (RX_DESC_CNT - 1))
+
+#define TX_RING(x) (((x) & ~(TX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define TX_IDX(x) ((x) & (TX_DESC_CNT - 1))
+
+#define CP_RING(x) (((x) & ~(CP_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define CP_IDX(x) ((x) & (CP_DESC_CNT - 1))
+
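+/* The valid bit in a completion entry toggles on each pass through the
+ * completion ring.  Compare it with the current ring phase (cp_bit of the
+ * raw consumer index) to detect newly written entries.
+ */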
+#define TX_CMP_VALID(txcmp, raw_cons) \
+ (!!((txcmp)->tx_cmp_errors_v & cpu_to_le32(TX_CMP_V)) == \
+ !((raw_cons) & bp->cp_bit))
+
+#define RX_CMP_VALID(rxcmp1, raw_cons) \
+ (!!((rxcmp1)->rx_cmp_cfa_code_errors_v2 & cpu_to_le32(RX_CMP_V)) ==\
+ !((raw_cons) & bp->cp_bit))
+
+#define RX_AGG_CMP_VALID(agg, raw_cons) \
+ (!!((agg)->rx_agg_cmp_v & cpu_to_le32(RX_AGG_CMP_V)) == \
+ !((raw_cons) & bp->cp_bit))
+
+#define TX_CMP_TYPE(txcmp) \
+ (le32_to_cpu((txcmp)->tx_cmp_flags_type) & CMP_TYPE)
+
+#define RX_CMP_TYPE(rxcmp) \
+ (le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_CMP_TYPE)
+
+#define NEXT_RX(idx) (((idx) + 1) & bp->rx_ring_mask)
+
+#define NEXT_RX_AGG(idx) (((idx) + 1) & bp->rx_agg_ring_mask)
+
+#define NEXT_TX(idx) (((idx) + 1) & bp->tx_ring_mask)
+
+#define ADV_RAW_CMP(idx, n) ((idx) + (n))
+#define NEXT_RAW_CMP(idx) ADV_RAW_CMP(idx, 1)
+#define RING_CMP(idx) ((idx) & bp->cp_ring_mask)
+#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1))
+
+#define HWRM_CMD_TIMEOUT 500
+#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
+#define HWRM_RESP_ERR_CODE_MASK 0xffff
+#define HWRM_RESP_LEN_MASK 0xffff0000
+#define HWRM_RESP_LEN_SFT 16
+#define HWRM_RESP_VALID_MASK 0xff000000
+#define BNXT_HWRM_REQ_MAX_SIZE 128
+#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \
+ BNXT_HWRM_REQ_MAX_SIZE)
+
+struct bnxt_sw_tx_bd {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ u8 is_gso;
+ u8 is_push;
+ unsigned short nr_frags;
+};
+
+struct bnxt_sw_rx_bd {
+ u8 *data;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+struct bnxt_sw_rx_agg_bd {
+ struct page *page;
+ dma_addr_t mapping;
+};
+
+struct bnxt_ring_struct {
+ int nr_pages;
+ int page_size;
+ void **pg_arr;
+ dma_addr_t *dma_arr;
+
+ __le64 *pg_tbl;
+ dma_addr_t pg_tbl_map;
+
+ int vmem_size;
+ void **vmem;
+
+ u16 fw_ring_id; /* Ring id filled by Chimp FW */
+ u8 queue_id;
+};
+
+struct tx_push_bd {
+ __le32 doorbell;
+ struct tx_bd txbd1;
+ struct tx_bd_ext txbd2;
+};
+
+struct bnxt_tx_ring_info {
+ u16 tx_prod;
+ u16 tx_cons;
+ void __iomem *tx_doorbell;
+
+ struct tx_bd *tx_desc_ring[MAX_TX_PAGES];
+ struct bnxt_sw_tx_bd *tx_buf_ring;
+
+ dma_addr_t tx_desc_mapping[MAX_TX_PAGES];
+
+ struct tx_push_bd *tx_push;
+ dma_addr_t tx_push_mapping;
+
+#define BNXT_DEV_STATE_CLOSING 0x1
+ u32 dev_state;
+
+ struct bnxt_ring_struct tx_ring_struct;
+};
+
+struct bnxt_tpa_info {
+ u8 *data;
+ dma_addr_t mapping;
+ u16 len;
+ unsigned short gso_type;
+ u32 flags2;
+ u32 metadata;
+ enum pkt_hash_types hash_type;
+ u32 rss_hash;
+};
+
+struct bnxt_rx_ring_info {
+ u16 rx_prod;
+ u16 rx_agg_prod;
+ u16 rx_sw_agg_prod;
+ void __iomem *rx_doorbell;
+ void __iomem *rx_agg_doorbell;
+
+ struct rx_bd *rx_desc_ring[MAX_RX_PAGES];
+ struct bnxt_sw_rx_bd *rx_buf_ring;
+
+ struct rx_bd *rx_agg_desc_ring[MAX_RX_AGG_PAGES];
+ struct bnxt_sw_rx_agg_bd *rx_agg_ring;
+
+ unsigned long *rx_agg_bmap;
+ u16 rx_agg_bmap_size;
+
+ dma_addr_t rx_desc_mapping[MAX_RX_PAGES];
+ dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
+
+ struct bnxt_tpa_info *rx_tpa;
+
+ struct bnxt_ring_struct rx_ring_struct;
+ struct bnxt_ring_struct rx_agg_ring_struct;
+};
+
+struct bnxt_cp_ring_info {
+ u32 cp_raw_cons;
+ void __iomem *cp_doorbell;
+
+ struct tx_cmp *cp_desc_ring[MAX_CP_PAGES];
+
+ dma_addr_t cp_desc_mapping[MAX_CP_PAGES];
+
+ struct ctx_hw_stats *hw_stats;
+ dma_addr_t hw_stats_map;
+ u32 hw_stats_ctx_id;
+ u64 rx_l4_csum_errors;
+
+ struct bnxt_ring_struct cp_ring_struct;
+};
+
+struct bnxt_napi {
+ struct napi_struct napi;
+ struct bnxt *bp;
+
+ int index;
+ struct bnxt_cp_ring_info cp_ring;
+ struct bnxt_rx_ring_info rx_ring;
+ struct bnxt_tx_ring_info tx_ring;
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ atomic_t poll_state;
+#endif
+};
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+enum bnxt_poll_state_t {
+ BNXT_STATE_IDLE = 0,
+ BNXT_STATE_NAPI,
+ BNXT_STATE_POLL,
+ BNXT_STATE_DISABLE,
+};
+#endif
+
+struct bnxt_irq {
+ irq_handler_t handler;
+ unsigned int vector;
+ u8 requested;
+ char name[IFNAMSIZ + 2];
+};
+
+#define HWRM_RING_ALLOC_TX 0x1
+#define HWRM_RING_ALLOC_RX 0x2
+#define HWRM_RING_ALLOC_AGG 0x4
+#define HWRM_RING_ALLOC_CMPL 0x8
+
+#define INVALID_STATS_CTX_ID -1
+
+struct hwrm_cmd_req_hdr {
+#define HWRM_CMPL_RING_MASK 0xffff0000
+#define HWRM_CMPL_RING_SFT 16
+ __le32 cmpl_ring_req_type;
+#define HWRM_SEQ_ID_MASK 0xffff
+#define HWRM_SEQ_ID_INVALID -1
+#define HWRM_RESP_LEN_OFFSET 4
+#define HWRM_TARGET_FID_MASK 0xffff0000
+#define HWRM_TARGET_FID_SFT 16
+ __le32 target_id_seq_id;
+ __le64 resp_addr;
+};
+
+struct bnxt_ring_grp_info {
+ u16 fw_stats_ctx;
+ u16 fw_grp_id;
+ u16 rx_fw_ring_id;
+ u16 agg_fw_ring_id;
+ u16 cp_fw_ring_id;
+};
+
+struct bnxt_vnic_info {
+ u16 fw_vnic_id; /* returned by Chimp during alloc */
+ u16 fw_rss_cos_lb_ctx;
+ u16 fw_l2_ctx_id;
+#define BNXT_MAX_UC_ADDRS 4
+ __le64 fw_l2_filter_id[BNXT_MAX_UC_ADDRS];
+ /* index 0 always dev_addr */
+ u16 uc_filter_count;
+ u8 *uc_list;
+
+ u16 *fw_grp_ids;
+ u16 hash_type;
+ dma_addr_t rss_table_dma_addr;
+ __le16 *rss_table;
+ dma_addr_t rss_hash_key_dma_addr;
+ u64 *rss_hash_key;
+ u32 rx_mask;
+
+ u8 *mc_list;
+ int mc_list_size;
+ int mc_list_count;
+ dma_addr_t mc_list_mapping;
+#define BNXT_MAX_MC_ADDRS 16
+
+ u32 flags;
+#define BNXT_VNIC_RSS_FLAG 1
+#define BNXT_VNIC_RFS_FLAG 2
+#define BNXT_VNIC_MCAST_FLAG 4
+#define BNXT_VNIC_UCAST_FLAG 8
+};
+
+#if defined(CONFIG_BNXT_SRIOV)
+struct bnxt_vf_info {
+ u16 fw_fid;
+ u8 mac_addr[ETH_ALEN];
+ u16 max_rsscos_ctxs;
+ u16 max_cp_rings;
+ u16 max_tx_rings;
+ u16 max_rx_rings;
+ u16 max_l2_ctxs;
+ u16 max_irqs;
+ u16 max_vnics;
+ u16 max_stat_ctxs;
+ u16 vlan;
+ u32 flags;
+#define BNXT_VF_QOS 0x1
+#define BNXT_VF_SPOOFCHK 0x2
+#define BNXT_VF_LINK_FORCED 0x4
+#define BNXT_VF_LINK_UP 0x8
+ u32 func_flags; /* func cfg flags */
+ u32 min_tx_rate;
+ u32 max_tx_rate;
+ void *hwrm_cmd_req_addr;
+ dma_addr_t hwrm_cmd_req_dma_addr;
+};
+#endif
+
+struct bnxt_pf_info {
+#define BNXT_FIRST_PF_FID 1
+#define BNXT_FIRST_VF_FID 128
+ u32 fw_fid;
+ u8 port_id;
+ u8 mac_addr[ETH_ALEN];
+ u16 max_rsscos_ctxs;
+ u16 max_cp_rings;
+ u16 max_tx_rings; /* HW assigned max tx rings for this PF */
+ u16 max_pf_tx_rings; /* runtime max tx rings owned by PF */
+ u16 max_rx_rings; /* HW assigned max rx rings for this PF */
+ u16 max_pf_rx_rings; /* runtime max rx rings owned by PF */
+ u16 max_irqs;
+ u16 max_l2_ctxs;
+ u16 max_vnics;
+ u16 max_stat_ctxs;
+ u32 first_vf_id;
+ u16 active_vfs;
+ u16 max_vfs;
+ u32 max_encap_records;
+ u32 max_decap_records;
+ u32 max_tx_em_flows;
+ u32 max_tx_wm_flows;
+ u32 max_rx_em_flows;
+ u32 max_rx_wm_flows;
+ unsigned long *vf_event_bmap;
+ u16 hwrm_cmd_req_pages;
+ void *hwrm_cmd_req_addr[4];
+ dma_addr_t hwrm_cmd_req_dma_addr[4];
+ struct bnxt_vf_info *vf;
+};
+
+struct bnxt_ntuple_filter {
+ struct hlist_node hash;
+ u8 src_mac_addr[ETH_ALEN];
+ struct flow_keys fkeys;
+ __le64 filter_id;
+ u16 sw_id;
+ u16 rxq;
+ u32 flow_id;
+ unsigned long state;
+#define BNXT_FLTR_VALID 0
+#define BNXT_FLTR_UPDATE 1
+};
+
+#define BNXT_ALL_COPPER_ETHTOOL_SPEED \
+ (ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full | \
+ ADVERTISED_10000baseT_Full)
+
+struct bnxt_link_info {
+ u8 media_type;
+ u8 transceiver;
+ u8 phy_addr;
+ u8 phy_link_status;
+#define BNXT_LINK_NO_LINK PORT_PHY_QCFG_RESP_LINK_NO_LINK
+#define BNXT_LINK_SIGNAL PORT_PHY_QCFG_RESP_LINK_SIGNAL
+#define BNXT_LINK_LINK PORT_PHY_QCFG_RESP_LINK_LINK
+ u8 wire_speed;
+ u8 loop_back;
+ u8 link_up;
+ u8 duplex;
+#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_HALF
+#define BNXT_LINK_DUPLEX_FULL PORT_PHY_QCFG_RESP_DUPLEX_FULL
+ u8 pause;
+#define BNXT_LINK_PAUSE_TX PORT_PHY_QCFG_RESP_PAUSE_TX
+#define BNXT_LINK_PAUSE_RX PORT_PHY_QCFG_RESP_PAUSE_RX
+#define BNXT_LINK_PAUSE_BOTH (PORT_PHY_QCFG_RESP_PAUSE_RX | \
+ PORT_PHY_QCFG_RESP_PAUSE_TX)
+ u8 auto_pause_setting;
+ u8 force_pause_setting;
+ u8 duplex_setting;
+ u8 auto_mode;
+#define BNXT_AUTO_MODE(mode) ((mode) > BNXT_LINK_AUTO_NONE && \
+ (mode) <= BNXT_LINK_AUTO_MSK)
+#define BNXT_LINK_AUTO_NONE PORT_PHY_QCFG_RESP_AUTO_MODE_NONE
+#define BNXT_LINK_AUTO_ALLSPDS PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS
+#define BNXT_LINK_AUTO_ONESPD PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED
+#define BNXT_LINK_AUTO_ONEORBELOW PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW
+#define BNXT_LINK_AUTO_MSK PORT_PHY_QCFG_RESP_AUTO_MODE_MASK
+#define PHY_VER_LEN 3
+ u8 phy_ver[PHY_VER_LEN];
+ u16 link_speed;
+#define BNXT_LINK_SPEED_100MB PORT_PHY_QCFG_RESP_LINK_SPEED_100MB
+#define BNXT_LINK_SPEED_1GB PORT_PHY_QCFG_RESP_LINK_SPEED_1GB
+#define BNXT_LINK_SPEED_2GB PORT_PHY_QCFG_RESP_LINK_SPEED_2GB
+#define BNXT_LINK_SPEED_2_5GB PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB
+#define BNXT_LINK_SPEED_10GB PORT_PHY_QCFG_RESP_LINK_SPEED_10GB
+#define BNXT_LINK_SPEED_20GB PORT_PHY_QCFG_RESP_LINK_SPEED_20GB
+#define BNXT_LINK_SPEED_25GB PORT_PHY_QCFG_RESP_LINK_SPEED_25GB
+#define BNXT_LINK_SPEED_40GB PORT_PHY_QCFG_RESP_LINK_SPEED_40GB
+#define BNXT_LINK_SPEED_50GB PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
+ u16 support_speeds;
+ u16 auto_link_speeds;
+#define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB
+#define BNXT_LINK_SPEED_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB
+#define BNXT_LINK_SPEED_MSK_2GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB
+#define BNXT_LINK_SPEED_MSK_10GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB
+#define BNXT_LINK_SPEED_MSK_2_5GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB
+#define BNXT_LINK_SPEED_MSK_20GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB
+#define BNXT_LINK_SPEED_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB
+#define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB
+#define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB
+ u16 auto_link_speed;
+ u16 force_link_speed;
+ u32 preemphasis;
+
+ /* copy of requested setting from ethtool cmd */
+ u8 autoneg;
+#define BNXT_AUTONEG_SPEED 1
+#define BNXT_AUTONEG_FLOW_CTRL 2
+ u8 req_duplex;
+ u8 req_flow_ctrl;
+ u16 req_link_speed;
+ u32 advertising;
+ bool force_link_chng;
+ /* a copy of phy_qcfg output used to report link
+ * info to VF
+ */
+ struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
+};
+
+#define BNXT_MAX_QUEUE 8
+
+struct bnxt_queue_info {
+ u8 queue_id;
+ u8 queue_profile;
+};
+
+struct bnxt {
+ void __iomem *bar0;
+ void __iomem *bar1;
+ void __iomem *bar2;
+
+ u32 reg_base;
+
+ struct net_device *dev;
+ struct pci_dev *pdev;
+
+ atomic_t intr_sem;
+
+ u32 flags;
+ #define BNXT_FLAG_DCB_ENABLED 0x1
+ #define BNXT_FLAG_VF 0x2
+ #define BNXT_FLAG_LRO 0x4
+#ifdef CONFIG_INET
+ #define BNXT_FLAG_GRO 0x8
+#else
+ /* Cannot support hardware GRO if CONFIG_INET is not set */
+ #define BNXT_FLAG_GRO 0x0
+#endif
+ #define BNXT_FLAG_TPA (BNXT_FLAG_LRO | BNXT_FLAG_GRO)
+ #define BNXT_FLAG_JUMBO 0x10
+ #define BNXT_FLAG_STRIP_VLAN 0x20
+ #define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
+ BNXT_FLAG_LRO)
+ #define BNXT_FLAG_USING_MSIX 0x40
+ #define BNXT_FLAG_MSIX_CAP 0x80
+ #define BNXT_FLAG_RFS 0x100
+ #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
+ BNXT_FLAG_RFS | \
+ BNXT_FLAG_STRIP_VLAN)
+
+#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
+#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
+
+ struct bnxt_napi **bnapi;
+
+ u32 rx_buf_size;
+ u32 rx_buf_use_size; /* usable size */
+ u32 rx_ring_size;
+ u32 rx_agg_ring_size;
+ u32 rx_copy_thresh;
+ u32 rx_ring_mask;
+ u32 rx_agg_ring_mask;
+ int rx_nr_pages;
+ int rx_agg_nr_pages;
+ int rx_nr_rings;
+ int rsscos_nr_ctxs;
+
+ u32 tx_ring_size;
+ u32 tx_ring_mask;
+ int tx_nr_pages;
+ int tx_nr_rings;
+ int tx_nr_rings_per_tc;
+
+ int tx_wake_thresh;
+ int tx_push_thresh;
+ int tx_push_size;
+
+ u32 cp_ring_size;
+ u32 cp_ring_mask;
+ u32 cp_bit;
+ int cp_nr_pages;
+ int cp_nr_rings;
+
+ int num_stat_ctxs;
+ struct bnxt_ring_grp_info *grp_info;
+ struct bnxt_vnic_info *vnic_info;
+ int nr_vnics;
+
+ u8 max_tc;
+ struct bnxt_queue_info q_info[BNXT_MAX_QUEUE];
+
+ unsigned int current_interval;
+#define BNXT_TIMER_INTERVAL (HZ / 2)
+
+ struct timer_list timer;
+
+ int state;
+#define BNXT_STATE_CLOSED 0
+#define BNXT_STATE_OPEN 1
+
+ struct bnxt_irq *irq_tbl;
+ u8 mac_addr[ETH_ALEN];
+
+ u32 msg_enable;
+
+ u16 hwrm_cmd_seq;
+ u32 hwrm_intr_seq_id;
+ void *hwrm_cmd_resp_addr;
+ dma_addr_t hwrm_cmd_resp_dma_addr;
+ void *hwrm_dbg_resp_addr;
+ dma_addr_t hwrm_dbg_resp_dma_addr;
+#define HWRM_DBG_REG_BUF_SIZE 128
+ struct mutex hwrm_cmd_lock; /* serialize hwrm messages */
+ struct hwrm_ver_get_output ver_resp;
+#define FW_VER_STR_LEN 32
+#define BC_HWRM_STR_LEN 21
+#define PHY_VER_STR_LEN (FW_VER_STR_LEN - BC_HWRM_STR_LEN)
+ char fw_ver_str[FW_VER_STR_LEN];
+ __be16 vxlan_port;
+ u8 vxlan_port_cnt;
+ __le16 vxlan_fw_dst_port_id;
+ u8 nge_port_cnt;
+ __le16 nge_fw_dst_port_id;
+ u16 coal_ticks;
+ u16 coal_ticks_irq;
+ u16 coal_bufs;
+ u16 coal_bufs_irq;
+
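+/* Convert between microseconds and hardware coalescing timer units; the
+ * 25/2 scale factor implies 12.5 timer ticks per usec (an 80 ns tick).
+ */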
+#define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2)
+#define BNXT_COAL_TIMER_TO_USEC(x) ((x) * 2 / 25)
+
+ struct work_struct sp_task;
+ unsigned long sp_event;
+#define BNXT_RX_MASK_SP_EVENT 0
+#define BNXT_RX_NTP_FLTR_SP_EVENT 1
+#define BNXT_LINK_CHNG_SP_EVENT 2
+#define BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT 3
+#define BNXT_VXLAN_ADD_PORT_SP_EVENT 4
+#define BNXT_VXLAN_DEL_PORT_SP_EVENT 5
+#define BNXT_RESET_TASK_SP_EVENT 6
+#define BNXT_RST_RING_SP_EVENT 7
+
+ struct bnxt_pf_info pf;
+#ifdef CONFIG_BNXT_SRIOV
+ int nr_vfs;
+ struct bnxt_vf_info vf;
+ wait_queue_head_t sriov_cfg_wait;
+ bool sriov_cfg;
+#define BNXT_SRIOV_CFG_WAIT_TMO msecs_to_jiffies(10000)
+#endif
+
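+/* aRFS n-tuple filters: up to BNXT_NTP_FLTR_MAX_FLTR filters kept in a
+ * 512-bucket hash table, protected by ntp_fltr_lock.
+ */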
+#define BNXT_NTP_FLTR_MAX_FLTR 4096
+#define BNXT_NTP_FLTR_HASH_SIZE 512
+#define BNXT_NTP_FLTR_HASH_MASK (BNXT_NTP_FLTR_HASH_SIZE - 1)
+ struct hlist_head ntp_fltr_hash_tbl[BNXT_NTP_FLTR_HASH_SIZE];
+ spinlock_t ntp_fltr_lock; /* for hash table add, del */
+
+ unsigned long *ntp_fltr_bmap;
+ int ntp_fltr_count;
+
+ struct bnxt_link_info link_info;
+};
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void bnxt_enable_poll(struct bnxt_napi *bnapi)
+{
+ atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
+}
+
+/* called from the NAPI poll routine to get ownership of a bnapi */
+static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi)
+{
+ int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
+ BNXT_STATE_NAPI);
+
+ return rc == BNXT_STATE_IDLE;
+}
+
+static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi)
+{
+ atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
+}
+
+/* called from the busy poll routine to get ownership of a bnapi */
+static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi)
+{
+ int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
+ BNXT_STATE_POLL);
+
+ return rc == BNXT_STATE_IDLE;
+}
+
+static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi)
+{
+ atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
+}
+
+static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi)
+{
+ return atomic_read(&bnapi->poll_state) == BNXT_STATE_POLL;
+}
+
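+/* Spin until the ring is idle, then mark it disabled so that neither the
+ * NAPI poll nor the busy poll path can claim it.
+ */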
+static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
+{
+ int old;
+
+ while (1) {
+ old = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
+ BNXT_STATE_DISABLE);
+ if (old == BNXT_STATE_IDLE)
+ break;
+ usleep_range(500, 5000);
+ }
+}
+
+#else
+
+static inline void bnxt_enable_poll(struct bnxt_napi *bnapi)
+{
+}
+
+static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi)
+{
+ return true;
+}
+
+static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi)
+{
+}
+
+static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi)
+{
+ return false;
+}
+
+static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi)
+{
+}
+
+static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi)
+{
+ return false;
+}
+
+static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
+{
+}
+
+#endif
+
+void bnxt_set_ring_params(struct bnxt *);
+void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
+int _hwrm_send_message(struct bnxt *, void *, u32, int);
+int hwrm_send_message(struct bnxt *, void *, u32, int);
+int bnxt_hwrm_set_coal(struct bnxt *);
+int bnxt_hwrm_set_pause(struct bnxt *);
+int bnxt_hwrm_set_link_setting(struct bnxt *, bool);
+int bnxt_open_nic(struct bnxt *, bool, bool);
+int bnxt_close_nic(struct bnxt *, bool, bool);
+void bnxt_get_max_rings(struct bnxt *, int *, int *);
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
new file mode 100644
index 000000000000..45bd628eaf3a
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -0,0 +1,1149 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+#include <linux/firmware.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_ethtool.h"
+#include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */
+#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
+#define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100)
+
+static u32 bnxt_get_msglevel(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ return bp->msg_enable;
+}
+
+static void bnxt_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ bp->msg_enable = value;
+}
+
+static int bnxt_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ memset(coal, 0, sizeof(*coal));
+
+ coal->rx_coalesce_usecs =
+ max_t(u16, BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks), 1);
+ coal->rx_max_coalesced_frames = bp->coal_bufs / 2;
+ coal->rx_coalesce_usecs_irq =
+ max_t(u16, BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks_irq), 1);
+ coal->rx_max_coalesced_frames_irq = bp->coal_bufs_irq / 2;
+
+ return 0;
+}
+
+static int bnxt_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc = 0;
+
+ bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(coal->rx_coalesce_usecs);
+ bp->coal_bufs = coal->rx_max_coalesced_frames * 2;
+ bp->coal_ticks_irq =
+ BNXT_USEC_TO_COAL_TIMER(coal->rx_coalesce_usecs_irq);
+ bp->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * 2;
+
+ if (netif_running(dev))
+ rc = bnxt_hwrm_set_coal(bp);
+
+ return rc;
+}
+
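+/* Per-ring stats: the 20 hardware counters in struct ctx_hw_stats plus the
+ * software rx_l4_csum_errors counter.
+ */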
+#define BNXT_NUM_STATS 21
+
+static int bnxt_get_sset_count(struct net_device *dev, int sset)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return BNXT_NUM_STATS * bp->cp_nr_rings;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void bnxt_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *buf)
+{
+ u32 i, j = 0;
+ struct bnxt *bp = netdev_priv(dev);
+ u32 buf_size = sizeof(struct ctx_hw_stats) * bp->cp_nr_rings;
+ u32 stat_fields = sizeof(struct ctx_hw_stats) / 8;
+
+ memset(buf, 0, buf_size);
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ __le64 *hw_stats = (__le64 *)cpr->hw_stats;
+ int k;
+
+ for (k = 0; k < stat_fields; j++, k++)
+ buf[j] = le64_to_cpu(hw_stats[k]);
+ buf[j++] = cpr->rx_l4_csum_errors;
+ }
+}
+
+static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u32 i;
+
+ switch (stringset) {
+ /* The number of strings must match BNXT_NUM_STATS defined above. */
+ case ETH_SS_STATS:
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ sprintf(buf, "[%d]: rx_ucast_packets", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_mcast_packets", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_bcast_packets", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_discards", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_drops", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_ucast_bytes", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_mcast_bytes", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_bcast_bytes", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_ucast_packets", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_mcast_packets", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_bcast_packets", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_discards", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_drops", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_ucast_bytes", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_mcast_bytes", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_bcast_bytes", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tpa_packets", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tpa_bytes", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tpa_events", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tpa_aborts", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_l4_csum_errors", i);
+ buf += ETH_GSTRING_LEN;
+ }
+ break;
+ default:
+ netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
+ stringset);
+ break;
+ }
+}
+
+static void bnxt_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
+ ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
+ ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
+
+ ering->rx_pending = bp->rx_ring_size;
+ ering->rx_jumbo_pending = bp->rx_agg_ring_size;
+ ering->tx_pending = bp->tx_ring_size;
+}
+
+static int bnxt_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
+ (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
+ (ering->tx_pending <= MAX_SKB_FRAGS))
+ return -EINVAL;
+
+ if (netif_running(dev))
+ bnxt_close_nic(bp, false, false);
+
+ bp->rx_ring_size = ering->rx_pending;
+ bp->tx_ring_size = ering->tx_pending;
+ bnxt_set_ring_params(bp);
+
+ if (netif_running(dev))
+ return bnxt_open_nic(bp, false, false);
+
+ return 0;
+}
+
+static void bnxt_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int max_rx_rings, max_tx_rings, tcs;
+
+ bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
+ tcs = netdev_get_num_tc(dev);
+ if (tcs > 1)
+ max_tx_rings /= tcs;
+
+ channel->max_rx = max_rx_rings;
+ channel->max_tx = max_tx_rings;
+ channel->max_other = 0;
+ channel->max_combined = 0;
+ channel->rx_count = bp->rx_nr_rings;
+ channel->tx_count = bp->tx_nr_rings_per_tc;
+}
+
+static int bnxt_set_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int max_rx_rings, max_tx_rings, tcs;
+ u32 rc = 0;
+
+ if (channel->other_count || channel->combined_count ||
+ !channel->rx_count || !channel->tx_count)
+ return -EINVAL;
+
+ bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
+ tcs = netdev_get_num_tc(dev);
+ if (tcs > 1)
+ max_tx_rings /= tcs;
+
+ if (channel->rx_count > max_rx_rings ||
+ channel->tx_count > max_tx_rings)
+ return -EINVAL;
+
+ if (netif_running(dev)) {
+ if (BNXT_PF(bp)) {
+ /* TODO CHIMP_FW: Send message to all VFs
+ * before PF unload
+ */
+ }
+ rc = bnxt_close_nic(bp, true, false);
+ if (rc) {
+ netdev_err(bp->dev, "Set channel failure rc: %x\n",
+ rc);
+ return rc;
+ }
+ }
+
+ bp->rx_nr_rings = channel->rx_count;
+ bp->tx_nr_rings_per_tc = channel->tx_count;
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+ if (tcs > 1)
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
+ bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
+ bp->num_stat_ctxs = bp->cp_nr_rings;
+
+ if (netif_running(dev)) {
+ rc = bnxt_open_nic(bp, true, false);
+ if ((!rc) && BNXT_PF(bp)) {
+ /* TODO CHIMP_FW: Send message to all VFs
+ * to re-enable
+ */
+ }
+ }
+
+ return rc;
+}
+
+#ifdef CONFIG_RFS_ACCEL
+static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ int i, j = 0;
+
+ cmd->data = bp->ntp_fltr_count;
+ for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+ struct hlist_head *head;
+ struct bnxt_ntuple_filter *fltr;
+
+ head = &bp->ntp_fltr_hash_tbl[i];
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(fltr, head, hash) {
+ if (j == cmd->rule_cnt)
+ break;
+ rule_locs[j++] = fltr->sw_id;
+ }
+ rcu_read_unlock();
+ if (j == cmd->rule_cnt)
+ break;
+ }
+ cmd->rule_cnt = j;
+ return 0;
+}
+
+static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fs =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct bnxt_ntuple_filter *fltr;
+ struct flow_keys *fkeys;
+ int i, rc = -EINVAL;
+
+ if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
+ return rc;
+
+ for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+ struct hlist_head *head;
+
+ head = &bp->ntp_fltr_hash_tbl[i];
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(fltr, head, hash) {
+ if (fltr->sw_id == fs->location)
+ goto fltr_found;
+ }
+ rcu_read_unlock();
+ }
+ return rc;
+
+fltr_found:
+ fkeys = &fltr->fkeys;
+ if (fkeys->basic.ip_proto == IPPROTO_TCP)
+ fs->flow_type = TCP_V4_FLOW;
+ else if (fkeys->basic.ip_proto == IPPROTO_UDP)
+ fs->flow_type = UDP_V4_FLOW;
+ else
+ goto fltr_err;
+
+ fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
+ fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
+
+ fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
+ fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
+
+ fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
+ fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
+
+ fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
+ fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+
+ fs->ring_cookie = fltr->rxq;
+ rc = 0;
+
+fltr_err:
+ rcu_read_unlock();
+
+ return rc;
+}
+
+static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = bp->rx_nr_rings;
+ break;
+
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = bp->ntp_fltr_count;
+ cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
+ break;
+
+ case ETHTOOL_GRXCLSRLALL:
+ rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
+ break;
+
+ case ETHTOOL_GRXCLSRULE:
+ rc = bnxt_grxclsrule(bp, cmd);
+ break;
+
+ default:
+ rc = -EOPNOTSUPP;
+ break;
+ }
+
+ return rc;
+}
+#endif
+
+static u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
+{
+ return HW_HASH_INDEX_SIZE;
+}
+
+static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
+{
+ return HW_HASH_KEY_SIZE;
+}
+
+static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ int i = 0;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ if (indir)
+ for (i = 0; i < HW_HASH_INDEX_SIZE; i++)
+ indir[i] = le16_to_cpu(vnic->rss_table[i]);
+
+ if (key)
+ memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
+
+ return 0;
+}
+
+static void bnxt_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
+ strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+ info->n_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
+ info->testinfo_len = BNXT_NUM_TESTS(bp);
+ /* TODO CHIMP_FW: eeprom dump details */
+ info->eedump_len = 0;
+ /* TODO CHIMP FW: reg dump details */
+ info->regdump_len = 0;
+}
+
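+/* Translate the firmware support_speeds bitmap into ethtool SUPPORTED_* bits */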
+static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info)
+{
+ u16 fw_speeds = link_info->support_speeds;
+ u32 speed_mask = 0;
+
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
+ speed_mask |= SUPPORTED_100baseT_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
+ speed_mask |= SUPPORTED_1000baseT_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
+ speed_mask |= SUPPORTED_2500baseX_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
+ speed_mask |= SUPPORTED_10000baseT_Full;
+ /* TODO: support 25GB, 50GB with different cable type */
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB)
+ speed_mask |= SUPPORTED_20000baseMLD2_Full |
+ SUPPORTED_20000baseKR2_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
+ speed_mask |= SUPPORTED_40000baseKR4_Full |
+ SUPPORTED_40000baseCR4_Full |
+ SUPPORTED_40000baseSR4_Full |
+ SUPPORTED_40000baseLR4_Full;
+
+ return speed_mask;
+}
+
+static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info)
+{
+ u16 fw_speeds = link_info->auto_link_speeds;
+ u32 speed_mask = 0;
+
+ /* TODO: support 25GB, 40GB, 50GB with different cable type */
+ /* set the advertised speeds */
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
+ speed_mask |= ADVERTISED_100baseT_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
+ speed_mask |= ADVERTISED_1000baseT_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
+ speed_mask |= ADVERTISED_2500baseX_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
+ speed_mask |= ADVERTISED_10000baseT_Full;
+ /* TODO: how to advertise 20, 25, 40, 50GB with different cable type? */
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB)
+ speed_mask |= ADVERTISED_20000baseMLD2_Full |
+ ADVERTISED_20000baseKR2_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
+ speed_mask |= ADVERTISED_40000baseKR4_Full |
+ ADVERTISED_40000baseCR4_Full |
+ ADVERTISED_40000baseSR4_Full |
+ ADVERTISED_40000baseLR4_Full;
+ return speed_mask;
+}
+
+u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
+{
+ switch (fw_link_speed) {
+ case BNXT_LINK_SPEED_100MB:
+ return SPEED_100;
+ case BNXT_LINK_SPEED_1GB:
+ return SPEED_1000;
+ case BNXT_LINK_SPEED_2_5GB:
+ return SPEED_2500;
+ case BNXT_LINK_SPEED_10GB:
+ return SPEED_10000;
+ case BNXT_LINK_SPEED_20GB:
+ return SPEED_20000;
+ case BNXT_LINK_SPEED_25GB:
+ return SPEED_25000;
+ case BNXT_LINK_SPEED_40GB:
+ return SPEED_40000;
+ case BNXT_LINK_SPEED_50GB:
+ return SPEED_50000;
+ default:
+ return SPEED_UNKNOWN;
+ }
+}
+
+static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info = &bp->link_info;
+ u16 ethtool_speed;
+
+ cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info);
+
+ if (link_info->auto_link_speeds)
+ cmd->supported |= SUPPORTED_Autoneg;
+
+ if (BNXT_AUTO_MODE(link_info->auto_mode)) {
+ cmd->advertising =
+ bnxt_fw_to_ethtool_advertised_spds(link_info);
+ cmd->advertising |= ADVERTISED_Autoneg;
+ cmd->autoneg = AUTONEG_ENABLE;
+ } else {
+ cmd->autoneg = AUTONEG_DISABLE;
+ cmd->advertising = 0;
+ }
+ if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+ if ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) ==
+ BNXT_LINK_PAUSE_BOTH) {
+ cmd->advertising |= ADVERTISED_Pause;
+ cmd->supported |= SUPPORTED_Pause;
+ } else {
+ cmd->advertising |= ADVERTISED_Asym_Pause;
+ cmd->supported |= SUPPORTED_Asym_Pause;
+ if (link_info->auto_pause_setting &
+ BNXT_LINK_PAUSE_RX)
+ cmd->advertising |= ADVERTISED_Pause;
+ }
+ } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+ if ((link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) ==
+ BNXT_LINK_PAUSE_BOTH) {
+ cmd->supported |= SUPPORTED_Pause;
+ } else {
+ cmd->supported |= SUPPORTED_Asym_Pause;
+ if (link_info->force_pause_setting &
+ BNXT_LINK_PAUSE_RX)
+ cmd->supported |= SUPPORTED_Pause;
+ }
+ }
+
+ cmd->port = PORT_NONE;
+ if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
+ cmd->port = PORT_TP;
+ cmd->supported |= SUPPORTED_TP;
+ cmd->advertising |= ADVERTISED_TP;
+ } else {
+ cmd->supported |= SUPPORTED_FIBRE;
+ cmd->advertising |= ADVERTISED_FIBRE;
+
+ if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
+ cmd->port = PORT_DA;
+ else if (link_info->media_type ==
+ PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
+ cmd->port = PORT_FIBRE;
+ }
+
+ if (link_info->phy_link_status == BNXT_LINK_LINK) {
+ if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
+ cmd->duplex = DUPLEX_FULL;
+ } else {
+ cmd->duplex = DUPLEX_UNKNOWN;
+ }
+ ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
+ ethtool_cmd_speed_set(cmd, ethtool_speed);
+ if (link_info->transceiver ==
+ PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL)
+ cmd->transceiver = XCVR_INTERNAL;
+ else
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->phy_address = link_info->phy_addr;
+
+ return 0;
+}
+
+static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed)
+{
+ switch (ethtool_speed) {
+ case SPEED_100:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
+ case SPEED_1000:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
+ case SPEED_2500:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
+ case SPEED_10000:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
+ case SPEED_20000:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
+ case SPEED_25000:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
+ case SPEED_40000:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
+ case SPEED_50000:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
+ default:
+ netdev_err(dev, "unsupported speed!\n");
+ break;
+ }
+ return 0;
+}
+
+static u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
+{
+ u16 fw_speed_mask = 0;
+
+ /* only support autoneg at speeds 100, 1000, and 10000 */
+ if (advertising & (ADVERTISED_100baseT_Full |
+ ADVERTISED_100baseT_Half)) {
+ fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
+ }
+ if (advertising & (ADVERTISED_1000baseT_Full |
+ ADVERTISED_1000baseT_Half)) {
+ fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
+ }
+ if (advertising & ADVERTISED_10000baseT_Full)
+ fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
+
+ return fw_speed_mask;
+}
+
+static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ int rc = 0;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info = &bp->link_info;
+ u32 speed, fw_advertising = 0;
+ bool set_pause = false;
+
+ if (BNXT_VF(bp))
+ return rc;
+
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ if (link_info->media_type != PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
+ netdev_err(dev, "Media type doesn't support autoneg\n");
+ rc = -EINVAL;
+ goto set_setting_exit;
+ }
+ if (cmd->advertising & ~(BNXT_ALL_COPPER_ETHTOOL_SPEED |
+ ADVERTISED_Autoneg |
+ ADVERTISED_TP |
+ ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause)) {
+ netdev_err(dev, "Unsupported advertising mask (adv: 0x%x)\n",
+ cmd->advertising);
+ rc = -EINVAL;
+ goto set_setting_exit;
+ }
+ fw_advertising = bnxt_get_fw_auto_link_speeds(cmd->advertising);
+ if (fw_advertising & ~link_info->support_speeds) {
+ netdev_err(dev, "Advertising parameters are not supported! (adv: 0x%x)\n",
+ cmd->advertising);
+ rc = -EINVAL;
+ goto set_setting_exit;
+ }
+ link_info->autoneg |= BNXT_AUTONEG_SPEED;
+ if (!fw_advertising)
+ link_info->advertising = link_info->support_speeds;
+ else
+ link_info->advertising = fw_advertising;
+ /* Any change to autoneg will cause a link change, so the
+ * driver should restore the original pause setting when
+ * autoneg is enabled.
+ */
+ set_pause = true;
+ } else {
+ /* TODO: currently don't support half duplex */
+ if (cmd->duplex == DUPLEX_HALF) {
+ netdev_err(dev, "HALF DUPLEX is not supported!\n");
+ rc = -EINVAL;
+ goto set_setting_exit;
+ }
+ /* If we receive a request for an unknown duplex, assume full duplex */
+ if (cmd->duplex == DUPLEX_UNKNOWN)
+ cmd->duplex = DUPLEX_FULL;
+ speed = ethtool_cmd_speed(cmd);
+ link_info->req_link_speed = bnxt_get_fw_speed(dev, speed);
+ link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
+ link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
+ link_info->advertising = 0;
+ }
+
+ if (netif_running(dev))
+ rc = bnxt_hwrm_set_link_setting(bp, set_pause);
+
+set_setting_exit:
+ return rc;
+}
+
+static void bnxt_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info = &bp->link_info;
+
+ if (BNXT_VF(bp))
+ return;
+ epause->autoneg = !!(link_info->auto_pause_setting &
+ BNXT_LINK_PAUSE_BOTH);
+ epause->rx_pause = ((link_info->pause & BNXT_LINK_PAUSE_RX) != 0);
+ epause->tx_pause = ((link_info->pause & BNXT_LINK_PAUSE_TX) != 0);
+}
+
+static int bnxt_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ int rc = 0;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info = &bp->link_info;
+
+ if (BNXT_VF(bp))
+ return rc;
+
+ if (epause->autoneg) {
+ link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+ link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_BOTH;
+ } else {
+ /* when transitioning from autoneg pause to forced pause,
+ * force a link change
+ */
+ if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
+ link_info->force_link_chng = true;
+ link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
+ link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_BOTH;
+ }
+ if (epause->rx_pause)
+ link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;
+ else
+ link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_RX;
+
+ if (epause->tx_pause)
+ link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
+ else
+ link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_TX;
+
+ if (netif_running(dev))
+ rc = bnxt_hwrm_set_pause(bp);
+ return rc;
+}
+
+static u32 bnxt_get_link(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ /* TODO: handle MF, VF, driver close case */
+ return bp->link_info.link_up;
+}
+
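+/* Write one NVRAM directory entry via HWRM_NVM_WRITE, staging the image in a
+ * DMA-coherent bounce buffer.
+ */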
+static int bnxt_flash_nvram(struct net_device *dev,
+ u16 dir_type,
+ u16 dir_ordinal,
+ u16 dir_ext,
+ u16 dir_attr,
+ const u8 *data,
+ size_t data_len)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+ struct hwrm_nvm_write_input req = {0};
+ dma_addr_t dma_handle;
+ u8 *kmem;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1);
+
+ req.dir_type = cpu_to_le16(dir_type);
+ req.dir_ordinal = cpu_to_le16(dir_ordinal);
+ req.dir_ext = cpu_to_le16(dir_ext);
+ req.dir_attr = cpu_to_le16(dir_attr);
+ req.dir_data_length = cpu_to_le32(data_len);
+
+ kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle,
+ GFP_KERNEL);
+ if (!kmem) {
+ netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
+ (unsigned)data_len);
+ return -ENOMEM;
+ }
+ memcpy(kmem, data, data_len);
+ req.host_src_addr = cpu_to_le64(dma_handle);
+
+ rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
+ dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);
+
+ return rc;
+}
+
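+/* Validate a bootcode image (signature, code type, device family and the
+ * trailing CRC32) before writing it to NVRAM.
+ */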
+static int bnxt_flash_firmware(struct net_device *dev,
+ u16 dir_type,
+ const u8 *fw_data,
+ size_t fw_size)
+{
+ int rc = 0;
+ u16 code_type;
+ u32 stored_crc;
+ u32 calculated_crc;
+ struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;
+
+ switch (dir_type) {
+ case BNX_DIR_TYPE_BOOTCODE:
+ case BNX_DIR_TYPE_BOOTCODE_2:
+ code_type = CODE_BOOT;
+ break;
+ default:
+ netdev_err(dev, "Unsupported directory entry type: %u\n",
+ dir_type);
+ return -EINVAL;
+ }
+ if (fw_size < sizeof(struct bnxt_fw_header)) {
+ netdev_err(dev, "Invalid firmware file size: %u\n",
+ (unsigned int)fw_size);
+ return -EINVAL;
+ }
+ if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
+ netdev_err(dev, "Invalid firmware signature: %08X\n",
+ le32_to_cpu(header->signature));
+ return -EINVAL;
+ }
+ if (header->code_type != code_type) {
+ netdev_err(dev, "Expected firmware type: %d, read: %d\n",
+ code_type, header->code_type);
+ return -EINVAL;
+ }
+ if (header->device != DEVICE_CUMULUS_FAMILY) {
+ netdev_err(dev, "Expected firmware device family %d, read: %d\n",
+ DEVICE_CUMULUS_FAMILY, header->device);
+ return -EINVAL;
+ }
+ /* Confirm the CRC32 checksum of the file: */
+ stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
+ sizeof(stored_crc)));
+ calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
+ if (calculated_crc != stored_crc) {
+ netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
+ (unsigned long)stored_crc,
+ (unsigned long)calculated_crc);
+ return -EINVAL;
+ }
+ /* TODO: Validate digital signature (RSA-encrypted SHA-256 hash) here */
+ rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
+ 0, 0, fw_data, fw_size);
+ if (rc == 0) { /* Firmware update successful */
+ /* TODO: Notify processor it needs to reset itself
+ */
+ }
+ return rc;
+}
+
+static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
+{
+ switch (dir_type) {
+ case BNX_DIR_TYPE_CHIMP_PATCH:
+ case BNX_DIR_TYPE_BOOTCODE:
+ case BNX_DIR_TYPE_BOOTCODE_2:
+ case BNX_DIR_TYPE_APE_FW:
+ case BNX_DIR_TYPE_APE_PATCH:
+ case BNX_DIR_TYPE_KONG_FW:
+ case BNX_DIR_TYPE_KONG_PATCH:
+ return true;
+ }
+
+ return false;
+}
+
+static bool bnxt_dir_type_is_unprotected_exec_format(u16 dir_type)
+{
+ switch (dir_type) {
+ case BNX_DIR_TYPE_AVS:
+ case BNX_DIR_TYPE_EXP_ROM_MBA:
+ case BNX_DIR_TYPE_PCIE:
+ case BNX_DIR_TYPE_TSCF_UCODE:
+ case BNX_DIR_TYPE_EXT_PHY:
+ case BNX_DIR_TYPE_CCM:
+ case BNX_DIR_TYPE_ISCSI_BOOT:
+ case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
+ case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
+ return true;
+ }
+
+ return false;
+}
+
+static bool bnxt_dir_type_is_executable(u16 dir_type)
+{
+ return bnxt_dir_type_is_ape_bin_format(dir_type) ||
+ bnxt_dir_type_is_unprotected_exec_format(dir_type);
+}
+
+static int bnxt_flash_firmware_from_file(struct net_device *dev,
+ u16 dir_type,
+ const char *filename)
+{
+ const struct firmware *fw;
+ int rc;
+
+ if (!bnxt_dir_type_is_executable(dir_type))
+ return -EINVAL;
+
+ rc = request_firmware(&fw, filename, &dev->dev);
+ if (rc != 0) {
+ netdev_err(dev, "Error %d requesting firmware file: %s\n",
+ rc, filename);
+ return rc;
+ }
+ if (bnxt_dir_type_is_ape_bin_format(dir_type))
+ rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
+ else
+ rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
+ 0, 0, fw->data, fw->size);
+ release_firmware(fw);
+ return rc;
+}
+
+static int bnxt_flash_package_from_file(struct net_device *dev,
+ char *filename)
+{
+ netdev_err(dev, "packages are not yet supported\n");
+ return -EINVAL;
+}
+
+static int bnxt_flash_device(struct net_device *dev,
+ struct ethtool_flash *flash)
+{
+ if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
+ netdev_err(dev, "flashdev not supported from a virtual function\n");
+ return -EINVAL;
+ }
+
+ if (flash->region == ETHTOOL_FLASH_ALL_REGIONS)
+ return bnxt_flash_package_from_file(dev, flash->data);
+
+ return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
+}
+
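+/* Query the number of NVRAM directory entries and the size of each entry */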
+static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+ struct hwrm_nvm_get_dir_info_input req = {0};
+ struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ *entries = le32_to_cpu(output->entries);
+ *length = le32_to_cpu(output->entry_length);
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_get_eeprom_len(struct net_device *dev)
+{
+ /* The -1 return value allows the entire 32-bit range of offsets to be
+ * passed via the ethtool command-line utility.
+ */
+ return -1;
+}
+
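+/* Return the raw NVRAM directory: a 2-byte header (entry count, entry size)
+ * followed by the packed directory entries.
+ */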
+static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+ u32 dir_entries;
+ u32 entry_length;
+ u8 *buf;
+ size_t buflen;
+ dma_addr_t dma_handle;
+ struct hwrm_nvm_get_dir_entries_input req = {0};
+
+ rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
+ if (rc != 0)
+ return rc;
+
+ /* Insert 2 bytes of directory info (count and size of entries) */
+ if (len < 2)
+ return -EINVAL;
+
+ *data++ = dir_entries;
+ *data++ = entry_length;
+ len -= 2;
+ memset(data, 0xff, len);
+
+ buflen = dir_entries * entry_length;
+ buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle,
+ GFP_KERNEL);
+ if (!buf) {
+ netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
+ (unsigned)buflen);
+ return -ENOMEM;
+ }
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1);
+ req.host_dest_addr = cpu_to_le64(dma_handle);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc == 0)
+ memcpy(data, buf, len > buflen ? buflen : len);
+ dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle);
+ return rc;
+}
+
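+/* Read 'length' bytes at 'offset' from the NVRAM item at directory index
+ * 'index' via HWRM_NVM_READ.
+ */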
+static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
+ u32 length, u8 *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+ u8 *buf;
+ dma_addr_t dma_handle;
+ struct hwrm_nvm_read_input req = {0};
+
+ buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle,
+ GFP_KERNEL);
+ if (!buf) {
+ netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
+ (unsigned)length);
+ return -ENOMEM;
+ }
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1);
+ req.host_dest_addr = cpu_to_le64(dma_handle);
+ req.dir_idx = cpu_to_le16(index);
+ req.offset = cpu_to_le32(offset);
+ req.len = cpu_to_le32(length);
+
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc == 0)
+ memcpy(data, buf, length);
+ dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle);
+ return rc;
+}
+
+static int bnxt_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ u32 index;
+ u32 offset;
+
+ if (eeprom->offset == 0) /* special offset value to get directory */
+ return bnxt_get_nvram_directory(dev, eeprom->len, data);
+
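+ /* The upper 8 bits of the offset select the NVRAM directory entry
+ * (1-based); the lower 24 bits are the byte offset within that item.
+ */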
+ index = eeprom->offset >> 24;
+ offset = eeprom->offset & 0xffffff;
+
+ if (index == 0) {
+ netdev_err(dev, "unsupported index value: %d\n", index);
+ return -EINVAL;
+ }
+
+ return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
+}
+
+static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct hwrm_nvm_erase_dir_entry_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1);
+ req.dir_idx = cpu_to_le16(index);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_set_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u8 index, dir_op;
+ u16 type, ext, ordinal, attr;
+
+ if (!BNXT_PF(bp)) {
+ netdev_err(dev, "NVM write not supported from a virtual function\n");
+ return -EINVAL;
+ }
+
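+ /* The upper 16 bits of the magic select the NVM item type; the special
+ * value 0xffff requests a directory operation where the low byte is the
+ * 1-based entry index and the next byte is the operation code.
+ */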
+ type = eeprom->magic >> 16;
+
+ if (type == 0xffff) { /* special value for directory operations */
+ index = eeprom->magic & 0xff;
+ dir_op = eeprom->magic >> 8;
+ if (index == 0)
+ return -EINVAL;
+ switch (dir_op) {
+ case 0x0e: /* erase */
+ if (eeprom->offset != ~eeprom->magic)
+ return -EINVAL;
+ return bnxt_erase_nvram_directory(dev, index - 1);
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* Create or re-write an NVM item: */
+ if (bnxt_dir_type_is_executable(type))
+ return -EINVAL;
+ ext = eeprom->magic & 0xffff;
+ ordinal = eeprom->offset >> 16;
+ attr = eeprom->offset & 0xffff;
+
+ return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data,
+ eeprom->len);
+}
+
+const struct ethtool_ops bnxt_ethtool_ops = {
+ .get_settings = bnxt_get_settings,
+ .set_settings = bnxt_set_settings,
+ .get_pauseparam = bnxt_get_pauseparam,
+ .set_pauseparam = bnxt_set_pauseparam,
+ .get_drvinfo = bnxt_get_drvinfo,
+ .get_coalesce = bnxt_get_coalesce,
+ .set_coalesce = bnxt_set_coalesce,
+ .get_msglevel = bnxt_get_msglevel,
+ .set_msglevel = bnxt_set_msglevel,
+ .get_sset_count = bnxt_get_sset_count,
+ .get_strings = bnxt_get_strings,
+ .get_ethtool_stats = bnxt_get_ethtool_stats,
+ .set_ringparam = bnxt_set_ringparam,
+ .get_ringparam = bnxt_get_ringparam,
+ .get_channels = bnxt_get_channels,
+ .set_channels = bnxt_set_channels,
+#ifdef CONFIG_RFS_ACCEL
+ .get_rxnfc = bnxt_get_rxnfc,
+#endif
+ .get_rxfh_indir_size = bnxt_get_rxfh_indir_size,
+ .get_rxfh_key_size = bnxt_get_rxfh_key_size,
+ .get_rxfh = bnxt_get_rxfh,
+ .flash_device = bnxt_flash_device,
+ .get_eeprom_len = bnxt_get_eeprom_len,
+ .get_eeprom = bnxt_get_eeprom,
+ .set_eeprom = bnxt_set_eeprom,
+ .get_link = bnxt_get_link,
+};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
new file mode 100644
index 000000000000..98fa81e08b58
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -0,0 +1,17 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_ETHTOOL_H
+#define BNXT_ETHTOOL_H
+
+extern const struct ethtool_ops bnxt_ethtool_ops;
+
+u32 bnxt_fw_to_ethtool_speed(u16);
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
new file mode 100644
index 000000000000..e0aac65c6d82
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
@@ -0,0 +1,104 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef __BNXT_FW_HDR_H__
+#define __BNXT_FW_HDR_H__
+
+#define BNXT_FIRMWARE_BIN_SIGNATURE 0x1a4d4342 /* "BCM"+0x1a */
+
+enum SUPPORTED_FAMILY {
+ DEVICE_5702_3_4_FAMILY, /* 0 - Denali, Vinson, K2 */
+ DEVICE_5705_FAMILY, /* 1 - Bachelor */
+ DEVICE_SHASTA_FAMILY, /* 2 - 5751 */
+ DEVICE_5706_FAMILY, /* 3 - Teton */
+ DEVICE_5714_FAMILY, /* 4 - Hamilton */
+ DEVICE_STANFORD_FAMILY, /* 5 - 5755 */
+ DEVICE_STANFORD_ME_FAMILY, /* 6 - 5756 */
+ DEVICE_SOLEDAD_FAMILY, /* 7 - 5761[E] */
+ DEVICE_CILAI_FAMILY, /* 8 - 57780/60/90/91 */
+ DEVICE_ASPEN_FAMILY, /* 9 - 57781/85/61/65/91/95 */
+ DEVICE_ASPEN_PLUS_FAMILY, /* 10 - 57786 */
+ DEVICE_LOGAN_FAMILY, /* 11 - Any device in the Logan family
+ */
+ DEVICE_LOGAN_5762, /* 12 - Logan Enterprise (aka Columbia)
+ */
+ DEVICE_LOGAN_57767, /* 13 - Logan Client */
+ DEVICE_LOGAN_57787, /* 14 - Logan Consumer */
+ DEVICE_LOGAN_5725, /* 15 - Logan Server (TruManage-enabled)
+ */
+ DEVICE_SAWTOOTH_FAMILY, /* 16 - 5717/18 */
+ DEVICE_COTOPAXI_FAMILY, /* 17 - 5719 */
+ DEVICE_SNAGGLETOOTH_FAMILY, /* 18 - 5720 */
+ DEVICE_CUMULUS_FAMILY, /* 19 - Cumulus/Whitney */
+ MAX_DEVICE_FAMILY
+};
+
+enum SUPPORTED_CODE {
+ CODE_ASF1, /* 0 - ASF VERSION 1.03 <deprecated> */
+ CODE_ASF2, /* 1 - ASF VERSION 2.00 <deprecated> */
+ CODE_PASSTHRU, /* 2 - PassThru <deprecated> */
+ CODE_PT_SEC, /* 3 - PassThru with security <deprecated> */
+ CODE_UMP, /* 4 - UMP <deprecated> */
+ CODE_BOOT, /* 5 - Bootcode */
+ CODE_DASH, /* 6 - TruManage (DASH + ASF + PMCI)
+ * Management firmwares
+ */
+ CODE_MCTP_PASSTHRU, /* 7 - NCSI / MCTP Pass-through firmware */
+ CODE_PM_OFFLOAD, /* 8 - Power-Management Proxy Offload firmwares
+ */
+ CODE_MDNS_SD_OFFLOAD, /* 9 - Multicast DNS Service Discovery Proxy
+ * Offload firmware
+ */
+ CODE_DISC_OFFLOAD, /* 10 - Discovery Offload firmware */
+ CODE_MUSTANG, /* 11 - I2C Error reporting APE firmwares
+ * <deprecated>
+ */
+ CODE_ARP_BATCH, /* 12 - ARP Batch firmware */
+ CODE_SMASH, /* 13 - TruManage (SMASH + DCMI/IPMI + PMCI)
+ * Management firmware
+ */
+ CODE_APE_DIAG, /* 14 - APE Test Diag firmware */
+ CODE_APE_PATCH, /* 15 - APE Patch firmware */
+ CODE_TANG_PATCH, /* 16 - TANG Patch firmware */
+ CODE_KONG_FW, /* 17 - KONG firmware */
+ CODE_KONG_PATCH, /* 18 - KONG Patch firmware */
+ CODE_BONO_FW, /* 19 - BONO firmware */
+ CODE_BONO_PATCH, /* 20 - BONO Patch firmware */
+
+ MAX_CODE_TYPE,
+};
+
+enum SUPPORTED_MEDIA {
+ MEDIA_COPPER, /* 0 */
+ MEDIA_FIBER, /* 1 */
+ MEDIA_NONE, /* 2 */
+ MEDIA_COPPER_FIBER, /* 3 */
+ MAX_MEDIA_TYPE,
+};
+
+struct bnxt_fw_header {
+ __le32 signature; /* contains the constant value
+ * BNXT_FIRMWARE_BIN_SIGNATURE
+ */
+ u8 flags; /* reserved for ChiMP use */
+ u8 code_type; /* enum SUPPORTED_CODE */
+ u8 device; /* enum SUPPORTED_FAMILY */
+ u8 media; /* enum SUPPORTED_MEDIA */
+ u8 version[16]; /* the null-terminated version string of
+ * the file; this is copied from the
+ * binary file's version string
+ */
+ u8 build;
+ u8 revision;
+ u8 minor_ver;
+ u8 major_ver;
+};
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
new file mode 100644
index 000000000000..70fc8253c07f
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -0,0 +1,4046 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_HSI_H
+#define BNXT_HSI_H
+
+/* per-context HW statistics -- chip view */
+struct ctx_hw_stats {
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 tpa_pkts;
+ __le64 tpa_bytes;
+ __le64 tpa_events;
+ __le64 tpa_aborts;
+};
+
+/* Statistics Ejection Buffer Completion Record (16 bytes) */
+struct eject_cmpl {
+ __le16 type;
+ #define EJECT_CMPL_TYPE_MASK 0x3fUL
+ #define EJECT_CMPL_TYPE_SFT 0
+ #define EJECT_CMPL_TYPE_STAT_EJECT (0x1aUL << 0)
+ __le16 len;
+ __le32 opaque;
+ __le32 v;
+ #define EJECT_CMPL_V 0x1UL
+ __le32 unused_2;
+};
+
+/* HWRM Completion Record (16 bytes) */
+struct hwrm_cmpl {
+ __le16 type;
+ #define HWRM_CMPL_TYPE_MASK 0x3fUL
+ #define HWRM_CMPL_TYPE_SFT 0
+ #define HWRM_CMPL_TYPE_HWRM_DONE (0x20UL << 0)
+ __le16 sequence_id;
+ __le32 unused_1;
+ __le32 v;
+ #define HWRM_CMPL_V 0x1UL
+ __le32 unused_3;
+};
+
+/* HWRM Forwarded Request (16 bytes) */
+struct hwrm_fwd_req_cmpl {
+ __le16 req_len_type;
+ #define HWRM_FWD_REQ_CMPL_TYPE_MASK 0x3fUL
+ #define HWRM_FWD_REQ_CMPL_TYPE_SFT 0
+ #define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ (0x22UL << 0)
+ #define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
+ #define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT 6
+ __le16 source_id;
+ __le32 unused_0;
+ __le32 req_buf_addr_v[2];
+ #define HWRM_FWD_REQ_CMPL_V 0x1UL
+ #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL
+ #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
+};
+
+/* HWRM Forwarded Response (16 bytes) */
+struct hwrm_fwd_resp_cmpl {
+ __le16 type;
+ #define HWRM_FWD_RESP_CMPL_TYPE_MASK 0x3fUL
+ #define HWRM_FWD_RESP_CMPL_TYPE_SFT 0
+ #define HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP (0x24UL << 0)
+ __le16 source_id;
+ __le16 resp_len;
+ __le16 unused_1;
+ __le32 resp_buf_addr_v[2];
+ #define HWRM_FWD_RESP_CMPL_V 0x1UL
+ #define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL
+ #define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
+};
+
+/* HWRM Asynchronous Event Completion Record (16 bytes) */
+struct hwrm_async_event_cmpl {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE (0x0UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE (0x1UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE (0x2UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD (0x20UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR (0x30UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR (0xffUL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+};
+
+/* HWRM Asynchronous Event Completion Record for link status change (16 bytes) */
+struct hwrm_async_event_cmpl_link_status_change {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE (0x0UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_UP 0x1UL
+};
+
+/* HWRM Asynchronous Event Completion Record for link MTU change (16 bytes) */
+struct hwrm_async_event_cmpl_link_mtu_change {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE (0x1UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for link speed change (16 bytes) */
+struct hwrm_async_event_cmpl_link_speed_change {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE (0x2UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK 0xfffeUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT 1
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB (0x1UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB (0xaUL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB (0x14UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB (0x19UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB (0x64UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB (0xc8UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16
+};
+
+/* HWRM Asynchronous Event Completion Record for DCB Config change (16 bytes) */
+struct hwrm_async_event_cmpl_dcb_config_change {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for port connection not allowed (16 bytes) */
+struct hwrm_async_event_cmpl_port_conn_not_allowed {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for Function Driver Unload (16 bytes) */
+struct hwrm_async_event_cmpl_func_drvr_unload {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for Function Driver load (16 bytes) */
+struct hwrm_async_event_cmpl_func_drvr_load {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for PF Driver Unload (16 bytes) */
+struct hwrm_async_event_cmpl_pf_drvr_unload {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for PF Driver load (16 bytes) */
+struct hwrm_async_event_cmpl_pf_drvr_load {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD (0x20UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for VF FLR (16 bytes) */
+struct hwrm_async_event_cmpl_vf_flr {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR (0x30UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for VF MAC Addr change (16 bytes) */
+struct hwrm_async_event_cmpl_vf_mac_addr_change {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for HWRM Error (16 bytes) */
+struct hwrm_async_event_cmpl_hwrm_error {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR (0xffUL << 0)
+ __le32 event_data2;
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING (0x0UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL (0x1UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL (0x2UL << 0)
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
+};
+
+/* HW Resource Manager Specification 0.7.8 */
+#define HWRM_VERSION_MAJOR 0
+#define HWRM_VERSION_MINOR 7
+#define HWRM_VERSION_UPDATE 8
+
+#define HWRM_VERSION_STR "0.7.8"
+/* The following is the signature for an HWRM message field that indicates
+ * not applicable (all F's). Cast it to the size of the field if needed.
+ */
+#define HWRM_NA_SIGNATURE ((__le32)(-1))
+#define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */
+#define HWRM_MAX_RESP_LEN (176) /* hwrm_func_qstats */
+#define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. */
+#define HW_HASH_KEY_SIZE 40
+#define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */
+/* Input (16 bytes) */
+struct input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (8 bytes) */
+struct output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+};
+
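/*
 * Illustrative sketch (not part of the patch): one plausible way a caller
 * could fill in the common request header defined by struct input above.
 * The helper name and argument choices are assumptions made for this
 * example only; byte-order helpers and dma_addr_t come from the usual
 * kernel headers.
 */
static void example_hwrm_req_hdr_init(struct input *req, u16 req_type,
				      u16 seq_id, dma_addr_t resp_dma)
{
	req->req_type = cpu_to_le16(req_type);	/* e.g. HWRM_VER_GET */
	/* All-ones means "not applicable" per HWRM_NA_SIGNATURE, cast down
	 * to the 16-bit cmpl_ring field: no completion ring is used here.
	 */
	req->cmpl_ring = cpu_to_le16(0xffff);
	req->seq_id = cpu_to_le16(seq_id);
	req->target_id = cpu_to_le16(0xffff);	/* assumption: no explicit target */
	req->resp_addr = cpu_to_le64(resp_dma);	/* DMA address of the response buffer */
}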
+/* Command numbering (8 bytes) */
+struct cmd_nums {
+ __le16 req_type;
+ #define HWRM_VER_GET (0x0UL)
+ #define HWRM_FUNC_DISABLE (0x10UL)
+ #define HWRM_FUNC_RESET (0x11UL)
+ #define HWRM_FUNC_GETFID (0x12UL)
+ #define HWRM_FUNC_VF_ALLOC (0x13UL)
+ #define HWRM_FUNC_VF_FREE (0x14UL)
+ #define HWRM_FUNC_QCAPS (0x15UL)
+ #define HWRM_FUNC_QCFG (0x16UL)
+ #define HWRM_FUNC_CFG (0x17UL)
+ #define HWRM_FUNC_QSTATS (0x18UL)
+ #define HWRM_FUNC_CLR_STATS (0x19UL)
+ #define HWRM_FUNC_DRV_UNRGTR (0x1aUL)
+ #define HWRM_FUNC_VF_RESC_FREE (0x1bUL)
+ #define HWRM_FUNC_VF_VNIC_IDS_QUERY (0x1cUL)
+ #define HWRM_FUNC_DRV_RGTR (0x1dUL)
+ #define HWRM_FUNC_DRV_QVER (0x1eUL)
+ #define HWRM_FUNC_BUF_RGTR (0x1fUL)
+ #define HWRM_FUNC_VF_CFG (0x20UL)
+ #define HWRM_PORT_PHY_CFG (0x20UL)
+ #define HWRM_PORT_MAC_CFG (0x21UL)
+ #define HWRM_PORT_ENABLE (0x22UL)
+ #define HWRM_PORT_QSTATS (0x23UL)
+ #define HWRM_PORT_LPBK_QSTATS (0x24UL)
+ #define HWRM_PORT_CLR_STATS (0x25UL)
+ #define HWRM_PORT_LPBK_CLR_STATS (0x26UL)
+ #define HWRM_PORT_PHY_QCFG (0x27UL)
+ #define HWRM_PORT_MAC_QCFG (0x28UL)
+ #define HWRM_PORT_BLINK_LED (0x29UL)
+ #define HWRM_QUEUE_QPORTCFG (0x30UL)
+ #define HWRM_QUEUE_QCFG (0x31UL)
+ #define HWRM_QUEUE_CFG (0x32UL)
+ #define HWRM_QUEUE_BUFFERS_QCFG (0x33UL)
+ #define HWRM_QUEUE_BUFFERS_CFG (0x34UL)
+ #define HWRM_QUEUE_PFCENABLE_QCFG (0x35UL)
+ #define HWRM_QUEUE_PFCENABLE_CFG (0x36UL)
+ #define HWRM_QUEUE_PRI2COS_QCFG (0x37UL)
+ #define HWRM_QUEUE_PRI2COS_CFG (0x38UL)
+ #define HWRM_QUEUE_COS2BW_QCFG (0x39UL)
+ #define HWRM_QUEUE_COS2BW_CFG (0x3aUL)
+ #define HWRM_VNIC_ALLOC (0x40UL)
+ #define HWRM_VNIC_FREE (0x41UL)
+ #define HWRM_VNIC_CFG (0x42UL)
+ #define HWRM_VNIC_QCFG (0x43UL)
+ #define HWRM_VNIC_TPA_CFG (0x44UL)
+ #define HWRM_VNIC_TPA_QCFG (0x45UL)
+ #define HWRM_VNIC_RSS_CFG (0x46UL)
+ #define HWRM_VNIC_RSS_QCFG (0x47UL)
+ #define HWRM_VNIC_PLCMODES_CFG (0x48UL)
+ #define HWRM_VNIC_PLCMODES_QCFG (0x49UL)
+ #define HWRM_RING_ALLOC (0x50UL)
+ #define HWRM_RING_FREE (0x51UL)
+ #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS (0x52UL)
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS (0x53UL)
+ #define HWRM_RING_RESET (0x5eUL)
+ #define HWRM_RING_GRP_ALLOC (0x60UL)
+ #define HWRM_RING_GRP_FREE (0x61UL)
+ #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (0x70UL)
+ #define HWRM_VNIC_RSS_COS_LB_CTX_FREE (0x71UL)
+ #define HWRM_ARB_GRP_ALLOC (0x80UL)
+ #define HWRM_ARB_GRP_CFG (0x81UL)
+ #define HWRM_CFA_L2_FILTER_ALLOC (0x90UL)
+ #define HWRM_CFA_L2_FILTER_FREE (0x91UL)
+ #define HWRM_CFA_L2_FILTER_CFG (0x92UL)
+ #define HWRM_CFA_L2_SET_RX_MASK (0x93UL)
+ #define HWRM_CFA_L2_SET_BCASTMCAST_MIRRORING (0x94UL)
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC (0x95UL)
+ #define HWRM_CFA_TUNNEL_FILTER_FREE (0x96UL)
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC (0x97UL)
+ #define HWRM_CFA_ENCAP_RECORD_FREE (0x98UL)
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC (0x99UL)
+ #define HWRM_CFA_NTUPLE_FILTER_FREE (0x9aUL)
+ #define HWRM_CFA_NTUPLE_FILTER_CFG (0x9bUL)
+ #define HWRM_TUNNEL_DST_PORT_QUERY (0xa0UL)
+ #define HWRM_TUNNEL_DST_PORT_ALLOC (0xa1UL)
+ #define HWRM_TUNNEL_DST_PORT_FREE (0xa2UL)
+ #define HWRM_STAT_CTX_ALLOC (0xb0UL)
+ #define HWRM_STAT_CTX_FREE (0xb1UL)
+ #define HWRM_STAT_CTX_QUERY (0xb2UL)
+ #define HWRM_STAT_CTX_CLR_STATS (0xb3UL)
+ #define HWRM_FW_RESET (0xc0UL)
+ #define HWRM_FW_QSTATUS (0xc1UL)
+ #define HWRM_EXEC_FWD_RESP (0xd0UL)
+ #define HWRM_REJECT_FWD_RESP (0xd1UL)
+ #define HWRM_FWD_RESP (0xd2UL)
+ #define HWRM_FWD_ASYNC_EVENT_CMPL (0xd3UL)
+ #define HWRM_TEMP_MONITOR_QUERY (0xe0UL)
+ #define HWRM_MGMT_L2_FILTER_ALLOC (0x100UL)
+ #define HWRM_MGMT_L2_FILTER_FREE (0x101UL)
+ #define HWRM_DBG_READ_DIRECT (0xff10UL)
+ #define HWRM_DBG_READ_INDIRECT (0xff11UL)
+ #define HWRM_DBG_WRITE_DIRECT (0xff12UL)
+ #define HWRM_DBG_WRITE_INDIRECT (0xff13UL)
+ #define HWRM_DBG_DUMP (0xff14UL)
+ #define HWRM_NVM_MODIFY (0xfff4UL)
+ #define HWRM_NVM_VERIFY_UPDATE (0xfff5UL)
+ #define HWRM_NVM_GET_DEV_INFO (0xfff6UL)
+ #define HWRM_NVM_ERASE_DIR_ENTRY (0xfff7UL)
+ #define HWRM_NVM_MOD_DIR_ENTRY (0xfff8UL)
+ #define HWRM_NVM_FIND_DIR_ENTRY (0xfff9UL)
+ #define HWRM_NVM_GET_DIR_ENTRIES (0xfffaUL)
+ #define HWRM_NVM_GET_DIR_INFO (0xfffbUL)
+ #define HWRM_NVM_RAW_DUMP (0xfffcUL)
+ #define HWRM_NVM_READ (0xfffdUL)
+ #define HWRM_NVM_WRITE (0xfffeUL)
+ #define HWRM_NVM_RAW_WRITE_BLK (0xffffUL)
+ __le16 unused_0[3];
+};
+
+/* Return Codes (8 bytes) */
+struct ret_codes {
+ __le16 error_code;
+ #define HWRM_ERR_CODE_SUCCESS (0x0UL)
+ #define HWRM_ERR_CODE_FAIL (0x1UL)
+ #define HWRM_ERR_CODE_INVALID_PARAMS (0x2UL)
+ #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED (0x3UL)
+ #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR (0x4UL)
+ #define HWRM_ERR_CODE_INVALID_FLAGS (0x5UL)
+ #define HWRM_ERR_CODE_INVALID_ENABLES (0x6UL)
+ #define HWRM_ERR_CODE_HWRM_ERROR (0xfUL)
+ #define HWRM_ERR_CODE_UNKNOWN_ERR (0xfffeUL)
+ #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED (0xffffUL)
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_err_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 opaque_0;
+ __le16 opaque_1;
+ u8 opaque_2;
+ u8 valid;
+};
+
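/*
 * Illustrative sketch (not part of the patch): checking a completed
 * response.  The last byte of every response is the 'valid' indication
 * (HWRM_RESP_VALID_KEY above), and error_code uses the HWRM_ERR_CODE_*
 * values from struct ret_codes.  Poll timing and any ordering barriers
 * are deliberately left out; errno values assume normal kernel context.
 */
static int example_hwrm_check_resp(const struct hwrm_err_output *resp)
{
	if (!resp->valid)
		return -EBUSY;	/* firmware has not written the response back yet */
	if (le16_to_cpu(resp->error_code) != HWRM_ERR_CODE_SUCCESS)
		return -EIO;	/* request rejected; see struct ret_codes for the codes */
	return 0;
}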
+/* Port Tx Statistics Formats (408 bytes) */
+struct tx_port_stats {
+ __le64 tx_64b_frames;
+ __le64 tx_65b_127b_frames;
+ __le64 tx_128b_255b_frames;
+ __le64 tx_256b_511b_frames;
+ __le64 tx_512b_1023b_frames;
+ __le64 tx_1024b_1518_frames;
+ __le64 tx_good_vlan_frames;
+ __le64 tx_1519b_2047_frames;
+ __le64 tx_2048b_4095b_frames;
+ __le64 tx_4096b_9216b_frames;
+ __le64 tx_9217b_16383b_frames;
+ __le64 tx_good_frames;
+ __le64 tx_total_frames;
+ __le64 tx_ucast_frames;
+ __le64 tx_mcast_frames;
+ __le64 tx_bcast_frames;
+ __le64 tx_pause_frames;
+ __le64 tx_pfc_frames;
+ __le64 tx_jabber_frames;
+ __le64 tx_fcs_err_frames;
+ __le64 tx_control_frames;
+ __le64 tx_oversz_frames;
+ __le64 tx_single_dfrl_frames;
+ __le64 tx_multi_dfrl_frames;
+ __le64 tx_single_coll_frames;
+ __le64 tx_multi_coll_frames;
+ __le64 tx_late_coll_frames;
+ __le64 tx_excessive_coll_frames;
+ __le64 tx_frag_frames;
+ __le64 tx_err;
+ __le64 tx_tagged_frames;
+ __le64 tx_dbl_tagged_frames;
+ __le64 tx_runt_frames;
+ __le64 tx_fifo_underruns;
+ __le64 tx_pfc_ena_frames_pri0;
+ __le64 tx_pfc_ena_frames_pri1;
+ __le64 tx_pfc_ena_frames_pri2;
+ __le64 tx_pfc_ena_frames_pri3;
+ __le64 tx_pfc_ena_frames_pri4;
+ __le64 tx_pfc_ena_frames_pri5;
+ __le64 tx_pfc_ena_frames_pri6;
+ __le64 tx_pfc_ena_frames_pri7;
+ __le64 tx_eee_lpi_events;
+ __le64 tx_eee_lpi_duration;
+ __le64 tx_llfc_logical_msgs;
+ __le64 tx_hcfc_msgs;
+ __le64 tx_total_collisions;
+ __le64 tx_bytes;
+ __le64 tx_xthol_frames;
+ __le64 tx_stat_discard;
+ __le64 tx_stat_error;
+};
+
+/* Port Rx Statistics Formats (528 bytes) */
+struct rx_port_stats {
+ __le64 rx_64b_frames;
+ __le64 rx_65b_127b_frames;
+ __le64 rx_128b_255b_frames;
+ __le64 rx_256b_511b_frames;
+ __le64 rx_512b_1023b_frames;
+ __le64 rx_1024b_1518_frames;
+ __le64 rx_good_vlan_frames;
+ __le64 rx_1519b_2047b_frames;
+ __le64 rx_2048b_4095b_frames;
+ __le64 rx_4096b_9216b_frames;
+ __le64 rx_9217b_16383b_frames;
+ __le64 rx_total_frames;
+ __le64 rx_ucast_frames;
+ __le64 rx_mcast_frames;
+ __le64 rx_bcast_frames;
+ __le64 rx_fcs_err_frames;
+ __le64 rx_ctrl_frames;
+ __le64 rx_pause_frames;
+ __le64 rx_pfc_frames;
+ __le64 rx_unsupported_opcode_frames;
+ __le64 rx_unsupported_da_pausepfc_frames;
+ __le64 rx_wrong_sa_frames;
+ __le64 rx_align_err_frames;
+ __le64 rx_oor_len_frames;
+ __le64 rx_code_err_frames;
+ __le64 rx_false_carrier_frames;
+ __le64 rx_ovrsz_frames;
+ __le64 rx_jbr_frames;
+ __le64 rx_mtu_err_frames;
+ __le64 rx_match_crc_frames;
+ __le64 rx_promiscuous_frames;
+ __le64 rx_tagged_frames;
+ __le64 rx_double_tagged_frames;
+ __le64 rx_trunc_frames;
+ __le64 rx_good_frames;
+ __le64 rx_pfc_xon2xoff_frames_pri0;
+ __le64 rx_pfc_xon2xoff_frames_pri1;
+ __le64 rx_pfc_xon2xoff_frames_pri2;
+ __le64 rx_pfc_xon2xoff_frames_pri3;
+ __le64 rx_pfc_xon2xoff_frames_pri4;
+ __le64 rx_pfc_xon2xoff_frames_pri5;
+ __le64 rx_pfc_xon2xoff_frames_pri6;
+ __le64 rx_pfc_xon2xoff_frames_pri7;
+ __le64 rx_pfc_ena_frames_pri0;
+ __le64 rx_pfc_ena_frames_pri1;
+ __le64 rx_pfc_ena_frames_pri2;
+ __le64 rx_pfc_ena_frames_pri3;
+ __le64 rx_pfc_ena_frames_pri4;
+ __le64 rx_pfc_ena_frames_pri5;
+ __le64 rx_pfc_ena_frames_pri6;
+ __le64 rx_pfc_ena_frames_pri7;
+ __le64 rx_sch_crc_err_frames;
+ __le64 rx_undrsz_frames;
+ __le64 rx_frag_frames;
+ __le64 rx_eee_lpi_events;
+ __le64 rx_eee_lpi_duration;
+ __le64 rx_llfc_physical_msgs;
+ __le64 rx_llfc_logical_msgs;
+ __le64 rx_llfc_msgs_with_crc_err;
+ __le64 rx_hcfc_msgs;
+ __le64 rx_hcfc_msgs_with_crc_err;
+ __le64 rx_bytes;
+ __le64 rx_runt_bytes;
+ __le64 rx_runt_frames;
+ __le64 rx_stat_discard;
+ __le64 rx_stat_err;
+};
+
+/* hwrm_ver_get */
+/* Input (24 bytes) */
+struct hwrm_ver_get_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 hwrm_intf_maj;
+ u8 hwrm_intf_min;
+ u8 hwrm_intf_upd;
+ u8 unused_0[5];
+};
+
+/* Output (128 bytes) */
+struct hwrm_ver_get_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 hwrm_intf_maj;
+ u8 hwrm_intf_min;
+ u8 hwrm_intf_upd;
+ u8 hwrm_intf_rsvd;
+ u8 hwrm_fw_maj;
+ u8 hwrm_fw_min;
+ u8 hwrm_fw_bld;
+ u8 hwrm_fw_rsvd;
+ u8 ape_fw_maj;
+ u8 ape_fw_min;
+ u8 ape_fw_bld;
+ u8 ape_fw_rsvd;
+ u8 kong_fw_maj;
+ u8 kong_fw_min;
+ u8 kong_fw_bld;
+ u8 kong_fw_rsvd;
+ u8 tang_fw_maj;
+ u8 tang_fw_min;
+ u8 tang_fw_bld;
+ u8 tang_fw_rsvd;
+ u8 bono_fw_maj;
+ u8 bono_fw_min;
+ u8 bono_fw_bld;
+ u8 bono_fw_rsvd;
+ char hwrm_fw_name[16];
+ char ape_fw_name[16];
+ char kong_fw_name[16];
+ char tang_fw_name[16];
+ char bono_fw_name[16];
+ __le16 chip_num;
+ u8 chip_rev;
+ u8 chip_metal;
+ u8 chip_bond_id;
+ u8 unused_0;
+ __le16 max_req_win_len;
+ __le16 max_resp_len;
+ __le16 def_req_timeout;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
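/*
 * Illustrative sketch (not part of the patch): the driver reports the
 * interface version it was built against in the hwrm_ver_get request and
 * can compare it with what the firmware returns.  Treating a mismatched
 * major version as incompatible is an assumption of this example, not a
 * rule stated by the header.
 */
static void example_ver_get_fill(struct hwrm_ver_get_input *req)
{
	req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req->hwrm_intf_min = HWRM_VERSION_MINOR;
	req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
}

static bool example_ver_get_compatible(const struct hwrm_ver_get_output *resp)
{
	return resp->hwrm_intf_maj == HWRM_VERSION_MAJOR;
}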
+/* hwrm_func_disable */
+/* Input (24 bytes) */
+struct hwrm_func_disable_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_DISABLE_REQ_ENABLES_VF_ID_VALID 0x1UL
+ __le16 vf_id;
+ __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_disable_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_reset */
+/* Input (24 bytes) */
+struct hwrm_func_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL
+ __le16 vf_id;
+ __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_getfid */
+/* Input (24 bytes) */
+struct hwrm_func_getfid_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL
+ __le16 pci_id;
+ __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_getfid_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_func_vf_alloc */
+/* Input (24 bytes) */
+struct hwrm_func_vf_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL
+ __le16 first_vf_id;
+ __le16 num_vfs;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 first_vf_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_func_vf_free */
+/* Input (24 bytes) */
+struct hwrm_func_vf_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL
+ __le16 first_vf_id;
+ __le16 num_vfs;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_vf_cfg */
+/* Input (24 bytes) */
+struct hwrm_func_vf_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
+ #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
+ __le16 mtu;
+ __le16 guest_vlan;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_qcaps */
+/* Input (24 bytes) */
+struct hwrm_func_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 unused_0[3];
+};
+
+/* Output (80 bytes) */
+struct hwrm_func_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ __le16 port_id;
+ __le32 flags;
+ #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
+ u8 perm_mac_address[6];
+ __le16 max_rsscos_ctx;
+ __le16 max_cmpl_rings;
+ __le16 max_tx_rings;
+ __le16 max_rx_rings;
+ __le16 max_l2_ctxs;
+ __le16 max_vnics;
+ __le16 first_vf_id;
+ __le16 max_vfs;
+ __le16 max_stat_ctx;
+ __le32 max_encap_records;
+ __le32 max_decap_records;
+ __le32 max_tx_em_flows;
+ __le32 max_tx_wm_flows;
+ __le32 max_rx_em_flows;
+ __le32 max_rx_wm_flows;
+ __le32 max_mcast_filters;
+ __le32 max_flow_id;
+ __le32 max_hw_ring_grps;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_func_cfg */
+/* Input (88 bytes) */
+struct hwrm_func_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ u8 unused_0;
+ u8 unused_1;
+ __le32 flags;
+ #define FUNC_CFG_REQ_FLAGS_PROM_MODE 0x1UL
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK 0x2UL
+ #define FUNC_CFG_REQ_FLAGS_SRC_IP_ADDR_CHECK 0x4UL
+ #define FUNC_CFG_REQ_FLAGS_VLAN_PRI_MATCH 0x8UL
+ #define FUNC_CFG_REQ_FLAGS_DFLT_PRI_NOMATCH 0x10UL
+ #define FUNC_CFG_REQ_FLAGS_DISABLE_PAUSE 0x20UL
+ #define FUNC_CFG_REQ_FLAGS_DISABLE_STP 0x40UL
+ #define FUNC_CFG_REQ_FLAGS_DISABLE_LLDP 0x80UL
+ #define FUNC_CFG_REQ_FLAGS_DISABLE_PTPV2 0x100UL
+ __le32 enables;
+ #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
+ #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
+ #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
+ #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
+ #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
+ #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
+ #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
+ #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
+ __le16 mtu;
+ __le16 mru;
+ __le16 num_rsscos_ctxs;
+ __le16 num_cmpl_rings;
+ __le16 num_tx_rings;
+ __le16 num_rx_rings;
+ __le16 num_l2_ctxs;
+ __le16 num_vnics;
+ __le16 num_stat_ctxs;
+ __le16 num_hw_ring_grps;
+ u8 dflt_mac_addr[6];
+ __le16 dflt_vlan;
+ __be32 dflt_ip_addr[4];
+ __le32 min_bw;
+ __le32 max_bw;
+ __le16 async_event_cr;
+ u8 vlan_antispoof_mode;
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK (0x0UL << 0)
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN (0x1UL << 0)
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE (0x2UL << 0)
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN (0x3UL << 0)
+ u8 allowed_vlan_pris;
+ #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_NOCHECK (0x0UL << 0)
+ #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_VALIDATE_VLAN (0x1UL << 0)
+ #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_INSERT_IF_VLANDNE (0x2UL << 0)
+ #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_INSERT_OR_OVERRIDE_VLAN (0x3UL << 0)
+ u8 evb_mode;
+ #define FUNC_CFG_REQ_EVB_MODE_NO_EVB (0x0UL << 0)
+ #define FUNC_CFG_REQ_EVB_MODE_VEB (0x1UL << 0)
+ #define FUNC_CFG_REQ_EVB_MODE_VEPA (0x2UL << 0)
+ u8 unused_2;
+ __le16 num_mcast_filters;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
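/*
 * Illustrative sketch (not part of the patch): hwrm_func_cfg uses the
 * 'enables' bitmap to mark which of the optional fields that follow are
 * to be applied; fields whose FUNC_CFG_REQ_ENABLES_* bit is clear can be
 * left at zero.  The helper below only sets ring counts and is an
 * example, not the driver's actual configuration path.
 */
static void example_func_cfg_rings(struct hwrm_func_cfg_input *req,
				   u16 cmpl_rings, u16 tx_rings, u16 rx_rings)
{
	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
				   FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
				   FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS);
	req->num_cmpl_rings = cpu_to_le16(cmpl_rings);
	req->num_tx_rings = cpu_to_le16(tx_rings);
	req->num_rx_rings = cpu_to_le16(rx_rings);
}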
+/* hwrm_func_qstats */
+/* Input (24 bytes) */
+struct hwrm_func_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 unused_0[3];
+};
+
+/* Output (176 bytes) */
+struct hwrm_func_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_err_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_err_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 rx_agg_pkts;
+ __le64 rx_agg_bytes;
+ __le64 rx_agg_events;
+ __le64 rx_agg_aborts;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_clr_stats */
+/* Input (24 bytes) */
+struct hwrm_func_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_vf_resc_free */
+/* Input (24 bytes) */
+struct hwrm_func_vf_resc_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_resc_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_vf_vnic_ids_query */
+/* Input (32 bytes) */
+struct hwrm_func_vf_vnic_ids_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ u8 unused_0;
+ u8 unused_1;
+ __le32 max_vnic_id_cnt;
+ __le64 vnic_id_tbl_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_vnic_ids_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 vnic_id_cnt;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_func_drv_rgtr */
+/* Input (80 bytes) */
+struct hwrm_func_drv_rgtr_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
+ __le32 enables;
+ #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
+ __le16 os_type;
+ u8 ver_maj;
+ u8 ver_min;
+ u8 ver_upd;
+ u8 unused_0;
+ __le16 unused_1;
+ __le32 timestamp;
+ __le32 unused_2;
+ __le32 vf_req_fwd[8];
+ __le32 async_event_fwd[8];
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_drv_rgtr_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_drv_unrgtr */
+/* Input (24 bytes) */
+struct hwrm_func_drv_unrgtr_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN 0x1UL
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_drv_unrgtr_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_buf_rgtr */
+/* Input (128 bytes) */
+struct hwrm_func_buf_rgtr_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL
+ #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL
+ __le16 vf_id;
+ __le16 req_buf_num_pages;
+ __le16 req_buf_page_size;
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B (0x4UL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K (0xcUL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K (0xdUL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K (0x10UL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M (0x16UL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M (0x17UL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G (0x1eUL << 0)
+ __le16 req_buf_len;
+ __le16 resp_buf_len;
+ u8 unused_0;
+ u8 unused_1;
+ __le64 req_buf_page_addr0;
+ __le64 req_buf_page_addr1;
+ __le64 req_buf_page_addr2;
+ __le64 req_buf_page_addr3;
+ __le64 req_buf_page_addr4;
+ __le64 req_buf_page_addr5;
+ __le64 req_buf_page_addr6;
+ __le64 req_buf_page_addr7;
+ __le64 req_buf_page_addr8;
+ __le64 req_buf_page_addr9;
+ __le64 error_buf_addr;
+ __le64 resp_buf_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_buf_rgtr_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_drv_qver */
+/* Input (24 bytes) */
+struct hwrm_func_drv_qver_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_DRV_QVER_REQ_ENABLES_OS_TYPE_VALID 0x1UL
+ #define FUNC_DRV_QVER_REQ_ENABLES_VER_VALID 0x2UL
+ __le16 fid;
+ __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_drv_qver_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 os_type;
+ u8 ver_maj;
+ u8 ver_min;
+ u8 ver_upd;
+ u8 unused_0;
+ u8 unused_1;
+ u8 valid;
+};
+
+/* hwrm_port_phy_cfg */
+/* Input (48 bytes) */
+struct hwrm_port_phy_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN 0x2UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
+ #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
+ __le32 enables;
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
+ #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
+ #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
+ #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
+ __le16 port_id;
+ __le16 force_link_speed;
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB (0x1UL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB (0xaUL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB (0x14UL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB (0x19UL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB (0x64UL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB (0xc8UL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB (0xfaUL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB (0x190UL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB (0x1f4UL << 0)
+ u8 auto_mode;
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE (0x0UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED (0x2UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_MASK (0x4UL << 0)
+ u8 auto_duplex;
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF (0x0UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL (0x1UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH (0x2UL << 0)
+ u8 auto_pause;
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
+ u8 unused_0;
+ __le16 auto_link_speed;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB (0x1UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB (0xaUL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB (0x14UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB (0x19UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB (0x64UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB (0xc8UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB (0xfaUL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB (0x190UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB (0x1f4UL << 0)
+ __le16 auto_link_speed_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ u8 wirespeed;
+ #define PORT_PHY_CFG_REQ_WIRESPEED_OFF (0x0UL << 0)
+ #define PORT_PHY_CFG_REQ_WIRESPEED_ON (0x1UL << 0)
+ u8 lpbk;
+ #define PORT_PHY_CFG_REQ_LPBK_NONE (0x0UL << 0)
+ #define PORT_PHY_CFG_REQ_LPBK_LOCAL (0x1UL << 0)
+ #define PORT_PHY_CFG_REQ_LPBK_REMOTE (0x2UL << 0)
+ u8 force_pause;
+ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
+ u8 unused_1;
+ __le32 preemphasis;
+ __le32 unused_2;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_phy_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
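/*
 * Illustrative sketch (not part of the patch): the *_LINK_SPEED_* values
 * above encode the speed in units of 100 Mbps (0xa = 1 Gb, 0x64 = 10 Gb,
 * 0x1f4 = 50 Gb), while auto_link_speed_mask is a plain bitmap.  The
 * helper below advertises a fixed set of speeds and restarts
 * autonegotiation; reading AUTO_MODE_MASK as "advertise exactly the
 * speeds given in auto_link_speed_mask" is an assumption of this example.
 */
static void example_phy_cfg_autoneg(struct hwrm_port_phy_cfg_input *req,
				    u16 port_id, bool adv_10g, bool adv_25g)
{
	u16 mask = 0;

	if (adv_10g)
		mask |= PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB;
	if (adv_25g)
		mask |= PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB;

	req->port_id = cpu_to_le16(port_id);
	req->enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE |
				   PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
	req->auto_mode = PORT_PHY_CFG_REQ_AUTO_MODE_MASK;
	req->auto_link_speed_mask = cpu_to_le16(mask);
	req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
}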
+/* hwrm_port_phy_qcfg */
+/* Input (24 bytes) */
+struct hwrm_port_phy_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 unused_0[3];
+};
+
+/* Output (48 bytes) */
+struct hwrm_port_phy_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 link;
+ #define PORT_PHY_QCFG_RESP_LINK_NO_LINK (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SIGNAL (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_LINK (0x2UL << 0)
+ u8 unused_0;
+ __le16 link_speed;
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB (0xaUL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB (0x14UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB (0x19UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB (0x64UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB (0xc8UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB (0xfaUL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB (0x190UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB (0x1f4UL << 0)
+ u8 duplex;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_HALF (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_DUPLEX_FULL (0x1UL << 0)
+ u8 pause;
+ #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
+ __le16 support_speeds;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL
+ __le16 force_link_speed;
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB (0xaUL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB (0x14UL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB (0x19UL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB (0x64UL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB (0xc8UL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB (0xfaUL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB (0x190UL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB (0x1f4UL << 0)
+ u8 auto_mode;
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_MASK (0x4UL << 0)
+ u8 auto_pause;
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
+ __le16 auto_link_speed;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB (0xaUL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB (0x14UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB (0x19UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB (0x64UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB (0xc8UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB (0xfaUL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB (0x190UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB (0x1f4UL << 0)
+ __le16 auto_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ u8 wirespeed;
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_ON (0x1UL << 0)
+ u8 lpbk;
+ #define PORT_PHY_QCFG_RESP_LPBK_NONE (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_LPBK_LOCAL (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_LPBK_REMOTE (0x2UL << 0)
+ u8 force_pause;
+ #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
+ u8 duplex_setting;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_SETTING_HALF (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_DUPLEX_SETTING_FULL (0x1UL << 0)
+ __le32 preemphasis;
+ u8 phy_maj;
+ u8 phy_min;
+ u8 phy_bld;
+ u8 phy_type;
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR4 (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR4 (0x3UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR4 (0x4UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 (0x5UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX4 (0x6UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR (0x7UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET (0x8UL << 0)
+ u8 media_type;
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE (0x3UL << 0)
+ u8 transceiver_type;
+ #define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_EXTERNAL (0x2UL << 0)
+ u8 phy_addr;
+ #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
+ #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
+ u8 unused_2;
+ __le16 link_partner_adv_speeds;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL
+ u8 link_partner_adv_auto_mode;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_MASK (0x4UL << 0)
+ u8 link_partner_adv_pause;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
+ u8 unused_3;
+ u8 unused_4;
+ u8 unused_5;
+ u8 valid;
+};
+
+/* hwrm_port_mac_cfg */
+/* Input (32 bytes) */
+struct hwrm_port_mac_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
+ #define PORT_MAC_CFG_REQ_FLAGS_COS_ASSIGNMENT_ENABLE 0x2UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
+ __le32 enables;
+ #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
+ #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
+ #define PORT_MAC_CFG_REQ_ENABLES_IVLAN_PRI2COS_MAP_PRI 0x4UL
+ #define PORT_MAC_CFG_REQ_ENABLES_LCOS_MAP_PRI 0x8UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
+ #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
+ __le16 port_id;
+ u8 ipg;
+ u8 lpbk;
+ #define PORT_MAC_CFG_REQ_LPBK_NONE (0x0UL << 0)
+ #define PORT_MAC_CFG_REQ_LPBK_LOCAL (0x1UL << 0)
+ #define PORT_MAC_CFG_REQ_LPBK_REMOTE (0x2UL << 0)
+ u8 ivlan_pri2cos_map_pri;
+ u8 lcos_map_pri;
+ u8 tunnel_pri2cos_map_pri;
+ u8 dscp2pri_map_pri;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_mac_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 mru;
+ __le16 mtu;
+ u8 ipg;
+ u8 lpbk;
+ #define PORT_MAC_CFG_RESP_LPBK_NONE (0x0UL << 0)
+ #define PORT_MAC_CFG_RESP_LPBK_LOCAL (0x1UL << 0)
+ #define PORT_MAC_CFG_RESP_LPBK_REMOTE (0x2UL << 0)
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_port_enable */
+/* Input (24 bytes) */
+struct hwrm_port_enable_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_ENABLE_REQ_FLAGS_FORWARD_TRAFFIC 0x1UL
+ __le16 port_id;
+ __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_enable_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_port_qstats */
+/* Input (40 bytes) */
+struct hwrm_port_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2[3];
+ u8 unused_3;
+ __le64 tx_stat_host_addr;
+ __le64 rx_stat_host_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_port_lpbk_qstats */
+/* Input (16 bytes) */
+struct hwrm_port_lpbk_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (64 bytes) */
+struct hwrm_port_lpbk_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 lpbk_ucast_frames;
+ __le64 lpbk_mcast_frames;
+ __le64 lpbk_bcast_frames;
+ __le64 lpbk_ucast_bytes;
+ __le64 lpbk_mcast_bytes;
+ __le64 lpbk_bcast_bytes;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_port_clr_stats */
+/* Input (24 bytes) */
+struct hwrm_port_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_port_lpbk_clr_stats */
+/* Input (16 bytes) */
+struct hwrm_port_lpbk_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_lpbk_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_port_blink_led */
+/* Input (24 bytes) */
+struct hwrm_port_blink_led_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 num_blinks;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_blink_led_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_queue_qportcfg */
+/* Input (24 bytes) */
+struct hwrm_queue_qportcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ __le16 port_id;
+ __le16 unused_0;
+};
+
+/* Output (32 bytes) */
+struct hwrm_queue_qportcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 max_configurable_queues;
+ u8 max_configurable_lossless_queues;
+ u8 queue_cfg_allowed;
+ u8 queue_buffers_cfg_allowed;
+ u8 queue_pfcenable_cfg_allowed;
+ u8 queue_pri2cos_cfg_allowed;
+ u8 queue_cos2bw_cfg_allowed;
+ u8 queue_id0;
+ u8 queue_id0_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 queue_id1;
+ u8 queue_id1_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 queue_id2;
+ u8 queue_id2_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 queue_id3;
+ u8 queue_id3_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 queue_id4;
+ u8 queue_id4_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 queue_id5;
+ u8 queue_id5_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 queue_id6;
+ u8 queue_id6_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 queue_id7;
+ u8 queue_id7_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 valid;
+};
+
+/* hwrm_queue_cfg */
+/* Input (40 bytes) */
+struct hwrm_queue_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_CFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
+ #define QUEUE_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ __le32 enables;
+ #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
+ #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
+ __le32 queue_id;
+ __le32 dflt_len;
+ u8 service_profile;
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_queue_buffers_cfg */
+/* Input (56 bytes) */
+struct hwrm_queue_buffers_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
+ #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ __le32 enables;
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_RESERVED 0x1UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_SHARED 0x2UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_GROUP 0x4UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XOFF 0x8UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XON 0x10UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_FULL 0x20UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_NOTFULL 0x40UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_MAX 0x80UL
+ __le32 queue_id;
+ __le32 reserved;
+ __le32 shared;
+ __le32 xoff;
+ __le32 xon;
+ __le32 full;
+ __le32 notfull;
+ __le32 max;
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_buffers_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_queue_pfcenable_cfg */
+/* Input (24 bytes) */
+struct hwrm_queue_pfcenable_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI7_PFC_ENABLED 0x80UL
+ __le16 port_id;
+ __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_pfcenable_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_queue_pri2cos_cfg */
+/* Input (40 bytes) */
+struct hwrm_queue_pri2cos_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x2UL
+ __le32 enables;
+ u8 port_id;
+ u8 pri0_cos;
+ u8 pri1_cos;
+ u8 pri2_cos;
+ u8 pri3_cos;
+ u8 pri4_cos;
+ u8 pri5_cos;
+ u8 pri6_cos;
+ u8 pri7_cos;
+ u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_pri2cos_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_queue_cos2bw_cfg */
+/* Input (128 bytes) */
+struct hwrm_queue_cos2bw_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL
+ __le16 port_id;
+ u8 queue_id0;
+ u8 unused_0;
+ __le32 queue_id0_min_bw;
+ __le32 queue_id0_max_bw;
+ u8 queue_id0_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ u8 queue_id0_pri_lvl;
+ u8 queue_id0_bw_weight;
+ u8 queue_id1;
+ __le32 queue_id1_min_bw;
+ __le32 queue_id1_max_bw;
+ u8 queue_id1_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ u8 queue_id1_pri_lvl;
+ u8 queue_id1_bw_weight;
+ u8 queue_id2;
+ __le32 queue_id2_min_bw;
+ __le32 queue_id2_max_bw;
+ u8 queue_id2_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ u8 queue_id2_pri_lvl;
+ u8 queue_id2_bw_weight;
+ u8 queue_id3;
+ __le32 queue_id3_min_bw;
+ __le32 queue_id3_max_bw;
+ u8 queue_id3_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ u8 queue_id3_pri_lvl;
+ u8 queue_id3_bw_weight;
+ u8 queue_id4;
+ __le32 queue_id4_min_bw;
+ __le32 queue_id4_max_bw;
+ u8 queue_id4_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ u8 queue_id4_pri_lvl;
+ u8 queue_id4_bw_weight;
+ u8 queue_id5;
+ __le32 queue_id5_min_bw;
+ __le32 queue_id5_max_bw;
+ u8 queue_id5_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ u8 queue_id5_pri_lvl;
+ u8 queue_id5_bw_weight;
+ u8 queue_id6;
+ __le32 queue_id6_min_bw;
+ __le32 queue_id6_max_bw;
+ u8 queue_id6_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ u8 queue_id6_pri_lvl;
+ u8 queue_id6_bw_weight;
+ u8 queue_id7;
+ __le32 queue_id7_min_bw;
+ __le32 queue_id7_max_bw;
+ u8 queue_id7_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ u8 queue_id7_pri_lvl;
+ u8 queue_id7_bw_weight;
+ u8 unused_1[5];
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_cos2bw_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_vnic_alloc */
+/* Input (24 bytes) */
+struct hwrm_vnic_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 vnic_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_vnic_free */
+/* Input (24 bytes) */
+struct hwrm_vnic_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_vnic_cfg */
+/* Input (40 bytes) */
+struct hwrm_vnic_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL
+ #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL
+ __le32 enables;
+ #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
+ #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
+ #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
+ #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
+ #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
+ __le16 vnic_id;
+ __le16 dflt_ring_grp;
+ __le16 rss_rule;
+ __le16 cos_rule;
+ __le16 lb_rule;
+ __le16 mru;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_vnic_tpa_cfg */
+/* Input (40 bytes) */
+struct hwrm_vnic_tpa_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL
+ __le32 enables;
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL
+ __le16 vnic_id;
+ __le16 max_agg_segs;
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 (0x0UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 (0x1UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 (0x2UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 (0x3UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX (0x1fUL << 0)
+ __le16 max_aggs;
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 (0x0UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 (0x1UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 (0x2UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 (0x3UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 (0x4UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX (0x7UL << 0)
+ u8 unused_0;
+ u8 unused_1;
+ __le32 max_agg_timer;
+ __le32 min_agg_len;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_tpa_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cfg */
+/* Input (48 bytes) */
+struct hwrm_vnic_rss_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 hash_type;
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
+ __le32 unused_0;
+ __le64 ring_grp_tbl_addr;
+ __le64 hash_key_tbl_addr;
+ __le16 rss_ctx_idx;
+ __le16 unused_1[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_rss_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
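/*
 * Illustrative sketch (not part of the patch): the RSS message carries
 * two DMA addresses, one for the indirection table (HW_HASH_INDEX_SIZE
 * entries of ring-group IDs) and one for the hash key (HW_HASH_KEY_SIZE
 * bytes), plus the hash_type bitmap selecting which header combinations
 * feed the hash.  Mapping those buffers for DMA is assumed to have been
 * done by the caller.
 */
static void example_vnic_rss_cfg_fill(struct hwrm_vnic_rss_cfg_input *req,
				      u16 rss_ctx_idx,
				      dma_addr_t ring_grp_tbl_dma,
				      dma_addr_t hash_key_dma)
{
	req->hash_type = cpu_to_le32(VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
				     VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
				     VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
				     VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6);
	req->ring_grp_tbl_addr = cpu_to_le64(ring_grp_tbl_dma);
	req->hash_key_tbl_addr = cpu_to_le64(hash_key_dma);
	req->rss_ctx_idx = cpu_to_le16(rss_ctx_idx);
}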
+/* hwrm_vnic_plcmodes_cfg */
+/* Input (40 bytes) */
+struct hwrm_vnic_plcmodes_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL
+ __le32 enables;
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL
+ __le32 vnic_id;
+ __le16 jumbo_thresh;
+ __le16 hds_offset;
+ __le16 hds_threshold;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_plcmodes_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc */
+/* Input (16 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 rss_cos_lb_ctx_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_free */
+/* Input (24 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 rss_cos_lb_ctx_id;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_ring_alloc */
+/* Input (80 bytes) */
+struct hwrm_ring_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define RING_ALLOC_REQ_ENABLES_ARB_GRP_ID_VALID 0x1UL
+ #define RING_ALLOC_REQ_ENABLES_INPUT_NUM_VALID 0x2UL
+ #define RING_ALLOC_REQ_ENABLES_WEIGHT_VALID 0x4UL
+ #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
+ #define RING_ALLOC_REQ_ENABLES_MIN_BW_VALID 0x10UL
+ #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
+ u8 ring_type;
+ #define RING_ALLOC_REQ_RING_TYPE_CMPL (0x0UL << 0)
+ #define RING_ALLOC_REQ_RING_TYPE_TX (0x1UL << 0)
+ #define RING_ALLOC_REQ_RING_TYPE_RX (0x2UL << 0)
+ #define RING_ALLOC_REQ_RING_TYPE_STATUS (0x3UL << 0)
+ #define RING_ALLOC_REQ_RING_TYPE_CMD (0x4UL << 0)
+ u8 unused_0;
+ __le16 unused_1;
+ __le64 page_tbl_addr;
+ __le32 fbo;
+ u8 page_size;
+ u8 page_tbl_depth;
+ u8 unused_2;
+ u8 unused_3;
+ __le32 length;
+ __le16 logical_id;
+ __le16 cmpl_ring_id;
+ __le16 queue_id;
+ u8 unused_4;
+ u8 unused_5;
+ __le32 arb_grp_id;
+ __le16 input_number;
+ u8 unused_6;
+ u8 unused_7;
+ __le32 weight;
+ __le32 stat_ctx_id;
+ __le32 min_bw;
+ __le32 max_bw;
+ u8 int_mode;
+ #define RING_ALLOC_REQ_INT_MODE_LEGACY (0x0UL << 0)
+ #define RING_ALLOC_REQ_INT_MODE_MSI (0x1UL << 0)
+ #define RING_ALLOC_REQ_INT_MODE_MSIX (0x2UL << 0)
+ #define RING_ALLOC_REQ_INT_MODE_POLL (0x3UL << 0)
+ u8 unused_8[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 ring_id;
+ __le16 logical_ring_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
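+/* Usage sketch (illustrative only, not part of the HWRM definitions):
+ * allocating a TX ring and reading back the hardware ring id from the
+ * response.  It assumes the locking convention visible in bnxt_sriov.c
+ * later in this patch (_hwrm_send_message() under bp->hwrm_cmd_lock,
+ * response buffer at bp->hwrm_cmd_resp_addr) and the HWRM_RING_ALLOC
+ * request id defined earlier in this header.
+ *
+ *    static int example_alloc_tx_ring(struct bnxt *bp, dma_addr_t pg_tbl,
+ *                                     u32 nr_descs, u16 cmpl_ring_id,
+ *                                     u16 *ring_id)
+ *    {
+ *        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ *        struct hwrm_ring_alloc_input req = {0};
+ *        int rc;
+ *
+ *        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
+ *        req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
+ *        req.page_tbl_addr = cpu_to_le64(pg_tbl);
+ *        req.length = cpu_to_le32(nr_descs);
+ *        req.cmpl_ring_id = cpu_to_le16(cmpl_ring_id);
+ *        req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ *
+ *        mutex_lock(&bp->hwrm_cmd_lock);
+ *        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ *        if (!rc)
+ *            *ring_id = le16_to_cpu(resp->ring_id);
+ *        mutex_unlock(&bp->hwrm_cmd_lock);
+ *        return rc;
+ *    }
+ */
+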
+/* hwrm_ring_free */
+/* Input (24 bytes) */
+struct hwrm_ring_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define RING_FREE_REQ_RING_TYPE_CMPL (0x0UL << 0)
+ #define RING_FREE_REQ_RING_TYPE_TX (0x1UL << 0)
+ #define RING_FREE_REQ_RING_TYPE_RX (0x2UL << 0)
+ #define RING_FREE_REQ_RING_TYPE_STATUS (0x3UL << 0)
+ #define RING_FREE_REQ_RING_TYPE_CMD (0x4UL << 0)
+ u8 unused_0;
+ __le16 ring_id;
+ __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_qaggint_params */
+/* Input (24 bytes) */
+struct hwrm_ring_cmpl_ring_qaggint_params_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 ring_id;
+ __le16 unused_0[3];
+};
+
+/* Output (32 bytes) */
+struct hwrm_ring_cmpl_ring_qaggint_params_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 flags;
+ #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL
+ #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL
+ __le16 num_cmpl_dma_aggr;
+ __le16 num_cmpl_dma_aggr_during_int;
+ __le16 cmpl_aggr_dma_tmr;
+ __le16 cmpl_aggr_dma_tmr_during_int;
+ __le16 int_lat_tmr_min;
+ __le16 int_lat_tmr_max;
+ __le16 num_cmpl_aggr_int;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params */
+/* Input (40 bytes) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 ring_id;
+ __le16 flags;
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
+ __le16 num_cmpl_dma_aggr;
+ __le16 num_cmpl_dma_aggr_during_int;
+ __le16 cmpl_aggr_dma_tmr;
+ __le16 cmpl_aggr_dma_tmr_during_int;
+ __le16 int_lat_tmr_min;
+ __le16 int_lat_tmr_max;
+ __le16 num_cmpl_aggr_int;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
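+/* Usage sketch (illustrative only, not part of the HWRM definitions):
+ * a minimal way interrupt coalescing for a completion ring could be
+ * programmed, e.g. behind ethtool -C.  The aggregation counts and timer
+ * values are in firmware-defined units; the helpers and request id are
+ * assumed as in the earlier sketches.
+ *
+ *    static int example_set_coal(struct bnxt *bp, u16 ring_id,
+ *                                u16 max_cmpl, u16 max_tmr)
+ *    {
+ *        struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
+ *
+ *        bnxt_hwrm_cmd_hdr_init(bp, &req,
+ *                               HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
+ *                               -1, -1);
+ *        req.ring_id = cpu_to_le16(ring_id);
+ *        req.flags = cpu_to_le16(
+ *            RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET);
+ *        req.num_cmpl_dma_aggr = cpu_to_le16(max_cmpl);
+ *        req.int_lat_tmr_max = cpu_to_le16(max_tmr);
+ *        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ *    }
+ */
+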
+/* hwrm_ring_reset */
+/* Input (24 bytes) */
+struct hwrm_ring_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define RING_RESET_REQ_RING_TYPE_CMPL (0x0UL << 0)
+ #define RING_RESET_REQ_RING_TYPE_TX (0x1UL << 0)
+ #define RING_RESET_REQ_RING_TYPE_RX (0x2UL << 0)
+ #define RING_RESET_REQ_RING_TYPE_STATUS (0x3UL << 0)
+ #define RING_RESET_REQ_RING_TYPE_CMD (0x4UL << 0)
+ u8 unused_0;
+ __le16 ring_id;
+ __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_ring_grp_alloc */
+/* Input (24 bytes) */
+struct hwrm_ring_grp_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 cr;
+ __le16 rr;
+ __le16 ar;
+ __le16 sc;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_grp_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 ring_group_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_ring_grp_free */
+/* Input (24 bytes) */
+struct hwrm_ring_grp_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 ring_group_id;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_grp_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_arb_grp_alloc */
+/* Input (24 bytes) */
+struct hwrm_arb_grp_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 input_number;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_arb_grp_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 arb_grp_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_arb_grp_cfg */
+/* Input (32 bytes) */
+struct hwrm_arb_grp_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 arb_grp_id;
+ __le16 input_number;
+ __le16 tx_ring;
+ __le32 weight;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_arb_grp_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_alloc */
+/* Input (96 bytes) */
+struct hwrm_cfa_l2_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX (0x0UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
+ __le32 enables;
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x8000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
+ u8 l2_addr[6];
+ u8 unused_0;
+ u8 unused_1;
+ u8 l2_addr_mask[6];
+ __le16 l2_ovlan;
+ __le16 l2_ovlan_mask;
+ __le16 l2_ivlan;
+ __le16 l2_ivlan_mask;
+ u8 unused_2;
+ u8 unused_3;
+ u8 t_l2_addr[6];
+ u8 unused_4;
+ u8 unused_5;
+ u8 t_l2_addr_mask[6];
+ __le16 t_l2_ovlan;
+ __le16 t_l2_ovlan_mask;
+ __le16 t_l2_ivlan;
+ __le16 t_l2_ivlan_mask;
+ u8 src_type;
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT (0x0UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF (0x1UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF (0x2UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC (0x3UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG (0x4UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE (0x5UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO (0x6UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG (0x7UL << 0)
+ u8 unused_6;
+ __le32 src_id;
+ u8 tunnel_type;
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ u8 unused_7;
+ __le16 dst_vnic_id;
+ __le16 mirror_vnic_id;
+ u8 pri_hint;
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER (0x0UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER (0x1UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER (0x2UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX (0x3UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN (0x4UL << 0)
+ u8 unused_8;
+ __le32 unused_9;
+ __le64 l2_filter_id_hint;
+};
+
+/* Output (24 bytes) */
+struct hwrm_cfa_l2_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 l2_filter_id;
+ __le32 flow_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
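+/* Usage sketch (illustrative only, not part of the HWRM definitions):
+ * adding an RX L2 MAC filter that steers matching frames to a VNIC and
+ * keeping the returned 64-bit filter handle for a later
+ * hwrm_cfa_l2_filter_free.  Helpers, locking and the
+ * HWRM_CFA_L2_FILTER_ALLOC request id are assumed as in the earlier
+ * sketches.
+ *
+ *    static int example_add_mac_filter(struct bnxt *bp, const u8 *mac,
+ *                                      u16 vnic_id, __le64 *filter_id)
+ *    {
+ *        struct hwrm_cfa_l2_filter_alloc_output *resp =
+ *                                                bp->hwrm_cmd_resp_addr;
+ *        struct hwrm_cfa_l2_filter_alloc_input req = {0};
+ *        int rc;
+ *
+ *        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
+ *        req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
+ *        req.enables = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
+ *                                  CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK |
+ *                                  CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID);
+ *        memcpy(req.l2_addr, mac, ETH_ALEN);
+ *        memset(req.l2_addr_mask, 0xff, ETH_ALEN);
+ *        req.dst_vnic_id = cpu_to_le16(vnic_id);
+ *
+ *        mutex_lock(&bp->hwrm_cmd_lock);
+ *        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ *        if (!rc)
+ *            *filter_id = resp->l2_filter_id;
+ *        mutex_unlock(&bp->hwrm_cmd_lock);
+ *        return rc;
+ *    }
+ */
+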
+/* hwrm_cfa_l2_filter_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_l2_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 l2_filter_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_cfg */
+/* Input (40 bytes) */
+struct hwrm_cfa_l2_filter_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
+ __le32 enables;
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_VNIC_ID_VALID 0x1UL
+ __le64 l2_filter_id;
+ __le32 dst_vnic_id;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_filter_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_set_rx_mask */
+/* Input (40 bytes) */
+struct hwrm_cfa_l2_set_rx_mask_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 dflt_vnic_id;
+ __le32 mask;
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST 0x1UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL
+ __le64 mc_tbl_addr;
+ __le32 num_mc_entries;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_set_rx_mask_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_set_bcastmcast_mirroring */
+/* Input (32 bytes) */
+struct hwrm_cfa_l2_set_bcastmcast_mirroring_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 dflt_vnic_id;
+ __le32 mirroring_flags;
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_BCAST_MIRRORING 0x1UL
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_MCAST_MIRRORING 0x2UL
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_BCAST_SRC_KNOCKOUT 0x4UL
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_MCAST_SRC_KNOCKOUT 0x8UL
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_VLAN_ID_VALID 0x10UL
+ __le16 vlan_id;
+ u8 bcast_domain;
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_PFONLY (0x0UL << 0)
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_ALLPFS (0x1UL << 0)
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_ALLPFSVFS (0x2UL << 0)
+ u8 mcast_domain;
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_PFONLY (0x0UL << 0)
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_ALLPFS (0x1UL << 0)
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_ALLPFSVFS (0x2UL << 0)
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_set_bcastmcast_mirroring_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_tunnel_filter_alloc */
+/* Input (88 bytes) */
+struct hwrm_cfa_tunnel_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ __le32 enables;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL
+ __le64 l2_filter_id;
+ u8 l2_addr[6];
+ __le16 l2_ivlan;
+ __le32 l3_addr[4];
+ __le32 t_l3_addr[4];
+ u8 l3_addr_type;
+ u8 t_l3_addr_type;
+ u8 tunnel_type;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ u8 unused_0;
+ __le32 vni;
+ __le32 dst_vnic_id;
+ __le32 mirror_vnic_id;
+};
+
+/* Output (24 bytes) */
+struct hwrm_cfa_tunnel_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tunnel_filter_id;
+ __le32 flow_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_cfa_tunnel_filter_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_tunnel_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 tunnel_filter_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_tunnel_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_encap_record_alloc */
+/* Input (32 bytes) */
+struct hwrm_cfa_encap_record_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ u8 encap_type;
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN (0x1UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE (0x2UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE (0x3UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP (0x4UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE (0x5UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS (0x6UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN (0x7UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE (0x8UL << 0)
+ u8 unused_0;
+ __le16 unused_1;
+ __le32 encap_data[16];
+};
+
+/* Output (24 bytes) */
+struct hwrm_cfa_encap_record_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 encap_record_id;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_encap_record_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_encap_record_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 encap_record_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_encap_record_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc */
+/* Input (128 bytes) */
+struct hwrm_cfa_ntuple_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
+ __le32 enables;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x10UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x20UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x80UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x200UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x400UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK 0x800UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x1000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x10000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
+ __le64 l2_filter_id;
+ u8 src_macaddr[6];
+ __be16 ethertype;
+ u8 ipaddr_type;
+ u8 ip_protocol;
+ __le16 dst_vnic_id;
+ __le16 mirror_vnic_id;
+ u8 tunnel_type;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ u8 pri_hint;
+ __be32 src_ipaddr[4];
+ __be32 src_ipaddr_mask[4];
+ __be32 dst_ipaddr[4];
+ __be32 dst_ipaddr_mask[4];
+ __be16 src_port;
+ __be16 src_port_mask;
+ __be16 dst_port;
+ __be16 dst_port_mask;
+ __le64 ntuple_filter_id_hint;
+};
+
+/* Output (24 bytes) */
+struct hwrm_cfa_ntuple_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 ntuple_filter_id;
+ __le32 flow_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
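+/* Usage sketch (illustrative only, not part of the HWRM definitions):
+ * an aRFS-style 4-tuple filter that steers one IPv4/TCP flow to a VNIC.
+ * Note the addresses and ports are big endian (__be*) while the rest of
+ * the message is little endian.  The l2_filter_id would come from a
+ * prior hwrm_cfa_l2_filter_alloc; helpers and the request id are
+ * assumed as in the earlier sketches.
+ *
+ *    static int example_add_tcp4_flow(struct bnxt *bp, __le64 l2_filter_id,
+ *                                     __be32 saddr, __be32 daddr,
+ *                                     __be16 sport, __be16 dport,
+ *                                     u16 vnic_id)
+ *    {
+ *        struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
+ *
+ *        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC,
+ *                               -1, -1);
+ *        req.enables =
+ *            cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |
+ *                        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |
+ *                        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |
+ *                        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |
+ *                        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |
+ *                        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |
+ *                        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |
+ *                        CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID);
+ *        req.l2_filter_id = l2_filter_id;
+ *        req.ethertype = cpu_to_be16(ETH_P_IP);
+ *        req.ip_protocol = IPPROTO_TCP;
+ *        req.src_ipaddr[0] = saddr;
+ *        req.dst_ipaddr[0] = daddr;
+ *        req.src_port = sport;
+ *        req.dst_port = dport;
+ *        req.dst_vnic_id = cpu_to_le16(vnic_id);
+ *        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ *    }
+ */
+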
+/* hwrm_cfa_ntuple_filter_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_ntuple_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 ntuple_filter_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_ntuple_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_cfg */
+/* Input (40 bytes) */
+struct hwrm_cfa_ntuple_filter_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_VNIC_ID_VALID 0x1UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID_VALID 0x2UL
+ __le32 unused_0;
+ __le64 ntuple_filter_id;
+ __le32 new_dst_vnic_id;
+ __le32 new_mirror_vnic_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_ntuple_filter_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_query */
+/* Input (24 bytes) */
+struct hwrm_tunnel_dst_port_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_tunnel_dst_port_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tunnel_dst_port_id;
+ __be16 tunnel_dst_port_val;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_alloc */
+/* Input (24 bytes) */
+struct hwrm_tunnel_dst_port_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ u8 unused_0;
+ __be16 tunnel_dst_port_val;
+ __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_tunnel_dst_port_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tunnel_dst_port_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
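+/* Usage sketch (illustrative only, not part of the HWRM definitions):
+ * telling the firmware which UDP destination port carries VXLAN traffic
+ * and keeping the returned port id so it can later be released with
+ * hwrm_tunnel_dst_port_free.  Helpers, locking and the request id are
+ * assumed as in the earlier sketches.
+ *
+ *    static int example_add_vxlan_port(struct bnxt *bp, __be16 udp_port,
+ *                                      u16 *fw_port_id)
+ *    {
+ *        struct hwrm_tunnel_dst_port_alloc_output *resp =
+ *                                                bp->hwrm_cmd_resp_addr;
+ *        struct hwrm_tunnel_dst_port_alloc_input req = {0};
+ *        int rc;
+ *
+ *        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC,
+ *                               -1, -1);
+ *        req.tunnel_type = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
+ *        req.tunnel_dst_port_val = udp_port;
+ *
+ *        mutex_lock(&bp->hwrm_cmd_lock);
+ *        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ *        if (!rc)
+ *            *fw_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
+ *        mutex_unlock(&bp->hwrm_cmd_lock);
+ *        return rc;
+ *    }
+ */
+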
+/* hwrm_tunnel_dst_port_free */
+/* Input (24 bytes) */
+struct hwrm_tunnel_dst_port_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ u8 unused_0;
+ __le16 tunnel_dst_port_id;
+ __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_tunnel_dst_port_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_alloc */
+/* Input (32 bytes) */
+struct hwrm_stat_ctx_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 stats_dma_addr;
+ __le32 update_period_ms;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_stat_ctx_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 stat_ctx_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
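+/* Usage sketch (illustrative only, not part of the HWRM definitions):
+ * allocating a statistics context so that the firmware DMAs the hardware
+ * counters into the buffer at stats_dma_addr every update_period_ms
+ * milliseconds (the same counters can also be read on demand with
+ * hwrm_stat_ctx_query below).  Helpers, locking and the request id are
+ * assumed as in the earlier sketches.
+ *
+ *    static int example_alloc_stat_ctx(struct bnxt *bp, dma_addr_t stats_map,
+ *                                      u32 period_ms, u32 *ctx_id)
+ *    {
+ *        struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ *        struct hwrm_stat_ctx_alloc_input req = {0};
+ *        int rc;
+ *
+ *        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
+ *        req.stats_dma_addr = cpu_to_le64(stats_map);
+ *        req.update_period_ms = cpu_to_le32(period_ms);
+ *
+ *        mutex_lock(&bp->hwrm_cmd_lock);
+ *        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ *        if (!rc)
+ *            *ctx_id = le32_to_cpu(resp->stat_ctx_id);
+ *        mutex_unlock(&bp->hwrm_cmd_lock);
+ *        return rc;
+ *    }
+ */
+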
+/* hwrm_stat_ctx_free */
+/* Input (24 bytes) */
+struct hwrm_stat_ctx_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_stat_ctx_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 stat_ctx_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_query */
+/* Input (24 bytes) */
+struct hwrm_stat_ctx_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ __le32 unused_0;
+};
+
+/* Output (176 bytes) */
+struct hwrm_stat_ctx_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_err_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_err_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 rx_agg_pkts;
+ __le64 rx_agg_bytes;
+ __le64 rx_agg_events;
+ __le64 rx_agg_aborts;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_clr_stats */
+/* Input (24 bytes) */
+struct hwrm_stat_ctx_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_stat_ctx_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_mgmt_l2_filter_alloc */
+/* Input (56 bytes) */
+struct hwrm_mgmt_l2_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
+ #define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX (0x0UL << 0)
+ #define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ __le32 enables;
+ #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDRESS 0x1UL
+ #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_OVLAN 0x2UL
+ #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_IVLAN 0x4UL
+ #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_ACTION_ID 0x8UL
+ u8 l2_address[6];
+ u8 unused_0;
+ u8 unused_1;
+ u8 l2_address_mask[6];
+ __le16 ovlan;
+ __le16 ovlan_mask;
+ __le16 ivlan;
+ __le16 ivlan_mask;
+ u8 unused_2;
+ u8 unused_3;
+ __le32 action_id;
+ u8 action_bypass;
+ #define MGMT_L2_FILTER_ALLOC_REQ_ACTION_BYPASS 0x1UL
+ u8 unused_5[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_mgmt_l2_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 mgmt_l2_filter_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_mgmt_l2_filter_free */
+/* Input (24 bytes) */
+struct hwrm_mgmt_l2_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 mgmt_l2_filter_id;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_mgmt_l2_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_raw_write_blk */
+/* Input (32 bytes) */
+struct hwrm_nvm_raw_write_blk_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le32 dest_addr;
+ __le32 len;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_raw_write_blk_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_read */
+/* Input (40 bytes) */
+struct hwrm_nvm_read_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le16 dir_idx;
+ u8 unused_0;
+ u8 unused_1;
+ __le32 offset;
+ __le32 len;
+ __le32 unused_2;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_read_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_raw_dump */
+/* Input (32 bytes) */
+struct hwrm_nvm_raw_dump_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 offset;
+ __le32 len;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_raw_dump_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dir_entries */
+/* Input (24 bytes) */
+struct hwrm_nvm_get_dir_entries_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_get_dir_entries_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dir_info */
+/* Input (16 bytes) */
+struct hwrm_nvm_get_dir_info_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (24 bytes) */
+struct hwrm_nvm_get_dir_info_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 entries;
+ __le32 entry_length;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_write */
+/* Input (40 bytes) */
+struct hwrm_nvm_write_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 dir_attr;
+ __le32 dir_data_length;
+ __le16 option;
+ __le16 flags;
+ #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_write_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_modify */
+/* Input (40 bytes) */
+struct hwrm_nvm_modify_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le16 dir_idx;
+ u8 unused_0;
+ u8 unused_1;
+ __le32 offset;
+ __le32 len;
+ __le32 unused_2;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_modify_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_find_dir_entry */
+/* Input (32 bytes) */
+struct hwrm_nvm_find_dir_entry_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID 0x1UL
+ __le16 dir_idx;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ u8 opt_ordinal;
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ (0x0UL << 0)
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE (0x1UL << 0)
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT (0x2UL << 0)
+ u8 unused_1[3];
+};
+
+/* Output (32 bytes) */
+struct hwrm_nvm_find_dir_entry_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 dir_item_length;
+ __le32 dir_data_length;
+ __le32 fw_ver;
+ __le16 dir_ordinal;
+ __le16 dir_idx;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
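+/* Usage sketch (illustrative only, not part of the HWRM definitions):
+ * looking up the first directory entry of a given type, e.g. before
+ * reading or updating an NVM item.  Directory type values come from
+ * bnxt_nvm_defs.h added later in this patch; helpers, locking and the
+ * request id are assumed as in the earlier sketches.
+ *
+ *    static int example_find_nvm_item(struct bnxt *bp, u16 type,
+ *                                     u16 *index, u32 *data_len)
+ *    {
+ *        struct hwrm_nvm_find_dir_entry_output *resp =
+ *                                                bp->hwrm_cmd_resp_addr;
+ *        struct hwrm_nvm_find_dir_entry_input req = {0};
+ *        int rc;
+ *
+ *        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1);
+ *        req.dir_type = cpu_to_le16(type);
+ *        req.dir_ordinal = cpu_to_le16(0);
+ *        req.dir_ext = cpu_to_le16(0);
+ *        req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
+ *
+ *        mutex_lock(&bp->hwrm_cmd_lock);
+ *        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ *        if (!rc) {
+ *            *index = le16_to_cpu(resp->dir_idx);
+ *            *data_len = le32_to_cpu(resp->dir_data_length);
+ *        }
+ *        mutex_unlock(&bp->hwrm_cmd_lock);
+ *        return rc;
+ *    }
+ */
+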
+/* hwrm_nvm_erase_dir_entry */
+/* Input (24 bytes) */
+struct hwrm_nvm_erase_dir_entry_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 dir_idx;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_erase_dir_entry_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dev_info */
+/* Input (16 bytes) */
+struct hwrm_nvm_get_dev_info_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (32 bytes) */
+struct hwrm_nvm_get_dev_info_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 manufacturer_id;
+ __le16 device_id;
+ __le32 sector_size;
+ __le32 nvram_size;
+ __le32 reserved_size;
+ __le32 available_size;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_nvm_mod_dir_entry */
+/* Input (32 bytes) */
+struct hwrm_nvm_mod_dir_entry_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM 0x1UL
+ __le16 dir_idx;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 dir_attr;
+ __le32 checksum;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_mod_dir_entry_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_verify_update */
+/* Input (24 bytes) */
+struct hwrm_nvm_verify_update_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_verify_update_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_exec_fwd_resp */
+/* Input (120 bytes) */
+struct hwrm_exec_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_request[24];
+ __le16 encap_resp_target_id;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_exec_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_reject_fwd_resp */
+/* Input (120 bytes) */
+struct hwrm_reject_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_request[24];
+ __le16 encap_resp_target_id;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_reject_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_fwd_resp */
+/* Input (40 bytes) */
+struct hwrm_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 encap_resp_target_id;
+ __le16 encap_resp_cmpl_ring;
+ __le16 encap_resp_len;
+ u8 unused_0;
+ u8 unused_1;
+ __le64 encap_resp_addr;
+ __le32 encap_resp[24];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_fwd_async_event_cmpl */
+/* Input (32 bytes) */
+struct hwrm_fwd_async_event_cmpl_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 encap_async_event_target_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2[3];
+ u8 unused_3;
+ __le32 encap_async_event_cmpl[4];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fwd_async_event_cmpl_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_fw_reset */
+/* Input (24 bytes) */
+struct hwrm_fw_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIMP (0x0UL << 0)
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_APE (0x1UL << 0)
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_KONG (0x2UL << 0)
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BONO (0x3UL << 0)
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_TANG (0x4UL << 0)
+ u8 selfrst_status;
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fw_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
+ u8 unused_0;
+ __le16 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_fw_qstatus */
+/* Input (24 bytes) */
+struct hwrm_fw_qstatus_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIMP (0x0UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_APE (0x1UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_KONG (0x2UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BONO (0x3UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_TANG (0x4UL << 0)
+ u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fw_qstatus_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
+ u8 unused_0;
+ __le16 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_temp_monitor_query */
+/* Input (16 bytes) */
+struct hwrm_temp_monitor_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_temp_monitor_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 temp;
+ u8 unused_0;
+ __le16 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
new file mode 100644
index 000000000000..3cf3e1b70b64
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
@@ -0,0 +1,59 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _BNXT_NVM_DEFS_H_
+#define _BNXT_NVM_DEFS_H_
+
+enum bnxt_nvm_directory_type {
+ BNX_DIR_TYPE_UNUSED = 0,
+ BNX_DIR_TYPE_PKG_LOG = 1,
+ BNX_DIR_TYPE_CHIMP_PATCH = 3,
+ BNX_DIR_TYPE_BOOTCODE = 4,
+ BNX_DIR_TYPE_VPD = 5,
+ BNX_DIR_TYPE_EXP_ROM_MBA = 6,
+ BNX_DIR_TYPE_AVS = 7,
+ BNX_DIR_TYPE_PCIE = 8,
+ BNX_DIR_TYPE_PORT_MACRO = 9,
+ BNX_DIR_TYPE_APE_FW = 10,
+ BNX_DIR_TYPE_APE_PATCH = 11,
+ BNX_DIR_TYPE_KONG_FW = 12,
+ BNX_DIR_TYPE_KONG_PATCH = 13,
+ BNX_DIR_TYPE_BONO_FW = 14,
+ BNX_DIR_TYPE_BONO_PATCH = 15,
+ BNX_DIR_TYPE_TANG_FW = 16,
+ BNX_DIR_TYPE_TANG_PATCH = 17,
+ BNX_DIR_TYPE_BOOTCODE_2 = 18,
+ BNX_DIR_TYPE_CCM = 19,
+ BNX_DIR_TYPE_PCI_CFG = 20,
+ BNX_DIR_TYPE_TSCF_UCODE = 21,
+ BNX_DIR_TYPE_ISCSI_BOOT = 22,
+ BNX_DIR_TYPE_ISCSI_BOOT_IPV6 = 24,
+ BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6 = 25,
+ BNX_DIR_TYPE_ISCSI_BOOT_CFG6 = 26,
+ BNX_DIR_TYPE_EXT_PHY = 27,
+ BNX_DIR_TYPE_SHARED_CFG = 40,
+ BNX_DIR_TYPE_PORT_CFG = 41,
+ BNX_DIR_TYPE_FUNC_CFG = 42,
+ BNX_DIR_TYPE_MGMT_CFG = 48,
+ BNX_DIR_TYPE_MGMT_DATA = 49,
+ BNX_DIR_TYPE_MGMT_WEB_DATA = 50,
+ BNX_DIR_TYPE_MGMT_WEB_META = 51,
+ BNX_DIR_TYPE_MGMT_EVENT_LOG = 52,
+ BNX_DIR_TYPE_MGMT_AUDIT_LOG = 53
+};
+
+#define BNX_DIR_ORDINAL_FIRST 0
+
+#define BNX_DIR_EXT_INACTIVE (1 << 0)
+#define BNX_DIR_EXT_UPDATE (1 << 1)
+
+#define BNX_DIR_ATTR_NO_CHKSUM (1 << 0)
+#define BNX_DIR_ATTR_PROP_STREAM (1 << 1)
+
+#endif /* Don't add anything after this line */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
new file mode 100644
index 000000000000..60989e7e266a
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -0,0 +1,816 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_sriov.h"
+#include "bnxt_ethtool.h"
+
+#ifdef CONFIG_BNXT_SRIOV
+static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
+{
+ if (bp->state != BNXT_STATE_OPEN) {
+ netdev_err(bp->dev, "vf ndo called though PF is down\n");
+ return -EINVAL;
+ }
+ if (!bp->pf.active_vfs) {
+ netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
+ return -EINVAL;
+ }
+ if (vf_id >= bp->pf.max_vfs) {
+ netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vf_info *vf;
+ bool old_setting = false;
+ u32 func_flags;
+ int rc;
+
+ rc = bnxt_vf_ndo_prep(bp, vf_id);
+ if (rc)
+ return rc;
+
+ vf = &bp->pf.vf[vf_id];
+ if (vf->flags & BNXT_VF_SPOOFCHK)
+ old_setting = true;
+ if (old_setting == setting)
+ return 0;
+
+ func_flags = vf->func_flags;
+ if (setting)
+ func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
+ else
+ func_flags &= ~FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
+ /* TODO: if the driver supports VLAN filter on guest VLAN,
+ * the spoof check should also include VLAN anti-spoofing
+ */
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.vf_id = cpu_to_le16(vf->fw_fid);
+ req.flags = cpu_to_le32(func_flags);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ vf->func_flags = func_flags;
+ if (setting)
+ vf->flags |= BNXT_VF_SPOOFCHK;
+ else
+ vf->flags &= ~BNXT_VF_SPOOFCHK;
+ }
+ return rc;
+}
+
+int bnxt_get_vf_config(struct net_device *dev, int vf_id,
+ struct ifla_vf_info *ivi)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vf_info *vf;
+ int rc;
+
+ rc = bnxt_vf_ndo_prep(bp, vf_id);
+ if (rc)
+ return rc;
+
+ ivi->vf = vf_id;
+ vf = &bp->pf.vf[vf_id];
+
+ memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
+ ivi->max_tx_rate = vf->max_tx_rate;
+ ivi->min_tx_rate = vf->min_tx_rate;
+ ivi->vlan = vf->vlan;
+ ivi->qos = vf->flags & BNXT_VF_QOS;
+ ivi->spoofchk = vf->flags & BNXT_VF_SPOOFCHK;
+ if (!(vf->flags & BNXT_VF_LINK_FORCED))
+ ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+ else if (vf->flags & BNXT_VF_LINK_UP)
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ else
+ ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+
+ return 0;
+}
+
+int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vf_info *vf;
+ int rc;
+
+ rc = bnxt_vf_ndo_prep(bp, vf_id);
+ if (rc)
+ return rc;
+ /* reject bc or mc mac addr, zero mac addr means allow
+ * VF to use its own mac addr
+ */
+ if (is_multicast_ether_addr(mac)) {
+ netdev_err(dev, "Invalid VF ethernet address\n");
+ return -EINVAL;
+ }
+ vf = &bp->pf.vf[vf_id];
+
+ memcpy(vf->mac_addr, mac, ETH_ALEN);
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.vf_id = cpu_to_le16(vf->fw_fid);
+ req.flags = cpu_to_le32(vf->func_flags);
+ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
+ memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vf_info *vf;
+ u16 vlan_tag;
+ int rc;
+
+ rc = bnxt_vf_ndo_prep(bp, vf_id);
+ if (rc)
+ return rc;
+
+ /* TODO: proper handling of user priority is not implemented yet;
+ * for now, fail the command if a non-zero priority is specified.
+ */
+ if (vlan_id > 4095 || qos)
+ return -EINVAL;
+
+ vf = &bp->pf.vf[vf_id];
+ vlan_tag = vlan_id;
+ if (vlan_tag == vf->vlan)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.vf_id = cpu_to_le16(vf->fw_fid);
+ req.flags = cpu_to_le32(vf->func_flags);
+ req.dflt_vlan = cpu_to_le16(vlan_tag);
+ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ vf->vlan = vlan_tag;
+ return rc;
+}
+
+int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
+ int max_tx_rate)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vf_info *vf;
+ u32 pf_link_speed;
+ int rc;
+
+ rc = bnxt_vf_ndo_prep(bp, vf_id);
+ if (rc)
+ return rc;
+
+ vf = &bp->pf.vf[vf_id];
+ pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
+ if (max_tx_rate > pf_link_speed) {
+ netdev_info(bp->dev, "max tx rate %d exceed PF link speed for VF %d\n",
+ max_tx_rate, vf_id);
+ return -EINVAL;
+ }
+
+ if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
+ netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
+ min_tx_rate, vf_id);
+ return -EINVAL;
+ }
+ if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
+ return 0;
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.vf_id = cpu_to_le16(vf->fw_fid);
+ req.flags = cpu_to_le32(vf->func_flags);
+ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
+ req.max_bw = cpu_to_le32(max_tx_rate);
+ req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
+ req.min_bw = cpu_to_le32(min_tx_rate);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ vf->min_tx_rate = min_tx_rate;
+ vf->max_tx_rate = max_tx_rate;
+ }
+ return rc;
+}
+
+int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vf_info *vf;
+ int rc;
+
+ rc = bnxt_vf_ndo_prep(bp, vf_id);
+ if (rc)
+ return rc;
+
+ vf = &bp->pf.vf[vf_id];
+
+ vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
+ switch (link) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ vf->flags |= BNXT_VF_LINK_UP;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ vf->flags |= BNXT_VF_LINK_FORCED;
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
+ break;
+ default:
+ netdev_err(bp->dev, "Invalid link option\n");
+ rc = -EINVAL;
+ break;
+ }
+ /* CHIMP TODO: send msg to VF to update new link state */
+
+ return rc;
+}
+
+static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
+{
+ int i;
+ struct bnxt_vf_info *vf;
+
+ for (i = 0; i < num_vfs; i++) {
+ vf = &bp->pf.vf[i];
+ memset(vf, 0, sizeof(*vf));
+ vf->flags = BNXT_VF_QOS | BNXT_VF_LINK_UP;
+ }
+ return 0;
+}
+
+static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp)
+{
+ int i, rc = 0;
+ struct bnxt_pf_info *pf = &bp->pf;
+ struct hwrm_func_vf_resc_free_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = pf->first_vf_id; i < pf->first_vf_id + pf->active_vfs; i++) {
+ req.vf_id = cpu_to_le16(i);
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static void bnxt_free_vf_resources(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+ int i;
+
+ kfree(bp->pf.vf_event_bmap);
+ bp->pf.vf_event_bmap = NULL;
+
+ for (i = 0; i < 4; i++) {
+ if (bp->pf.hwrm_cmd_req_addr[i]) {
+ dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
+ bp->pf.hwrm_cmd_req_addr[i],
+ bp->pf.hwrm_cmd_req_dma_addr[i]);
+ bp->pf.hwrm_cmd_req_addr[i] = NULL;
+ }
+ }
+
+ kfree(bp->pf.vf);
+ bp->pf.vf = NULL;
+}
+
+static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
+{
+ struct pci_dev *pdev = bp->pdev;
+ u32 nr_pages, size, i, j, k = 0;
+
+ bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
+ if (!bp->pf.vf)
+ return -ENOMEM;
+
+ bnxt_set_vf_attr(bp, num_vfs);
+
+ size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
+ nr_pages = size / BNXT_PAGE_SIZE;
+ if (size & (BNXT_PAGE_SIZE - 1))
+ nr_pages++;
+
+ for (i = 0; i < nr_pages; i++) {
+ bp->pf.hwrm_cmd_req_addr[i] =
+ dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
+ &bp->pf.hwrm_cmd_req_dma_addr[i],
+ GFP_KERNEL);
+
+ if (!bp->pf.hwrm_cmd_req_addr[i])
+ return -ENOMEM;
+
+ for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
+ struct bnxt_vf_info *vf = &bp->pf.vf[k];
+
+ vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
+ j * BNXT_HWRM_REQ_MAX_SIZE;
+ vf->hwrm_cmd_req_dma_addr =
+ bp->pf.hwrm_cmd_req_dma_addr[i] + j *
+ BNXT_HWRM_REQ_MAX_SIZE;
+ k++;
+ }
+ }
+
+ /* Max 128 VFs */
+ bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
+ if (!bp->pf.vf_event_bmap)
+ return -ENOMEM;
+
+ bp->pf.hwrm_cmd_req_pages = nr_pages;
+ return 0;
+}
+
+static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
+{
+ struct hwrm_func_buf_rgtr_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);
+
+ req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
+ req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
+ req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
+ req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
+ req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
+ req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
+ req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);
+
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+/* Only called by the PF to reserve resources for VFs */
+static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs)
+{
+ u32 rc = 0, mtu, i;
+ u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
+ struct hwrm_func_cfg_input req = {0};
+ struct bnxt_pf_info *pf = &bp->pf;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+
+ /* Remaining rings are distributed equally among the VFs for now */
+ /* TODO: the following workaround is needed to ensure that the total
+ * number of vf_cp_rings does not exceed the number of HW ring groups.
+ * This WA should be removed once the new HWRM provides HW ring group
+ * capability in hwrm_func_qcap.
+ */
+ vf_cp_rings = min_t(u16, bp->pf.max_cp_rings, bp->pf.max_stat_ctxs);
+ vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / *num_vfs;
+ /* TODO: restore this logic below once the WA above is removed */
+ /* vf_cp_rings = (bp->pf.max_cp_rings - bp->cp_nr_rings) / *num_vfs; */
+ vf_stat_ctx = (bp->pf.max_stat_ctxs - bp->num_stat_ctxs) / *num_vfs;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings * 2) /
+ *num_vfs;
+ else
+ vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings) /
+ *num_vfs;
+ vf_tx_rings = (bp->pf.max_tx_rings - bp->tx_nr_rings) / *num_vfs;
+
+ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
+ FUNC_CFG_REQ_ENABLES_MRU |
+ FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
+ FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
+ FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
+ FUNC_CFG_REQ_ENABLES_NUM_VNICS);
+
+ mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ req.mru = cpu_to_le16(mtu);
+ req.mtu = cpu_to_le16(mtu);
+
+ req.num_rsscos_ctxs = cpu_to_le16(1);
+ req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
+ req.num_tx_rings = cpu_to_le16(vf_tx_rings);
+ req.num_rx_rings = cpu_to_le16(vf_rx_rings);
+ req.num_l2_ctxs = cpu_to_le16(4);
+ vf_vnics = 1;
+
+ req.num_vnics = cpu_to_le16(vf_vnics);
+ /* FIXME spec currently uses 1 bit for stats ctx */
+ req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < *num_vfs; i++) {
+ req.vf_id = cpu_to_le16(pf->first_vf_id + i);
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+ bp->pf.active_vfs = i + 1;
+ bp->pf.vf[i].fw_fid = le16_to_cpu(req.vf_id);
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ if (!rc) {
+ bp->pf.max_pf_tx_rings = bp->tx_nr_rings;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ bp->pf.max_pf_rx_rings = bp->rx_nr_rings * 2;
+ else
+ bp->pf.max_pf_rx_rings = bp->rx_nr_rings;
+ }
+ return rc;
+}
+
+static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
+{
+ int rc = 0, vfs_supported;
+ int min_rx_rings, min_tx_rings, min_rss_ctxs;
+ int tx_ok = 0, rx_ok = 0, rss_ok = 0;
+
+	/* Check if we can enable the requested number of VFs. At a minimum
+	 * we require 1 RX ring and 1 TX ring for each VF. In this minimum
+	 * configuration, features like TPA will not be available.
+	 */
+ vfs_supported = *num_vfs;
+
+ while (vfs_supported) {
+ min_rx_rings = vfs_supported;
+ min_tx_rings = vfs_supported;
+ min_rss_ctxs = vfs_supported;
+
+ if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >=
+ min_rx_rings)
+ rx_ok = 1;
+ } else {
+ if (bp->pf.max_rx_rings - bp->rx_nr_rings >=
+ min_rx_rings)
+ rx_ok = 1;
+ }
+
+ if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings)
+ tx_ok = 1;
+
+ if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
+ rss_ok = 1;
+
+ if (tx_ok && rx_ok && rss_ok)
+ break;
+
+ vfs_supported--;
+ }
+
+ if (!vfs_supported) {
+		netdev_err(bp->dev, "Cannot enable VFs as all resources are used by the PF\n");
+ return -EINVAL;
+ }
+
+ if (vfs_supported != *num_vfs) {
+ netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
+ *num_vfs, vfs_supported);
+ *num_vfs = vfs_supported;
+ }
+
+ rc = bnxt_alloc_vf_resources(bp, *num_vfs);
+ if (rc)
+ goto err_out1;
+
+ /* Reserve resources for VFs */
+ rc = bnxt_hwrm_func_cfg(bp, num_vfs);
+ if (rc)
+ goto err_out2;
+
+ /* Register buffers for VFs */
+ rc = bnxt_hwrm_func_buf_rgtr(bp);
+ if (rc)
+ goto err_out2;
+
+ rc = pci_enable_sriov(bp->pdev, *num_vfs);
+ if (rc)
+ goto err_out2;
+
+ return 0;
+
+err_out2:
+	/* Free the resources reserved for the VFs */
+ bnxt_hwrm_func_vf_resource_free(bp);
+
+err_out1:
+ bnxt_free_vf_resources(bp);
+
+ return rc;
+}
+
+void bnxt_sriov_disable(struct bnxt *bp)
+{
+ if (!bp->pf.active_vfs)
+ return;
+
+ pci_disable_sriov(bp->pdev);
+
+	/* Free the resources reserved for the VFs */
+ bnxt_hwrm_func_vf_resource_free(bp);
+
+ bnxt_free_vf_resources(bp);
+
+ bp->pf.active_vfs = 0;
+ bp->pf.max_pf_rx_rings = bp->pf.max_rx_rings;
+ bp->pf.max_pf_tx_rings = bp->pf.max_tx_rings;
+}
+
+int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
+		netdev_warn(dev, "SR-IOV cannot be enabled when the interrupt mode is not MSI-X\n");
+ return 0;
+ }
+
+ rtnl_lock();
+ if (!netif_running(dev)) {
+		netdev_warn(dev, "Rejecting SR-IOV config request since the interface is down\n");
+ rtnl_unlock();
+ return 0;
+ }
+ bp->sriov_cfg = true;
+ rtnl_unlock();
+ if (!num_vfs) {
+ bnxt_sriov_disable(bp);
+ return 0;
+ }
+
+	/* Check if the number of enabled VFs is the same as requested */
+ if (num_vfs == bp->pf.active_vfs)
+ return 0;
+
+ bnxt_sriov_enable(bp, &num_vfs);
+
+ bp->sriov_cfg = false;
+ wake_up(&bp->sriov_cfg_wait);
+
+ return num_vfs;
+}
+
+static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
+ void *encap_resp, __le64 encap_resp_addr,
+ __le16 encap_resp_cpr, u32 msg_size)
+{
+ int rc = 0;
+ struct hwrm_fwd_resp_input req = {0};
+ struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);
+
+ /* Set the new target id */
+ req.target_id = cpu_to_le16(vf->fw_fid);
+ req.encap_resp_len = cpu_to_le16(msg_size);
+ req.encap_resp_addr = encap_resp_addr;
+ req.encap_resp_cmpl_ring = encap_resp_cpr;
+ memcpy(req.encap_resp, encap_resp, msg_size);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+ if (rc) {
+ netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
+ goto fwd_resp_exit;
+ }
+
+ if (resp->error_code) {
+ netdev_err(bp->dev, "hwrm_fwd_resp error %d\n",
+ resp->error_code);
+ rc = -1;
+ }
+
+fwd_resp_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
+ u32 msg_size)
+{
+ int rc = 0;
+ struct hwrm_reject_fwd_resp_input req = {0};
+ struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
+ /* Set the new target id */
+ req.target_id = cpu_to_le16(vf->fw_fid);
+ memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+ if (rc) {
+ netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
+ goto fwd_err_resp_exit;
+ }
+
+ if (resp->error_code) {
+ netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n",
+ resp->error_code);
+ rc = -1;
+ }
+
+fwd_err_resp_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
+ u32 msg_size)
+{
+ int rc = 0;
+ struct hwrm_exec_fwd_resp_input req = {0};
+ struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
+ /* Set the new target id */
+ req.target_id = cpu_to_le16(vf->fw_fid);
+ memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+ if (rc) {
+ netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
+ goto exec_fwd_resp_exit;
+ }
+
+ if (resp->error_code) {
+ netdev_err(bp->dev, "hwrm_exec_fw_resp error %d\n",
+ resp->error_code);
+ rc = -1;
+ }
+
+exec_fwd_resp_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+ u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
+ struct hwrm_cfa_l2_filter_alloc_input *req =
+ (struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
+
+ if (!is_valid_ether_addr(vf->mac_addr) ||
+ ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
+ return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
+ else
+ return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
+}
+
+static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+ int rc = 0;
+
+ if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
+ /* real link */
+ rc = bnxt_hwrm_exec_fwd_resp(
+ bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
+ } else {
+ struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
+ struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
+
+ phy_qcfg_req =
+ (struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
+ mutex_lock(&bp->hwrm_cmd_lock);
+ memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
+ sizeof(phy_qcfg_resp));
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
+
+ if (vf->flags & BNXT_VF_LINK_UP) {
+ /* if physical link is down, force link up on VF */
+ if (phy_qcfg_resp.link ==
+ PORT_PHY_QCFG_RESP_LINK_NO_LINK) {
+ phy_qcfg_resp.link =
+ PORT_PHY_QCFG_RESP_LINK_LINK;
+ if (phy_qcfg_resp.auto_link_speed)
+ phy_qcfg_resp.link_speed =
+ phy_qcfg_resp.auto_link_speed;
+ else
+ phy_qcfg_resp.link_speed =
+ phy_qcfg_resp.force_link_speed;
+ phy_qcfg_resp.duplex =
+ PORT_PHY_QCFG_RESP_DUPLEX_FULL;
+ phy_qcfg_resp.pause =
+ (PORT_PHY_QCFG_RESP_PAUSE_TX |
+ PORT_PHY_QCFG_RESP_PAUSE_RX);
+ }
+ } else {
+ /* force link down */
+ phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
+ phy_qcfg_resp.link_speed = 0;
+ phy_qcfg_resp.duplex = PORT_PHY_QCFG_RESP_DUPLEX_HALF;
+ phy_qcfg_resp.pause = 0;
+ }
+ rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
+ phy_qcfg_req->resp_addr,
+ phy_qcfg_req->cmpl_ring,
+ sizeof(phy_qcfg_resp));
+ }
+ return rc;
+}
+
+static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+ int rc = 0;
+ struct hwrm_cmd_req_hdr *encap_req = vf->hwrm_cmd_req_addr;
+ u32 req_type = le32_to_cpu(encap_req->cmpl_ring_req_type) & 0xffff;
+
+ switch (req_type) {
+ case HWRM_CFA_L2_FILTER_ALLOC:
+ rc = bnxt_vf_validate_set_mac(bp, vf);
+ break;
+ case HWRM_FUNC_CFG:
+		/* TODO: validate whether the VF is allowed to change the MAC
+		 * address, MTU, number of rings, etc.
+ */
+ rc = bnxt_hwrm_exec_fwd_resp(
+ bp, vf, sizeof(struct hwrm_func_cfg_input));
+ break;
+ case HWRM_PORT_PHY_QCFG:
+ rc = bnxt_vf_set_link(bp, vf);
+ break;
+ default:
+ break;
+ }
+ return rc;
+}
+
+void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
+{
+ u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;
+
+	/* Scan through the VFs and process commands */
+ while (1) {
+ vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
+ if (vf_id >= active_vfs)
+ break;
+
+ clear_bit(vf_id, bp->pf.vf_event_bmap);
+ bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
+ i = vf_id + 1;
+ }
+}
+
+void bnxt_update_vf_mac(struct bnxt *bp)
+{
+ struct hwrm_func_qcaps_input req = {0};
+ struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
+ goto update_vf_mac_exit;
+
+ if (!is_valid_ether_addr(resp->perm_mac_address))
+ goto update_vf_mac_exit;
+
+ if (ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr))
+ goto update_vf_mac_exit;
+
+ memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN);
+ memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
+update_vf_mac_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+}
+
+#else
+
+void bnxt_sriov_disable(struct bnxt *bp)
+{
+}
+
+void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
+{
+	netdev_err(bp->dev, "Invalid VF message received when SR-IOV is not enabled\n");
+}
+
+void bnxt_update_vf_mac(struct bnxt *bp)
+{
+}
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
new file mode 100644
index 000000000000..c151280e3980
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -0,0 +1,23 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_SRIOV_H
+#define BNXT_SRIOV_H
+
+int bnxt_get_vf_config(struct net_device *, int, struct ifla_vf_info *);
+int bnxt_set_vf_mac(struct net_device *, int, u8 *);
+int bnxt_set_vf_vlan(struct net_device *, int, u16, u8);
+int bnxt_set_vf_bw(struct net_device *, int, int, int);
+int bnxt_set_vf_link_state(struct net_device *, int, int);
+int bnxt_set_vf_spoofchk(struct net_device *, int, bool);
+int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
+void bnxt_sriov_disable(struct bnxt *);
+void bnxt_hwrm_exec_fwd_req(struct bnxt *);
+void bnxt_update_vf_mac(struct bnxt *);
+#endif
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 1805541b4240..17f017ab4dac 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -205,6 +205,23 @@ enum dma_reg {
DMA_INDEX2RING_5,
DMA_INDEX2RING_6,
DMA_INDEX2RING_7,
+ DMA_RING0_TIMEOUT,
+ DMA_RING1_TIMEOUT,
+ DMA_RING2_TIMEOUT,
+ DMA_RING3_TIMEOUT,
+ DMA_RING4_TIMEOUT,
+ DMA_RING5_TIMEOUT,
+ DMA_RING6_TIMEOUT,
+ DMA_RING7_TIMEOUT,
+ DMA_RING8_TIMEOUT,
+ DMA_RING9_TIMEOUT,
+ DMA_RING10_TIMEOUT,
+ DMA_RING11_TIMEOUT,
+ DMA_RING12_TIMEOUT,
+ DMA_RING13_TIMEOUT,
+ DMA_RING14_TIMEOUT,
+ DMA_RING15_TIMEOUT,
+ DMA_RING16_TIMEOUT,
};
static const u8 bcmgenet_dma_regs_v3plus[] = {
@@ -216,6 +233,23 @@ static const u8 bcmgenet_dma_regs_v3plus[] = {
[DMA_PRIORITY_0] = 0x30,
[DMA_PRIORITY_1] = 0x34,
[DMA_PRIORITY_2] = 0x38,
+ [DMA_RING0_TIMEOUT] = 0x2C,
+ [DMA_RING1_TIMEOUT] = 0x30,
+ [DMA_RING2_TIMEOUT] = 0x34,
+ [DMA_RING3_TIMEOUT] = 0x38,
+ [DMA_RING4_TIMEOUT] = 0x3c,
+ [DMA_RING5_TIMEOUT] = 0x40,
+ [DMA_RING6_TIMEOUT] = 0x44,
+ [DMA_RING7_TIMEOUT] = 0x48,
+ [DMA_RING8_TIMEOUT] = 0x4c,
+ [DMA_RING9_TIMEOUT] = 0x50,
+ [DMA_RING10_TIMEOUT] = 0x54,
+ [DMA_RING11_TIMEOUT] = 0x58,
+ [DMA_RING12_TIMEOUT] = 0x5c,
+ [DMA_RING13_TIMEOUT] = 0x60,
+ [DMA_RING14_TIMEOUT] = 0x64,
+ [DMA_RING15_TIMEOUT] = 0x68,
+ [DMA_RING16_TIMEOUT] = 0x6C,
[DMA_INDEX2RING_0] = 0x70,
[DMA_INDEX2RING_1] = 0x74,
[DMA_INDEX2RING_2] = 0x78,
@@ -235,6 +269,23 @@ static const u8 bcmgenet_dma_regs_v2[] = {
[DMA_PRIORITY_0] = 0x34,
[DMA_PRIORITY_1] = 0x38,
[DMA_PRIORITY_2] = 0x3C,
+ [DMA_RING0_TIMEOUT] = 0x2C,
+ [DMA_RING1_TIMEOUT] = 0x30,
+ [DMA_RING2_TIMEOUT] = 0x34,
+ [DMA_RING3_TIMEOUT] = 0x38,
+ [DMA_RING4_TIMEOUT] = 0x3c,
+ [DMA_RING5_TIMEOUT] = 0x40,
+ [DMA_RING6_TIMEOUT] = 0x44,
+ [DMA_RING7_TIMEOUT] = 0x48,
+ [DMA_RING8_TIMEOUT] = 0x4c,
+ [DMA_RING9_TIMEOUT] = 0x50,
+ [DMA_RING10_TIMEOUT] = 0x54,
+ [DMA_RING11_TIMEOUT] = 0x58,
+ [DMA_RING12_TIMEOUT] = 0x5c,
+ [DMA_RING13_TIMEOUT] = 0x60,
+ [DMA_RING14_TIMEOUT] = 0x64,
+ [DMA_RING15_TIMEOUT] = 0x68,
+ [DMA_RING16_TIMEOUT] = 0x6C,
};
static const u8 bcmgenet_dma_regs_v1[] = {
@@ -245,6 +296,23 @@ static const u8 bcmgenet_dma_regs_v1[] = {
[DMA_PRIORITY_0] = 0x34,
[DMA_PRIORITY_1] = 0x38,
[DMA_PRIORITY_2] = 0x3C,
+ [DMA_RING0_TIMEOUT] = 0x2C,
+ [DMA_RING1_TIMEOUT] = 0x30,
+ [DMA_RING2_TIMEOUT] = 0x34,
+ [DMA_RING3_TIMEOUT] = 0x38,
+ [DMA_RING4_TIMEOUT] = 0x3c,
+ [DMA_RING5_TIMEOUT] = 0x40,
+ [DMA_RING6_TIMEOUT] = 0x44,
+ [DMA_RING7_TIMEOUT] = 0x48,
+ [DMA_RING8_TIMEOUT] = 0x4c,
+ [DMA_RING9_TIMEOUT] = 0x50,
+ [DMA_RING10_TIMEOUT] = 0x54,
+ [DMA_RING11_TIMEOUT] = 0x58,
+ [DMA_RING12_TIMEOUT] = 0x5c,
+ [DMA_RING13_TIMEOUT] = 0x60,
+ [DMA_RING14_TIMEOUT] = 0x64,
+ [DMA_RING15_TIMEOUT] = 0x68,
+ [DMA_RING16_TIMEOUT] = 0x6C,
};
/* Set at runtime once bcmgenet version is known */
@@ -498,6 +566,85 @@ static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
priv->msg_enable = level;
}
+static int bcmgenet_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ ec->tx_max_coalesced_frames =
+ bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
+ DMA_MBUF_DONE_THRESH);
+ ec->rx_max_coalesced_frames =
+ bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
+ DMA_MBUF_DONE_THRESH);
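+	/* RX DMA timeout ticks are ~8192 ns each (125 MHz ref clock / 1024) */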
+ ec->rx_coalesce_usecs =
+ bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;
+
+ return 0;
+}
+
+static int bcmgenet_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ unsigned int i;
+ u32 reg;
+
+	/* The base system clock is 125 MHz and the DMA timeout tick is this
+	 * reference clock divided by 1024, which yields roughly 8.192 us per
+	 * tick. Our maximum value has to fit in the DMA_TIMEOUT_MASK (16 bits).
+	 */
+ if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
+ ec->tx_max_coalesced_frames == 0 ||
+ ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
+ ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
+ return -EINVAL;
+
+ if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
+ return -EINVAL;
+
+ /* GENET TDMA hardware does not support a configurable timeout, but will
+ * always generate an interrupt either after MBDONE packets have been
+	 * transmitted, or when the ring is empty.
+ */
+ if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
+ ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low)
+ return -EOPNOTSUPP;
+
+ /* Program all TX queues with the same values, as there is no
+ * ethtool knob to do coalescing on a per-queue basis
+ */
+ for (i = 0; i < priv->hw_params->tx_queues; i++)
+ bcmgenet_tdma_ring_writel(priv, i,
+ ec->tx_max_coalesced_frames,
+ DMA_MBUF_DONE_THRESH);
+ bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
+ ec->tx_max_coalesced_frames,
+ DMA_MBUF_DONE_THRESH);
+
+ for (i = 0; i < priv->hw_params->rx_queues; i++) {
+ bcmgenet_rdma_ring_writel(priv, i,
+ ec->rx_max_coalesced_frames,
+ DMA_MBUF_DONE_THRESH);
+
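+		/* Convert usecs to ~8192 ns DMA timeout ticks, rounding up */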
+ reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
+ reg &= ~DMA_TIMEOUT_MASK;
+ reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
+ bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
+ }
+
+ bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
+ ec->rx_max_coalesced_frames,
+ DMA_MBUF_DONE_THRESH);
+
+ reg = bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT);
+ reg &= ~DMA_TIMEOUT_MASK;
+ reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
+ bcmgenet_rdma_writel(priv, reg, DMA_RING16_TIMEOUT);
+
+ return 0;
+}
+
/* standard ethtool support functions. */
enum bcmgenet_stat_type {
BCMGENET_STAT_NETDEV = -1,
@@ -646,7 +793,6 @@ static void bcmgenet_get_drvinfo(struct net_device *dev,
{
strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
strlcpy(info->version, "v2.0", sizeof(info->version));
- info->n_stats = BCMGENET_STATS_LEN;
}
static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
@@ -844,6 +990,8 @@ static struct ethtool_ops bcmgenet_ethtool_ops = {
.get_eee = bcmgenet_get_eee,
.set_eee = bcmgenet_set_eee,
.nway_reset = bcmgenet_nway_reset,
+ .get_coalesce = bcmgenet_get_coalesce,
+ .set_coalesce = bcmgenet_set_coalesce,
};
/* Power down the unimac, based on mode. */
@@ -907,8 +1055,10 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
}
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
- if (mode == GENET_POWER_PASSIVE)
+ if (mode == GENET_POWER_PASSIVE) {
bcmgenet_phy_power_set(priv->dev, true);
+ bcmgenet_mii_reset(priv->dev);
+ }
}
/* ioctl handle special commands that are not present in ethtool. */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 7299d1075422..967367557309 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -304,13 +304,12 @@ struct bcmgenet_mib_counters {
#define UMAC_IRQ_RXDMA_MBDONE (1 << 13)
#define UMAC_IRQ_RXDMA_PDONE (1 << 14)
#define UMAC_IRQ_RXDMA_BDONE (1 << 15)
-#define UMAC_IRQ_RXDMA_DONE (UMAC_IRQ_RXDMA_PDONE | \
- UMAC_IRQ_RXDMA_BDONE)
+#define UMAC_IRQ_RXDMA_DONE UMAC_IRQ_RXDMA_MBDONE
#define UMAC_IRQ_TXDMA_MBDONE (1 << 16)
#define UMAC_IRQ_TXDMA_PDONE (1 << 17)
#define UMAC_IRQ_TXDMA_BDONE (1 << 18)
-#define UMAC_IRQ_TXDMA_DONE (UMAC_IRQ_TXDMA_PDONE | \
- UMAC_IRQ_TXDMA_BDONE)
+#define UMAC_IRQ_TXDMA_DONE UMAC_IRQ_TXDMA_MBDONE
+
/* Only valid for GENETv3+ */
#define UMAC_IRQ_MDIO_DONE (1 << 23)
#define UMAC_IRQ_MDIO_ERROR (1 << 24)
@@ -386,7 +385,7 @@ struct bcmgenet_mib_counters {
#define DMA_RING_BUFFER_SIZE_MASK 0xFFFF
/* DMA interrupt threshold register */
-#define DMA_INTR_THRESHOLD_MASK 0x00FF
+#define DMA_INTR_THRESHOLD_MASK 0x01FF
/* DMA XON/XOFF register */
#define DMA_XON_THREHOLD_MASK 0xFFFF
@@ -674,6 +673,7 @@ int bcmgenet_mii_init(struct net_device *dev);
int bcmgenet_mii_config(struct net_device *dev);
int bcmgenet_mii_probe(struct net_device *dev);
void bcmgenet_mii_exit(struct net_device *dev);
+void bcmgenet_mii_reset(struct net_device *dev);
void bcmgenet_phy_power_set(struct net_device *dev, bool enable);
void bcmgenet_mii_setup(struct net_device *dev);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index c8affad76f36..8bdfe53754ba 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -163,6 +163,7 @@ void bcmgenet_mii_setup(struct net_device *dev)
phy_print_status(phydev);
}
+
static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
struct fixed_phy_status *status)
{
@@ -172,6 +173,22 @@ static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
return 0;
}
+/* Perform a voluntary PHY software reset; the EPHY is very finicky and will
+ * start corrupting packets if this is not done.
+ */
+void bcmgenet_mii_reset(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ if (GENET_IS_V4(priv))
+ return;
+
+ if (priv->phydev) {
+ phy_init_hw(priv->phydev);
+ phy_start_aneg(priv->phydev);
+ }
+}
+
void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -214,6 +231,7 @@ static void bcmgenet_internal_phy_setup(struct net_device *dev)
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
reg |= EXT_PWR_DN_EN_LD;
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ bcmgenet_mii_reset(dev);
}
static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 29f330831784..245c063ed4db 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -153,7 +153,6 @@ lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
ETHTOOL_FWVERS_LEN);
strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
- drvinfo->regdump_len = OCT_ETHTOOL_REGDUMP_LEN;
}
static void
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index fa0c7b54ec7a..634e50c8c5ef 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -47,6 +47,7 @@
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
+#include <linux/net_tstamp.h>
#include <asm/io.h>
#include "cxgb4_uld.h"
@@ -478,6 +479,8 @@ struct port_info {
#ifdef CONFIG_CHELSIO_T4_FCOE
struct cxgb_fcoe fcoe;
#endif /* CONFIG_CHELSIO_T4_FCOE */
+ bool rxtstamp; /* Enable TS */
+ struct hwtstamp_config tstamp_config;
};
struct dentry;
@@ -517,6 +520,7 @@ struct sge_fl { /* SGE free-buffer queue state */
/* A packet gather list */
struct pkt_gl {
+ u64 sgetstamp; /* SGE Time Stamp for Ingress Packet */
struct page_frag frags[MAX_SKB_FRAGS];
void *va; /* virtual address of first byte */
unsigned int nfrags; /* # of fragments */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 0a87a3247464..4269944c5db5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -940,6 +940,7 @@ static const char * const devlog_level_strings[] = {
static const char * const devlog_facility_strings[] = {
[FW_DEVLOG_FACILITY_CORE] = "CORE",
+ [FW_DEVLOG_FACILITY_CF] = "CF",
[FW_DEVLOG_FACILITY_SCHED] = "SCHED",
[FW_DEVLOG_FACILITY_TIMER] = "TIMER",
[FW_DEVLOG_FACILITY_RES] = "RES",
@@ -1128,18 +1129,26 @@ static const struct file_operations devlog_fops = {
static int mbox_show(struct seq_file *seq, void *v)
{
static const char * const owner[] = { "none", "FW", "driver",
- "unknown" };
+ "unknown", "<unread>" };
int i;
unsigned int mbox = (uintptr_t)seq->private & 7;
struct adapter *adap = seq->private - mbox;
void __iomem *addr = adap->regs + PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
- unsigned int ctrl_reg = (is_t4(adap->params.chip)
- ? CIM_PF_MAILBOX_CTRL_A
- : CIM_PF_MAILBOX_CTRL_SHADOW_COPY_A);
- void __iomem *ctrl = adap->regs + PF_REG(mbox, ctrl_reg);
- i = MBOWNER_G(readl(ctrl));
+ /* For T4 we don't have a shadow copy of the Mailbox Control register.
+ * And since reading that real register causes a side effect of
+	 * granting ownership, we're best off simply not reading it at all.
+ */
+ if (is_t4(adap->params.chip)) {
+ i = 4; /* index of "<unread>" */
+ } else {
+ unsigned int ctrl_reg = CIM_PF_MAILBOX_CTRL_SHADOW_COPY_A;
+ void __iomem *ctrl = adap->regs + PF_REG(mbox, ctrl_reg);
+
+ i = MBOWNER_G(readl(ctrl));
+ }
+
seq_printf(seq, "mailbox owned by %s\n\n", owner[i]);
for (i = 0; i < MBOX_LEN; i += 8)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 5eedb98ff581..a077f9476daf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -35,79 +35,79 @@ static void set_msglevel(struct net_device *dev, u32 val)
}
static const char stats_strings[][ETH_GSTRING_LEN] = {
- "TxOctetsOK ",
- "TxFramesOK ",
- "TxBroadcastFrames ",
- "TxMulticastFrames ",
- "TxUnicastFrames ",
- "TxErrorFrames ",
-
- "TxFrames64 ",
- "TxFrames65To127 ",
- "TxFrames128To255 ",
- "TxFrames256To511 ",
- "TxFrames512To1023 ",
- "TxFrames1024To1518 ",
- "TxFrames1519ToMax ",
-
- "TxFramesDropped ",
- "TxPauseFrames ",
- "TxPPP0Frames ",
- "TxPPP1Frames ",
- "TxPPP2Frames ",
- "TxPPP3Frames ",
- "TxPPP4Frames ",
- "TxPPP5Frames ",
- "TxPPP6Frames ",
- "TxPPP7Frames ",
-
- "RxOctetsOK ",
- "RxFramesOK ",
- "RxBroadcastFrames ",
- "RxMulticastFrames ",
- "RxUnicastFrames ",
-
- "RxFramesTooLong ",
- "RxJabberErrors ",
- "RxFCSErrors ",
- "RxLengthErrors ",
- "RxSymbolErrors ",
- "RxRuntFrames ",
-
- "RxFrames64 ",
- "RxFrames65To127 ",
- "RxFrames128To255 ",
- "RxFrames256To511 ",
- "RxFrames512To1023 ",
- "RxFrames1024To1518 ",
- "RxFrames1519ToMax ",
-
- "RxPauseFrames ",
- "RxPPP0Frames ",
- "RxPPP1Frames ",
- "RxPPP2Frames ",
- "RxPPP3Frames ",
- "RxPPP4Frames ",
- "RxPPP5Frames ",
- "RxPPP6Frames ",
- "RxPPP7Frames ",
-
- "RxBG0FramesDropped ",
- "RxBG1FramesDropped ",
- "RxBG2FramesDropped ",
- "RxBG3FramesDropped ",
- "RxBG0FramesTrunc ",
- "RxBG1FramesTrunc ",
- "RxBG2FramesTrunc ",
- "RxBG3FramesTrunc ",
-
- "TSO ",
- "TxCsumOffload ",
- "RxCsumGood ",
- "VLANextractions ",
- "VLANinsertions ",
- "GROpackets ",
- "GROmerged ",
+ "tx_octets_ok ",
+ "tx_frames_ok ",
+ "tx_broadcast_frames ",
+ "tx_multicast_frames ",
+ "tx_unicast_frames ",
+ "tx_error_frames ",
+
+ "tx_frames_64 ",
+ "tx_frames_65_to_127 ",
+ "tx_frames_128_to_255 ",
+ "tx_frames_256_to_511 ",
+ "tx_frames_512_to_1023 ",
+ "tx_frames_1024_to_1518 ",
+ "tx_frames_1519_to_max ",
+
+ "tx_frames_dropped ",
+ "tx_pause_frames ",
+ "tx_ppp0_frames ",
+ "tx_ppp1_frames ",
+ "tx_ppp2_frames ",
+ "tx_ppp3_frames ",
+ "tx_ppp4_frames ",
+ "tx_ppp5_frames ",
+ "tx_ppp6_frames ",
+ "tx_ppp7_frames ",
+
+ "rx_octets_ok ",
+ "rx_frames_ok ",
+ "rx_broadcast_frames ",
+ "rx_multicast_frames ",
+ "rx_unicast_frames ",
+
+ "rx_frames_too_long ",
+ "rx_jabber_errors ",
+ "rx_fcs_errors ",
+ "rx_length_errors ",
+ "rx_symbol_errors ",
+ "rx_runt_frames ",
+
+ "rx_frames_64 ",
+ "rx_frames_65_to_127 ",
+ "rx_frames_128_to_255 ",
+ "rx_frames_256_to_511 ",
+ "rx_frames_512_to_1023 ",
+ "rx_frames_1024_to_1518 ",
+ "rx_frames_1519_to_max ",
+
+ "rx_pause_frames ",
+ "rx_ppp0_frames ",
+ "rx_ppp1_frames ",
+ "rx_ppp2_frames ",
+ "rx_ppp3_frames ",
+ "rx_ppp4_frames ",
+ "rx_ppp5_frames ",
+ "rx_ppp6_frames ",
+ "rx_ppp7_frames ",
+
+ "rx_bg0_frames_dropped ",
+ "rx_bg1_frames_dropped ",
+ "rx_bg2_frames_dropped ",
+ "rx_bg3_frames_dropped ",
+ "rx_bg0_frames_trunc ",
+ "rx_bg1_frames_trunc ",
+ "rx_bg2_frames_trunc ",
+ "rx_bg3_frames_trunc ",
+
+ "tso ",
+ "tx_csum_offload ",
+ "rx_csum_good ",
+ "vlan_extractions ",
+ "vlan_insertions ",
+ "gro_packets ",
+ "gro_merged ",
};
static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
@@ -211,8 +211,11 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
sizeof(info->version));
strlcpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
+ info->regdump_len = get_regs_len(dev);
- if (adapter->params.fw_vers)
+ if (!adapter->params.fw_vers)
+ strcpy(info->fw_version, "N/A");
+ else
snprintf(info->fw_version, sizeof(info->fw_version),
"%u.%u.%u.%u, TP %u.%u.%u.%u",
FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
@@ -612,6 +615,8 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
struct port_info *p = netdev_priv(dev);
struct link_config *lc = &p->link_cfg;
u32 speed = ethtool_cmd_speed(cmd);
+ struct link_config old_lc;
+ int ret;
if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
return -EINVAL;
@@ -626,13 +631,11 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return -EINVAL;
}
+ old_lc = *lc;
if (cmd->autoneg == AUTONEG_DISABLE) {
cap = speed_to_caps(speed);
- if (!(lc->supported & cap) ||
- (speed == 1000) ||
- (speed == 10000) ||
- (speed == 40000))
+ if (!(lc->supported & cap))
return -EINVAL;
lc->requested_speed = cap;
lc->advertising = 0;
@@ -645,10 +648,14 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
}
lc->autoneg = cmd->autoneg;
- if (netif_running(dev))
- return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan,
- lc);
- return 0;
+ /* If the firmware rejects the Link Configuration request, back out
+ * the changes and report the error.
+ */
+ ret = t4_link_l1cfg(p->adapter, p->adapter->mbox, p->tx_chan, lc);
+ if (ret)
+ *lc = old_lc;
+
+ return ret;
}
static void get_pauseparam(struct net_device *dev,
@@ -847,7 +854,7 @@ static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
{
int i, err = 0;
struct adapter *adapter = netdev2adap(dev);
- u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
+ u8 *buf = t4_alloc_mem(EEPROMSIZE);
if (!buf)
return -ENOMEM;
@@ -858,7 +865,7 @@ static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
if (!err)
memcpy(data, buf + e->offset, e->len);
- kfree(buf);
+ t4_free_mem(buf);
return err;
}
@@ -887,7 +894,7 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
/* RMW possibly needed for first or last words.
*/
- buf = kmalloc(aligned_len, GFP_KERNEL);
+ buf = t4_alloc_mem(aligned_len);
if (!buf)
return -ENOMEM;
err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
@@ -915,7 +922,7 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
err = t4_seeprom_wp(adapter, true);
out:
if (buf != data)
- kfree(buf);
+ t4_free_mem(buf);
return err;
}
@@ -961,6 +968,20 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
return ret;
}
+static int get_ts_info(struct net_device *dev, struct ethtool_ts_info *ts_info)
+{
+ ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+
+ ts_info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ ts_info->phc_index = -1;
+
+ return 0;
+}
+
static u32 get_rss_table_size(struct net_device *dev)
{
const struct port_info *pi = netdev_priv(dev);
@@ -997,11 +1018,15 @@ static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
if (!p)
return 0;
- for (i = 0; i < pi->rss_size; i++)
- pi->rss[i] = p[i];
- if (pi->adapter->flags & FULL_INIT_DONE)
+	/* Interface must be brought up at least once */
+ if (pi->adapter->flags & FULL_INIT_DONE) {
+ for (i = 0; i < pi->rss_size; i++)
+ pi->rss[i] = p[i];
+
return cxgb4_write_rss(pi, pi->rss);
- return 0;
+ }
+
+ return -EPERM;
}
static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
@@ -1095,6 +1120,7 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.get_rxfh = get_rss_table,
.set_rxfh = set_rss_table,
.flash_device = set_flash,
+ .get_ts_info = get_ts_info
};
void cxgb4_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index f5dcde27e402..2cf81857a297 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -83,7 +83,7 @@ char cxgb4_driver_name[] = KBUILD_MODNAME;
#endif
#define DRV_VERSION "2.0.0-ko"
const char cxgb4_driver_version[] = DRV_VERSION;
-#define DRV_DESC "Chelsio T4/T5 Network Driver"
+#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
/* Host shadow copy of ingress filter entry. This is in host native format
* and doesn't match the ordering or bit order, etc. of the hardware of the
@@ -151,6 +151,7 @@ MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
+MODULE_FIRMWARE(FW6_FNAME);
/*
* Normally we're willing to become the firmware's Master PF but will be happy
@@ -275,7 +276,7 @@ static void link_report(struct net_device *dev)
else {
static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
- const char *s = "10Mbps";
+ const char *s;
const struct port_info *p = netdev_priv(dev);
switch (p->link_cfg.speed) {
@@ -291,6 +292,10 @@ static void link_report(struct net_device *dev)
case 40000:
s = "40Gbps";
break;
+ default:
+ pr_info("%s: unsupported speed: %d\n",
+ dev->name, p->link_cfg.speed);
+ return;
}
netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
@@ -2959,6 +2964,30 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
data->reg_num, data->val_in);
break;
+ case SIOCGHWTSTAMP:
+ return copy_to_user(req->ifr_data, &pi->tstamp_config,
+ sizeof(pi->tstamp_config)) ?
+ -EFAULT : 0;
+ case SIOCSHWTSTAMP:
+ if (copy_from_user(&pi->tstamp_config, req->ifr_data,
+ sizeof(pi->tstamp_config)))
+ return -EFAULT;
+
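+		/* Only the RX filter is acted on here; hardware TX
+		 * timestamping is not supported (see get_ts_info)
+		 */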
+ switch (pi->tstamp_config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ pi->rxtstamp = false;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ pi->rxtstamp = true;
+ break;
+ default:
+ pi->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ return -ERANGE;
+ }
+
+ return copy_to_user(req->ifr_data, &pi->tstamp_config,
+ sizeof(pi->tstamp_config)) ?
+ -EFAULT : 0;
default:
return -EOPNOTSUPP;
}
@@ -3670,7 +3699,7 @@ static int adap_init0(struct adapter *adap)
t4_get_tp_version(adap, &adap->params.tp_vers);
ret = t4_check_fw_version(adap);
/* If firmware is too old (not supported by driver) force an update. */
- if (ret == -EFAULT)
+ if (ret)
state = DEV_STATE_UNINIT;
if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
struct fw_info *fw_info;
@@ -4457,6 +4486,10 @@ static int enable_msix(struct adapter *adap)
}
for (i = 0; i < allocated; ++i)
adap->msix_info[i].vec = entries[i].vector;
+ dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
+ "nic %d iscsi %d rdma cpl %d rdma ciq %d\n",
+ allocated, s->max_ethqsets, s->ofldqsets, s->rdmaqs,
+ s->rdmaciqs);
kfree(entries);
return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 9162746d7729..b7b93e7a643d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1820,11 +1820,34 @@ static noinline int handle_trace_pkt(struct adapter *adap,
return 0;
}
+/**
+ * cxgb4_sgetim_to_hwtstamp - convert sge time stamp to hw time stamp
+ * @adap: the adapter
+ * @hwtstamps: time stamp structure to update
+ * @sgetstamp: 60-bit IQE timestamp
+ *
+ * Every ingress queue entry carries a 60-bit timestamp in Core Clock ticks;
+ * convert it to ktime_t and assign it to the hardware timestamp structure.
+ **/
+static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap,
+ struct skb_shared_hwtstamps *hwtstamps,
+ u64 sgetstamp)
+{
+ u64 ns;
+ u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2);
+
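+	/* ticks -> ns; cclk / 2 is added above to round to the nearest ns */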
+ ns = div_u64(tmp, adap->params.vpd.cclk);
+
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
+
static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
const struct cpl_rx_pkt *pkt)
{
struct adapter *adapter = rxq->rspq.adap;
struct sge *s = &adapter->sge;
+ struct port_info *pi;
int ret;
struct sk_buff *skb;
@@ -1842,6 +1865,10 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rxq->rspq.idx);
skb_mark_napi_id(skb, &rxq->rspq.napi);
+ pi = netdev_priv(skb->dev);
+ if (pi->rxtstamp)
+ cxgb4_sgetim_to_hwtstamp(adapter, skb_hwtstamps(skb),
+ gl->sgetstamp);
if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
PKT_HASH_TYPE_L3);
@@ -1877,9 +1904,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
struct sge *s = &q->adap->sge;
int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
-#ifdef CONFIG_CHELSIO_T4_FCOE
struct port_info *pi;
-#endif
if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
return handle_trace_pkt(q->adap, si);
@@ -1910,6 +1935,10 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
rxq->stats.pkts++;
+ pi = netdev_priv(skb->dev);
+ if (pi->rxtstamp)
+ cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb),
+ si->sgetstamp);
if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
if (!pkt->ip_frag) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1926,7 +1955,6 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
#define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \
RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F)
- pi = netdev_priv(skb->dev);
if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) {
if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) &&
(pi->fcoe.flags & CXGB_FCOE_ENABLED)) {
@@ -2067,6 +2095,8 @@ static int process_responses(struct sge_rspq *q, int budget)
unmap_rx_buf(q->adap, &rxq->fl);
}
+ si.sgetstamp = SGE_TIMESTAMP_G(
+ be64_to_cpu(rc->last_flit));
/*
* Last buffer remains mapped so explicitly make it
* coherent for CPU access.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 44806253c178..cf61a5869c6e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -699,50 +699,107 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
{
static const unsigned int t4_reg_ranges[] = {
0x1008, 0x1108,
- 0x1180, 0x11b4,
+ 0x1180, 0x1184,
+ 0x1190, 0x1194,
+ 0x11a0, 0x11a4,
+ 0x11b0, 0x11b4,
0x11fc, 0x123c,
0x1300, 0x173c,
0x1800, 0x18fc,
- 0x3000, 0x305c,
- 0x3068, 0x30d8,
- 0x30e0, 0x5924,
- 0x5960, 0x59d4,
- 0x5a00, 0x5af8,
+ 0x3000, 0x30d8,
+ 0x30e0, 0x30e4,
+ 0x30ec, 0x5910,
+ 0x5920, 0x5924,
+ 0x5960, 0x5960,
+ 0x5968, 0x5968,
+ 0x5970, 0x5970,
+ 0x5978, 0x5978,
+ 0x5980, 0x5980,
+ 0x5988, 0x5988,
+ 0x5990, 0x5990,
+ 0x5998, 0x5998,
+ 0x59a0, 0x59d4,
+ 0x5a00, 0x5ae0,
+ 0x5ae8, 0x5ae8,
+ 0x5af0, 0x5af0,
+ 0x5af8, 0x5af8,
0x6000, 0x6098,
0x6100, 0x6150,
0x6200, 0x6208,
0x6240, 0x6248,
- 0x6280, 0x6338,
+ 0x6280, 0x62b0,
+ 0x62c0, 0x6338,
0x6370, 0x638c,
0x6400, 0x643c,
0x6500, 0x6524,
- 0x6a00, 0x6a38,
- 0x6a60, 0x6a78,
- 0x6b00, 0x6b84,
- 0x6bf0, 0x6c84,
- 0x6cf0, 0x6d84,
- 0x6df0, 0x6e84,
- 0x6ef0, 0x6f84,
- 0x6ff0, 0x7084,
- 0x70f0, 0x7184,
- 0x71f0, 0x7284,
- 0x72f0, 0x7384,
- 0x73f0, 0x7450,
+ 0x6a00, 0x6a04,
+ 0x6a14, 0x6a38,
+ 0x6a60, 0x6a70,
+ 0x6a78, 0x6a78,
+ 0x6b00, 0x6b0c,
+ 0x6b1c, 0x6b84,
+ 0x6bf0, 0x6bf8,
+ 0x6c00, 0x6c0c,
+ 0x6c1c, 0x6c84,
+ 0x6cf0, 0x6cf8,
+ 0x6d00, 0x6d0c,
+ 0x6d1c, 0x6d84,
+ 0x6df0, 0x6df8,
+ 0x6e00, 0x6e0c,
+ 0x6e1c, 0x6e84,
+ 0x6ef0, 0x6ef8,
+ 0x6f00, 0x6f0c,
+ 0x6f1c, 0x6f84,
+ 0x6ff0, 0x6ff8,
+ 0x7000, 0x700c,
+ 0x701c, 0x7084,
+ 0x70f0, 0x70f8,
+ 0x7100, 0x710c,
+ 0x711c, 0x7184,
+ 0x71f0, 0x71f8,
+ 0x7200, 0x720c,
+ 0x721c, 0x7284,
+ 0x72f0, 0x72f8,
+ 0x7300, 0x730c,
+ 0x731c, 0x7384,
+ 0x73f0, 0x73f8,
+ 0x7400, 0x7450,
0x7500, 0x7530,
- 0x7600, 0x761c,
+ 0x7600, 0x760c,
+ 0x7614, 0x761c,
0x7680, 0x76cc,
0x7700, 0x7798,
0x77c0, 0x77fc,
0x7900, 0x79fc,
- 0x7b00, 0x7c38,
- 0x7d00, 0x7efc,
- 0x8dc0, 0x8e1c,
+ 0x7b00, 0x7b58,
+ 0x7b60, 0x7b84,
+ 0x7b8c, 0x7c38,
+ 0x7d00, 0x7d38,
+ 0x7d40, 0x7d80,
+ 0x7d8c, 0x7ddc,
+ 0x7de4, 0x7e04,
+ 0x7e10, 0x7e1c,
+ 0x7e24, 0x7e38,
+ 0x7e40, 0x7e44,
+ 0x7e4c, 0x7e78,
+ 0x7e80, 0x7ea4,
+ 0x7eac, 0x7edc,
+ 0x7ee8, 0x7efc,
+ 0x8dc0, 0x8e04,
+ 0x8e10, 0x8e1c,
0x8e30, 0x8e78,
- 0x8ea0, 0x8f6c,
- 0x8fc0, 0x9074,
+ 0x8ea0, 0x8eb8,
+ 0x8ec0, 0x8f6c,
+ 0x8fc0, 0x9008,
+ 0x9010, 0x9058,
+ 0x9060, 0x9060,
+ 0x9068, 0x9074,
0x90fc, 0x90fc,
- 0x9400, 0x9458,
- 0x9600, 0x96bc,
+ 0x9400, 0x9408,
+ 0x9410, 0x9458,
+ 0x9600, 0x9600,
+ 0x9608, 0x9638,
+ 0x9640, 0x96bc,
0x9800, 0x9808,
0x9820, 0x983c,
0x9850, 0x9864,
@@ -754,23 +811,42 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x9e80, 0x9eec,
0x9f00, 0x9f6c,
0x9f80, 0x9fec,
- 0xd004, 0xd03c,
+ 0xd004, 0xd004,
+ 0xd010, 0xd03c,
0xdfc0, 0xdfe0,
0xe000, 0xea7c,
- 0xf000, 0x11110,
- 0x11118, 0x11190,
+ 0xf000, 0x11190,
0x19040, 0x1906c,
0x19078, 0x19080,
- 0x1908c, 0x19124,
- 0x19150, 0x191b0,
+ 0x1908c, 0x190e4,
+ 0x190f0, 0x190f8,
+ 0x19100, 0x19110,
+ 0x19120, 0x19124,
+ 0x19150, 0x19194,
+ 0x1919c, 0x191b0,
0x191d0, 0x191e8,
0x19238, 0x1924c,
- 0x193f8, 0x19474,
- 0x19490, 0x194f8,
- 0x19800, 0x19f4c,
- 0x1a000, 0x1a06c,
- 0x1a0b0, 0x1a120,
- 0x1a128, 0x1a138,
+ 0x193f8, 0x1943c,
+ 0x1944c, 0x19474,
+ 0x19490, 0x194e0,
+ 0x194f0, 0x194f8,
+ 0x19800, 0x19c08,
+ 0x19c10, 0x19c90,
+ 0x19ca0, 0x19ce4,
+ 0x19cf0, 0x19d40,
+ 0x19d50, 0x19d94,
+ 0x19da0, 0x19de8,
+ 0x19df0, 0x19e40,
+ 0x19e50, 0x19e90,
+ 0x19ea0, 0x19f4c,
+ 0x1a000, 0x1a004,
+ 0x1a010, 0x1a06c,
+ 0x1a0b0, 0x1a0e4,
+ 0x1a0ec, 0x1a0f4,
+ 0x1a100, 0x1a108,
+ 0x1a114, 0x1a120,
+ 0x1a128, 0x1a130,
+ 0x1a138, 0x1a138,
0x1a190, 0x1a1c4,
0x1a1fc, 0x1a1fc,
0x1e040, 0x1e04c,
@@ -823,9 +899,12 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x1ffc0, 0x1ffc8,
0x20000, 0x2002c,
0x20100, 0x2013c,
- 0x20190, 0x201c8,
+ 0x20190, 0x201a0,
+ 0x201a8, 0x201b8,
+ 0x201c4, 0x201c8,
0x20200, 0x20318,
- 0x20400, 0x20528,
+ 0x20400, 0x204b4,
+ 0x204c0, 0x20528,
0x20540, 0x20614,
0x21000, 0x21040,
0x2104c, 0x21060,
@@ -834,22 +913,62 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x21270, 0x21284,
0x212fc, 0x21388,
0x21400, 0x21404,
- 0x21500, 0x21518,
- 0x2152c, 0x2153c,
+ 0x21500, 0x21500,
+ 0x21510, 0x21518,
+ 0x2152c, 0x21530,
+ 0x2153c, 0x2153c,
0x21550, 0x21554,
0x21600, 0x21600,
- 0x21608, 0x21628,
- 0x21630, 0x2163c,
+ 0x21608, 0x2161c,
+ 0x21624, 0x21628,
+ 0x21630, 0x21634,
+ 0x2163c, 0x2163c,
0x21700, 0x2171c,
0x21780, 0x2178c,
- 0x21800, 0x21c38,
- 0x21c80, 0x21d7c,
+ 0x21800, 0x21818,
+ 0x21820, 0x21828,
+ 0x21830, 0x21848,
+ 0x21850, 0x21854,
+ 0x21860, 0x21868,
+ 0x21870, 0x21870,
+ 0x21878, 0x21898,
+ 0x218a0, 0x218a8,
+ 0x218b0, 0x218c8,
+ 0x218d0, 0x218d4,
+ 0x218e0, 0x218e8,
+ 0x218f0, 0x218f0,
+ 0x218f8, 0x21a18,
+ 0x21a20, 0x21a28,
+ 0x21a30, 0x21a48,
+ 0x21a50, 0x21a54,
+ 0x21a60, 0x21a68,
+ 0x21a70, 0x21a70,
+ 0x21a78, 0x21a98,
+ 0x21aa0, 0x21aa8,
+ 0x21ab0, 0x21ac8,
+ 0x21ad0, 0x21ad4,
+ 0x21ae0, 0x21ae8,
+ 0x21af0, 0x21af0,
+ 0x21af8, 0x21c18,
+ 0x21c20, 0x21c20,
+ 0x21c28, 0x21c30,
+ 0x21c38, 0x21c38,
+ 0x21c80, 0x21c98,
+ 0x21ca0, 0x21ca8,
+ 0x21cb0, 0x21cc8,
+ 0x21cd0, 0x21cd4,
+ 0x21ce0, 0x21ce8,
+ 0x21cf0, 0x21cf0,
+ 0x21cf8, 0x21d7c,
0x21e00, 0x21e04,
0x22000, 0x2202c,
0x22100, 0x2213c,
- 0x22190, 0x221c8,
+ 0x22190, 0x221a0,
+ 0x221a8, 0x221b8,
+ 0x221c4, 0x221c8,
0x22200, 0x22318,
- 0x22400, 0x22528,
+ 0x22400, 0x224b4,
+ 0x224c0, 0x22528,
0x22540, 0x22614,
0x23000, 0x23040,
0x2304c, 0x23060,
@@ -858,22 +977,62 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x23270, 0x23284,
0x232fc, 0x23388,
0x23400, 0x23404,
- 0x23500, 0x23518,
- 0x2352c, 0x2353c,
+ 0x23500, 0x23500,
+ 0x23510, 0x23518,
+ 0x2352c, 0x23530,
+ 0x2353c, 0x2353c,
0x23550, 0x23554,
0x23600, 0x23600,
- 0x23608, 0x23628,
- 0x23630, 0x2363c,
+ 0x23608, 0x2361c,
+ 0x23624, 0x23628,
+ 0x23630, 0x23634,
+ 0x2363c, 0x2363c,
0x23700, 0x2371c,
0x23780, 0x2378c,
- 0x23800, 0x23c38,
- 0x23c80, 0x23d7c,
+ 0x23800, 0x23818,
+ 0x23820, 0x23828,
+ 0x23830, 0x23848,
+ 0x23850, 0x23854,
+ 0x23860, 0x23868,
+ 0x23870, 0x23870,
+ 0x23878, 0x23898,
+ 0x238a0, 0x238a8,
+ 0x238b0, 0x238c8,
+ 0x238d0, 0x238d4,
+ 0x238e0, 0x238e8,
+ 0x238f0, 0x238f0,
+ 0x238f8, 0x23a18,
+ 0x23a20, 0x23a28,
+ 0x23a30, 0x23a48,
+ 0x23a50, 0x23a54,
+ 0x23a60, 0x23a68,
+ 0x23a70, 0x23a70,
+ 0x23a78, 0x23a98,
+ 0x23aa0, 0x23aa8,
+ 0x23ab0, 0x23ac8,
+ 0x23ad0, 0x23ad4,
+ 0x23ae0, 0x23ae8,
+ 0x23af0, 0x23af0,
+ 0x23af8, 0x23c18,
+ 0x23c20, 0x23c20,
+ 0x23c28, 0x23c30,
+ 0x23c38, 0x23c38,
+ 0x23c80, 0x23c98,
+ 0x23ca0, 0x23ca8,
+ 0x23cb0, 0x23cc8,
+ 0x23cd0, 0x23cd4,
+ 0x23ce0, 0x23ce8,
+ 0x23cf0, 0x23cf0,
+ 0x23cf8, 0x23d7c,
0x23e00, 0x23e04,
0x24000, 0x2402c,
0x24100, 0x2413c,
- 0x24190, 0x241c8,
+ 0x24190, 0x241a0,
+ 0x241a8, 0x241b8,
+ 0x241c4, 0x241c8,
0x24200, 0x24318,
- 0x24400, 0x24528,
+ 0x24400, 0x244b4,
+ 0x244c0, 0x24528,
0x24540, 0x24614,
0x25000, 0x25040,
0x2504c, 0x25060,
@@ -882,22 +1041,62 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x25270, 0x25284,
0x252fc, 0x25388,
0x25400, 0x25404,
- 0x25500, 0x25518,
- 0x2552c, 0x2553c,
+ 0x25500, 0x25500,
+ 0x25510, 0x25518,
+ 0x2552c, 0x25530,
+ 0x2553c, 0x2553c,
0x25550, 0x25554,
0x25600, 0x25600,
- 0x25608, 0x25628,
- 0x25630, 0x2563c,
+ 0x25608, 0x2561c,
+ 0x25624, 0x25628,
+ 0x25630, 0x25634,
+ 0x2563c, 0x2563c,
0x25700, 0x2571c,
0x25780, 0x2578c,
- 0x25800, 0x25c38,
- 0x25c80, 0x25d7c,
+ 0x25800, 0x25818,
+ 0x25820, 0x25828,
+ 0x25830, 0x25848,
+ 0x25850, 0x25854,
+ 0x25860, 0x25868,
+ 0x25870, 0x25870,
+ 0x25878, 0x25898,
+ 0x258a0, 0x258a8,
+ 0x258b0, 0x258c8,
+ 0x258d0, 0x258d4,
+ 0x258e0, 0x258e8,
+ 0x258f0, 0x258f0,
+ 0x258f8, 0x25a18,
+ 0x25a20, 0x25a28,
+ 0x25a30, 0x25a48,
+ 0x25a50, 0x25a54,
+ 0x25a60, 0x25a68,
+ 0x25a70, 0x25a70,
+ 0x25a78, 0x25a98,
+ 0x25aa0, 0x25aa8,
+ 0x25ab0, 0x25ac8,
+ 0x25ad0, 0x25ad4,
+ 0x25ae0, 0x25ae8,
+ 0x25af0, 0x25af0,
+ 0x25af8, 0x25c18,
+ 0x25c20, 0x25c20,
+ 0x25c28, 0x25c30,
+ 0x25c38, 0x25c38,
+ 0x25c80, 0x25c98,
+ 0x25ca0, 0x25ca8,
+ 0x25cb0, 0x25cc8,
+ 0x25cd0, 0x25cd4,
+ 0x25ce0, 0x25ce8,
+ 0x25cf0, 0x25cf0,
+ 0x25cf8, 0x25d7c,
0x25e00, 0x25e04,
0x26000, 0x2602c,
0x26100, 0x2613c,
- 0x26190, 0x261c8,
+ 0x26190, 0x261a0,
+ 0x261a8, 0x261b8,
+ 0x261c4, 0x261c8,
0x26200, 0x26318,
- 0x26400, 0x26528,
+ 0x26400, 0x264b4,
+ 0x264c0, 0x26528,
0x26540, 0x26614,
0x27000, 0x27040,
0x2704c, 0x27060,
@@ -906,51 +1105,120 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x27270, 0x27284,
0x272fc, 0x27388,
0x27400, 0x27404,
- 0x27500, 0x27518,
- 0x2752c, 0x2753c,
+ 0x27500, 0x27500,
+ 0x27510, 0x27518,
+ 0x2752c, 0x27530,
+ 0x2753c, 0x2753c,
0x27550, 0x27554,
0x27600, 0x27600,
- 0x27608, 0x27628,
- 0x27630, 0x2763c,
+ 0x27608, 0x2761c,
+ 0x27624, 0x27628,
+ 0x27630, 0x27634,
+ 0x2763c, 0x2763c,
0x27700, 0x2771c,
0x27780, 0x2778c,
- 0x27800, 0x27c38,
- 0x27c80, 0x27d7c,
+ 0x27800, 0x27818,
+ 0x27820, 0x27828,
+ 0x27830, 0x27848,
+ 0x27850, 0x27854,
+ 0x27860, 0x27868,
+ 0x27870, 0x27870,
+ 0x27878, 0x27898,
+ 0x278a0, 0x278a8,
+ 0x278b0, 0x278c8,
+ 0x278d0, 0x278d4,
+ 0x278e0, 0x278e8,
+ 0x278f0, 0x278f0,
+ 0x278f8, 0x27a18,
+ 0x27a20, 0x27a28,
+ 0x27a30, 0x27a48,
+ 0x27a50, 0x27a54,
+ 0x27a60, 0x27a68,
+ 0x27a70, 0x27a70,
+ 0x27a78, 0x27a98,
+ 0x27aa0, 0x27aa8,
+ 0x27ab0, 0x27ac8,
+ 0x27ad0, 0x27ad4,
+ 0x27ae0, 0x27ae8,
+ 0x27af0, 0x27af0,
+ 0x27af8, 0x27c18,
+ 0x27c20, 0x27c20,
+ 0x27c28, 0x27c30,
+ 0x27c38, 0x27c38,
+ 0x27c80, 0x27c98,
+ 0x27ca0, 0x27ca8,
+ 0x27cb0, 0x27cc8,
+ 0x27cd0, 0x27cd4,
+ 0x27ce0, 0x27ce8,
+ 0x27cf0, 0x27cf0,
+ 0x27cf8, 0x27d7c,
0x27e00, 0x27e04,
};
static const unsigned int t5_reg_ranges[] = {
- 0x1008, 0x1148,
- 0x1180, 0x11b4,
+ 0x1008, 0x10c0,
+ 0x10cc, 0x10f8,
+ 0x1100, 0x1100,
+ 0x110c, 0x1148,
+ 0x1180, 0x1184,
+ 0x1190, 0x1194,
+ 0x11a0, 0x11a4,
+ 0x11b0, 0x11b4,
0x11fc, 0x123c,
0x1280, 0x173c,
0x1800, 0x18fc,
0x3000, 0x3028,
- 0x3068, 0x30d8,
+ 0x3060, 0x30b0,
+ 0x30b8, 0x30d8,
0x30e0, 0x30fc,
0x3140, 0x357c,
0x35a8, 0x35cc,
0x35ec, 0x35ec,
0x3600, 0x5624,
- 0x56cc, 0x575c,
+ 0x56cc, 0x56ec,
+ 0x56f4, 0x5720,
+ 0x5728, 0x575c,
0x580c, 0x5814,
- 0x5890, 0x58bc,
- 0x5940, 0x59dc,
+ 0x5890, 0x589c,
+ 0x58a4, 0x58ac,
+ 0x58b8, 0x58bc,
+ 0x5940, 0x59c8,
+ 0x59d0, 0x59dc,
0x59fc, 0x5a18,
- 0x5a60, 0x5a9c,
+ 0x5a60, 0x5a70,
+ 0x5a80, 0x5a9c,
0x5b94, 0x5bfc,
- 0x6000, 0x6040,
- 0x6058, 0x614c,
+ 0x6000, 0x6020,
+ 0x6028, 0x6040,
+ 0x6058, 0x609c,
+ 0x60a8, 0x614c,
0x7700, 0x7798,
0x77c0, 0x78fc,
- 0x7b00, 0x7c54,
- 0x7d00, 0x7efc,
+ 0x7b00, 0x7b58,
+ 0x7b60, 0x7b84,
+ 0x7b8c, 0x7c54,
+ 0x7d00, 0x7d38,
+ 0x7d40, 0x7d80,
+ 0x7d8c, 0x7ddc,
+ 0x7de4, 0x7e04,
+ 0x7e10, 0x7e1c,
+ 0x7e24, 0x7e38,
+ 0x7e40, 0x7e44,
+ 0x7e4c, 0x7e78,
+ 0x7e80, 0x7edc,
+ 0x7ee8, 0x7efc,
0x8dc0, 0x8de0,
- 0x8df8, 0x8e84,
+ 0x8df8, 0x8e04,
+ 0x8e10, 0x8e84,
0x8ea0, 0x8f84,
- 0x8fc0, 0x90f8,
- 0x9400, 0x9470,
- 0x9600, 0x96f4,
+ 0x8fc0, 0x9058,
+ 0x9060, 0x9060,
+ 0x9068, 0x90f8,
+ 0x9400, 0x9408,
+ 0x9410, 0x9470,
+ 0x9600, 0x9600,
+ 0x9608, 0x9638,
+ 0x9640, 0x96f4,
0x9800, 0x9808,
0x9820, 0x983c,
0x9850, 0x9864,
@@ -962,103 +1230,143 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x9e80, 0x9eec,
0x9f00, 0x9f6c,
0x9f80, 0xa020,
- 0xd004, 0xd03c,
+ 0xd004, 0xd004,
+ 0xd010, 0xd03c,
0xdfc0, 0xdfe0,
- 0xe000, 0x11088,
- 0x1109c, 0x11110,
- 0x11118, 0x1117c,
+ 0xe000, 0x1106c,
+ 0x11074, 0x11088,
+ 0x1109c, 0x1117c,
0x11190, 0x11204,
0x19040, 0x1906c,
0x19078, 0x19080,
- 0x1908c, 0x19124,
- 0x19150, 0x191b0,
+ 0x1908c, 0x190e8,
+ 0x190f0, 0x190f8,
+ 0x19100, 0x19110,
+ 0x19120, 0x19124,
+ 0x19150, 0x19194,
+ 0x1919c, 0x191b0,
0x191d0, 0x191e8,
0x19238, 0x19290,
- 0x193f8, 0x19474,
+ 0x193f8, 0x19428,
+ 0x19430, 0x19444,
+ 0x1944c, 0x1946c,
+ 0x19474, 0x19474,
0x19490, 0x194cc,
0x194f0, 0x194f8,
- 0x19c00, 0x19c60,
- 0x19c94, 0x19e10,
- 0x19e50, 0x19f34,
+ 0x19c00, 0x19c08,
+ 0x19c10, 0x19c60,
+ 0x19c94, 0x19ce4,
+ 0x19cf0, 0x19d40,
+ 0x19d50, 0x19d94,
+ 0x19da0, 0x19de8,
+ 0x19df0, 0x19e10,
+ 0x19e50, 0x19e90,
+ 0x19ea0, 0x19f24,
+ 0x19f34, 0x19f34,
0x19f40, 0x19f50,
- 0x19f90, 0x19fe4,
- 0x1a000, 0x1a06c,
- 0x1a0b0, 0x1a120,
- 0x1a128, 0x1a138,
+ 0x19f90, 0x19fb4,
+ 0x19fc4, 0x19fe4,
+ 0x1a000, 0x1a004,
+ 0x1a010, 0x1a06c,
+ 0x1a0b0, 0x1a0e4,
+ 0x1a0ec, 0x1a0f8,
+ 0x1a100, 0x1a108,
+ 0x1a114, 0x1a120,
+ 0x1a128, 0x1a130,
+ 0x1a138, 0x1a138,
0x1a190, 0x1a1c4,
0x1a1fc, 0x1a1fc,
0x1e008, 0x1e00c,
- 0x1e040, 0x1e04c,
+ 0x1e040, 0x1e044,
+ 0x1e04c, 0x1e04c,
0x1e284, 0x1e290,
0x1e2c0, 0x1e2c0,
0x1e2e0, 0x1e2e0,
0x1e300, 0x1e384,
0x1e3c0, 0x1e3c8,
0x1e408, 0x1e40c,
- 0x1e440, 0x1e44c,
+ 0x1e440, 0x1e444,
+ 0x1e44c, 0x1e44c,
0x1e684, 0x1e690,
0x1e6c0, 0x1e6c0,
0x1e6e0, 0x1e6e0,
0x1e700, 0x1e784,
0x1e7c0, 0x1e7c8,
0x1e808, 0x1e80c,
- 0x1e840, 0x1e84c,
+ 0x1e840, 0x1e844,
+ 0x1e84c, 0x1e84c,
0x1ea84, 0x1ea90,
0x1eac0, 0x1eac0,
0x1eae0, 0x1eae0,
0x1eb00, 0x1eb84,
0x1ebc0, 0x1ebc8,
0x1ec08, 0x1ec0c,
- 0x1ec40, 0x1ec4c,
+ 0x1ec40, 0x1ec44,
+ 0x1ec4c, 0x1ec4c,
0x1ee84, 0x1ee90,
0x1eec0, 0x1eec0,
0x1eee0, 0x1eee0,
0x1ef00, 0x1ef84,
0x1efc0, 0x1efc8,
0x1f008, 0x1f00c,
- 0x1f040, 0x1f04c,
+ 0x1f040, 0x1f044,
+ 0x1f04c, 0x1f04c,
0x1f284, 0x1f290,
0x1f2c0, 0x1f2c0,
0x1f2e0, 0x1f2e0,
0x1f300, 0x1f384,
0x1f3c0, 0x1f3c8,
0x1f408, 0x1f40c,
- 0x1f440, 0x1f44c,
+ 0x1f440, 0x1f444,
+ 0x1f44c, 0x1f44c,
0x1f684, 0x1f690,
0x1f6c0, 0x1f6c0,
0x1f6e0, 0x1f6e0,
0x1f700, 0x1f784,
0x1f7c0, 0x1f7c8,
0x1f808, 0x1f80c,
- 0x1f840, 0x1f84c,
+ 0x1f840, 0x1f844,
+ 0x1f84c, 0x1f84c,
0x1fa84, 0x1fa90,
0x1fac0, 0x1fac0,
0x1fae0, 0x1fae0,
0x1fb00, 0x1fb84,
0x1fbc0, 0x1fbc8,
0x1fc08, 0x1fc0c,
- 0x1fc40, 0x1fc4c,
+ 0x1fc40, 0x1fc44,
+ 0x1fc4c, 0x1fc4c,
0x1fe84, 0x1fe90,
0x1fec0, 0x1fec0,
0x1fee0, 0x1fee0,
0x1ff00, 0x1ff84,
0x1ffc0, 0x1ffc8,
0x30000, 0x30030,
+ 0x30038, 0x30038,
+ 0x30040, 0x30040,
0x30100, 0x30144,
- 0x30190, 0x301d0,
+ 0x30190, 0x301a0,
+ 0x301a8, 0x301b8,
+ 0x301c4, 0x301c8,
+ 0x301d0, 0x301d0,
0x30200, 0x30318,
- 0x30400, 0x3052c,
+ 0x30400, 0x304b4,
+ 0x304c0, 0x3052c,
0x30540, 0x3061c,
- 0x30800, 0x30834,
+ 0x30800, 0x30828,
+ 0x30834, 0x30834,
0x308c0, 0x30908,
0x30910, 0x309ac,
- 0x30a00, 0x30a2c,
+ 0x30a00, 0x30a14,
+ 0x30a1c, 0x30a2c,
0x30a44, 0x30a50,
- 0x30a74, 0x30c24,
+ 0x30a74, 0x30a74,
+ 0x30a7c, 0x30afc,
+ 0x30b08, 0x30c24,
0x30d00, 0x30d00,
0x30d08, 0x30d14,
0x30d1c, 0x30d20,
- 0x30d3c, 0x30d50,
+ 0x30d3c, 0x30d3c,
+ 0x30d48, 0x30d50,
0x31200, 0x3120c,
0x31220, 0x31220,
0x31240, 0x31240,
@@ -1078,27 +1386,65 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x322c8, 0x322fc,
0x32600, 0x32630,
0x32a00, 0x32abc,
- 0x32b00, 0x32b70,
- 0x33000, 0x33048,
- 0x33060, 0x3309c,
- 0x330f0, 0x33148,
- 0x33160, 0x3319c,
- 0x331f0, 0x332e4,
- 0x332f8, 0x333e4,
- 0x333f8, 0x33448,
- 0x33460, 0x3349c,
- 0x334f0, 0x33548,
- 0x33560, 0x3359c,
- 0x335f0, 0x336e4,
- 0x336f8, 0x337e4,
+ 0x32b00, 0x32b10,
+ 0x32b20, 0x32b30,
+ 0x32b40, 0x32b50,
+ 0x32b60, 0x32b70,
+ 0x33000, 0x33028,
+ 0x33030, 0x33048,
+ 0x33060, 0x33068,
+ 0x33070, 0x3309c,
+ 0x330f0, 0x33128,
+ 0x33130, 0x33148,
+ 0x33160, 0x33168,
+ 0x33170, 0x3319c,
+ 0x331f0, 0x33238,
+ 0x33240, 0x33240,
+ 0x33248, 0x33250,
+ 0x3325c, 0x33264,
+ 0x33270, 0x332b8,
+ 0x332c0, 0x332e4,
+ 0x332f8, 0x33338,
+ 0x33340, 0x33340,
+ 0x33348, 0x33350,
+ 0x3335c, 0x33364,
+ 0x33370, 0x333b8,
+ 0x333c0, 0x333e4,
+ 0x333f8, 0x33428,
+ 0x33430, 0x33448,
+ 0x33460, 0x33468,
+ 0x33470, 0x3349c,
+ 0x334f0, 0x33528,
+ 0x33530, 0x33548,
+ 0x33560, 0x33568,
+ 0x33570, 0x3359c,
+ 0x335f0, 0x33638,
+ 0x33640, 0x33640,
+ 0x33648, 0x33650,
+ 0x3365c, 0x33664,
+ 0x33670, 0x336b8,
+ 0x336c0, 0x336e4,
+ 0x336f8, 0x33738,
+ 0x33740, 0x33740,
+ 0x33748, 0x33750,
+ 0x3375c, 0x33764,
+ 0x33770, 0x337b8,
+ 0x337c0, 0x337e4,
0x337f8, 0x337fc,
0x33814, 0x33814,
0x3382c, 0x3382c,
0x33880, 0x3388c,
0x338e8, 0x338ec,
- 0x33900, 0x33948,
- 0x33960, 0x3399c,
- 0x339f0, 0x33ae4,
+ 0x33900, 0x33928,
+ 0x33930, 0x33948,
+ 0x33960, 0x33968,
+ 0x33970, 0x3399c,
+ 0x339f0, 0x33a38,
+ 0x33a40, 0x33a40,
+ 0x33a48, 0x33a50,
+ 0x33a5c, 0x33a64,
+ 0x33a70, 0x33ab8,
+ 0x33ac0, 0x33ae4,
0x33af8, 0x33b10,
0x33b28, 0x33b28,
0x33b3c, 0x33b50,
@@ -1107,21 +1453,32 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x33c3c, 0x33c50,
0x33cf0, 0x33cfc,
0x34000, 0x34030,
+ 0x34038, 0x34038,
+ 0x34040, 0x34040,
0x34100, 0x34144,
- 0x34190, 0x341d0,
+ 0x34190, 0x341a0,
+ 0x341a8, 0x341b8,
+ 0x341c4, 0x341c8,
+ 0x341d0, 0x341d0,
0x34200, 0x34318,
- 0x34400, 0x3452c,
+ 0x34400, 0x344b4,
+ 0x344c0, 0x3452c,
0x34540, 0x3461c,
- 0x34800, 0x34834,
+ 0x34800, 0x34828,
+ 0x34834, 0x34834,
0x348c0, 0x34908,
0x34910, 0x349ac,
- 0x34a00, 0x34a2c,
+ 0x34a00, 0x34a14,
+ 0x34a1c, 0x34a2c,
0x34a44, 0x34a50,
- 0x34a74, 0x34c24,
+ 0x34a74, 0x34a74,
+ 0x34a7c, 0x34afc,
+ 0x34b08, 0x34c24,
0x34d00, 0x34d00,
0x34d08, 0x34d14,
0x34d1c, 0x34d20,
- 0x34d3c, 0x34d50,
+ 0x34d3c, 0x34d3c,
+ 0x34d48, 0x34d50,
0x35200, 0x3520c,
0x35220, 0x35220,
0x35240, 0x35240,
@@ -1141,27 +1498,65 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x362c8, 0x362fc,
0x36600, 0x36630,
0x36a00, 0x36abc,
- 0x36b00, 0x36b70,
- 0x37000, 0x37048,
- 0x37060, 0x3709c,
- 0x370f0, 0x37148,
- 0x37160, 0x3719c,
- 0x371f0, 0x372e4,
- 0x372f8, 0x373e4,
- 0x373f8, 0x37448,
- 0x37460, 0x3749c,
- 0x374f0, 0x37548,
- 0x37560, 0x3759c,
- 0x375f0, 0x376e4,
- 0x376f8, 0x377e4,
+ 0x36b00, 0x36b10,
+ 0x36b20, 0x36b30,
+ 0x36b40, 0x36b50,
+ 0x36b60, 0x36b70,
+ 0x37000, 0x37028,
+ 0x37030, 0x37048,
+ 0x37060, 0x37068,
+ 0x37070, 0x3709c,
+ 0x370f0, 0x37128,
+ 0x37130, 0x37148,
+ 0x37160, 0x37168,
+ 0x37170, 0x3719c,
+ 0x371f0, 0x37238,
+ 0x37240, 0x37240,
+ 0x37248, 0x37250,
+ 0x3725c, 0x37264,
+ 0x37270, 0x372b8,
+ 0x372c0, 0x372e4,
+ 0x372f8, 0x37338,
+ 0x37340, 0x37340,
+ 0x37348, 0x37350,
+ 0x3735c, 0x37364,
+ 0x37370, 0x373b8,
+ 0x373c0, 0x373e4,
+ 0x373f8, 0x37428,
+ 0x37430, 0x37448,
+ 0x37460, 0x37468,
+ 0x37470, 0x3749c,
+ 0x374f0, 0x37528,
+ 0x37530, 0x37548,
+ 0x37560, 0x37568,
+ 0x37570, 0x3759c,
+ 0x375f0, 0x37638,
+ 0x37640, 0x37640,
+ 0x37648, 0x37650,
+ 0x3765c, 0x37664,
+ 0x37670, 0x376b8,
+ 0x376c0, 0x376e4,
+ 0x376f8, 0x37738,
+ 0x37740, 0x37740,
+ 0x37748, 0x37750,
+ 0x3775c, 0x37764,
+ 0x37770, 0x377b8,
+ 0x377c0, 0x377e4,
0x377f8, 0x377fc,
0x37814, 0x37814,
0x3782c, 0x3782c,
0x37880, 0x3788c,
0x378e8, 0x378ec,
- 0x37900, 0x37948,
- 0x37960, 0x3799c,
- 0x379f0, 0x37ae4,
+ 0x37900, 0x37928,
+ 0x37930, 0x37948,
+ 0x37960, 0x37968,
+ 0x37970, 0x3799c,
+ 0x379f0, 0x37a38,
+ 0x37a40, 0x37a40,
+ 0x37a48, 0x37a50,
+ 0x37a5c, 0x37a64,
+ 0x37a70, 0x37ab8,
+ 0x37ac0, 0x37ae4,
0x37af8, 0x37b10,
0x37b28, 0x37b28,
0x37b3c, 0x37b50,
@@ -1170,21 +1565,32 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x37c3c, 0x37c50,
0x37cf0, 0x37cfc,
0x38000, 0x38030,
+ 0x38038, 0x38038,
+ 0x38040, 0x38040,
0x38100, 0x38144,
- 0x38190, 0x381d0,
+ 0x38190, 0x381a0,
+ 0x381a8, 0x381b8,
+ 0x381c4, 0x381c8,
+ 0x381d0, 0x381d0,
0x38200, 0x38318,
- 0x38400, 0x3852c,
+ 0x38400, 0x384b4,
+ 0x384c0, 0x3852c,
0x38540, 0x3861c,
- 0x38800, 0x38834,
+ 0x38800, 0x38828,
+ 0x38834, 0x38834,
0x388c0, 0x38908,
0x38910, 0x389ac,
- 0x38a00, 0x38a2c,
+ 0x38a00, 0x38a14,
+ 0x38a1c, 0x38a2c,
0x38a44, 0x38a50,
- 0x38a74, 0x38c24,
+ 0x38a74, 0x38a74,
+ 0x38a7c, 0x38afc,
+ 0x38b08, 0x38c24,
0x38d00, 0x38d00,
0x38d08, 0x38d14,
0x38d1c, 0x38d20,
- 0x38d3c, 0x38d50,
+ 0x38d3c, 0x38d3c,
+ 0x38d48, 0x38d50,
0x39200, 0x3920c,
0x39220, 0x39220,
0x39240, 0x39240,
@@ -1204,27 +1610,65 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x3a2c8, 0x3a2fc,
0x3a600, 0x3a630,
0x3aa00, 0x3aabc,
- 0x3ab00, 0x3ab70,
- 0x3b000, 0x3b048,
- 0x3b060, 0x3b09c,
- 0x3b0f0, 0x3b148,
- 0x3b160, 0x3b19c,
- 0x3b1f0, 0x3b2e4,
- 0x3b2f8, 0x3b3e4,
- 0x3b3f8, 0x3b448,
- 0x3b460, 0x3b49c,
- 0x3b4f0, 0x3b548,
- 0x3b560, 0x3b59c,
- 0x3b5f0, 0x3b6e4,
- 0x3b6f8, 0x3b7e4,
+ 0x3ab00, 0x3ab10,
+ 0x3ab20, 0x3ab30,
+ 0x3ab40, 0x3ab50,
+ 0x3ab60, 0x3ab70,
+ 0x3b000, 0x3b028,
+ 0x3b030, 0x3b048,
+ 0x3b060, 0x3b068,
+ 0x3b070, 0x3b09c,
+ 0x3b0f0, 0x3b128,
+ 0x3b130, 0x3b148,
+ 0x3b160, 0x3b168,
+ 0x3b170, 0x3b19c,
+ 0x3b1f0, 0x3b238,
+ 0x3b240, 0x3b240,
+ 0x3b248, 0x3b250,
+ 0x3b25c, 0x3b264,
+ 0x3b270, 0x3b2b8,
+ 0x3b2c0, 0x3b2e4,
+ 0x3b2f8, 0x3b338,
+ 0x3b340, 0x3b340,
+ 0x3b348, 0x3b350,
+ 0x3b35c, 0x3b364,
+ 0x3b370, 0x3b3b8,
+ 0x3b3c0, 0x3b3e4,
+ 0x3b3f8, 0x3b428,
+ 0x3b430, 0x3b448,
+ 0x3b460, 0x3b468,
+ 0x3b470, 0x3b49c,
+ 0x3b4f0, 0x3b528,
+ 0x3b530, 0x3b548,
+ 0x3b560, 0x3b568,
+ 0x3b570, 0x3b59c,
+ 0x3b5f0, 0x3b638,
+ 0x3b640, 0x3b640,
+ 0x3b648, 0x3b650,
+ 0x3b65c, 0x3b664,
+ 0x3b670, 0x3b6b8,
+ 0x3b6c0, 0x3b6e4,
+ 0x3b6f8, 0x3b738,
+ 0x3b740, 0x3b740,
+ 0x3b748, 0x3b750,
+ 0x3b75c, 0x3b764,
+ 0x3b770, 0x3b7b8,
+ 0x3b7c0, 0x3b7e4,
0x3b7f8, 0x3b7fc,
0x3b814, 0x3b814,
0x3b82c, 0x3b82c,
0x3b880, 0x3b88c,
0x3b8e8, 0x3b8ec,
- 0x3b900, 0x3b948,
- 0x3b960, 0x3b99c,
- 0x3b9f0, 0x3bae4,
+ 0x3b900, 0x3b928,
+ 0x3b930, 0x3b948,
+ 0x3b960, 0x3b968,
+ 0x3b970, 0x3b99c,
+ 0x3b9f0, 0x3ba38,
+ 0x3ba40, 0x3ba40,
+ 0x3ba48, 0x3ba50,
+ 0x3ba5c, 0x3ba64,
+ 0x3ba70, 0x3bab8,
+ 0x3bac0, 0x3bae4,
0x3baf8, 0x3bb10,
0x3bb28, 0x3bb28,
0x3bb3c, 0x3bb50,
@@ -1233,21 +1677,32 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x3bc3c, 0x3bc50,
0x3bcf0, 0x3bcfc,
0x3c000, 0x3c030,
+ 0x3c038, 0x3c038,
+ 0x3c040, 0x3c040,
0x3c100, 0x3c144,
- 0x3c190, 0x3c1d0,
+ 0x3c190, 0x3c1a0,
+ 0x3c1a8, 0x3c1b8,
+ 0x3c1c4, 0x3c1c8,
+ 0x3c1d0, 0x3c1d0,
0x3c200, 0x3c318,
- 0x3c400, 0x3c52c,
+ 0x3c400, 0x3c4b4,
+ 0x3c4c0, 0x3c52c,
0x3c540, 0x3c61c,
- 0x3c800, 0x3c834,
+ 0x3c800, 0x3c828,
+ 0x3c834, 0x3c834,
0x3c8c0, 0x3c908,
0x3c910, 0x3c9ac,
- 0x3ca00, 0x3ca2c,
+ 0x3ca00, 0x3ca14,
+ 0x3ca1c, 0x3ca2c,
0x3ca44, 0x3ca50,
- 0x3ca74, 0x3cc24,
+ 0x3ca74, 0x3ca74,
+ 0x3ca7c, 0x3cafc,
+ 0x3cb08, 0x3cc24,
0x3cd00, 0x3cd00,
0x3cd08, 0x3cd14,
0x3cd1c, 0x3cd20,
- 0x3cd3c, 0x3cd50,
+ 0x3cd3c, 0x3cd3c,
+ 0x3cd48, 0x3cd50,
0x3d200, 0x3d20c,
0x3d220, 0x3d220,
0x3d240, 0x3d240,
@@ -1267,27 +1722,65 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x3e2c8, 0x3e2fc,
0x3e600, 0x3e630,
0x3ea00, 0x3eabc,
- 0x3eb00, 0x3eb70,
- 0x3f000, 0x3f048,
- 0x3f060, 0x3f09c,
- 0x3f0f0, 0x3f148,
- 0x3f160, 0x3f19c,
- 0x3f1f0, 0x3f2e4,
- 0x3f2f8, 0x3f3e4,
- 0x3f3f8, 0x3f448,
- 0x3f460, 0x3f49c,
- 0x3f4f0, 0x3f548,
- 0x3f560, 0x3f59c,
- 0x3f5f0, 0x3f6e4,
- 0x3f6f8, 0x3f7e4,
+ 0x3eb00, 0x3eb10,
+ 0x3eb20, 0x3eb30,
+ 0x3eb40, 0x3eb50,
+ 0x3eb60, 0x3eb70,
+ 0x3f000, 0x3f028,
+ 0x3f030, 0x3f048,
+ 0x3f060, 0x3f068,
+ 0x3f070, 0x3f09c,
+ 0x3f0f0, 0x3f128,
+ 0x3f130, 0x3f148,
+ 0x3f160, 0x3f168,
+ 0x3f170, 0x3f19c,
+ 0x3f1f0, 0x3f238,
+ 0x3f240, 0x3f240,
+ 0x3f248, 0x3f250,
+ 0x3f25c, 0x3f264,
+ 0x3f270, 0x3f2b8,
+ 0x3f2c0, 0x3f2e4,
+ 0x3f2f8, 0x3f338,
+ 0x3f340, 0x3f340,
+ 0x3f348, 0x3f350,
+ 0x3f35c, 0x3f364,
+ 0x3f370, 0x3f3b8,
+ 0x3f3c0, 0x3f3e4,
+ 0x3f3f8, 0x3f428,
+ 0x3f430, 0x3f448,
+ 0x3f460, 0x3f468,
+ 0x3f470, 0x3f49c,
+ 0x3f4f0, 0x3f528,
+ 0x3f530, 0x3f548,
+ 0x3f560, 0x3f568,
+ 0x3f570, 0x3f59c,
+ 0x3f5f0, 0x3f638,
+ 0x3f640, 0x3f640,
+ 0x3f648, 0x3f650,
+ 0x3f65c, 0x3f664,
+ 0x3f670, 0x3f6b8,
+ 0x3f6c0, 0x3f6e4,
+ 0x3f6f8, 0x3f738,
+ 0x3f740, 0x3f740,
+ 0x3f748, 0x3f750,
+ 0x3f75c, 0x3f764,
+ 0x3f770, 0x3f7b8,
+ 0x3f7c0, 0x3f7e4,
0x3f7f8, 0x3f7fc,
0x3f814, 0x3f814,
0x3f82c, 0x3f82c,
0x3f880, 0x3f88c,
0x3f8e8, 0x3f8ec,
- 0x3f900, 0x3f948,
- 0x3f960, 0x3f99c,
- 0x3f9f0, 0x3fae4,
+ 0x3f900, 0x3f928,
+ 0x3f930, 0x3f948,
+ 0x3f960, 0x3f968,
+ 0x3f970, 0x3f99c,
+ 0x3f9f0, 0x3fa38,
+ 0x3fa40, 0x3fa40,
+ 0x3fa48, 0x3fa50,
+ 0x3fa5c, 0x3fa64,
+ 0x3fa70, 0x3fab8,
+ 0x3fac0, 0x3fae4,
0x3faf8, 0x3fb10,
0x3fb28, 0x3fb28,
0x3fb3c, 0x3fb50,
@@ -1296,108 +1789,224 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x3fc3c, 0x3fc50,
0x3fcf0, 0x3fcfc,
0x40000, 0x4000c,
- 0x40040, 0x40068,
- 0x4007c, 0x40144,
+ 0x40040, 0x40050,
+ 0x40060, 0x40068,
+ 0x4007c, 0x4008c,
+ 0x40094, 0x400b0,
+ 0x400c0, 0x40144,
0x40180, 0x4018c,
- 0x40200, 0x40298,
- 0x402ac, 0x4033c,
+ 0x40200, 0x40254,
+ 0x40260, 0x40264,
+ 0x40270, 0x40288,
+ 0x40290, 0x40298,
+ 0x402ac, 0x402c8,
+ 0x402d0, 0x402e0,
+ 0x402f0, 0x402f0,
+ 0x40300, 0x4033c,
0x403f8, 0x403fc,
0x41304, 0x413c4,
- 0x41400, 0x4141c,
+ 0x41400, 0x4140c,
+ 0x41414, 0x4141c,
0x41480, 0x414d0,
- 0x44000, 0x44078,
- 0x440c0, 0x44278,
- 0x442c0, 0x44478,
- 0x444c0, 0x44678,
- 0x446c0, 0x44878,
- 0x448c0, 0x449fc,
- 0x45000, 0x45068,
+ 0x44000, 0x44054,
+ 0x4405c, 0x44078,
+ 0x440c0, 0x44174,
+ 0x44180, 0x441ac,
+ 0x441b4, 0x441b8,
+ 0x441c0, 0x44254,
+ 0x4425c, 0x44278,
+ 0x442c0, 0x44374,
+ 0x44380, 0x443ac,
+ 0x443b4, 0x443b8,
+ 0x443c0, 0x44454,
+ 0x4445c, 0x44478,
+ 0x444c0, 0x44574,
+ 0x44580, 0x445ac,
+ 0x445b4, 0x445b8,
+ 0x445c0, 0x44654,
+ 0x4465c, 0x44678,
+ 0x446c0, 0x44774,
+ 0x44780, 0x447ac,
+ 0x447b4, 0x447b8,
+ 0x447c0, 0x44854,
+ 0x4485c, 0x44878,
+ 0x448c0, 0x44974,
+ 0x44980, 0x449ac,
+ 0x449b4, 0x449b8,
+ 0x449c0, 0x449fc,
+ 0x45000, 0x45004,
+ 0x45010, 0x45030,
+ 0x45040, 0x45060,
+ 0x45068, 0x45068,
0x45080, 0x45084,
0x450a0, 0x450b0,
- 0x45200, 0x45268,
+ 0x45200, 0x45204,
+ 0x45210, 0x45230,
+ 0x45240, 0x45260,
+ 0x45268, 0x45268,
0x45280, 0x45284,
0x452a0, 0x452b0,
0x460c0, 0x460e4,
- 0x47000, 0x4708c,
+ 0x47000, 0x4703c,
+ 0x47044, 0x4708c,
0x47200, 0x47250,
- 0x47400, 0x47420,
+ 0x47400, 0x47408,
+ 0x47414, 0x47420,
0x47600, 0x47618,
0x47800, 0x47814,
0x48000, 0x4800c,
- 0x48040, 0x48068,
- 0x4807c, 0x48144,
+ 0x48040, 0x48050,
+ 0x48060, 0x48068,
+ 0x4807c, 0x4808c,
+ 0x48094, 0x480b0,
+ 0x480c0, 0x48144,
0x48180, 0x4818c,
- 0x48200, 0x48298,
- 0x482ac, 0x4833c,
+ 0x48200, 0x48254,
+ 0x48260, 0x48264,
+ 0x48270, 0x48288,
+ 0x48290, 0x48298,
+ 0x482ac, 0x482c8,
+ 0x482d0, 0x482e0,
+ 0x482f0, 0x482f0,
+ 0x48300, 0x4833c,
0x483f8, 0x483fc,
0x49304, 0x493c4,
- 0x49400, 0x4941c,
+ 0x49400, 0x4940c,
+ 0x49414, 0x4941c,
0x49480, 0x494d0,
- 0x4c000, 0x4c078,
- 0x4c0c0, 0x4c278,
- 0x4c2c0, 0x4c478,
- 0x4c4c0, 0x4c678,
- 0x4c6c0, 0x4c878,
- 0x4c8c0, 0x4c9fc,
- 0x4d000, 0x4d068,
+ 0x4c000, 0x4c054,
+ 0x4c05c, 0x4c078,
+ 0x4c0c0, 0x4c174,
+ 0x4c180, 0x4c1ac,
+ 0x4c1b4, 0x4c1b8,
+ 0x4c1c0, 0x4c254,
+ 0x4c25c, 0x4c278,
+ 0x4c2c0, 0x4c374,
+ 0x4c380, 0x4c3ac,
+ 0x4c3b4, 0x4c3b8,
+ 0x4c3c0, 0x4c454,
+ 0x4c45c, 0x4c478,
+ 0x4c4c0, 0x4c574,
+ 0x4c580, 0x4c5ac,
+ 0x4c5b4, 0x4c5b8,
+ 0x4c5c0, 0x4c654,
+ 0x4c65c, 0x4c678,
+ 0x4c6c0, 0x4c774,
+ 0x4c780, 0x4c7ac,
+ 0x4c7b4, 0x4c7b8,
+ 0x4c7c0, 0x4c854,
+ 0x4c85c, 0x4c878,
+ 0x4c8c0, 0x4c974,
+ 0x4c980, 0x4c9ac,
+ 0x4c9b4, 0x4c9b8,
+ 0x4c9c0, 0x4c9fc,
+ 0x4d000, 0x4d004,
+ 0x4d010, 0x4d030,
+ 0x4d040, 0x4d060,
+ 0x4d068, 0x4d068,
0x4d080, 0x4d084,
0x4d0a0, 0x4d0b0,
- 0x4d200, 0x4d268,
+ 0x4d200, 0x4d204,
+ 0x4d210, 0x4d230,
+ 0x4d240, 0x4d260,
+ 0x4d268, 0x4d268,
0x4d280, 0x4d284,
0x4d2a0, 0x4d2b0,
0x4e0c0, 0x4e0e4,
- 0x4f000, 0x4f08c,
+ 0x4f000, 0x4f03c,
+ 0x4f044, 0x4f08c,
0x4f200, 0x4f250,
- 0x4f400, 0x4f420,
+ 0x4f400, 0x4f408,
+ 0x4f414, 0x4f420,
0x4f600, 0x4f618,
0x4f800, 0x4f814,
- 0x50000, 0x500cc,
+ 0x50000, 0x50084,
+ 0x50090, 0x500cc,
0x50400, 0x50400,
- 0x50800, 0x508cc,
+ 0x50800, 0x50884,
+ 0x50890, 0x508cc,
0x50c00, 0x50c00,
0x51000, 0x5101c,
0x51300, 0x51308,
};
static const unsigned int t6_reg_ranges[] = {
- 0x1008, 0x1124,
- 0x1138, 0x114c,
- 0x1180, 0x11b4,
+ 0x1008, 0x101c,
+ 0x1024, 0x10a8,
+ 0x10b4, 0x10f8,
+ 0x1100, 0x1114,
+ 0x111c, 0x112c,
+ 0x1138, 0x113c,
+ 0x1144, 0x114c,
+ 0x1180, 0x1184,
+ 0x1190, 0x1194,
+ 0x11a0, 0x11a4,
+ 0x11b0, 0x11b4,
0x11fc, 0x1254,
0x1280, 0x133c,
0x1800, 0x18fc,
0x3000, 0x302c,
- 0x3060, 0x30d8,
+ 0x3060, 0x30b0,
+ 0x30b8, 0x30d8,
0x30e0, 0x30fc,
0x3140, 0x357c,
0x35a8, 0x35cc,
0x35ec, 0x35ec,
0x3600, 0x5624,
- 0x56cc, 0x575c,
+ 0x56cc, 0x56ec,
+ 0x56f4, 0x5720,
+ 0x5728, 0x575c,
0x580c, 0x5814,
- 0x5890, 0x58bc,
+ 0x5890, 0x589c,
+ 0x58a4, 0x58ac,
+ 0x58b8, 0x58bc,
0x5940, 0x595c,
0x5980, 0x598c,
- 0x59b0, 0x59dc,
+ 0x59b0, 0x59c8,
+ 0x59d0, 0x59dc,
0x59fc, 0x5a18,
0x5a60, 0x5a6c,
- 0x5a80, 0x5a9c,
+ 0x5a80, 0x5a8c,
+ 0x5a94, 0x5a9c,
0x5b94, 0x5bfc,
- 0x5c10, 0x5ec0,
+ 0x5c10, 0x5e48,
+ 0x5e50, 0x5e94,
+ 0x5ea0, 0x5eb0,
+ 0x5ec0, 0x5ec0,
0x5ec8, 0x5ecc,
- 0x6000, 0x6040,
- 0x6058, 0x619c,
+ 0x6000, 0x6020,
+ 0x6028, 0x6040,
+ 0x6058, 0x609c,
+ 0x60a8, 0x619c,
0x7700, 0x7798,
0x77c0, 0x7880,
0x78cc, 0x78fc,
- 0x7b00, 0x7c54,
- 0x7d00, 0x7efc,
+ 0x7b00, 0x7b58,
+ 0x7b60, 0x7b84,
+ 0x7b8c, 0x7c54,
+ 0x7d00, 0x7d38,
+ 0x7d40, 0x7d84,
+ 0x7d8c, 0x7ddc,
+ 0x7de4, 0x7e04,
+ 0x7e10, 0x7e1c,
+ 0x7e24, 0x7e38,
+ 0x7e40, 0x7e44,
+ 0x7e4c, 0x7e78,
+ 0x7e80, 0x7edc,
+ 0x7ee8, 0x7efc,
0x8dc0, 0x8de4,
- 0x8df8, 0x8e84,
+ 0x8df8, 0x8e04,
+ 0x8e10, 0x8e84,
0x8ea0, 0x8f88,
- 0x8fb8, 0x9124,
+ 0x8fb8, 0x9058,
+ 0x9060, 0x9060,
+ 0x9068, 0x90f8,
+ 0x9100, 0x9124,
0x9400, 0x9470,
- 0x9600, 0x971c,
+ 0x9600, 0x9600,
+ 0x9608, 0x9638,
+ 0x9640, 0x9704,
+ 0x9710, 0x971c,
0x9800, 0x9808,
0x9820, 0x983c,
0x9850, 0x9864,
@@ -1411,109 +2020,170 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x9f80, 0xa020,
0xd004, 0xd03c,
0xd100, 0xd118,
- 0xd200, 0xd31c,
+ 0xd200, 0xd214,
+ 0xd220, 0xd234,
+ 0xd240, 0xd254,
+ 0xd260, 0xd274,
+ 0xd280, 0xd294,
+ 0xd2a0, 0xd2b4,
+ 0xd2c0, 0xd2d4,
+ 0xd2e0, 0xd2f4,
+ 0xd300, 0xd31c,
0xdfc0, 0xdfe0,
0xe000, 0xf008,
0x11000, 0x11014,
- 0x11048, 0x1117c,
- 0x11190, 0x11270,
+ 0x11048, 0x1106c,
+ 0x11074, 0x11088,
+ 0x11098, 0x11120,
+ 0x1112c, 0x1117c,
+ 0x11190, 0x112e0,
0x11300, 0x1130c,
0x12000, 0x1206c,
0x19040, 0x1906c,
0x19078, 0x19080,
- 0x1908c, 0x19124,
- 0x19150, 0x191b0,
+ 0x1908c, 0x190e8,
+ 0x190f0, 0x190f8,
+ 0x19100, 0x19110,
+ 0x19120, 0x19124,
+ 0x19150, 0x19194,
+ 0x1919c, 0x191b0,
0x191d0, 0x191e8,
- 0x19238, 0x192bc,
- 0x193f8, 0x19474,
+ 0x19238, 0x192b0,
+ 0x192bc, 0x192bc,
+ 0x19348, 0x1934c,
+ 0x193f8, 0x19418,
+ 0x19420, 0x19428,
+ 0x19430, 0x19444,
+ 0x1944c, 0x1946c,
+ 0x19474, 0x19474,
0x19490, 0x194cc,
0x194f0, 0x194f8,
- 0x19c00, 0x19c80,
- 0x19c94, 0x19cbc,
- 0x19ce4, 0x19d28,
+ 0x19c00, 0x19c48,
+ 0x19c50, 0x19c80,
+ 0x19c94, 0x19c98,
+ 0x19ca0, 0x19cbc,
+ 0x19ce4, 0x19ce4,
+ 0x19cf0, 0x19cf8,
+ 0x19d00, 0x19d28,
0x19d50, 0x19d78,
- 0x19d94, 0x19dc8,
+ 0x19d94, 0x19d98,
+ 0x19da0, 0x19dc8,
0x19df0, 0x19e10,
0x19e50, 0x19e6c,
- 0x19ea0, 0x19f34,
+ 0x19ea0, 0x19ebc,
+ 0x19ec4, 0x19ef4,
+ 0x19f04, 0x19f2c,
+ 0x19f34, 0x19f34,
0x19f40, 0x19f50,
0x19f90, 0x19fac,
- 0x19fc4, 0x19fe4,
- 0x1a000, 0x1a06c,
- 0x1a0b0, 0x1a120,
- 0x1a128, 0x1a138,
+ 0x19fc4, 0x19fc8,
+ 0x19fd0, 0x19fe4,
+ 0x1a000, 0x1a004,
+ 0x1a010, 0x1a06c,
+ 0x1a0b0, 0x1a0e4,
+ 0x1a0ec, 0x1a0f8,
+ 0x1a100, 0x1a108,
+ 0x1a114, 0x1a120,
+ 0x1a128, 0x1a130,
+ 0x1a138, 0x1a138,
0x1a190, 0x1a1c4,
0x1a1fc, 0x1a1fc,
0x1e008, 0x1e00c,
- 0x1e040, 0x1e04c,
+ 0x1e040, 0x1e044,
+ 0x1e04c, 0x1e04c,
0x1e284, 0x1e290,
0x1e2c0, 0x1e2c0,
0x1e2e0, 0x1e2e0,
0x1e300, 0x1e384,
0x1e3c0, 0x1e3c8,
0x1e408, 0x1e40c,
- 0x1e440, 0x1e44c,
+ 0x1e440, 0x1e444,
+ 0x1e44c, 0x1e44c,
0x1e684, 0x1e690,
0x1e6c0, 0x1e6c0,
0x1e6e0, 0x1e6e0,
0x1e700, 0x1e784,
0x1e7c0, 0x1e7c8,
0x1e808, 0x1e80c,
- 0x1e840, 0x1e84c,
+ 0x1e840, 0x1e844,
+ 0x1e84c, 0x1e84c,
0x1ea84, 0x1ea90,
0x1eac0, 0x1eac0,
0x1eae0, 0x1eae0,
0x1eb00, 0x1eb84,
0x1ebc0, 0x1ebc8,
0x1ec08, 0x1ec0c,
- 0x1ec40, 0x1ec4c,
+ 0x1ec40, 0x1ec44,
+ 0x1ec4c, 0x1ec4c,
0x1ee84, 0x1ee90,
0x1eec0, 0x1eec0,
0x1eee0, 0x1eee0,
0x1ef00, 0x1ef84,
0x1efc0, 0x1efc8,
0x1f008, 0x1f00c,
- 0x1f040, 0x1f04c,
+ 0x1f040, 0x1f044,
+ 0x1f04c, 0x1f04c,
0x1f284, 0x1f290,
0x1f2c0, 0x1f2c0,
0x1f2e0, 0x1f2e0,
0x1f300, 0x1f384,
0x1f3c0, 0x1f3c8,
0x1f408, 0x1f40c,
- 0x1f440, 0x1f44c,
+ 0x1f440, 0x1f444,
+ 0x1f44c, 0x1f44c,
0x1f684, 0x1f690,
0x1f6c0, 0x1f6c0,
0x1f6e0, 0x1f6e0,
0x1f700, 0x1f784,
0x1f7c0, 0x1f7c8,
0x1f808, 0x1f80c,
- 0x1f840, 0x1f84c,
+ 0x1f840, 0x1f844,
+ 0x1f84c, 0x1f84c,
0x1fa84, 0x1fa90,
0x1fac0, 0x1fac0,
0x1fae0, 0x1fae0,
0x1fb00, 0x1fb84,
0x1fbc0, 0x1fbc8,
0x1fc08, 0x1fc0c,
- 0x1fc40, 0x1fc4c,
+ 0x1fc40, 0x1fc44,
+ 0x1fc4c, 0x1fc4c,
0x1fe84, 0x1fe90,
0x1fec0, 0x1fec0,
0x1fee0, 0x1fee0,
0x1ff00, 0x1ff84,
0x1ffc0, 0x1ffc8,
- 0x30000, 0x30070,
- 0x30100, 0x301d0,
+ 0x30000, 0x30030,
+ 0x30038, 0x30038,
+ 0x30040, 0x30040,
+ 0x30048, 0x30048,
+ 0x30050, 0x30050,
+ 0x3005c, 0x30060,
+ 0x30068, 0x30068,
+ 0x30070, 0x30070,
+ 0x30100, 0x30168,
+ 0x30190, 0x301a0,
+ 0x301a8, 0x301b8,
+ 0x301c4, 0x301c8,
+ 0x301d0, 0x301d0,
0x30200, 0x30320,
- 0x30400, 0x3052c,
+ 0x30400, 0x304b4,
+ 0x304c0, 0x3052c,
0x30540, 0x3061c,
- 0x30800, 0x30890,
+ 0x30800, 0x308a0,
0x308c0, 0x30908,
0x30910, 0x309b8,
0x30a00, 0x30a04,
- 0x30a0c, 0x30a2c,
+ 0x30a0c, 0x30a14,
+ 0x30a1c, 0x30a2c,
0x30a44, 0x30a50,
- 0x30a74, 0x30c24,
- 0x30d00, 0x30d3c,
- 0x30d44, 0x30d7c,
+ 0x30a74, 0x30a74,
+ 0x30a7c, 0x30afc,
+ 0x30b08, 0x30c24,
+ 0x30d00, 0x30d14,
+ 0x30d1c, 0x30d3c,
+ 0x30d44, 0x30d4c,
+ 0x30d54, 0x30d74,
+ 0x30d7c, 0x30d7c,
0x30de0, 0x30de0,
0x30e00, 0x30ed4,
0x30f00, 0x30fa4,
@@ -1542,7 +2212,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x31bb0, 0x31bb4,
0x31bc8, 0x31bd4,
0x32140, 0x3218c,
- 0x321f0, 0x32200,
+ 0x321f0, 0x321f4,
+ 0x32200, 0x32200,
0x32218, 0x32218,
0x32400, 0x32400,
0x32408, 0x3241c,
@@ -1551,46 +2222,108 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x326a8, 0x326a8,
0x326ec, 0x326ec,
0x32a00, 0x32abc,
- 0x32b00, 0x32b78,
+ 0x32b00, 0x32b38,
+ 0x32b40, 0x32b58,
+ 0x32b60, 0x32b78,
0x32c00, 0x32c00,
0x32c08, 0x32c3c,
0x32e00, 0x32e2c,
0x32f00, 0x32f2c,
- 0x33000, 0x330ac,
- 0x330c0, 0x331ac,
- 0x331c0, 0x332c4,
- 0x332e4, 0x333c4,
- 0x333e4, 0x334ac,
- 0x334c0, 0x335ac,
- 0x335c0, 0x336c4,
- 0x336e4, 0x337c4,
+ 0x33000, 0x3302c,
+ 0x33034, 0x33050,
+ 0x33058, 0x33058,
+ 0x33060, 0x3308c,
+ 0x3309c, 0x330ac,
+ 0x330c0, 0x330c0,
+ 0x330c8, 0x330d0,
+ 0x330d8, 0x330e0,
+ 0x330ec, 0x3312c,
+ 0x33134, 0x33150,
+ 0x33158, 0x33158,
+ 0x33160, 0x3318c,
+ 0x3319c, 0x331ac,
+ 0x331c0, 0x331c0,
+ 0x331c8, 0x331d0,
+ 0x331d8, 0x331e0,
+ 0x331ec, 0x33290,
+ 0x33298, 0x332c4,
+ 0x332e4, 0x33390,
+ 0x33398, 0x333c4,
+ 0x333e4, 0x3342c,
+ 0x33434, 0x33450,
+ 0x33458, 0x33458,
+ 0x33460, 0x3348c,
+ 0x3349c, 0x334ac,
+ 0x334c0, 0x334c0,
+ 0x334c8, 0x334d0,
+ 0x334d8, 0x334e0,
+ 0x334ec, 0x3352c,
+ 0x33534, 0x33550,
+ 0x33558, 0x33558,
+ 0x33560, 0x3358c,
+ 0x3359c, 0x335ac,
+ 0x335c0, 0x335c0,
+ 0x335c8, 0x335d0,
+ 0x335d8, 0x335e0,
+ 0x335ec, 0x33690,
+ 0x33698, 0x336c4,
+ 0x336e4, 0x33790,
+ 0x33798, 0x337c4,
0x337e4, 0x337fc,
0x33814, 0x33814,
0x33854, 0x33868,
0x33880, 0x3388c,
0x338c0, 0x338d0,
0x338e8, 0x338ec,
- 0x33900, 0x339ac,
- 0x339c0, 0x33ac4,
+ 0x33900, 0x3392c,
+ 0x33934, 0x33950,
+ 0x33958, 0x33958,
+ 0x33960, 0x3398c,
+ 0x3399c, 0x339ac,
+ 0x339c0, 0x339c0,
+ 0x339c8, 0x339d0,
+ 0x339d8, 0x339e0,
+ 0x339ec, 0x33a90,
+ 0x33a98, 0x33ac4,
0x33ae4, 0x33b10,
- 0x33b24, 0x33b50,
+ 0x33b24, 0x33b28,
+ 0x33b38, 0x33b50,
0x33bf0, 0x33c10,
- 0x33c24, 0x33c50,
+ 0x33c24, 0x33c28,
+ 0x33c38, 0x33c50,
0x33cf0, 0x33cfc,
- 0x34000, 0x34070,
- 0x34100, 0x341d0,
+ 0x34000, 0x34030,
+ 0x34038, 0x34038,
+ 0x34040, 0x34040,
+ 0x34048, 0x34048,
+ 0x34050, 0x34050,
+ 0x3405c, 0x34060,
+ 0x34068, 0x34068,
+ 0x34070, 0x34070,
+ 0x34100, 0x34168,
+ 0x34190, 0x341a0,
+ 0x341a8, 0x341b8,
+ 0x341c4, 0x341c8,
+ 0x341d0, 0x341d0,
0x34200, 0x34320,
- 0x34400, 0x3452c,
+ 0x34400, 0x344b4,
+ 0x344c0, 0x3452c,
0x34540, 0x3461c,
- 0x34800, 0x34890,
+ 0x34800, 0x348a0,
0x348c0, 0x34908,
0x34910, 0x349b8,
0x34a00, 0x34a04,
- 0x34a0c, 0x34a2c,
+ 0x34a0c, 0x34a14,
+ 0x34a1c, 0x34a2c,
0x34a44, 0x34a50,
- 0x34a74, 0x34c24,
- 0x34d00, 0x34d3c,
- 0x34d44, 0x34d7c,
+ 0x34a74, 0x34a74,
+ 0x34a7c, 0x34afc,
+ 0x34b08, 0x34c24,
+ 0x34d00, 0x34d14,
+ 0x34d1c, 0x34d3c,
+ 0x34d44, 0x34d4c,
+ 0x34d54, 0x34d74,
+ 0x34d7c, 0x34d7c,
0x34de0, 0x34de0,
0x34e00, 0x34ed4,
0x34f00, 0x34fa4,
@@ -1619,7 +2352,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x35bb0, 0x35bb4,
0x35bc8, 0x35bd4,
0x36140, 0x3618c,
- 0x361f0, 0x36200,
+ 0x361f0, 0x361f4,
+ 0x36200, 0x36200,
0x36218, 0x36218,
0x36400, 0x36400,
0x36408, 0x3641c,
@@ -1628,31 +2362,75 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x366a8, 0x366a8,
0x366ec, 0x366ec,
0x36a00, 0x36abc,
- 0x36b00, 0x36b78,
+ 0x36b00, 0x36b38,
+ 0x36b40, 0x36b58,
+ 0x36b60, 0x36b78,
0x36c00, 0x36c00,
0x36c08, 0x36c3c,
0x36e00, 0x36e2c,
0x36f00, 0x36f2c,
- 0x37000, 0x370ac,
- 0x370c0, 0x371ac,
- 0x371c0, 0x372c4,
- 0x372e4, 0x373c4,
- 0x373e4, 0x374ac,
- 0x374c0, 0x375ac,
- 0x375c0, 0x376c4,
- 0x376e4, 0x377c4,
+ 0x37000, 0x3702c,
+ 0x37034, 0x37050,
+ 0x37058, 0x37058,
+ 0x37060, 0x3708c,
+ 0x3709c, 0x370ac,
+ 0x370c0, 0x370c0,
+ 0x370c8, 0x370d0,
+ 0x370d8, 0x370e0,
+ 0x370ec, 0x3712c,
+ 0x37134, 0x37150,
+ 0x37158, 0x37158,
+ 0x37160, 0x3718c,
+ 0x3719c, 0x371ac,
+ 0x371c0, 0x371c0,
+ 0x371c8, 0x371d0,
+ 0x371d8, 0x371e0,
+ 0x371ec, 0x37290,
+ 0x37298, 0x372c4,
+ 0x372e4, 0x37390,
+ 0x37398, 0x373c4,
+ 0x373e4, 0x3742c,
+ 0x37434, 0x37450,
+ 0x37458, 0x37458,
+ 0x37460, 0x3748c,
+ 0x3749c, 0x374ac,
+ 0x374c0, 0x374c0,
+ 0x374c8, 0x374d0,
+ 0x374d8, 0x374e0,
+ 0x374ec, 0x3752c,
+ 0x37534, 0x37550,
+ 0x37558, 0x37558,
+ 0x37560, 0x3758c,
+ 0x3759c, 0x375ac,
+ 0x375c0, 0x375c0,
+ 0x375c8, 0x375d0,
+ 0x375d8, 0x375e0,
+ 0x375ec, 0x37690,
+ 0x37698, 0x376c4,
+ 0x376e4, 0x37790,
+ 0x37798, 0x377c4,
0x377e4, 0x377fc,
0x37814, 0x37814,
0x37854, 0x37868,
0x37880, 0x3788c,
0x378c0, 0x378d0,
0x378e8, 0x378ec,
- 0x37900, 0x379ac,
- 0x379c0, 0x37ac4,
+ 0x37900, 0x3792c,
+ 0x37934, 0x37950,
+ 0x37958, 0x37958,
+ 0x37960, 0x3798c,
+ 0x3799c, 0x379ac,
+ 0x379c0, 0x379c0,
+ 0x379c8, 0x379d0,
+ 0x379d8, 0x379e0,
+ 0x379ec, 0x37a90,
+ 0x37a98, 0x37ac4,
0x37ae4, 0x37b10,
- 0x37b24, 0x37b50,
+ 0x37b24, 0x37b28,
+ 0x37b38, 0x37b50,
0x37bf0, 0x37c10,
- 0x37c24, 0x37c50,
+ 0x37c24, 0x37c28,
+ 0x37c38, 0x37c50,
0x37cf0, 0x37cfc,
0x40040, 0x40040,
0x40080, 0x40084,
@@ -1664,36 +2442,62 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x40280, 0x40280,
0x40304, 0x40304,
0x40330, 0x4033c,
- 0x41304, 0x413dc,
- 0x41400, 0x4141c,
+ 0x41304, 0x413c8,
+ 0x413d0, 0x413dc,
+ 0x413f0, 0x413f0,
+ 0x41400, 0x4140c,
+ 0x41414, 0x4141c,
0x41480, 0x414d0,
0x44000, 0x4407c,
- 0x440c0, 0x4427c,
- 0x442c0, 0x4447c,
- 0x444c0, 0x4467c,
- 0x446c0, 0x4487c,
- 0x448c0, 0x44a7c,
- 0x44ac0, 0x44c7c,
- 0x44cc0, 0x44e7c,
- 0x44ec0, 0x4507c,
- 0x450c0, 0x451fc,
- 0x45800, 0x45868,
+ 0x440c0, 0x441ac,
+ 0x441b4, 0x4427c,
+ 0x442c0, 0x443ac,
+ 0x443b4, 0x4447c,
+ 0x444c0, 0x445ac,
+ 0x445b4, 0x4467c,
+ 0x446c0, 0x447ac,
+ 0x447b4, 0x4487c,
+ 0x448c0, 0x449ac,
+ 0x449b4, 0x44a7c,
+ 0x44ac0, 0x44bac,
+ 0x44bb4, 0x44c7c,
+ 0x44cc0, 0x44dac,
+ 0x44db4, 0x44e7c,
+ 0x44ec0, 0x44fac,
+ 0x44fb4, 0x4507c,
+ 0x450c0, 0x451ac,
+ 0x451b4, 0x451fc,
+ 0x45800, 0x45804,
+ 0x45810, 0x45830,
+ 0x45840, 0x45860,
+ 0x45868, 0x45868,
0x45880, 0x45884,
0x458a0, 0x458b0,
- 0x45a00, 0x45a68,
+ 0x45a00, 0x45a04,
+ 0x45a10, 0x45a30,
+ 0x45a40, 0x45a60,
+ 0x45a68, 0x45a68,
0x45a80, 0x45a84,
0x45aa0, 0x45ab0,
0x460c0, 0x460e4,
- 0x47000, 0x4708c,
+ 0x47000, 0x4703c,
+ 0x47044, 0x4708c,
0x47200, 0x47250,
- 0x47400, 0x47420,
+ 0x47400, 0x47408,
+ 0x47414, 0x47420,
0x47600, 0x47618,
- 0x47800, 0x4782c,
- 0x50000, 0x500cc,
+ 0x47800, 0x47814,
+ 0x47820, 0x4782c,
+ 0x50000, 0x50084,
+ 0x50090, 0x500cc,
+ 0x50300, 0x50384,
0x50400, 0x50400,
- 0x50800, 0x508cc,
+ 0x50800, 0x50884,
+ 0x50890, 0x508cc,
+ 0x50b00, 0x50b84,
0x50c00, 0x50c00,
- 0x51000, 0x510b0,
+ 0x51000, 0x51020,
+ 0x51028, 0x510b0,
0x51300, 0x51324,
};
@@ -2177,11 +2981,15 @@ int t4_get_exprom_version(struct adapter *adap, u32 *vers)
*/
int t4_check_fw_version(struct adapter *adap)
{
- int ret, major, minor, micro;
+ int i, ret, major, minor, micro;
int exp_major, exp_minor, exp_micro;
unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
ret = t4_get_fw_version(adap, &adap->params.fw_vers);
+ /* Try multiple times before returning error */
+ for (i = 0; (ret == -EBUSY || ret == -EAGAIN) && i < 3; i++)
+ ret = t4_get_fw_version(adap, &adap->params.fw_vers);
+
if (ret)
return ret;
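
The hunk above makes t4_check_fw_version() retry t4_get_fw_version() a bounded number of times when the firmware interface reports a transient -EBUSY or -EAGAIN. The same pattern in isolation, as a hedged sketch (read_version() is a hypothetical stand-in for the real firmware query, not a driver function):

/* Illustrative only: read_version() stands in for a call such as
 * t4_get_fw_version() that can transiently return -EBUSY/-EAGAIN.
 */
static int read_version_retrying(struct adapter *adap, u32 *vers)
{
	int i, ret;

	ret = read_version(adap, vers);
	/* try a few more times before giving up on transient errors */
	for (i = 0; (ret == -EBUSY || ret == -EAGAIN) && i < 3; i++)
		ret = read_version(adap, vers);

	return ret;
}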
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index 640369df8b3a..13708fde1668 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -263,4 +263,9 @@ enum {
#undef FLASH_START
#undef FLASH_MAX_SIZE
+#define SGE_TIMESTAMP_S 0
+#define SGE_TIMESTAMP_M 0xfffffffffffffffULL
+#define SGE_TIMESTAMP_V(x) ((__u64)(x) << SGE_TIMESTAMP_S)
+#define SGE_TIMESTAMP_G(x) (((__u64)(x) >> SGE_TIMESTAMP_S) & SGE_TIMESTAMP_M)
+
#endif /* __T4_HW_H */
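
The SGE_TIMESTAMP_* additions follow the driver's usual _S/_M/_V()/_G() field convention: with a shift of 0 and a 60-bit mask, _V() positions a timestamp value inside a 64-bit word and _G() extracts it again. A minimal usage sketch (raw_desc is a made-up descriptor word, not a real register value):

/* Hedged example of the field helpers defined above. */
__u64 raw_desc = 0x0123456789abcdefULL;		/* hypothetical input  */
__u64 tstamp   = SGE_TIMESTAMP_G(raw_desc);	/* low 60 bits         */
__u64 repacked = SGE_TIMESTAMP_V(tstamp);	/* shifted back (by 0) */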
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index b2b5e5bbe04c..0cfa5d72cafd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -56,7 +56,7 @@
* Generic information about the driver.
*/
#define DRV_VERSION "2.0.0-ko"
-#define DRV_DESC "Chelsio T4/T5 Virtual Function (VF) Network Driver"
+#define DRV_DESC "Chelsio T4/T5/T6 Virtual Function (VF) Network Driver"
/*
* Module Parameters.
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 8b53f7d4bebf..1671fa3332c2 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -50,6 +50,7 @@ struct enic_msix_entry {
char devname[IFNAMSIZ];
irqreturn_t (*isr)(int, void *);
void *devid;
+ cpumask_var_t affinity_mask;
};
/* Store only the lower range. Higher range is given by fw. */
@@ -143,6 +144,7 @@ struct enic {
struct vnic_dev *vdev;
struct timer_list notify_timer;
struct work_struct reset;
+ struct work_struct tx_hang_reset;
struct work_struct change_mtu_work;
struct msix_entry msix_entry[ENIC_INTR_MAX];
struct enic_msix_entry msix[ENIC_INTR_MAX];
@@ -262,6 +264,32 @@ static inline unsigned int enic_msix_notify_intr(struct enic *enic)
return enic->rq_count + enic->wq_count + 1;
}
+static inline bool enic_is_err_intr(struct enic *enic, int intr)
+{
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ return intr == enic_legacy_err_intr();
+ case VNIC_DEV_INTR_MODE_MSIX:
+ return intr == enic_msix_err_intr(enic);
+ case VNIC_DEV_INTR_MODE_MSI:
+ default:
+ return false;
+ }
+}
+
+static inline bool enic_is_notify_intr(struct enic *enic, int intr)
+{
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ return intr == enic_legacy_notify_intr();
+ case VNIC_DEV_INTR_MODE_MSIX:
+ return intr == enic_msix_notify_intr(enic);
+ case VNIC_DEV_INTR_MODE_MSI:
+ default:
+ return false;
+ }
+}
+
static inline int enic_dma_map_check(struct enic *enic, dma_addr_t dma_addr)
{
if (unlikely(pci_dma_mapping_error(enic->pdev, dma_addr))) {
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 3352d027ab89..b36643ef0593 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -39,6 +39,7 @@
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>
#include <linux/ktime.h>
+#include <linux/numa.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
@@ -112,6 +113,71 @@ static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
{3, 6}, /* 10 - 40 Gbps */
};
+static void enic_init_affinity_hint(struct enic *enic)
+{
+ int numa_node = dev_to_node(&enic->pdev->dev);
+ int i;
+
+ for (i = 0; i < enic->intr_count; i++) {
+ if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i) ||
+ (enic->msix[i].affinity_mask &&
+ !cpumask_empty(enic->msix[i].affinity_mask)))
+ continue;
+ if (zalloc_cpumask_var(&enic->msix[i].affinity_mask,
+ GFP_KERNEL))
+ cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+ enic->msix[i].affinity_mask);
+ }
+}
+
+static void enic_free_affinity_hint(struct enic *enic)
+{
+ int i;
+
+ for (i = 0; i < enic->intr_count; i++) {
+ if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i))
+ continue;
+ free_cpumask_var(enic->msix[i].affinity_mask);
+ }
+}
+
+static void enic_set_affinity_hint(struct enic *enic)
+{
+ int i;
+ int err;
+
+ for (i = 0; i < enic->intr_count; i++) {
+ if (enic_is_err_intr(enic, i) ||
+ enic_is_notify_intr(enic, i) ||
+ !enic->msix[i].affinity_mask ||
+ cpumask_empty(enic->msix[i].affinity_mask))
+ continue;
+ err = irq_set_affinity_hint(enic->msix_entry[i].vector,
+ enic->msix[i].affinity_mask);
+ if (err)
+ netdev_warn(enic->netdev, "irq_set_affinity_hint failed, err %d\n",
+ err);
+ }
+
+ for (i = 0; i < enic->wq_count; i++) {
+ int wq_intr = enic_msix_wq_intr(enic, i);
+
+ if (enic->msix[wq_intr].affinity_mask &&
+ !cpumask_empty(enic->msix[wq_intr].affinity_mask))
+ netif_set_xps_queue(enic->netdev,
+ enic->msix[wq_intr].affinity_mask,
+ i);
+ }
+}
+
+static void enic_unset_affinity_hint(struct enic *enic)
+{
+ int i;
+
+ for (i = 0; i < enic->intr_count; i++)
+ irq_set_affinity_hint(enic->msix_entry[i].vector, NULL);
+}
+
int enic_is_dynamic(struct enic *enic)
{
return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
@@ -178,13 +244,15 @@ static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
return 0;
}
-static void enic_log_q_error(struct enic *enic)
+static bool enic_log_q_error(struct enic *enic)
{
unsigned int i;
u32 error_status;
+ bool err = false;
for (i = 0; i < enic->wq_count; i++) {
error_status = vnic_wq_error_status(&enic->wq[i]);
+ err |= error_status;
if (error_status)
netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
i, error_status);
@@ -192,10 +260,13 @@ static void enic_log_q_error(struct enic *enic)
for (i = 0; i < enic->rq_count; i++) {
error_status = vnic_rq_error_status(&enic->rq[i]);
+ err |= error_status;
if (error_status)
netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
i, error_status);
}
+
+ return err;
}
static void enic_msglvl_check(struct enic *enic)
@@ -333,10 +404,9 @@ static irqreturn_t enic_isr_msix_err(int irq, void *data)
vnic_intr_return_all_credits(&enic->intr[intr]);
- enic_log_q_error(enic);
-
- /* schedule recovery from WQ/RQ error */
- schedule_work(&enic->reset);
+ if (enic_log_q_error(enic))
+ /* schedule recovery from WQ/RQ error */
+ schedule_work(&enic->reset);
return IRQ_HANDLED;
}
@@ -804,7 +874,7 @@ static void enic_set_rx_mode(struct net_device *netdev)
static void enic_tx_timeout(struct net_device *netdev)
{
struct enic *enic = netdev_priv(netdev);
- schedule_work(&enic->reset);
+ schedule_work(&enic->tx_hang_reset);
}
static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
@@ -1645,6 +1715,8 @@ static int enic_open(struct net_device *netdev)
netdev_err(netdev, "Unable to request irq.\n");
return err;
}
+ enic_init_affinity_hint(enic);
+ enic_set_affinity_hint(enic);
err = enic_dev_notify_set(enic);
if (err) {
@@ -1697,6 +1769,7 @@ err_out_free_rq:
vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
enic_dev_notify_unset(enic);
err_out_free_intr:
+ enic_unset_affinity_hint(enic);
enic_free_intr(enic);
return err;
@@ -1750,6 +1823,7 @@ static int enic_stop(struct net_device *netdev)
}
enic_dev_notify_unset(enic);
+ enic_unset_affinity_hint(enic);
enic_free_intr(enic);
for (i = 0; i < enic->wq_count; i++)
@@ -1924,6 +1998,19 @@ static int enic_dev_open(struct enic *enic)
return err;
}
+static int enic_dev_soft_reset(struct enic *enic)
+{
+ int err;
+
+ err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset,
+ vnic_dev_soft_reset_done, 0);
+ if (err)
+ netdev_err(enic->netdev, "vNIC soft reset failed, err %d\n",
+ err);
+
+ return err;
+}
+
static int enic_dev_hang_reset(struct enic *enic)
{
int err;
@@ -2060,6 +2147,26 @@ static void enic_reset(struct work_struct *work)
rtnl_lock();
spin_lock(&enic->enic_api_lock);
+ enic_stop(enic->netdev);
+ enic_dev_soft_reset(enic);
+ enic_reset_addr_lists(enic);
+ enic_init_vnic_resources(enic);
+ enic_set_rss_nic_cfg(enic);
+ enic_dev_set_ig_vlan_rewrite_mode(enic);
+ enic_open(enic->netdev);
+ spin_unlock(&enic->enic_api_lock);
+ call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
+
+ rtnl_unlock();
+}
+
+static void enic_tx_hang_reset(struct work_struct *work)
+{
+ struct enic *enic = container_of(work, struct enic, tx_hang_reset);
+
+ rtnl_lock();
+
+ spin_lock(&enic->enic_api_lock);
enic_dev_hang_notify(enic);
enic_stop(enic->netdev);
enic_dev_hang_reset(enic);
@@ -2272,6 +2379,7 @@ static void enic_dev_deinit(struct enic *enic)
enic_free_vnic_resources(enic);
enic_clear_intr_mode(enic);
+ enic_free_affinity_hint(enic);
}
static void enic_kdump_kernel_config(struct enic *enic)
@@ -2367,6 +2475,7 @@ static int enic_dev_init(struct enic *enic)
return 0;
err_out_free_vnic_resources:
+ enic_free_affinity_hint(enic);
enic_clear_intr_mode(enic);
enic_free_vnic_resources(enic);
@@ -2583,6 +2692,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
enic_set_rx_coal_setting(enic);
INIT_WORK(&enic->reset, enic_reset);
+ INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
for (i = 0; i < enic->wq_count; i++)
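
The enic changes above allocate one cpumask per MSI-X vector, seed it with cpumask_local_spread() so queue interrupts prefer CPUs on the device's NUMA node, publish it through irq_set_affinity_hint(), and reuse the same mask for netif_set_xps_queue(). A condensed, hedged sketch of that pattern for a generic PCI driver (the function name, arrays and vector count below are illustrative, not enic's):

/* Sketch of the NUMA-aware affinity-hint pattern; not enic's actual code. */
static void set_queue_irq_hints(struct pci_dev *pdev, struct msix_entry *vec,
				cpumask_var_t *mask, int nvec)
{
	int node = dev_to_node(&pdev->dev);
	int i;

	for (i = 0; i < nvec; i++) {
		if (!zalloc_cpumask_var(&mask[i], GFP_KERNEL))
			continue;	/* no hint is better than a bogus one */
		/* spread vectors over CPUs, preferring the device's node */
		cpumask_set_cpu(cpumask_local_spread(i, node), mask[i]);
		if (irq_set_affinity_hint(vec[i].vector, mask[i]))
			pr_warn("affinity hint failed for vector %d\n", i);
	}
}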
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index a3badefaf360..1ffd1050860b 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -659,14 +659,14 @@ int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
return 0;
}
-static int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
+int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
{
u64 a0 = (u32)arg, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
}
-static int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
+int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index b013b6a78e87..54156c484424 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -155,7 +155,9 @@ int vnic_dev_deinit(struct vnic_dev *vdev);
void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev);
int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev);
int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg);
+int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done);
+int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
enum vnic_dev_intr_mode intr_mode);
enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev);
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index a02ecc4f9002..cadcee645f74 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -1597,7 +1597,6 @@ static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
- info->eedump_len = DE_EEPROM_SIZE;
}
static int de_get_regs_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 2c9ed1710ba6..f4cb8e425853 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -234,9 +234,6 @@ static void be_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->testinfo_len = 0;
- drvinfo->regdump_len = 0;
- drvinfo->eedump_len = 0;
}
static u32 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
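
The de2104x and be2net hunks above stop filling regdump_len/eedump_len/testinfo_len by hand, presumably because the ethtool core can derive those lengths from the corresponding get_regs_len()/get_eeprom_len() operations. After this style of cleanup, a minimal get_drvinfo() only fills the string fields, roughly like the hedged sketch below (driver name and version strings are placeholders):

/* Sketch: string-only get_drvinfo(); length fields are left untouched. */
static void example_get_drvinfo(struct net_device *netdev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "example", sizeof(info->driver));
	strlcpy(info->version, "1.0", sizeof(info->version));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}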
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index a2c96fd88393..ff665493ca97 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -201,6 +201,7 @@ struct ethoc {
void __iomem *membase;
int dma_alloc;
resource_size_t io_region_size;
+ bool big_endian;
unsigned int num_bd;
unsigned int num_tx;
@@ -236,12 +237,18 @@ struct ethoc_bd {
static inline u32 ethoc_read(struct ethoc *dev, loff_t offset)
{
- return ioread32(dev->iobase + offset);
+ if (dev->big_endian)
+ return ioread32be(dev->iobase + offset);
+ else
+ return ioread32(dev->iobase + offset);
}
static inline void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
{
- iowrite32(data, dev->iobase + offset);
+ if (dev->big_endian)
+ iowrite32be(data, dev->iobase + offset);
+ else
+ iowrite32(data, dev->iobase + offset);
}
static inline void ethoc_read_bd(struct ethoc *dev, int index,
@@ -1106,6 +1113,9 @@ static int ethoc_probe(struct platform_device *pdev)
priv->dma_alloc = buffer_size;
}
+ priv->big_endian = pdata ? pdata->big_endian :
+ of_device_is_big_endian(pdev->dev.of_node);
+
/* calculate the number of TX/RX buffers, maximum 128 supported */
num_bd = min_t(unsigned int,
128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
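
The ethoc hunks switch between ioread32()/iowrite32() and their big-endian ioread32be()/iowrite32be() counterparts based on a flag taken from platform data or of_device_is_big_endian(). The same accessor-wrapper pattern in isolation, as a hedged sketch (struct mmio_dev is a made-up container, not ethoc's private struct):

/* Illustrative endian-aware MMIO accessors mirroring the ethoc change. */
struct mmio_dev {
	void __iomem *iobase;
	bool big_endian;
};

static inline u32 mmio_read(struct mmio_dev *dev, loff_t offset)
{
	return dev->big_endian ? ioread32be(dev->iobase + offset) :
				 ioread32(dev->iobase + offset);
}

static inline void mmio_write(struct mmio_dev *dev, loff_t offset, u32 data)
{
	if (dev->big_endian)
		iowrite32be(data, dev->iobase + offset);
	else
		iowrite32(data, dev->iobase + offset);
}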
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index dd4ca39d5d8f..b2a32209ffbf 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3070,7 +3070,6 @@ static void fec_poll_controller(struct net_device *dev)
}
#endif
-#define FEATURES_NEED_QUIESCE NETIF_F_RXCSUM
static inline void fec_enet_set_netdev_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -3094,7 +3093,7 @@ static int fec_set_features(struct net_device *netdev,
struct fec_enet_private *fep = netdev_priv(netdev);
netdev_features_t changed = features ^ netdev->features;
- if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
+ if (netif_running(netdev) && changed & NETIF_F_RXCSUM) {
napi_disable(&fep->napi);
netif_tx_lock_bh(netdev);
fec_stop(netdev);
@@ -3262,7 +3261,7 @@ static void fec_reset_phy(struct platform_device *pdev)
return;
}
msleep(msec);
- gpio_set_value(phy_reset, 1);
+ gpio_set_value_cansleep(phy_reset, 1);
}
#else /* CONFIG_OF */
static void fec_reset_phy(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 1543cf0e8ef6..f9e74461bdc0 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -112,9 +112,8 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
unsigned long flags;
u32 val, tempval;
int inc;
- struct timespec ts;
+ struct timespec64 ts;
u64 ns;
- u32 remainder;
val = 0;
if (!(fep->hwts_tx_en || fep->hwts_rx_en)) {
@@ -163,8 +162,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
tempval = readl(fep->hwp + FEC_ATIME);
/* Convert the ptp local counter to 1588 timestamp */
ns = timecounter_cyc2time(&fep->tc, tempval);
- ts.tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
- ts.tv_nsec = remainder;
+ ts = ns_to_timespec64(ns);
/* The tempval is less than 3 seconds, and so val is less than
* 4 seconds. No overflow for 32bit calculation.
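
The fec_ptp hunk replaces an open-coded div_u64_rem() split of the nanosecond count with ns_to_timespec64(), which produces the same seconds/nanoseconds pair in one call. A small equivalence sketch (the ns value is an arbitrary example):

/* Both forms yield the same timespec64 for a non-negative ns count. */
u64 ns = 1234567890123ULL;	/* example value only */
struct timespec64 ts;
u32 rem;

ts.tv_sec  = div_u64_rem(ns, NSEC_PER_SEC, &rem);	/* old, open-coded */
ts.tv_nsec = rem;

ts = ns_to_timespec64(ns);				/* new, one call   */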
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index ce38d266f931..3e6b9b437497 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -107,7 +107,7 @@
#include "gianfar.h"
-#define TX_TIMEOUT (1*HZ)
+#define TX_TIMEOUT (5*HZ)
const char gfar_driver_version[] = "2.0";
@@ -907,6 +907,9 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
if (of_find_property(np, "fsl,magic-packet", NULL))
priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
+ if (of_get_property(np, "fsl,wake-on-filer", NULL))
+ priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;
+
priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
/* In the case of a fixed PHY, the DT node associated
@@ -1415,8 +1418,14 @@ static int gfar_probe(struct platform_device *ofdev)
goto register_fail;
}
- device_set_wakeup_capable(&dev->dev, priv->device_flags &
- FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
+ priv->wol_supported |= GFAR_WOL_MAGIC;
+
+ if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
+ priv->rx_filer_enable)
+ priv->wol_supported |= GFAR_WOL_FILER_UCAST;
+
+ device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
/* fill out IRQ number and name fields */
for (i = 0; i < priv->num_grps; i++) {
@@ -1479,15 +1488,122 @@ static int gfar_remove(struct platform_device *ofdev)
#ifdef CONFIG_PM
+static void __gfar_filer_disable(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 temp;
+
+ temp = gfar_read(&regs->rctrl);
+ temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
+ gfar_write(&regs->rctrl, temp);
+}
+
+static void __gfar_filer_enable(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 temp;
+
+ temp = gfar_read(&regs->rctrl);
+ temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
+ gfar_write(&regs->rctrl, temp);
+}
+
+/* Filer rules implementing wol capabilities */
+static void gfar_filer_config_wol(struct gfar_private *priv)
+{
+ unsigned int i;
+ u32 rqfcr;
+
+ __gfar_filer_disable(priv);
+
+ /* clear the filer table, reject any packet by default */
+ rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
+ for (i = 0; i <= MAX_FILER_IDX; i++)
+ gfar_write_filer(priv, i, rqfcr, 0);
+
+ i = 0;
+ if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
+ /* unicast packet, accept it */
+ struct net_device *ndev = priv->ndev;
+ /* get the default rx queue index */
+ u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
+ u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
+ (ndev->dev_addr[1] << 8) |
+ ndev->dev_addr[2];
+
+ rqfcr = (qindex << 10) | RQFCR_AND |
+ RQFCR_CMP_EXACT | RQFCR_PID_DAH;
+
+ gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
+
+ dest_mac_addr = (ndev->dev_addr[3] << 16) |
+ (ndev->dev_addr[4] << 8) |
+ ndev->dev_addr[5];
+ rqfcr = (qindex << 10) | RQFCR_GPI |
+ RQFCR_CMP_EXACT | RQFCR_PID_DAL;
+ gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
+ }
+
+ __gfar_filer_enable(priv);
+}
+
+static void gfar_filer_restore_table(struct gfar_private *priv)
+{
+ u32 rqfcr, rqfpr;
+ unsigned int i;
+
+ __gfar_filer_disable(priv);
+
+ for (i = 0; i <= MAX_FILER_IDX; i++) {
+ rqfcr = priv->ftp_rqfcr[i];
+ rqfpr = priv->ftp_rqfpr[i];
+ gfar_write_filer(priv, i, rqfcr, rqfpr);
+ }
+
+ __gfar_filer_enable(priv);
+}
+
+/* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
+static void gfar_start_wol_filer(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval;
+ int i = 0;
+
+ /* Enable Rx hw queues */
+ gfar_write(&regs->rqueue, priv->rqueue);
+
+ /* Initialize DMACTRL to have WWR and WOP */
+ tempval = gfar_read(&regs->dmactrl);
+ tempval |= DMACTRL_INIT_SETTINGS;
+ gfar_write(&regs->dmactrl, tempval);
+
+ /* Make sure we aren't stopped */
+ tempval = gfar_read(&regs->dmactrl);
+ tempval &= ~DMACTRL_GRS;
+ gfar_write(&regs->dmactrl, tempval);
+
+ for (i = 0; i < priv->num_grps; i++) {
+ regs = priv->gfargrp[i].regs;
+ /* Clear RHLT, so that the DMA starts polling now */
+ gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
+ /* enable the Filer General Purpose Interrupt */
+ gfar_write(&regs->imask, IMASK_FGPI);
+ }
+
+ /* Enable Rx DMA */
+ tempval = gfar_read(&regs->maccfg1);
+ tempval |= MACCFG1_RX_EN;
+ gfar_write(&regs->maccfg1, tempval);
+}
+
static int gfar_suspend(struct device *dev)
{
struct gfar_private *priv = dev_get_drvdata(dev);
struct net_device *ndev = priv->ndev;
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
- int magic_packet = priv->wol_en &&
- (priv->device_flags &
- FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+ u16 wol = priv->wol_opts;
if (!netif_running(ndev))
return 0;
@@ -1499,7 +1615,7 @@ static int gfar_suspend(struct device *dev)
gfar_halt(priv);
- if (magic_packet) {
+ if (wol & GFAR_WOL_MAGIC) {
/* Enable interrupt on Magic Packet */
gfar_write(&regs->imask, IMASK_MAG);
@@ -1513,6 +1629,10 @@ static int gfar_suspend(struct device *dev)
tempval |= MACCFG1_RX_EN;
gfar_write(&regs->maccfg1, tempval);
+ } else if (wol & GFAR_WOL_FILER_UCAST) {
+ gfar_filer_config_wol(priv);
+ gfar_start_wol_filer(priv);
+
} else {
phy_stop(priv->phydev);
}
@@ -1526,18 +1646,22 @@ static int gfar_resume(struct device *dev)
struct net_device *ndev = priv->ndev;
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
- int magic_packet = priv->wol_en &&
- (priv->device_flags &
- FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+ u16 wol = priv->wol_opts;
if (!netif_running(ndev))
return 0;
- if (magic_packet) {
+ if (wol & GFAR_WOL_MAGIC) {
/* Disable Magic Packet mode */
tempval = gfar_read(&regs->maccfg2);
tempval &= ~MACCFG2_MPEN;
gfar_write(&regs->maccfg2, tempval);
+
+ } else if (wol & GFAR_WOL_FILER_UCAST) {
+ /* need to stop rx only, tx is already down */
+ gfar_halt(priv);
+ gfar_filer_restore_table(priv);
+
} else {
phy_start(priv->phydev);
}
@@ -1998,6 +2122,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
gfar_irq(grp, RX)->irq);
goto rx_irq_fail;
}
+ enable_irq_wake(gfar_irq(grp, RX)->irq);
+
} else {
err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
gfar_irq(grp, TX)->name, grp);
@@ -2743,7 +2869,14 @@ irqreturn_t gfar_receive(int irq, void *grp_id)
{
struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
unsigned long flags;
- u32 imask;
+ u32 imask, ievent;
+
+ ievent = gfar_read(&grp->regs->ievent);
+
+ if (unlikely(ievent & IEVENT_FGPI)) {
+ gfar_write(&grp->regs->ievent, IEVENT_FGPI);
+ return IRQ_HANDLED;
+ }
if (likely(napi_schedule_prep(&grp->napi_rx))) {
spin_lock_irqsave(&grp->grplock, flags);
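
In gfar_filer_config_wol() above, the wake-on-filer rule compares the station MAC address in two halves: the high three bytes against the RQFCR_PID_DAH property and the low three against RQFCR_PID_DAL. The packing itself is just byte shifts, as in this hedged sketch (addr[] is an example address, not read from hardware):

/* Pack a 6-byte MAC into the two 24-bit filer compare values. */
const u8 addr[6] = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 };	/* example  */
u32 dah = (addr[0] << 16) | (addr[1] << 8) | addr[2];		/* bytes 0-2 */
u32 dal = (addr[3] << 16) | (addr[4] << 8) | addr[5];		/* bytes 3-5 */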
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 8c1994856e93..f266b20f9ef5 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -340,6 +340,7 @@ extern const char gfar_driver_version[];
#define IEVENT_MAG 0x00000800
#define IEVENT_GRSC 0x00000100
#define IEVENT_RXF0 0x00000080
+#define IEVENT_FGPI 0x00000010
#define IEVENT_FIR 0x00000008
#define IEVENT_FIQ 0x00000004
#define IEVENT_DPE 0x00000002
@@ -372,6 +373,7 @@ extern const char gfar_driver_version[];
#define IMASK_MAG 0x00000800
#define IMASK_GRSC 0x00000100
#define IMASK_RXFEN0 0x00000080
+#define IMASK_FGPI 0x00000010
#define IMASK_FIR 0x00000008
#define IMASK_FIQ 0x00000004
#define IMASK_DPE 0x00000002
@@ -540,6 +542,9 @@ extern const char gfar_driver_version[];
#define GFAR_INT_NAME_MAX (IFNAMSIZ + 6) /* '_g#_xx' */
+#define GFAR_WOL_MAGIC 0x00000001
+#define GFAR_WOL_FILER_UCAST 0x00000002
+
struct txbd8
{
union {
@@ -917,6 +922,7 @@ struct gfar {
#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200
#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
#define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800
+#define FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER 0x00001000
#if (MAXGROUPS == 2)
#define DEFAULT_MAPPING 0xAA
@@ -1161,8 +1167,6 @@ struct gfar_private {
extended_hash:1,
bd_stash_en:1,
rx_filer_enable:1,
- /* Wake-on-LAN enabled */
- wol_en:1,
/* Enable priorty based Tx scheduling in Hw */
prio_sched_en:1,
/* Flow control flags */
@@ -1191,6 +1195,10 @@ struct gfar_private {
u32 __iomem *hash_regs[16];
int hash_width;
+ /* wake-on-lan settings */
+ u16 wol_opts;
+ u16 wol_supported;
+
/*Filer table*/
unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index a33e4a829601..4b0ee855edd7 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -182,8 +182,6 @@ static void gfar_gdrvinfo(struct net_device *dev,
sizeof(drvinfo->version));
strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
- drvinfo->regdump_len = 0;
- drvinfo->eedump_len = 0;
}
@@ -644,28 +642,49 @@ static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct gfar_private *priv = netdev_priv(dev);
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) {
- wol->supported = WAKE_MAGIC;
- wol->wolopts = priv->wol_en ? WAKE_MAGIC : 0;
- } else {
- wol->supported = wol->wolopts = 0;
- }
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (priv->wol_supported & GFAR_WOL_MAGIC)
+ wol->supported |= WAKE_MAGIC;
+
+ if (priv->wol_supported & GFAR_WOL_FILER_UCAST)
+ wol->supported |= WAKE_UCAST;
+
+ if (priv->wol_opts & GFAR_WOL_MAGIC)
+ wol->wolopts |= WAKE_MAGIC;
+
+ if (priv->wol_opts & GFAR_WOL_FILER_UCAST)
+ wol->wolopts |= WAKE_UCAST;
}
static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct gfar_private *priv = netdev_priv(dev);
+ u16 wol_opts = 0;
+ int err;
- if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
- wol->wolopts != 0)
+ if (!priv->wol_supported && wol->wolopts)
return -EINVAL;
- if (wol->wolopts & ~WAKE_MAGIC)
+ if (wol->wolopts & ~(WAKE_MAGIC | WAKE_UCAST))
return -EINVAL;
- device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
+ if (wol->wolopts & WAKE_MAGIC) {
+ wol_opts |= GFAR_WOL_MAGIC;
+ } else {
+ if (wol->wolopts & WAKE_UCAST)
+ wol_opts |= GFAR_WOL_FILER_UCAST;
+ }
+
+ wol_opts &= priv->wol_supported;
+ priv->wol_opts = 0;
+
+ err = device_set_wakeup_enable(priv->dev, wol_opts);
+ if (err)
+ return err;
- priv->wol_en = !!device_may_wakeup(&dev->dev);
+ priv->wol_opts = wol_opts;
return 0;
}
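
gfar_set_wol() above follows a validate-map-mask flow: reject WAKE_* bits the driver never handles, translate the remaining bits to internal GFAR_WOL_* flags (magic packet taking precedence over unicast wake), clamp against what this instance supports, and only then enable wakeup. A compact, hedged restatement of that flow (the helper name and its supported/opts parameters are illustrative, standing in for priv->wol_supported/wol_opts):

/* Sketch of the WAKE_* -> GFAR_WOL_* mapping performed above. */
static int map_wol_opts(u32 wolopts, u16 supported, u16 *opts)
{
	u16 flags = 0;

	if (wolopts & ~(WAKE_MAGIC | WAKE_UCAST))
		return -EINVAL;			/* unsupported wake method */

	if (wolopts & WAKE_MAGIC)
		flags |= GFAR_WOL_MAGIC;	/* magic packet wins */
	else if (wolopts & WAKE_UCAST)
		flags |= GFAR_WOL_FILER_UCAST;

	*opts = flags & supported;		/* clamp to hw capability */
	return 0;
}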
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index cc83350d56ba..89714f5e0dfc 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -351,8 +351,6 @@ uec_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info));
- drvinfo->eedump_len = 0;
- drvinfo->regdump_len = uec_get_regs_len(netdev);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index dead17b5d769..f250dec488fd 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_HISILICON
bool "Hisilicon devices"
default y
- depends on ARM
+ depends on OF && (ARM || ARM64 || COMPILE_TEST)
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -24,11 +24,42 @@ config HIX5HD2_GMAC
config HIP04_ETH
tristate "HISILICON P04 Ethernet support"
- select PHYLIB
select MARVELL_PHY
select MFD_SYSCON
+ select HNS_MDIO
---help---
If you wish to compile a kernel for a hardware with hisilicon p04 SoC and
want to use the internal ethernet then you should answer Y to this.
+config HNS_MDIO
+ tristate
+ select PHYLIB
+ ---help---
+ This selects the HNS MDIO support. It is needed by HNS_DSAF to access
+ the PHY
+
+config HNS
+ tristate "Hisilicon Network Subsystem Support (Framework)"
+ ---help---
+ This selects the framework support for Hisilicon Network Subsystem. It
+ is needed by any driver which provides an HNS acceleration engine or makes
+ use of the engine.
+
+config HNS_DSAF
+ tristate "Hisilicon HNS DSAF device Support"
+ select HNS
+ select HNS_MDIO
+ ---help---
+ This selects the DSAF (Distributed System Area Fabric) network
+ acceleration engine support. The engine is used in Hisilicon hip05,
+ Hi1610 and later ICT SoCs.
+
+config HNS_ENET
+ tristate "Hisilicon HNS Ethernet Device Support"
+ select PHYLIB
+ select HNS
+ ---help---
+ This selects the general ethernet driver for HNS. This module makes
+ use of any HNS AE driver, such as HNS_DSAF.
+
endif # NET_VENDOR_HISILICON
diff --git a/drivers/net/ethernet/hisilicon/Makefile b/drivers/net/ethernet/hisilicon/Makefile
index 6c14540a4dc5..390b71fb3000 100644
--- a/drivers/net/ethernet/hisilicon/Makefile
+++ b/drivers/net/ethernet/hisilicon/Makefile
@@ -3,4 +3,6 @@
#
obj-$(CONFIG_HIX5HD2_GMAC) += hix5hd2_gmac.o
-obj-$(CONFIG_HIP04_ETH) += hip04_mdio.o hip04_eth.o
+obj-$(CONFIG_HIP04_ETH) += hip04_eth.o
+obj-$(CONFIG_HNS_MDIO) += hns_mdio.o
+obj-$(CONFIG_HNS) += hns/
diff --git a/drivers/net/ethernet/hisilicon/hip04_mdio.c b/drivers/net/ethernet/hisilicon/hip04_mdio.c
deleted file mode 100644
index fca0a5be1f0f..000000000000
--- a/drivers/net/ethernet/hisilicon/hip04_mdio.c
+++ /dev/null
@@ -1,185 +0,0 @@
-/* Copyright (c) 2014 Linaro Ltd.
- * Copyright (c) 2014 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/of_mdio.h>
-#include <linux/delay.h>
-
-#define MDIO_CMD_REG 0x0
-#define MDIO_ADDR_REG 0x4
-#define MDIO_WDATA_REG 0x8
-#define MDIO_RDATA_REG 0xc
-#define MDIO_STA_REG 0x10
-
-#define MDIO_START BIT(14)
-#define MDIO_R_VALID BIT(1)
-#define MDIO_READ (BIT(12) | BIT(11) | MDIO_START)
-#define MDIO_WRITE (BIT(12) | BIT(10) | MDIO_START)
-
-struct hip04_mdio_priv {
- void __iomem *base;
-};
-
-#define WAIT_TIMEOUT 10
-static int hip04_mdio_wait_ready(struct mii_bus *bus)
-{
- struct hip04_mdio_priv *priv = bus->priv;
- int i;
-
- for (i = 0; readl_relaxed(priv->base + MDIO_CMD_REG) & MDIO_START; i++) {
- if (i == WAIT_TIMEOUT)
- return -ETIMEDOUT;
- msleep(20);
- }
-
- return 0;
-}
-
-static int hip04_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
-{
- struct hip04_mdio_priv *priv = bus->priv;
- u32 val;
- int ret;
-
- ret = hip04_mdio_wait_ready(bus);
- if (ret < 0)
- goto out;
-
- val = regnum | (mii_id << 5) | MDIO_READ;
- writel_relaxed(val, priv->base + MDIO_CMD_REG);
-
- ret = hip04_mdio_wait_ready(bus);
- if (ret < 0)
- goto out;
-
- val = readl_relaxed(priv->base + MDIO_STA_REG);
- if (val & MDIO_R_VALID) {
- dev_err(bus->parent, "SMI bus read not valid\n");
- ret = -ENODEV;
- goto out;
- }
-
- val = readl_relaxed(priv->base + MDIO_RDATA_REG);
- ret = val & 0xFFFF;
-out:
- return ret;
-}
-
-static int hip04_mdio_write(struct mii_bus *bus, int mii_id,
- int regnum, u16 value)
-{
- struct hip04_mdio_priv *priv = bus->priv;
- u32 val;
- int ret;
-
- ret = hip04_mdio_wait_ready(bus);
- if (ret < 0)
- goto out;
-
- writel_relaxed(value, priv->base + MDIO_WDATA_REG);
- val = regnum | (mii_id << 5) | MDIO_WRITE;
- writel_relaxed(val, priv->base + MDIO_CMD_REG);
-out:
- return ret;
-}
-
-static int hip04_mdio_reset(struct mii_bus *bus)
-{
- int temp, i;
-
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- hip04_mdio_write(bus, i, 22, 0);
- temp = hip04_mdio_read(bus, i, MII_BMCR);
- if (temp < 0)
- continue;
-
- temp |= BMCR_RESET;
- if (hip04_mdio_write(bus, i, MII_BMCR, temp) < 0)
- continue;
- }
-
- mdelay(500);
- return 0;
-}
-
-static int hip04_mdio_probe(struct platform_device *pdev)
-{
- struct resource *r;
- struct mii_bus *bus;
- struct hip04_mdio_priv *priv;
- int ret;
-
- bus = mdiobus_alloc_size(sizeof(struct hip04_mdio_priv));
- if (!bus) {
- dev_err(&pdev->dev, "Cannot allocate MDIO bus\n");
- return -ENOMEM;
- }
-
- bus->name = "hip04_mdio_bus";
- bus->read = hip04_mdio_read;
- bus->write = hip04_mdio_write;
- bus->reset = hip04_mdio_reset;
- snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
- bus->parent = &pdev->dev;
- priv = bus->priv;
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, r);
- if (IS_ERR(priv->base)) {
- ret = PTR_ERR(priv->base);
- goto out_mdio;
- }
-
- ret = of_mdiobus_register(bus, pdev->dev.of_node);
- if (ret < 0) {
- dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
- goto out_mdio;
- }
-
- platform_set_drvdata(pdev, bus);
-
- return 0;
-
-out_mdio:
- mdiobus_free(bus);
- return ret;
-}
-
-static int hip04_mdio_remove(struct platform_device *pdev)
-{
- struct mii_bus *bus = platform_get_drvdata(pdev);
-
- mdiobus_unregister(bus);
- mdiobus_free(bus);
-
- return 0;
-}
-
-static const struct of_device_id hip04_mdio_match[] = {
- { .compatible = "hisilicon,hip04-mdio" },
- { }
-};
-MODULE_DEVICE_TABLE(of, hip04_mdio_match);
-
-static struct platform_driver hip04_mdio_driver = {
- .probe = hip04_mdio_probe,
- .remove = hip04_mdio_remove,
- .driver = {
- .name = "hip04-mdio",
- .of_match_table = hip04_mdio_match,
- },
-};
-
-module_platform_driver(hip04_mdio_driver);
-
-MODULE_DESCRIPTION("HISILICON P04 MDIO interface driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:hip04-mdio");
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index a5e077eac99a..e51892d518ff 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -371,7 +371,7 @@ static void hix5hd2_port_enable(struct hix5hd2_priv *priv)
static void hix5hd2_port_disable(struct hix5hd2_priv *priv)
{
- writel_relaxed(~(BITS_RX_EN | BITS_TX_EN), priv->base + PORT_EN);
+ writel_relaxed(~(u32)(BITS_RX_EN | BITS_TX_EN), priv->base + PORT_EN);
writel_relaxed(0, priv->base + DESC_WR_RD_ENA);
}
diff --git a/drivers/net/ethernet/hisilicon/hns/Makefile b/drivers/net/ethernet/hisilicon/hns/Makefile
new file mode 100644
index 000000000000..6010c83e38d8
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the HISILICON network device drivers.
+#
+
+obj-$(CONFIG_HNS) += hnae.o
+
+obj-$(CONFIG_HNS_DSAF) += hns_dsaf.o
+hns_dsaf-objs = hns_ae_adapt.o hns_dsaf_gmac.o hns_dsaf_mac.o hns_dsaf_misc.o \
+ hns_dsaf_main.o hns_dsaf_ppe.o hns_dsaf_rcb.o hns_dsaf_xgmac.o
+
+obj-$(CONFIG_HNS_ENET) += hns_enet_drv.o
+hns_enet_drv-objs = hns_enet.o hns_ethtool.o
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
new file mode 100644
index 000000000000..b3645297477e
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -0,0 +1,457 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+
+#include "hnae.h"
+
+#define cls_to_ae_dev(dev) container_of(dev, struct hnae_ae_dev, cls_dev)
+
+static struct class *hnae_class;
+
+static void
+hnae_list_add(spinlock_t *lock, struct list_head *node, struct list_head *head)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(lock, flags);
+ list_add_tail_rcu(node, head);
+ spin_unlock_irqrestore(lock, flags);
+}
+
+static void hnae_list_del(spinlock_t *lock, struct list_head *node)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(lock, flags);
+ list_del_rcu(node);
+ spin_unlock_irqrestore(lock, flags);
+}
+
+static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
+{
+ unsigned int order = hnae_page_order(ring);
+ struct page *p = dev_alloc_pages(order);
+
+ if (!p)
+ return -ENOMEM;
+
+ cb->priv = p;
+ cb->page_offset = 0;
+ cb->reuse_flag = 0;
+ cb->buf = page_address(p);
+ cb->length = hnae_page_size(ring);
+ cb->type = DESC_TYPE_PAGE;
+
+ return 0;
+}
+
+static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
+{
+ if (cb->type == DESC_TYPE_SKB)
+ dev_kfree_skb_any((struct sk_buff *)cb->priv);
+ else if (unlikely(is_rx_ring(ring)))
+ put_page((struct page *)cb->priv);
+ memset(cb, 0, sizeof(*cb));
+}
+
+static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
+{
+ cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
+ cb->length, ring_to_dma_dir(ring));
+
+ if (dma_mapping_error(ring_to_dev(ring), cb->dma))
+ return -EIO;
+
+ return 0;
+}
+
+static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
+{
+ if (cb->type == DESC_TYPE_SKB)
+ dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
+ ring_to_dma_dir(ring));
+ else
+ dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
+ ring_to_dma_dir(ring));
+}
+
+static struct hnae_buf_ops hnae_bops = {
+ .alloc_buffer = hnae_alloc_buffer,
+ .free_buffer = hnae_free_buffer,
+ .map_buffer = hnae_map_buffer,
+ .unmap_buffer = hnae_unmap_buffer,
+};
+
+static int __ae_match(struct device *dev, const void *data)
+{
+ struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);
+ const char *ae_id = data;
+
+ if (!strncmp(ae_id, hdev->name, AE_NAME_SIZE))
+ return 1;
+
+ return 0;
+}
+
+static struct hnae_ae_dev *find_ae(const char *ae_id)
+{
+ struct device *dev;
+
+ WARN_ON(!ae_id);
+
+ dev = class_find_device(hnae_class, NULL, ae_id, __ae_match);
+
+ return dev ? cls_to_ae_dev(dev) : NULL;
+}
+
+static void hnae_free_buffers(struct hnae_ring *ring)
+{
+ int i;
+
+ for (i = 0; i < ring->desc_num; i++)
+ hnae_free_buffer_detach(ring, i);
+}
+
+/* Allocate memory for the raw packet buffers and map them for DMA */
+static int hnae_alloc_buffers(struct hnae_ring *ring)
+{
+ int i, j, ret;
+
+ for (i = 0; i < ring->desc_num; i++) {
+ ret = hnae_alloc_buffer_attach(ring, i);
+ if (ret)
+ goto out_buffer_fail;
+ }
+
+ return 0;
+
+out_buffer_fail:
+ for (j = i - 1; j >= 0; j--)
+ hnae_free_buffer_detach(ring, j);
+ return ret;
+}
+
+/* free desc along with its attached buffer */
+static void hnae_free_desc(struct hnae_ring *ring)
+{
+ hnae_free_buffers(ring);
+ dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
+ ring->desc_num * sizeof(ring->desc[0]),
+ ring_to_dma_dir(ring));
+ ring->desc_dma_addr = 0;
+ kfree(ring->desc);
+ ring->desc = NULL;
+}
+
+/* alloc desc, without buffer attached */
+static int hnae_alloc_desc(struct hnae_ring *ring)
+{
+ int size = ring->desc_num * sizeof(ring->desc[0]);
+
+ ring->desc = kzalloc(size, GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ ring->desc_dma_addr = dma_map_single(ring_to_dev(ring),
+ ring->desc, size, ring_to_dma_dir(ring));
+ if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
+ ring->desc_dma_addr = 0;
+ kfree(ring->desc);
+ ring->desc = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* fini ring, also free the buffer for the ring */
+static void hnae_fini_ring(struct hnae_ring *ring)
+{
+ hnae_free_desc(ring);
+ kfree(ring->desc_cb);
+ ring->desc_cb = NULL;
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+}
+
+/* init ring, and with buffer for rx ring */
+static int
+hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
+{
+ int ret;
+
+ if (ring->desc_num <= 0 || ring->buf_size <= 0)
+ return -EINVAL;
+
+ ring->q = q;
+ ring->flags = flags;
+ assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);
+
+	/* whether this is a tx or rx ring, ntu and ntc both start from 0 */
+ assert(ring->next_to_use == 0);
+ assert(ring->next_to_clean == 0);
+
+ ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
+ GFP_KERNEL);
+ if (!ring->desc_cb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = hnae_alloc_desc(ring);
+ if (ret)
+ goto out_with_desc_cb;
+
+ if (is_rx_ring(ring)) {
+ ret = hnae_alloc_buffers(ring);
+ if (ret)
+ goto out_with_desc;
+ }
+
+ return 0;
+
+out_with_desc:
+ hnae_free_desc(ring);
+out_with_desc_cb:
+ kfree(ring->desc_cb);
+ ring->desc_cb = NULL;
+out:
+ return ret;
+}
+
+static int hnae_init_queue(struct hnae_handle *h, struct hnae_queue *q,
+ struct hnae_ae_dev *dev)
+{
+ int ret;
+
+ q->dev = dev;
+ q->handle = h;
+
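+	/* RINGF_DIR set marks the tx ring, cleared marks the rx ring */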
+ ret = hnae_init_ring(q, &q->tx_ring, q->tx_ring.flags | RINGF_DIR);
+ if (ret)
+ goto out;
+
+ ret = hnae_init_ring(q, &q->rx_ring, q->rx_ring.flags & ~RINGF_DIR);
+ if (ret)
+ goto out_with_tx_ring;
+
+ if (dev->ops->init_queue)
+ dev->ops->init_queue(q);
+
+ return 0;
+
+out_with_tx_ring:
+ hnae_fini_ring(&q->tx_ring);
+out:
+ return ret;
+}
+
+static void hnae_fini_queue(struct hnae_queue *q)
+{
+ if (q->dev->ops->fini_queue)
+ q->dev->ops->fini_queue(q);
+
+ hnae_fini_ring(&q->tx_ring);
+ hnae_fini_ring(&q->rx_ring);
+}
+
+/**
+ * ae_chain - define ae chain head
+ */
+static RAW_NOTIFIER_HEAD(ae_chain);
+
+int hnae_register_notifier(struct notifier_block *nb)
+{
+ return raw_notifier_chain_register(&ae_chain, nb);
+}
+EXPORT_SYMBOL(hnae_register_notifier);
+
+void hnae_unregister_notifier(struct notifier_block *nb)
+{
+ if (raw_notifier_chain_unregister(&ae_chain, nb))
+		dev_err(NULL, "notifier chain unregister failed\n");
+}
+EXPORT_SYMBOL(hnae_unregister_notifier);
+
+int hnae_reinit_handle(struct hnae_handle *handle)
+{
+ int i, j;
+ int ret;
+
+ for (i = 0; i < handle->q_num; i++) /* free ring*/
+ hnae_fini_queue(handle->qs[i]);
+
+ if (handle->dev->ops->reset)
+ handle->dev->ops->reset(handle);
+
+ for (i = 0; i < handle->q_num; i++) {/* reinit ring*/
+ ret = hnae_init_queue(handle, handle->qs[i], handle->dev);
+ if (ret)
+ goto out_when_init_queue;
+ }
+ return 0;
+out_when_init_queue:
+ for (j = i - 1; j >= 0; j--)
+ hnae_fini_queue(handle->qs[j]);
+ return ret;
+}
+EXPORT_SYMBOL(hnae_reinit_handle);
+
+/* hnae_get_handle - get a handle from the AE
+ * @owner_dev: the dev that uses this handle
+ * @ae_id: the id of the ae to be used
+ * @port_id: the id of the enet port served by this handle
+ * @bops: the callbacks for buffer management
+ *
+ * return handle ptr or ERR_PTR
+ */
+struct hnae_handle *hnae_get_handle(struct device *owner_dev,
+ const char *ae_id, u32 port_id,
+ struct hnae_buf_ops *bops)
+{
+ struct hnae_ae_dev *dev;
+ struct hnae_handle *handle;
+ int i, j;
+ int ret;
+
+ dev = find_ae(ae_id);
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ handle = dev->ops->get_handle(dev, port_id);
+ if (IS_ERR(handle))
+ return handle;
+
+ handle->dev = dev;
+ handle->owner_dev = owner_dev;
+ handle->bops = bops ? bops : &hnae_bops;
+ handle->eport_id = port_id;
+
+ for (i = 0; i < handle->q_num; i++) {
+ ret = hnae_init_queue(handle, handle->qs[i], dev);
+ if (ret)
+ goto out_when_init_queue;
+ }
+
+ __module_get(dev->owner);
+
+ hnae_list_add(&dev->lock, &handle->node, &dev->handle_list);
+
+ return handle;
+
+out_when_init_queue:
+ for (j = i - 1; j >= 0; j--)
+ hnae_fini_queue(handle->qs[j]);
+
+ return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL(hnae_get_handle);
+
+void hnae_put_handle(struct hnae_handle *h)
+{
+ struct hnae_ae_dev *dev = h->dev;
+ int i;
+
+ for (i = 0; i < h->q_num; i++)
+ hnae_fini_queue(h->qs[i]);
+
+ if (h->dev->ops->reset)
+ h->dev->ops->reset(h);
+
+ hnae_list_del(&dev->lock, &h->node);
+
+ if (dev->ops->put_handle)
+ dev->ops->put_handle(h);
+
+ module_put(dev->owner);
+}
+EXPORT_SYMBOL(hnae_put_handle);
+
+static void hnae_release(struct device *dev)
+{
+}
+
+/**
+ * hnae_ae_register - register an AE engine with the hnae framework
+ * @hdev: the hnae ae engine device
+ * @owner: the module who provides this dev
+ * NOTE: duplicate names are not checked
+ */
+int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
+{
+ static atomic_t id = ATOMIC_INIT(-1);
+ int ret;
+
+ if (!hdev->dev)
+ return -ENODEV;
+
+ if (!hdev->ops || !hdev->ops->get_handle ||
+ !hdev->ops->toggle_ring_irq ||
+ !hdev->ops->toggle_queue_status ||
+ !hdev->ops->get_status || !hdev->ops->adjust_link)
+ return -EINVAL;
+
+ hdev->owner = owner;
+ hdev->id = (int)atomic_inc_return(&id);
+ hdev->cls_dev.parent = hdev->dev;
+ hdev->cls_dev.class = hnae_class;
+ hdev->cls_dev.release = hnae_release;
+ (void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
+ ret = device_register(&hdev->cls_dev);
+ if (ret)
+ return ret;
+
+ __module_get(THIS_MODULE);
+
+ INIT_LIST_HEAD(&hdev->handle_list);
+ spin_lock_init(&hdev->lock);
+
+ ret = raw_notifier_call_chain(&ae_chain, HNAE_AE_REGISTER, NULL);
+ if (ret)
+ dev_dbg(hdev->dev,
+			"no notifier registered for AE: %s\n", hdev->name);
+
+ return 0;
+}
+EXPORT_SYMBOL(hnae_ae_register);
+
+/**
+ * hnae_ae_unregister - unregister an hnae AE engine
+ * @hdev: the device to unregister
+ */
+void hnae_ae_unregister(struct hnae_ae_dev *hdev)
+{
+ device_unregister(&hdev->cls_dev);
+ module_put(THIS_MODULE);
+}
+EXPORT_SYMBOL(hnae_ae_unregister);
+
+static int __init hnae_init(void)
+{
+ hnae_class = class_create(THIS_MODULE, "hnae");
+ return PTR_ERR_OR_ZERO(hnae_class);
+}
+
+static void __exit hnae_exit(void)
+{
+ class_destroy(hnae_class);
+}
+
+subsys_initcall(hnae_init);
+module_exit(hnae_exit);
+
+MODULE_AUTHOR("Hisilicon, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Hisilicon Network Acceleration Engine Framework");
+
+/* vi: set tw=78 noet: */
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
new file mode 100644
index 000000000000..cec95ac8687d
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -0,0 +1,585 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __HNAE_H
+#define __HNAE_H
+
+/* Names used in this framework:
+ * ae handle (handle):
+ * a set of queues provided by AE
+ * ring buffer queue (rbq):
+ * the channel between upper layer and the AE, can do tx and rx
+ * ring:
+ * a tx or rx channel within a rbq
+ * ring description (desc):
+ * an element in the ring with packet information
+ * buffer:
+ * a memory region referred by desc with the full packet payload
+ *
+ * "num" means a static number set as a parameter, "count" mean a dynamic
+ * number set while running
+ * "cb" means control block
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/notifier.h>
+#include <linux/phy.h>
+#include <linux/types.h>
+
+#define HNAE_DRIVER_VERSION "1.3.0"
+#define HNAE_DRIVER_NAME "hns"
+#define HNAE_COPYRIGHT "Copyright(c) 2015 Huawei Corporation."
+#define HNAE_DRIVER_STRING "Hisilicon Network Subsystem Driver"
+#define HNAE_DEFAULT_DEVICE_DESCR "Hisilicon Network Subsystem"
+
+#ifdef DEBUG
+
+#ifndef assert
+#define assert(expr) \
+do { \
+ if (!(expr)) { \
+ pr_err("Assertion failed! %s, %s, %s, line %d\n", \
+ #expr, __FILE__, __func__, __LINE__); \
+ } \
+} while (0)
+#endif
+
+#else
+
+#ifndef assert
+#define assert(expr)
+#endif
+
+#endif
+
+#define AE_VERSION_1 ('6' << 16 | '6' << 8 | '0')
+#define AE_VERSION_2 ('1' << 24 | '6' << 16 | '1' << 8 | '0')
+#define AE_NAME_SIZE 16
+
+/* The RX and TX RCB register formats may stop being identical in the future,
+ * but they are the same for now...
+ */
+#define RCB_REG_BASEADDR_L	0x00 /* P660 supports only 32-bit accesses */
+#define RCB_REG_BASEADDR_H 0x04
+#define RCB_REG_BD_NUM 0x08
+#define RCB_REG_BD_LEN 0x0C
+#define RCB_REG_PKTLINE 0x10
+#define RCB_REG_TAIL 0x18
+#define RCB_REG_HEAD 0x1C
+#define RCB_REG_FBDNUM 0x20
+#define RCB_REG_OFFSET 0x24 /* pkt num to be handled */
+#define RCB_REG_PKTNUM_RECORD 0x2C /* total pkt received */
+
+#define HNS_RX_HEAD_SIZE 256
+
+#define HNAE_AE_REGISTER 0x1
+
+#define RCB_RING_NAME_LEN 16
+
+enum hnae_led_state {
+ HNAE_LED_INACTIVE,
+ HNAE_LED_ACTIVE,
+ HNAE_LED_ON,
+ HNAE_LED_OFF
+};
+
+#define HNS_RX_FLAG_VLAN_PRESENT 0x1
+#define HNS_RX_FLAG_L3ID_IPV4 0x0
+#define HNS_RX_FLAG_L3ID_IPV6 0x1
+#define HNS_RX_FLAG_L4ID_UDP 0x0
+#define HNS_RX_FLAG_L4ID_TCP 0x1
+
+#define HNS_TXD_ASID_S 0
+#define HNS_TXD_ASID_M (0xff << HNS_TXD_ASID_S)
+#define HNS_TXD_BUFNUM_S 8
+#define HNS_TXD_BUFNUM_M (0x3 << HNS_TXD_BUFNUM_S)
+#define HNS_TXD_PORTID_S 10
+#define HNS_TXD_PORTID_M (0x7 << HNS_TXD_PORTID_S)
+
+#define HNS_TXD_RA_B 8
+#define HNS_TXD_RI_B 9
+#define HNS_TXD_L4CS_B 10
+#define HNS_TXD_L3CS_B 11
+#define HNS_TXD_FE_B 12
+#define HNS_TXD_VLD_B 13
+#define HNS_TXD_IPOFFSET_S 14
+#define HNS_TXD_IPOFFSET_M (0xff << HNS_TXD_IPOFFSET_S)
+
+#define HNS_RXD_IPOFFSET_S 0
+#define HNS_RXD_IPOFFSET_M (0xff << HNS_RXD_IPOFFSET_S)
+#define HNS_RXD_BUFNUM_S 8
+#define HNS_RXD_BUFNUM_M (0x3 << HNS_RXD_BUFNUM_S)
+#define HNS_RXD_PORTID_S 10
+#define HNS_RXD_PORTID_M (0x7 << HNS_RXD_PORTID_S)
+#define HNS_RXD_DMAC_S 13
+#define HNS_RXD_DMAC_M (0x3 << HNS_RXD_DMAC_S)
+#define HNS_RXD_VLAN_S 15
+#define HNS_RXD_VLAN_M (0x3 << HNS_RXD_VLAN_S)
+#define HNS_RXD_L3ID_S 17
+#define HNS_RXD_L3ID_M (0xf << HNS_RXD_L3ID_S)
+#define HNS_RXD_L4ID_S 21
+#define HNS_RXD_L4ID_M (0xf << HNS_RXD_L4ID_S)
+#define HNS_RXD_FE_B 25
+#define HNS_RXD_FRAG_B 26
+#define HNS_RXD_VLD_B 27
+#define HNS_RXD_L2E_B 28
+#define HNS_RXD_L3E_B 29
+#define HNS_RXD_L4E_B 30
+#define HNS_RXD_DROP_B 31
+
+#define HNS_RXD_VLANID_S 8
+#define HNS_RXD_VLANID_M (0xfff << HNS_RXD_VLANID_S)
+#define HNS_RXD_CFI_B 20
+#define HNS_RXD_PRI_S 21
+#define HNS_RXD_PRI_M (0x7 << HNS_RXD_PRI_S)
+#define HNS_RXD_ASID_S 24
+#define HNS_RXD_ASID_M (0xff << HNS_RXD_ASID_S)
+
+/* hardware spec ring buffer format */
+struct __packed hnae_desc {
+ __le64 addr;
+ union {
+ struct {
+ __le16 asid_bufnum_pid;
+ __le16 send_size;
+ __le32 flag_ipoffset;
+ __le32 reserved_3[4];
+ } tx;
+
+ struct {
+ __le32 ipoff_bnum_pid_flag;
+ __le16 pkt_len;
+ __le16 size;
+ __le32 vlan_pri_asid;
+ __le32 reserved_2[3];
+ } rx;
+ };
+};
+
+struct hnae_desc_cb {
+ dma_addr_t dma; /* dma address of this desc */
+ void *buf; /* cpu addr for a desc */
+
+	/* priv data for the desc, e.g. skb when used with the IP stack */
+ void *priv;
+ u16 page_offset;
+ u16 reuse_flag;
+
+ u16 length; /* length of the buffer */
+
+ /* desc type, used by the ring user to mark the type of the priv data */
+ u16 type;
+};
+
+#define setflags(flags, bits) ((flags) |= (bits))
+#define unsetflags(flags, bits) ((flags) &= ~(bits))
+
+/* hnae_ring->flags fields */
+#define RINGF_DIR 0x1 /* TX or RX ring, set if TX */
+#define is_tx_ring(ring) ((ring)->flags & RINGF_DIR)
+#define is_rx_ring(ring) (!is_tx_ring(ring))
+#define ring_to_dma_dir(ring) (is_tx_ring(ring) ? \
+ DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
+struct ring_stats {
+ u64 io_err_cnt;
+ u64 sw_err_cnt;
+ u64 seg_pkt_cnt;
+ union {
+ struct {
+ u64 tx_pkts;
+ u64 tx_bytes;
+ u64 tx_err_cnt;
+ u64 restart_queue;
+ u64 tx_busy;
+ };
+ struct {
+ u64 rx_pkts;
+ u64 rx_bytes;
+ u64 rx_err_cnt;
+ u64 reuse_pg_cnt;
+ u64 err_pkt_len;
+ u64 non_vld_descs;
+ u64 err_bd_num;
+ u64 l2_err;
+ u64 l3l4_csum_err;
+ };
+ };
+};
+
+struct hnae_queue;
+
+struct hnae_ring {
+ u8 __iomem *io_base; /* base io address for the ring */
+ struct hnae_desc *desc; /* dma map address space */
+ struct hnae_desc_cb *desc_cb;
+ struct hnae_queue *q;
+ int irq;
+ char ring_name[RCB_RING_NAME_LEN];
+
+ /* statistic */
+ struct ring_stats stats;
+
+ dma_addr_t desc_dma_addr;
+ u32 buf_size; /* size for hnae_desc->addr, preset by AE */
+ u16 desc_num; /* total number of desc */
+ u16 max_desc_num_per_pkt;
+ u16 max_raw_data_sz_per_desc;
+ u16 max_pkt_size;
+ int next_to_use; /* idx of next spare desc */
+
+	/* idx of the latest sent desc; the ring is empty when it equals
+ * next_to_use
+ */
+ int next_to_clean;
+
+ int flags; /* ring attribute */
+ int irq_init_flag;
+};
+
+#define ring_ptr_move_fw(ring, p) \
+ ((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
+#define ring_ptr_move_bw(ring, p) \
+ ((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num)
+
+enum hns_desc_type {
+ DESC_TYPE_SKB,
+ DESC_TYPE_PAGE,
+};
+
+#define assert_is_ring_idx(ring, idx) \
+ assert((idx) >= 0 && (idx) < (ring)->desc_num)
+
+/* the distance between [begin, end) in a ring buffer
+ * note: there is an unused slot between the begin and the end
+ */
+static inline int ring_dist(struct hnae_ring *ring, int begin, int end)
+{
+ assert_is_ring_idx(ring, begin);
+ assert_is_ring_idx(ring, end);
+
+ return (end - begin + ring->desc_num) % ring->desc_num;
+}
+
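+/* Number of free slots in the ring. One slot is deliberately kept unused so
+ * that a full ring (next_to_use one step behind next_to_clean) can be told
+ * apart from an empty one (next_to_use == next_to_clean).
+ */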
+static inline int ring_space(struct hnae_ring *ring)
+{
+ return ring->desc_num -
+ ring_dist(ring, ring->next_to_clean, ring->next_to_use) - 1;
+}
+
+static inline int is_ring_empty(struct hnae_ring *ring)
+{
+ assert_is_ring_idx(ring, ring->next_to_use);
+ assert_is_ring_idx(ring, ring->next_to_clean);
+
+ return ring->next_to_use == ring->next_to_clean;
+}
+
+#define hnae_buf_size(_ring) ((_ring)->buf_size)
+#define hnae_page_order(_ring) (get_order(hnae_buf_size(_ring)))
+#define hnae_page_size(_ring) (PAGE_SIZE << hnae_page_order(_ring))
+
+struct hnae_handle;
+
+/* ops to allocate/free and dma map/unmap the buffers behind hnae descs */
+struct hnae_buf_ops {
+ int (*alloc_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
+ void (*free_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
+ int (*map_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
+ void (*unmap_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
+};
+
+struct hnae_queue {
+ void __iomem *io_base;
+ phys_addr_t phy_base;
+	struct hnae_ae_dev *dev;	/* the device that uses this queue */
+ struct hnae_ring rx_ring, tx_ring;
+ struct hnae_handle *handle;
+};
+
+/*hnae loop mode*/
+enum hnae_loop {
+ MAC_INTERNALLOOP_MAC = 0,
+ MAC_INTERNALLOOP_SERDES,
+ MAC_INTERNALLOOP_PHY,
+ MAC_LOOP_NONE,
+};
+
+/*hnae port type*/
+enum hnae_port_type {
+ HNAE_PORT_SERVICE = 0,
+ HNAE_PORT_DEBUG
+};
+
+/* This struct defines the operation on the handle.
+ *
+ * get_handle(): (mandatory)
+ * Get a handle from AE according to its name and options.
+ * the AE driver should manage the space used by handle and its queues while
+ * the HNAE framework will allocate desc and desc_cb for all rings in the
+ * queues.
+ * put_handle():
+ * Release the handle.
+ * start():
+ * Enable the hardware, include all queues
+ * stop():
+ * Disable the hardware
+ * set_opts(): (mandatory)
+ * Set options to the AE
+ * get_opts(): (mandatory)
+ * Get options from the AE
+ * get_status():
+ * Get the carrier state of the back channel of the handle, 1 for ok, 0 for
+ * non-ok
+ * toggle_ring_irq(): (mandatory)
+ * Set the ring irq to be enabled (0) or disabled (1)
+ * toggle_queue_status(): (mandatory)
+ * Set the queue to be enabled (1) or disabled (0); this will not change the
+ * ring irq state
+ * adjust_link()
+ * adjust link status
+ * set_loopback()
+ * set loopback
+ * get_ring_bdnum_limit()
+ * get ring bd number limit
+ * get_pauseparam()
+ * get the tx and rx pause frame enables
+ * set_autoneg()
+ * set pause frame autonegotiation
+ * get_autoneg()
+ * get pause frame autonegotiation
+ * set_pauseparam()
+ * set the tx and rx pause frame enables
+ * get_coalesce_usecs()
+ * get usecs to delay a TX interrupt after a packet is sent
+ * get_rx_max_coalesced_frames()
+ * get the maximum number of packets to be sent before a TX interrupt
+ * set_coalesce_usecs()
+ * set usecs to delay a TX interrupt after a packet is sent
+ * set_coalesce_frames()
+ * set the maximum number of packets to be sent before a TX interrupt
+ * get_ringnum()
+ * get RX/TX ring number
+ * get_max_ringnum()
+ * get RX/TX ring maximum number
+ * get_mac_addr()
+ * get mac address
+ * set_mac_addr()
+ * set mac address
+ * set_mc_addr()
+ * set multicast mode
+ * set_mtu()
+ * set mtu
+ * update_stats()
+ * update the (old) net_device statistics
+ * get_ethtool_stats()
+ * get ethtool network device statistics
+ * get_strings()
+ * get a set of strings that describe the requested objects
+ * get_sset_count()
+ * get number of strings that @get_strings will write
+ * update_led_status()
+ * update the led status
+ * set_led_id()
+ * set led id
+ * get_regs()
+ * get regs dump
+ * get_regs_len()
+ * get the len of the regs dump
+ */
+struct hnae_ae_ops {
+ struct hnae_handle *(*get_handle)(struct hnae_ae_dev *dev,
+ u32 port_id);
+ void (*put_handle)(struct hnae_handle *handle);
+ void (*init_queue)(struct hnae_queue *q);
+ void (*fini_queue)(struct hnae_queue *q);
+ int (*start)(struct hnae_handle *handle);
+ void (*stop)(struct hnae_handle *handle);
+ void (*reset)(struct hnae_handle *handle);
+ int (*set_opts)(struct hnae_handle *handle, int type, void *opts);
+ int (*get_opts)(struct hnae_handle *handle, int type, void **opts);
+ int (*get_status)(struct hnae_handle *handle);
+ int (*get_info)(struct hnae_handle *handle,
+ u8 *auto_neg, u16 *speed, u8 *duplex);
+ void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val);
+ void (*toggle_queue_status)(struct hnae_queue *queue, u32 val);
+ void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex);
+ int (*set_loopback)(struct hnae_handle *handle,
+ enum hnae_loop loop_mode, int en);
+ void (*get_ring_bdnum_limit)(struct hnae_queue *queue,
+ u32 *uplimit);
+ void (*get_pauseparam)(struct hnae_handle *handle,
+ u32 *auto_neg, u32 *rx_en, u32 *tx_en);
+ int (*set_autoneg)(struct hnae_handle *handle, u8 enable);
+ int (*get_autoneg)(struct hnae_handle *handle);
+ int (*set_pauseparam)(struct hnae_handle *handle,
+ u32 auto_neg, u32 rx_en, u32 tx_en);
+ void (*get_coalesce_usecs)(struct hnae_handle *handle,
+ u32 *tx_usecs, u32 *rx_usecs);
+ void (*get_rx_max_coalesced_frames)(struct hnae_handle *handle,
+ u32 *tx_frames, u32 *rx_frames);
+ void (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout);
+ int (*set_coalesce_frames)(struct hnae_handle *handle,
+ u32 coalesce_frames);
+ void (*set_promisc_mode)(struct hnae_handle *handle, u32 en);
+ int (*get_mac_addr)(struct hnae_handle *handle, void **p);
+ int (*set_mac_addr)(struct hnae_handle *handle, void *p);
+ int (*set_mc_addr)(struct hnae_handle *handle, void *addr);
+ int (*set_mtu)(struct hnae_handle *handle, int new_mtu);
+ void (*update_stats)(struct hnae_handle *handle,
+ struct net_device_stats *net_stats);
+ void (*get_stats)(struct hnae_handle *handle, u64 *data);
+ void (*get_strings)(struct hnae_handle *handle,
+ u32 stringset, u8 *data);
+ int (*get_sset_count)(struct hnae_handle *handle, int stringset);
+ void (*update_led_status)(struct hnae_handle *handle);
+ int (*set_led_id)(struct hnae_handle *handle,
+ enum hnae_led_state status);
+ void (*get_regs)(struct hnae_handle *handle, void *data);
+ int (*get_regs_len)(struct hnae_handle *handle);
+};
+
+struct hnae_ae_dev {
+ struct device cls_dev; /* the class dev */
+ struct device *dev; /* the presented dev */
+ struct hnae_ae_ops *ops;
+ struct list_head node;
+	struct module *owner; /* the module that provides this dev */
+ int id;
+ char name[AE_NAME_SIZE];
+ struct list_head handle_list;
+ spinlock_t lock; /* lock to protect the handle_list */
+};
+
+struct hnae_handle {
+	struct device *owner_dev; /* the device that uses this handle */
+	struct hnae_ae_dev *dev;  /* the device that provides this handle */
+ struct device_node *phy_node;
+ phy_interface_t phy_if;
+ u32 if_support;
+ int q_num;
+ int vf_id;
+ u32 eport_id;
+ enum hnae_port_type port_type;
+ struct list_head node; /* list to hnae_ae_dev->handle_list */
+ struct hnae_buf_ops *bops; /* operation for the buffer */
+ struct hnae_queue **qs; /* array base of all queues */
+};
+
+#define ring_to_dev(ring) ((ring)->q->dev->dev)
+
+struct hnae_handle *hnae_get_handle(struct device *owner_dev, const char *ae_id,
+ u32 port_id, struct hnae_buf_ops *bops);
+void hnae_put_handle(struct hnae_handle *handle);
+int hnae_ae_register(struct hnae_ae_dev *dev, struct module *owner);
+void hnae_ae_unregister(struct hnae_ae_dev *dev);
+
+int hnae_register_notifier(struct notifier_block *nb);
+void hnae_unregister_notifier(struct notifier_block *nb);
+int hnae_reinit_handle(struct hnae_handle *handle);
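+
+/* Illustrative lifecycle for a client driver (names are hypothetical, not
+ * part of this patch):
+ *
+ *	struct hnae_handle *h;
+ *
+ *	h = hnae_get_handle(&pdev->dev, ae_name, port_id, NULL);
+ *	if (IS_ERR(h))
+ *		return PTR_ERR(h);
+ *	...
+ *	hnae_put_handle(h);
+ */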
+
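+/* hand newly queued tx buffer descriptors to the hardware by writing how
+ * many were just filled to the tx ring's tail register
+ */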
+#define hnae_queue_xmit(q, buf_num) writel_relaxed(buf_num, \
+ (q)->tx_ring.io_base + RCB_REG_TAIL)
+
+#ifndef assert
+#define assert(cond)
+#endif
+
+static inline int hnae_reserve_buffer_map(struct hnae_ring *ring,
+ struct hnae_desc_cb *cb)
+{
+ struct hnae_buf_ops *bops = ring->q->handle->bops;
+ int ret;
+
+ ret = bops->alloc_buffer(ring, cb);
+ if (ret)
+ goto out;
+
+ ret = bops->map_buffer(ring, cb);
+ if (ret)
+ goto out_with_buf;
+
+ return 0;
+
+out_with_buf:
+ bops->free_buffer(ring, cb);
+out:
+ return ret;
+}
+
+static inline int hnae_alloc_buffer_attach(struct hnae_ring *ring, int i)
+{
+ int ret = hnae_reserve_buffer_map(ring, &ring->desc_cb[i]);
+
+ if (ret)
+ return ret;
+
+ ring->desc[i].addr = (__le64)ring->desc_cb[i].dma;
+
+ return 0;
+}
+
+static inline void hnae_buffer_detach(struct hnae_ring *ring, int i)
+{
+ ring->q->handle->bops->unmap_buffer(ring, &ring->desc_cb[i]);
+ ring->desc[i].addr = 0;
+}
+
+static inline void hnae_free_buffer_detach(struct hnae_ring *ring, int i)
+{
+ struct hnae_buf_ops *bops = ring->q->handle->bops;
+ struct hnae_desc_cb *cb = &ring->desc_cb[i];
+
+ if (!ring->desc_cb[i].dma)
+ return;
+
+ hnae_buffer_detach(ring, i);
+ bops->free_buffer(ring, cb);
+}
+
+/* detach an in-use buffer and replace it with a reserved one */
+static inline void hnae_replace_buffer(struct hnae_ring *ring, int i,
+ struct hnae_desc_cb *res_cb)
+{
+ struct hnae_buf_ops *bops = ring->q->handle->bops;
+ struct hnae_desc_cb tmp_cb = ring->desc_cb[i];
+
+ bops->unmap_buffer(ring, &ring->desc_cb[i]);
+ ring->desc_cb[i] = *res_cb;
+ *res_cb = tmp_cb;
+ ring->desc[i].addr = (__le64)ring->desc_cb[i].dma;
+ ring->desc[i].rx.ipoff_bnum_pid_flag = 0;
+}
+
+static inline void hnae_reuse_buffer(struct hnae_ring *ring, int i)
+{
+ ring->desc_cb[i].reuse_flag = 0;
+ ring->desc[i].addr = (__le64)(ring->desc_cb[i].dma
+ + ring->desc_cb[i].page_offset);
+ ring->desc[i].rx.ipoff_bnum_pid_flag = 0;
+}
+
+#define hnae_set_field(origin, mask, shift, val) \
+ do { \
+ (origin) &= (~(mask)); \
+ (origin) |= ((val) << (shift)) & (mask); \
+ } while (0)
+
+#define hnae_set_bit(origin, shift, val) \
+ hnae_set_field((origin), (0x1 << (shift)), (shift), (val))
+
+#define hnae_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
+
+#define hnae_get_bit(origin, shift) \
+ hnae_get_field((origin), (0x1 << (shift)), (shift))
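+
+/* Illustrative use of the field helpers (not part of the API): pulling the
+ * buffer number out of an rx descriptor's flag word would look like
+ *
+ *	u32 flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
+ *	int bnum = hnae_get_field(flag, HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
+ */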
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
new file mode 100644
index 000000000000..1a16c0307b47
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -0,0 +1,783 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+
+#include "hnae.h"
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_ppe.h"
+#include "hns_dsaf_rcb.h"
+
+#define AE_NAME_PORT_ID_IDX 6
+#define ETH_STATIC_REG 1
+#define ETH_DUMP_REG 5
+#define ETH_GSTRING_LEN 32
+
+static struct hns_mac_cb *hns_get_mac_cb(struct hnae_handle *handle)
+{
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+
+ return vf_cb->mac_cb;
+}
+
+/**
+ * hns_ae_map_eport_to_dport - translate enet port id to dsaf port id
+ * @port_id: enet port id; debug ports are 0-1, service ports are 2-7
+ *           (only 2 in dsaf mode)
+ * return: dsaf port id; service ports are 0-5, debug ports are 6-7
+ **/
+static int hns_ae_map_eport_to_dport(u32 port_id)
+{
+ int port_index;
+
+ if (port_id < DSAF_DEBUG_NW_NUM)
+ port_index = port_id + DSAF_SERVICE_PORT_NUM_PER_DSAF;
+ else
+ port_index = port_id - DSAF_DEBUG_NW_NUM;
+
+ return port_index;
+}
+
+static struct dsaf_device *hns_ae_get_dsaf_dev(struct hnae_ae_dev *dev)
+{
+ return container_of(dev, struct dsaf_device, ae_dev);
+}
+
+static struct hns_ppe_cb *hns_get_ppe_cb(struct hnae_handle *handle)
+{
+ int ppe_index;
+ int ppe_common_index;
+ struct ppe_common_cb *ppe_comm;
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+
+ if (vf_cb->port_index < DSAF_SERVICE_PORT_NUM_PER_DSAF) {
+ ppe_index = vf_cb->port_index;
+ ppe_common_index = 0;
+ } else {
+ ppe_index = 0;
+ ppe_common_index =
+ vf_cb->port_index - DSAF_SERVICE_PORT_NUM_PER_DSAF + 1;
+ }
+ ppe_comm = vf_cb->dsaf_dev->ppe_common[ppe_common_index];
+ return &ppe_comm->ppe_cb[ppe_index];
+}
+
+static int hns_ae_get_q_num_per_vf(
+ struct dsaf_device *dsaf_dev, int port)
+{
+ int common_idx = hns_dsaf_get_comm_idx_by_port(port);
+
+ return dsaf_dev->rcb_common[common_idx]->max_q_per_vf;
+}
+
+static int hns_ae_get_vf_num_per_port(
+ struct dsaf_device *dsaf_dev, int port)
+{
+ int common_idx = hns_dsaf_get_comm_idx_by_port(port);
+
+ return dsaf_dev->rcb_common[common_idx]->max_vfn;
+}
+
+static struct ring_pair_cb *hns_ae_get_base_ring_pair(
+ struct dsaf_device *dsaf_dev, int port)
+{
+ int common_idx = hns_dsaf_get_comm_idx_by_port(port);
+ struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[common_idx];
+ int q_num = rcb_comm->max_q_per_vf;
+ int vf_num = rcb_comm->max_vfn;
+
+ if (common_idx == HNS_DSAF_COMM_SERVICE_NW_IDX)
+ return &rcb_comm->ring_pair_cb[port * q_num * vf_num];
+ else
+ return &rcb_comm->ring_pair_cb[0];
+}
+
+static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q)
+{
+ return container_of(q, struct ring_pair_cb, q);
+}
+
+struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
+ u32 port_id)
+{
+ int port_idx;
+ int vfnum_per_port;
+ int qnum_per_vf;
+ int i;
+ struct dsaf_device *dsaf_dev;
+ struct hnae_handle *ae_handle;
+ struct ring_pair_cb *ring_pair_cb;
+ struct hnae_vf_cb *vf_cb;
+
+ dsaf_dev = hns_ae_get_dsaf_dev(dev);
+ port_idx = hns_ae_map_eport_to_dport(port_id);
+
+ ring_pair_cb = hns_ae_get_base_ring_pair(dsaf_dev, port_idx);
+ vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_idx);
+ qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_idx);
+
+ vf_cb = kzalloc(sizeof(*vf_cb) +
+ qnum_per_vf * sizeof(struct hnae_queue *), GFP_KERNEL);
+ if (unlikely(!vf_cb)) {
+		dev_err(dsaf_dev->dev, "failed to malloc vf_cb!\n");
+ ae_handle = ERR_PTR(-ENOMEM);
+ goto handle_err;
+ }
+ ae_handle = &vf_cb->ae_handle;
+ /* ae_handle Init */
+ ae_handle->owner_dev = dsaf_dev->dev;
+ ae_handle->dev = dev;
+ ae_handle->q_num = qnum_per_vf;
+
+ /* find ring pair, and set vf id*/
+ for (ae_handle->vf_id = 0;
+ ae_handle->vf_id < vfnum_per_port; ae_handle->vf_id++) {
+ if (!ring_pair_cb->used_by_vf)
+ break;
+ ring_pair_cb += qnum_per_vf;
+ }
+ if (ae_handle->vf_id >= vfnum_per_port) {
+		dev_err(dsaf_dev->dev, "no free vf queues found!\n");
+ ae_handle = ERR_PTR(-EINVAL);
+ goto vf_id_err;
+ }
+
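+	/* the queue pointer array was allocated together with vf_cb above and
+	 * is assumed to start right after the handle's last member, qs
+	 */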
+ ae_handle->qs = (struct hnae_queue **)(&ae_handle->qs + 1);
+ for (i = 0; i < qnum_per_vf; i++) {
+ ae_handle->qs[i] = &ring_pair_cb->q;
+ ae_handle->qs[i]->rx_ring.q = ae_handle->qs[i];
+ ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i];
+
+ ring_pair_cb->used_by_vf = 1;
+ if (port_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF)
+ ring_pair_cb->port_id_in_dsa = port_idx;
+ else
+ ring_pair_cb->port_id_in_dsa = 0;
+
+ ring_pair_cb++;
+ }
+
+ vf_cb->dsaf_dev = dsaf_dev;
+ vf_cb->port_index = port_idx;
+ vf_cb->mac_cb = &dsaf_dev->mac_cb[port_idx];
+
+ ae_handle->phy_if = vf_cb->mac_cb->phy_if;
+ ae_handle->phy_node = vf_cb->mac_cb->phy_node;
+ ae_handle->if_support = vf_cb->mac_cb->if_support;
+ ae_handle->port_type = vf_cb->mac_cb->mac_type;
+
+ return ae_handle;
+vf_id_err:
+ kfree(vf_cb);
+handle_err:
+ return ae_handle;
+}
+
+static void hns_ae_put_handle(struct hnae_handle *handle)
+{
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+ int i;
+
+ vf_cb->mac_cb = NULL;
+
+ kfree(vf_cb);
+
+ for (i = 0; i < handle->q_num; i++)
+ hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
+}
+
+static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val)
+{
+ int q_num = handle->q_num;
+ int i;
+
+ for (i = 0; i < q_num; i++)
+ hns_rcb_ring_enable_hw(handle->qs[i], val);
+}
+
+static void hns_ae_init_queue(struct hnae_queue *q)
+{
+ struct ring_pair_cb *ring =
+ container_of(q, struct ring_pair_cb, q);
+
+ hns_rcb_init_hw(ring);
+}
+
+static void hns_ae_fini_queue(struct hnae_queue *q)
+{
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(q->handle);
+
+ if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE)
+ hns_rcb_reset_ring_hw(q);
+}
+
+static int hns_ae_set_mac_address(struct hnae_handle *handle, void *p)
+{
+ int ret;
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ if (!p || !is_valid_ether_addr((const u8 *)p)) {
+		dev_err(handle->owner_dev, "not a valid ether addr!\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ ret = hns_mac_change_vf_addr(mac_cb, handle->vf_id, p);
+ if (ret != 0) {
+ dev_err(handle->owner_dev,
+ "set_mac_address fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hns_ae_set_multicast_one(struct hnae_handle *handle, void *addr)
+{
+ int ret;
+ char *mac_addr = (char *)addr;
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ assert(mac_cb);
+
+ if (mac_cb->mac_type != HNAE_PORT_SERVICE)
+ return 0;
+
+ ret = hns_mac_set_multi(mac_cb, mac_cb->mac_id, mac_addr, ENABLE);
+ if (ret) {
+ dev_err(handle->owner_dev,
+ "mac add mul_mac:%pM port%d fail, ret = %#x!\n",
+ mac_addr, mac_cb->mac_id, ret);
+ return ret;
+ }
+
+ ret = hns_mac_set_multi(mac_cb, DSAF_BASE_INNER_PORT_NUM,
+ mac_addr, ENABLE);
+ if (ret)
+ dev_err(handle->owner_dev,
+ "mac add mul_mac:%pM port%d fail, ret = %#x!\n",
+ mac_addr, DSAF_BASE_INNER_PORT_NUM, ret);
+
+ return ret;
+}
+
+static int hns_ae_set_mtu(struct hnae_handle *handle, int new_mtu)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ return hns_mac_set_mtu(mac_cb, new_mtu);
+}
+
+static int hns_ae_start(struct hnae_handle *handle)
+{
+ int ret;
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ ret = hns_mac_vm_config_bc_en(mac_cb, 0, ENABLE);
+ if (ret)
+ return ret;
+
+ hns_ae_ring_enable_all(handle, 1);
+ msleep(100);
+
+ hns_mac_start(mac_cb);
+
+ return 0;
+}
+
+void hns_ae_stop(struct hnae_handle *handle)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+	/* only the tx fbds need to be cleaned; the rx fbds do not */
+ hns_rcb_wait_fbd_clean(handle->qs, handle->q_num, RCB_INT_FLAG_TX);
+
+ msleep(20);
+
+ hns_mac_stop(mac_cb);
+
+ usleep_range(10000, 20000);
+
+ hns_ae_ring_enable_all(handle, 0);
+
+ (void)hns_mac_vm_config_bc_en(mac_cb, 0, DISABLE);
+}
+
+static void hns_ae_reset(struct hnae_handle *handle)
+{
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+
+ if (vf_cb->mac_cb->mac_type == HNAE_PORT_DEBUG) {
+ u8 ppe_common_index =
+ vf_cb->port_index - DSAF_SERVICE_PORT_NUM_PER_DSAF + 1;
+
+ hns_mac_reset(vf_cb->mac_cb);
+ hns_ppe_reset_common(vf_cb->dsaf_dev, ppe_common_index);
+ }
+}
+
+void hns_ae_toggle_ring_irq(struct hnae_ring *ring, u32 mask)
+{
+ u32 flag;
+
+ if (is_tx_ring(ring))
+ flag = RCB_INT_FLAG_TX;
+ else
+ flag = RCB_INT_FLAG_RX;
+
+ hns_rcb_int_clr_hw(ring->q, flag);
+ hns_rcb_int_ctrl_hw(ring->q, flag, mask);
+}
+
+static void hns_ae_toggle_queue_status(struct hnae_queue *queue, u32 val)
+{
+ hns_rcb_start(queue, val);
+}
+
+static int hns_ae_get_link_status(struct hnae_handle *handle)
+{
+ u32 link_status;
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ hns_mac_get_link_status(mac_cb, &link_status);
+
+ return !!link_status;
+}
+
+static int hns_ae_get_mac_info(struct hnae_handle *handle,
+ u8 *auto_neg, u16 *speed, u8 *duplex)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex);
+}
+
+static void hns_ae_adjust_link(struct hnae_handle *handle, int speed,
+ int duplex)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ hns_mac_adjust_link(mac_cb, speed, duplex);
+}
+
+static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue,
+ u32 *uplimit)
+{
+ *uplimit = HNS_RCB_RING_MAX_PENDING_BD;
+}
+
+static void hns_ae_get_pauseparam(struct hnae_handle *handle,
+ u32 *auto_neg, u32 *rx_en, u32 *tx_en)
+{
+ assert(handle);
+
+ hns_mac_get_autoneg(hns_get_mac_cb(handle), auto_neg);
+
+ hns_mac_get_pauseparam(hns_get_mac_cb(handle), rx_en, tx_en);
+}
+
+static int hns_ae_set_autoneg(struct hnae_handle *handle, u8 enable)
+{
+ assert(handle);
+
+ return hns_mac_set_autoneg(hns_get_mac_cb(handle), enable);
+}
+
+static void hns_ae_set_promisc_mode(struct hnae_handle *handle, u32 en)
+{
+ hns_dsaf_set_promisc_mode(hns_ae_get_dsaf_dev(handle->dev), en);
+}
+
+static int hns_ae_get_autoneg(struct hnae_handle *handle)
+{
+ u32 auto_neg;
+
+ assert(handle);
+
+ hns_mac_get_autoneg(hns_get_mac_cb(handle), &auto_neg);
+
+ return auto_neg;
+}
+
+static int hns_ae_set_pauseparam(struct hnae_handle *handle,
+ u32 autoneg, u32 rx_en, u32 tx_en)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+ int ret;
+
+ ret = hns_mac_set_autoneg(mac_cb, autoneg);
+ if (ret)
+ return ret;
+
+ return hns_mac_set_pauseparam(mac_cb, rx_en, tx_en);
+}
+
+static void hns_ae_get_coalesce_usecs(struct hnae_handle *handle,
+ u32 *tx_usecs, u32 *rx_usecs)
+{
+ int port;
+
+ port = hns_ae_map_eport_to_dport(handle->eport_id);
+
+ *tx_usecs = hns_rcb_get_coalesce_usecs(
+ hns_ae_get_dsaf_dev(handle->dev),
+ hns_dsaf_get_comm_idx_by_port(port));
+ *rx_usecs = hns_rcb_get_coalesce_usecs(
+ hns_ae_get_dsaf_dev(handle->dev),
+ hns_dsaf_get_comm_idx_by_port(port));
+}
+
+static void hns_ae_get_rx_max_coalesced_frames(struct hnae_handle *handle,
+ u32 *tx_frames, u32 *rx_frames)
+{
+ int port;
+
+ assert(handle);
+
+ port = hns_ae_map_eport_to_dport(handle->eport_id);
+
+ *tx_frames = hns_rcb_get_coalesced_frames(
+ hns_ae_get_dsaf_dev(handle->dev), port);
+ *rx_frames = hns_rcb_get_coalesced_frames(
+ hns_ae_get_dsaf_dev(handle->dev), port);
+}
+
+static void hns_ae_set_coalesce_usecs(struct hnae_handle *handle,
+ u32 timeout)
+{
+ int port;
+
+ assert(handle);
+
+ port = hns_ae_map_eport_to_dport(handle->eport_id);
+
+ hns_rcb_set_coalesce_usecs(hns_ae_get_dsaf_dev(handle->dev),
+ port, timeout);
+}
+
+static int hns_ae_set_coalesce_frames(struct hnae_handle *handle,
+ u32 coalesce_frames)
+{
+ int port;
+ int ret;
+
+ assert(handle);
+
+ port = hns_ae_map_eport_to_dport(handle->eport_id);
+
+ ret = hns_rcb_set_coalesced_frames(hns_ae_get_dsaf_dev(handle->dev),
+ port, coalesce_frames);
+ return ret;
+}
+
+void hns_ae_update_stats(struct hnae_handle *handle,
+ struct net_device_stats *net_stats)
+{
+ int port;
+ int idx;
+ struct dsaf_device *dsaf_dev;
+ struct hns_mac_cb *mac_cb;
+ struct hns_ppe_cb *ppe_cb;
+ struct hnae_queue *queue;
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+ u64 tx_bytes = 0, rx_bytes = 0, tx_packets = 0, rx_packets = 0;
+ u64 rx_errors = 0, tx_errors = 0, tx_dropped = 0;
+ u64 rx_missed_errors = 0;
+
+ dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
+ if (!dsaf_dev)
+ return;
+ port = vf_cb->port_index;
+ ppe_cb = hns_get_ppe_cb(handle);
+ mac_cb = hns_get_mac_cb(handle);
+
+ for (idx = 0; idx < handle->q_num; idx++) {
+ queue = handle->qs[idx];
+ hns_rcb_update_stats(queue);
+
+ tx_bytes += queue->tx_ring.stats.tx_bytes;
+ tx_packets += queue->tx_ring.stats.tx_pkts;
+ rx_bytes += queue->rx_ring.stats.rx_bytes;
+ rx_packets += queue->rx_ring.stats.rx_pkts;
+
+ rx_errors += queue->rx_ring.stats.err_pkt_len
+ + queue->rx_ring.stats.l2_err
+ + queue->rx_ring.stats.l3l4_csum_err;
+ }
+
+ hns_ppe_update_stats(ppe_cb);
+ rx_missed_errors = ppe_cb->hw_stats.rx_drop_no_buf;
+ tx_errors += ppe_cb->hw_stats.tx_err_checksum
+ + ppe_cb->hw_stats.tx_err_fifo_empty;
+
+ if (mac_cb->mac_type == HNAE_PORT_SERVICE) {
+ hns_dsaf_update_stats(dsaf_dev, port);
+ /* for port upline direction, i.e., rx. */
+ rx_missed_errors += dsaf_dev->hw_stats[port].bp_drop;
+ rx_missed_errors += dsaf_dev->hw_stats[port].pad_drop;
+ rx_missed_errors += dsaf_dev->hw_stats[port].crc_false;
+
+ /* for port downline direction, i.e., tx. */
+ port = port + DSAF_PPE_INODE_BASE;
+ hns_dsaf_update_stats(dsaf_dev, port);
+ tx_dropped += dsaf_dev->hw_stats[port].bp_drop;
+ tx_dropped += dsaf_dev->hw_stats[port].pad_drop;
+ tx_dropped += dsaf_dev->hw_stats[port].crc_false;
+ tx_dropped += dsaf_dev->hw_stats[port].rslt_drop;
+ tx_dropped += dsaf_dev->hw_stats[port].vlan_drop;
+ tx_dropped += dsaf_dev->hw_stats[port].stp_drop;
+ }
+
+ hns_mac_update_stats(mac_cb);
+ rx_errors += mac_cb->hw_stats.rx_fifo_overrun_err;
+
+ tx_errors += mac_cb->hw_stats.tx_bad_pkts
+ + mac_cb->hw_stats.tx_fragment_err
+ + mac_cb->hw_stats.tx_jabber_err
+ + mac_cb->hw_stats.tx_underrun_err
+ + mac_cb->hw_stats.tx_crc_err;
+
+ net_stats->tx_bytes = tx_bytes;
+ net_stats->tx_packets = tx_packets;
+ net_stats->rx_bytes = rx_bytes;
+ net_stats->rx_dropped = 0;
+ net_stats->rx_packets = rx_packets;
+ net_stats->rx_errors = rx_errors;
+ net_stats->tx_errors = tx_errors;
+ net_stats->tx_dropped = tx_dropped;
+ net_stats->rx_missed_errors = rx_missed_errors;
+ net_stats->rx_crc_errors = mac_cb->hw_stats.rx_fcs_err;
+ net_stats->rx_frame_errors = mac_cb->hw_stats.rx_align_err;
+ net_stats->rx_fifo_errors = mac_cb->hw_stats.rx_fifo_overrun_err;
+ net_stats->rx_length_errors = mac_cb->hw_stats.rx_len_err;
+ net_stats->multicast = mac_cb->hw_stats.rx_mc_pkts;
+}
+
+void hns_ae_get_stats(struct hnae_handle *handle, u64 *data)
+{
+ int idx;
+ struct hns_mac_cb *mac_cb;
+ struct hns_ppe_cb *ppe_cb;
+ u64 *p = data;
+ struct hnae_vf_cb *vf_cb;
+
+ if (!handle || !data) {
+ pr_err("hns_ae_get_stats NULL handle or data pointer!\n");
+ return;
+ }
+
+ vf_cb = hns_ae_get_vf_cb(handle);
+ mac_cb = hns_get_mac_cb(handle);
+ ppe_cb = hns_get_ppe_cb(handle);
+
+ for (idx = 0; idx < handle->q_num; idx++) {
+ hns_rcb_get_stats(handle->qs[idx], p);
+ p += hns_rcb_get_ring_sset_count((int)ETH_SS_STATS);
+ }
+
+ hns_ppe_get_stats(ppe_cb, p);
+ p += hns_ppe_get_sset_count((int)ETH_SS_STATS);
+
+ hns_mac_get_stats(mac_cb, p);
+ p += hns_mac_get_sset_count(mac_cb, (int)ETH_SS_STATS);
+
+ if (mac_cb->mac_type == HNAE_PORT_SERVICE)
+ hns_dsaf_get_stats(vf_cb->dsaf_dev, p, vf_cb->port_index);
+}
+
+void hns_ae_get_strings(struct hnae_handle *handle,
+ u32 stringset, u8 *data)
+{
+ int port;
+ int idx;
+ struct hns_mac_cb *mac_cb;
+ struct hns_ppe_cb *ppe_cb;
+ u8 *p = data;
+ struct hnae_vf_cb *vf_cb;
+
+ assert(handle);
+
+ vf_cb = hns_ae_get_vf_cb(handle);
+ port = vf_cb->port_index;
+ mac_cb = hns_get_mac_cb(handle);
+ ppe_cb = hns_get_ppe_cb(handle);
+
+ for (idx = 0; idx < handle->q_num; idx++) {
+ hns_rcb_get_strings(stringset, p, idx);
+ p += ETH_GSTRING_LEN * hns_rcb_get_ring_sset_count(stringset);
+ }
+
+ hns_ppe_get_strings(ppe_cb, stringset, p);
+ p += ETH_GSTRING_LEN * hns_ppe_get_sset_count(stringset);
+
+ hns_mac_get_strings(mac_cb, stringset, p);
+ p += ETH_GSTRING_LEN * hns_mac_get_sset_count(mac_cb, stringset);
+
+ if (mac_cb->mac_type == HNAE_PORT_SERVICE)
+ hns_dsaf_get_strings(stringset, p, port);
+}
+
+int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset)
+{
+ u32 sset_count = 0;
+ struct hns_mac_cb *mac_cb;
+
+ assert(handle);
+
+ mac_cb = hns_get_mac_cb(handle);
+
+ sset_count += hns_rcb_get_ring_sset_count(stringset) * handle->q_num;
+ sset_count += hns_ppe_get_sset_count(stringset);
+ sset_count += hns_mac_get_sset_count(mac_cb, stringset);
+
+ if (mac_cb->mac_type == HNAE_PORT_SERVICE)
+ sset_count += hns_dsaf_get_sset_count(stringset);
+
+ return sset_count;
+}
+
+static int hns_ae_config_loopback(struct hnae_handle *handle,
+ enum hnae_loop loop, int en)
+{
+ int ret;
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+
+ switch (loop) {
+ case MAC_INTERNALLOOP_SERDES:
+ ret = hns_mac_config_sds_loopback(vf_cb->mac_cb, en);
+ break;
+ case MAC_INTERNALLOOP_MAC:
+ ret = hns_mac_config_mac_loopback(vf_cb->mac_cb, loop, en);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+void hns_ae_update_led_status(struct hnae_handle *handle)
+{
+ struct hns_mac_cb *mac_cb;
+
+ assert(handle);
+ mac_cb = hns_get_mac_cb(handle);
+ if (!mac_cb->cpld_vaddr)
+ return;
+ hns_set_led_opt(mac_cb);
+}
+
+int hns_ae_cpld_set_led_id(struct hnae_handle *handle,
+ enum hnae_led_state status)
+{
+ struct hns_mac_cb *mac_cb;
+
+ assert(handle);
+
+ mac_cb = hns_get_mac_cb(handle);
+
+ return hns_cpld_led_set_id(mac_cb, status);
+}
+
+void hns_ae_get_regs(struct hnae_handle *handle, void *data)
+{
+ u32 *p = data;
+ u32 rcb_com_idx;
+ int i;
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+ struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);
+
+ hns_ppe_get_regs(ppe_cb, p);
+ p += hns_ppe_get_regs_count();
+
+ rcb_com_idx = hns_dsaf_get_comm_idx_by_port(vf_cb->port_index);
+ hns_rcb_get_common_regs(vf_cb->dsaf_dev->rcb_common[rcb_com_idx], p);
+ p += hns_rcb_get_common_regs_count();
+
+ for (i = 0; i < handle->q_num; i++) {
+ hns_rcb_get_ring_regs(handle->qs[i], p);
+ p += hns_rcb_get_ring_regs_count();
+ }
+
+ hns_mac_get_regs(vf_cb->mac_cb, p);
+ p += hns_mac_get_regs_count(vf_cb->mac_cb);
+
+ if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE)
+ hns_dsaf_get_regs(vf_cb->dsaf_dev, vf_cb->port_index, p);
+}
+
+int hns_ae_get_regs_len(struct hnae_handle *handle)
+{
+ u32 total_num;
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+
+ total_num = hns_ppe_get_regs_count();
+ total_num += hns_rcb_get_common_regs_count();
+ total_num += hns_rcb_get_ring_regs_count() * handle->q_num;
+ total_num += hns_mac_get_regs_count(vf_cb->mac_cb);
+
+ if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE)
+ total_num += hns_dsaf_get_regs_count();
+
+ return total_num;
+}
+
+static struct hnae_ae_ops hns_dsaf_ops = {
+ .get_handle = hns_ae_get_handle,
+ .put_handle = hns_ae_put_handle,
+ .init_queue = hns_ae_init_queue,
+ .fini_queue = hns_ae_fini_queue,
+ .start = hns_ae_start,
+ .stop = hns_ae_stop,
+ .reset = hns_ae_reset,
+ .toggle_ring_irq = hns_ae_toggle_ring_irq,
+ .toggle_queue_status = hns_ae_toggle_queue_status,
+ .get_status = hns_ae_get_link_status,
+ .get_info = hns_ae_get_mac_info,
+ .adjust_link = hns_ae_adjust_link,
+ .set_loopback = hns_ae_config_loopback,
+ .get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit,
+ .get_pauseparam = hns_ae_get_pauseparam,
+ .set_autoneg = hns_ae_set_autoneg,
+ .get_autoneg = hns_ae_get_autoneg,
+ .set_pauseparam = hns_ae_set_pauseparam,
+ .get_coalesce_usecs = hns_ae_get_coalesce_usecs,
+ .get_rx_max_coalesced_frames = hns_ae_get_rx_max_coalesced_frames,
+ .set_coalesce_usecs = hns_ae_set_coalesce_usecs,
+ .set_coalesce_frames = hns_ae_set_coalesce_frames,
+ .set_promisc_mode = hns_ae_set_promisc_mode,
+ .set_mac_addr = hns_ae_set_mac_address,
+ .set_mc_addr = hns_ae_set_multicast_one,
+ .set_mtu = hns_ae_set_mtu,
+ .update_stats = hns_ae_update_stats,
+ .get_stats = hns_ae_get_stats,
+ .get_strings = hns_ae_get_strings,
+ .get_sset_count = hns_ae_get_sset_count,
+ .update_led_status = hns_ae_update_led_status,
+ .set_led_id = hns_ae_cpld_set_led_id,
+ .get_regs = hns_ae_get_regs,
+ .get_regs_len = hns_ae_get_regs_len
+};
+
+int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev)
+{
+ struct hnae_ae_dev *ae_dev = &dsaf_dev->ae_dev;
+
+ ae_dev->ops = &hns_dsaf_ops;
+ ae_dev->dev = dsaf_dev->dev;
+
+ return hnae_ae_register(ae_dev, THIS_MODULE);
+}
+
+void hns_dsaf_ae_uninit(struct dsaf_device *dsaf_dev)
+{
+ hnae_ae_unregister(&dsaf_dev->ae_dev);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
new file mode 100644
index 000000000000..b8517b00e706
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -0,0 +1,704 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/of_mdio.h>
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_gmac.h"
+
+static const struct mac_stats_string g_gmac_stats_string[] = {
+ {"gmac_rx_octets_total_ok", MAC_STATS_FIELD_OFF(rx_good_bytes)},
+ {"gmac_rx_octets_bad", MAC_STATS_FIELD_OFF(rx_bad_bytes)},
+ {"gmac_rx_uc_pkts", MAC_STATS_FIELD_OFF(rx_uc_pkts)},
+	{"gmac_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)},
+ {"gmac_rx_bc_pkts", MAC_STATS_FIELD_OFF(rx_bc_pkts)},
+ {"gmac_rx_pkts_64octets", MAC_STATS_FIELD_OFF(rx_64bytes)},
+ {"gmac_rx_pkts_65to127", MAC_STATS_FIELD_OFF(rx_65to127)},
+ {"gmac_rx_pkts_128to255", MAC_STATS_FIELD_OFF(rx_128to255)},
+ {"gmac_rx_pkts_256to511", MAC_STATS_FIELD_OFF(rx_256to511)},
+ {"gmac_rx_pkts_512to1023", MAC_STATS_FIELD_OFF(rx_512to1023)},
+ {"gmac_rx_pkts_1024to1518", MAC_STATS_FIELD_OFF(rx_1024to1518)},
+ {"gmac_rx_pkts_1519tomax", MAC_STATS_FIELD_OFF(rx_1519tomax)},
+ {"gmac_rx_fcs_errors", MAC_STATS_FIELD_OFF(rx_fcs_err)},
+ {"gmac_rx_tagged", MAC_STATS_FIELD_OFF(rx_vlan_pkts)},
+ {"gmac_rx_data_err", MAC_STATS_FIELD_OFF(rx_data_err)},
+ {"gmac_rx_align_errors", MAC_STATS_FIELD_OFF(rx_align_err)},
+ {"gmac_rx_long_errors", MAC_STATS_FIELD_OFF(rx_oversize)},
+ {"gmac_rx_jabber_errors", MAC_STATS_FIELD_OFF(rx_jabber_err)},
+ {"gmac_rx_pause_maccontrol", MAC_STATS_FIELD_OFF(rx_pfc_tc0)},
+ {"gmac_rx_unknown_maccontrol", MAC_STATS_FIELD_OFF(rx_unknown_ctrl)},
+ {"gmac_rx_very_long_err", MAC_STATS_FIELD_OFF(rx_long_err)},
+ {"gmac_rx_runt_err", MAC_STATS_FIELD_OFF(rx_minto64)},
+ {"gmac_rx_short_err", MAC_STATS_FIELD_OFF(rx_under_min)},
+ {"gmac_rx_filt_pkt", MAC_STATS_FIELD_OFF(rx_filter_bytes)},
+ {"gmac_rx_octets_total_filt", MAC_STATS_FIELD_OFF(rx_filter_pkts)},
+ {"gmac_rx_overrun_cnt", MAC_STATS_FIELD_OFF(rx_fifo_overrun_err)},
+ {"gmac_rx_length_err", MAC_STATS_FIELD_OFF(rx_len_err)},
+ {"gmac_rx_fail_comma", MAC_STATS_FIELD_OFF(rx_comma_err)},
+
+ {"gmac_tx_octets_ok", MAC_STATS_FIELD_OFF(tx_good_bytes)},
+ {"gmac_tx_octets_bad", MAC_STATS_FIELD_OFF(tx_bad_bytes)},
+ {"gmac_tx_uc_pkts", MAC_STATS_FIELD_OFF(tx_uc_pkts)},
+ {"gmac_tx_mc_pkts", MAC_STATS_FIELD_OFF(tx_mc_pkts)},
+ {"gmac_tx_bc_pkts", MAC_STATS_FIELD_OFF(tx_bc_pkts)},
+ {"gmac_tx_pkts_64octets", MAC_STATS_FIELD_OFF(tx_64bytes)},
+ {"gmac_tx_pkts_65to127", MAC_STATS_FIELD_OFF(tx_65to127)},
+ {"gmac_tx_pkts_128to255", MAC_STATS_FIELD_OFF(tx_128to255)},
+ {"gmac_tx_pkts_256to511", MAC_STATS_FIELD_OFF(tx_256to511)},
+ {"gmac_tx_pkts_512to1023", MAC_STATS_FIELD_OFF(tx_512to1023)},
+ {"gmac_tx_pkts_1024to1518", MAC_STATS_FIELD_OFF(tx_1024to1518)},
+ {"gmac_tx_pkts_1519tomax", MAC_STATS_FIELD_OFF(tx_1519tomax)},
+ {"gmac_tx_excessive_length_drop", MAC_STATS_FIELD_OFF(tx_jabber_err)},
+ {"gmac_tx_underrun", MAC_STATS_FIELD_OFF(tx_underrun_err)},
+ {"gmac_tx_tagged", MAC_STATS_FIELD_OFF(tx_vlan)},
+ {"gmac_tx_crc_error", MAC_STATS_FIELD_OFF(tx_crc_err)},
+ {"gmac_tx_pause_frames", MAC_STATS_FIELD_OFF(tx_pfc_tc0)}
+};
+
+static void hns_gmac_enable(void *mac_drv, enum mac_commom_mode mode)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+	/* enable GE RX/TX */
+ if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+ dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 1);
+
+ if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+ dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 1);
+}
+
+static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+	/* disable GE RX/TX */
+ if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+ dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 0);
+
+ if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+ dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 0);
+}
+
+/**
+ * hns_gmac_get_en - get port enable
+ * @mac_drv: mac device
+ * @rx: rx enable
+ * @tx: tx enable
+ */
+static void hns_gmac_get_en(void *mac_drv, u32 *rx, u32 *tx)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ u32 porten;
+
+ porten = dsaf_read_dev(drv, GMAC_PORT_EN_REG);
+ *tx = dsaf_get_bit(porten, GMAC_PORT_TX_EN_B);
+ *rx = dsaf_get_bit(porten, GMAC_PORT_RX_EN_B);
+}
+
+static void hns_gmac_free(void *mac_drv)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct dsaf_device *dsaf_dev
+ = (struct dsaf_device *)dev_get_drvdata(drv->dev);
+
+ u32 mac_id = drv->mac_id;
+
+ hns_dsaf_ge_srst_by_port(dsaf_dev, mac_id, 0);
+}
+
+static void hns_gmac_set_tx_auto_pause_frames(void *mac_drv, u16 newval)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_set_dev_field(drv, GMAC_FC_TX_TIMER_REG, GMAC_FC_TX_TIMER_M,
+ GMAC_FC_TX_TIMER_S, newval);
+}
+
+static void hns_gmac_get_tx_auto_pause_frames(void *mac_drv, u16 *newval)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ *newval = dsaf_get_dev_field(drv, GMAC_FC_TX_TIMER_REG,
+ GMAC_FC_TX_TIMER_M, GMAC_FC_TX_TIMER_S);
+}
+
+static void hns_gmac_set_rx_auto_pause_frames(void *mac_drv, u32 newval)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_set_dev_bit(drv, GMAC_PAUSE_EN_REG,
+ GMAC_PAUSE_EN_RX_FDFC_B, !!newval);
+}
+
+static void hns_gmac_config_max_frame_length(void *mac_drv, u16 newval)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_set_dev_field(drv, GMAC_MAX_FRM_SIZE_REG, GMAC_MAX_FRM_SIZE_M,
+ GMAC_MAX_FRM_SIZE_S, newval);
+
+ dsaf_set_dev_field(drv, GAMC_RX_MAX_FRAME, GMAC_MAX_FRM_SIZE_M,
+ GMAC_MAX_FRM_SIZE_S, newval);
+}
+
+static void hns_gmac_config_an_mode(void *mac_drv, u8 newval)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_set_dev_bit(drv, GMAC_TRANSMIT_CONTROL_REG,
+ GMAC_TX_AN_EN_B, !!newval);
+}
+
+static void hns_gmac_tx_loop_pkt_dis(void *mac_drv)
+{
+ u32 tx_loop_pkt_pri;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ tx_loop_pkt_pri = dsaf_read_dev(drv, GMAC_TX_LOOP_PKT_PRI_REG);
+ dsaf_set_bit(tx_loop_pkt_pri, GMAC_TX_LOOP_PKT_EN_B, 1);
+ dsaf_set_bit(tx_loop_pkt_pri, GMAC_TX_LOOP_PKT_HIG_PRI_B, 0);
+ dsaf_write_dev(drv, GMAC_TX_LOOP_PKT_PRI_REG, tx_loop_pkt_pri);
+}
+
+static void hns_gmac_set_duplex_type(void *mac_drv, u8 newval)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_set_dev_bit(drv, GMAC_DUPLEX_TYPE_REG,
+ GMAC_DUPLEX_TYPE_B, !!newval);
+}
+
+static void hns_gmac_get_duplex_type(void *mac_drv,
+ enum hns_gmac_duplex_mdoe *duplex_mode)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ *duplex_mode = (enum hns_gmac_duplex_mdoe)dsaf_get_dev_bit(
+ drv, GMAC_DUPLEX_TYPE_REG, GMAC_DUPLEX_TYPE_B);
+}
+
+static void hns_gmac_get_port_mode(void *mac_drv, enum hns_port_mode *port_mode)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ *port_mode = (enum hns_port_mode)dsaf_get_dev_field(
+ drv, GMAC_PORT_MODE_REG, GMAC_PORT_MODE_M, GMAC_PORT_MODE_S);
+}
+
+static void hns_gmac_port_mode_get(void *mac_drv,
+ struct hns_gmac_port_mode_cfg *port_mode)
+{
+ u32 tx_ctrl;
+ u32 recv_ctrl;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ port_mode->port_mode = (enum hns_port_mode)dsaf_get_dev_field(
+ drv, GMAC_PORT_MODE_REG, GMAC_PORT_MODE_M, GMAC_PORT_MODE_S);
+
+ tx_ctrl = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
+ recv_ctrl = dsaf_read_dev(drv, GMAC_RECV_CONTROL_REG);
+
+ port_mode->max_frm_size =
+ dsaf_get_dev_field(drv, GMAC_MAX_FRM_SIZE_REG,
+ GMAC_MAX_FRM_SIZE_M, GMAC_MAX_FRM_SIZE_S);
+ port_mode->short_runts_thr =
+ dsaf_get_dev_field(drv, GMAC_SHORT_RUNTS_THR_REG,
+ GMAC_SHORT_RUNTS_THR_M,
+ GMAC_SHORT_RUNTS_THR_S);
+
+ port_mode->pad_enable = dsaf_get_bit(tx_ctrl, GMAC_TX_PAD_EN_B);
+ port_mode->crc_add = dsaf_get_bit(tx_ctrl, GMAC_TX_CRC_ADD_B);
+ port_mode->an_enable = dsaf_get_bit(tx_ctrl, GMAC_TX_AN_EN_B);
+
+ port_mode->runt_pkt_en =
+ dsaf_get_bit(recv_ctrl, GMAC_RECV_CTRL_RUNT_PKT_EN_B);
+ port_mode->strip_pad_en =
+ dsaf_get_bit(recv_ctrl, GMAC_RECV_CTRL_STRIP_PAD_EN_B);
+}
+
+static void hns_gmac_pause_frm_cfg(void *mac_drv, u32 rx_pause_en,
+ u32 tx_pause_en)
+{
+ u32 pause_en;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ pause_en = dsaf_read_dev(drv, GMAC_PAUSE_EN_REG);
+ dsaf_set_bit(pause_en, GMAC_PAUSE_EN_RX_FDFC_B, !!rx_pause_en);
+ dsaf_set_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B, !!tx_pause_en);
+ dsaf_write_dev(drv, GMAC_PAUSE_EN_REG, pause_en);
+}
+
+static void hns_gmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_pause_en,
+ u32 *tx_pause_en)
+{
+ u32 pause_en;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ pause_en = dsaf_read_dev(drv, GMAC_PAUSE_EN_REG);
+
+ *rx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_RX_FDFC_B);
+ *tx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B);
+}
+
+static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed,
+ u32 full_duplex)
+{
+ u32 tx_ctrl;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_set_dev_bit(drv, GMAC_DUPLEX_TYPE_REG,
+ GMAC_DUPLEX_TYPE_B, !!full_duplex);
+
+ switch (speed) {
+ case MAC_SPEED_10:
+ dsaf_set_dev_field(
+ drv, GMAC_PORT_MODE_REG,
+ GMAC_PORT_MODE_M, GMAC_PORT_MODE_S, 0x6);
+ break;
+ case MAC_SPEED_100:
+ dsaf_set_dev_field(
+ drv, GMAC_PORT_MODE_REG,
+ GMAC_PORT_MODE_M, GMAC_PORT_MODE_S, 0x7);
+ break;
+ case MAC_SPEED_1000:
+ dsaf_set_dev_field(
+ drv, GMAC_PORT_MODE_REG,
+ GMAC_PORT_MODE_M, GMAC_PORT_MODE_S, 0x8);
+ break;
+ default:
+ dev_err(drv->dev,
+			"hns_gmac_adjust_link fail, speed %d mac %d\n",
+ speed, drv->mac_id);
+ return -EINVAL;
+ }
+
+ tx_ctrl = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
+ dsaf_set_bit(tx_ctrl, GMAC_TX_PAD_EN_B, 1);
+ dsaf_set_bit(tx_ctrl, GMAC_TX_CRC_ADD_B, 1);
+ dsaf_write_dev(drv, GMAC_TRANSMIT_CONTROL_REG, tx_ctrl);
+
+ dsaf_set_dev_bit(drv, GMAC_MODE_CHANGE_EN_REG,
+ GMAC_MODE_CHANGE_EB_B, 1);
+
+ return 0;
+}
+
+static void hns_gmac_init(void *mac_drv)
+{
+ u32 port;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct dsaf_device *dsaf_dev
+ = (struct dsaf_device *)dev_get_drvdata(drv->dev);
+
+ port = drv->mac_id;
+
+ hns_dsaf_ge_srst_by_port(dsaf_dev, port, 0);
+ mdelay(10);
+ hns_dsaf_ge_srst_by_port(dsaf_dev, port, 1);
+ mdelay(10);
+ hns_gmac_disable(mac_drv, MAC_COMM_MODE_RX_AND_TX);
+ hns_gmac_tx_loop_pkt_dis(mac_drv);
+}
+
+void hns_gmac_update_stats(void *mac_drv)
+{
+ struct mac_hw_stats *hw_stats = NULL;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ hw_stats = &drv->mac_cb->hw_stats;
+
+ /* RX */
+ hw_stats->rx_good_bytes
+ += dsaf_read_dev(drv, GMAC_RX_OCTETS_TOTAL_OK_REG);
+ hw_stats->rx_bad_bytes
+ += dsaf_read_dev(drv, GMAC_RX_OCTETS_BAD_REG);
+ hw_stats->rx_uc_pkts += dsaf_read_dev(drv, GMAC_RX_UC_PKTS_REG);
+ hw_stats->rx_mc_pkts += dsaf_read_dev(drv, GMAC_RX_MC_PKTS_REG);
+ hw_stats->rx_bc_pkts += dsaf_read_dev(drv, GMAC_RX_BC_PKTS_REG);
+ hw_stats->rx_64bytes
+ += dsaf_read_dev(drv, GMAC_RX_PKTS_64OCTETS_REG);
+ hw_stats->rx_65to127
+ += dsaf_read_dev(drv, GMAC_RX_PKTS_65TO127OCTETS_REG);
+ hw_stats->rx_128to255
+ += dsaf_read_dev(drv, GMAC_RX_PKTS_128TO255OCTETS_REG);
+ hw_stats->rx_256to511
+ += dsaf_read_dev(drv, GMAC_RX_PKTS_255TO511OCTETS_REG);
+ hw_stats->rx_512to1023
+ += dsaf_read_dev(drv, GMAC_RX_PKTS_512TO1023OCTETS_REG);
+ hw_stats->rx_1024to1518
+ += dsaf_read_dev(drv, GMAC_RX_PKTS_1024TO1518OCTETS_REG);
+ hw_stats->rx_1519tomax
+ += dsaf_read_dev(drv, GMAC_RX_PKTS_1519TOMAXOCTETS_REG);
+ hw_stats->rx_fcs_err += dsaf_read_dev(drv, GMAC_RX_FCS_ERRORS_REG);
+ hw_stats->rx_vlan_pkts += dsaf_read_dev(drv, GMAC_RX_TAGGED_REG);
+ hw_stats->rx_data_err += dsaf_read_dev(drv, GMAC_RX_DATA_ERR_REG);
+ hw_stats->rx_align_err
+ += dsaf_read_dev(drv, GMAC_RX_ALIGN_ERRORS_REG);
+ hw_stats->rx_oversize
+ += dsaf_read_dev(drv, GMAC_RX_LONG_ERRORS_REG);
+ hw_stats->rx_jabber_err
+ += dsaf_read_dev(drv, GMAC_RX_JABBER_ERRORS_REG);
+ hw_stats->rx_pfc_tc0
+ += dsaf_read_dev(drv, GMAC_RX_PAUSE_MACCTRL_FRAM_REG);
+ hw_stats->rx_unknown_ctrl
+ += dsaf_read_dev(drv, GMAC_RX_UNKNOWN_MACCTRL_FRAM_REG);
+ hw_stats->rx_long_err
+ += dsaf_read_dev(drv, GMAC_RX_VERY_LONG_ERR_CNT_REG);
+ hw_stats->rx_minto64
+ += dsaf_read_dev(drv, GMAC_RX_RUNT_ERR_CNT_REG);
+ hw_stats->rx_under_min
+ += dsaf_read_dev(drv, GMAC_RX_SHORT_ERR_CNT_REG);
+ hw_stats->rx_filter_pkts
+ += dsaf_read_dev(drv, GMAC_RX_FILT_PKT_CNT_REG);
+ hw_stats->rx_filter_bytes
+ += dsaf_read_dev(drv, GMAC_RX_OCTETS_TOTAL_FILT_REG);
+ hw_stats->rx_fifo_overrun_err
+ += dsaf_read_dev(drv, GMAC_RX_OVERRUN_CNT_REG);
+ hw_stats->rx_len_err
+ += dsaf_read_dev(drv, GMAC_RX_LENGTHFIELD_ERR_CNT_REG);
+ hw_stats->rx_comma_err
+ += dsaf_read_dev(drv, GMAC_RX_FAIL_COMMA_CNT_REG);
+
+ /* TX */
+ hw_stats->tx_good_bytes
+ += dsaf_read_dev(drv, GMAC_OCTETS_TRANSMITTED_OK_REG);
+ hw_stats->tx_bad_bytes
+ += dsaf_read_dev(drv, GMAC_OCTETS_TRANSMITTED_BAD_REG);
+ hw_stats->tx_uc_pkts += dsaf_read_dev(drv, GMAC_TX_UC_PKTS_REG);
+ hw_stats->tx_mc_pkts += dsaf_read_dev(drv, GMAC_TX_MC_PKTS_REG);
+ hw_stats->tx_bc_pkts += dsaf_read_dev(drv, GMAC_TX_BC_PKTS_REG);
+ hw_stats->tx_64bytes
+ += dsaf_read_dev(drv, GMAC_TX_PKTS_64OCTETS_REG);
+ hw_stats->tx_65to127
+ += dsaf_read_dev(drv, GMAC_TX_PKTS_65TO127OCTETS_REG);
+ hw_stats->tx_128to255
+ += dsaf_read_dev(drv, GMAC_TX_PKTS_128TO255OCTETS_REG);
+ hw_stats->tx_256to511
+ += dsaf_read_dev(drv, GMAC_TX_PKTS_255TO511OCTETS_REG);
+ hw_stats->tx_512to1023
+ += dsaf_read_dev(drv, GMAC_TX_PKTS_512TO1023OCTETS_REG);
+ hw_stats->tx_1024to1518
+ += dsaf_read_dev(drv, GMAC_TX_PKTS_1024TO1518OCTETS_REG);
+ hw_stats->tx_1519tomax
+ += dsaf_read_dev(drv, GMAC_TX_PKTS_1519TOMAXOCTETS_REG);
+ hw_stats->tx_jabber_err
+ += dsaf_read_dev(drv, GMAC_TX_EXCESSIVE_LENGTH_DROP_REG);
+ hw_stats->tx_underrun_err
+ += dsaf_read_dev(drv, GMAC_TX_UNDERRUN_REG);
+ hw_stats->tx_vlan += dsaf_read_dev(drv, GMAC_TX_TAGGED_REG);
+ hw_stats->tx_crc_err += dsaf_read_dev(drv, GMAC_TX_CRC_ERROR_REG);
+ hw_stats->tx_pfc_tc0
+ += dsaf_read_dev(drv, GMAC_TX_PAUSE_FRAMES_REG);
+}
+
+static void hns_gmac_set_mac_addr(void *mac_drv, char *mac_addr)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ if (drv->mac_id >= DSAF_SERVICE_NW_NUM) {
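+ /* the high word carries MAC address bytes 0-1, the low word bytes 2-5 */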
+ u32 high_val = mac_addr[1] | (mac_addr[0] << 8);
+
+ u32 low_val = mac_addr[5] | (mac_addr[4] << 8)
+ | (mac_addr[3] << 16) | (mac_addr[2] << 24);
+ dsaf_write_dev(drv, GMAC_STATION_ADDR_LOW_2_REG, low_val);
+ dsaf_write_dev(drv, GMAC_STATION_ADDR_HIGH_2_REG, high_val);
+ }
+}
+
+static int hns_gmac_config_loopback(void *mac_drv, enum hnae_loop loop_mode,
+ u8 enable)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ switch (loop_mode) {
+ case MAC_INTERNALLOOP_MAC:
+ dsaf_set_dev_bit(drv, GMAC_LOOP_REG, GMAC_LP_REG_CF2MI_LP_EN_B,
+ !!enable);
+ break;
+ default:
+ dev_err(drv->dev, "loop_mode error\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hns_gmac_config_pad_and_crc(void *mac_drv, u8 newval)
+{
+ u32 tx_ctrl;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ tx_ctrl = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
+ dsaf_set_bit(tx_ctrl, GMAC_TX_PAD_EN_B, !!newval);
+ dsaf_set_bit(tx_ctrl, GMAC_TX_CRC_ADD_B, !!newval);
+ dsaf_write_dev(drv, GMAC_TRANSMIT_CONTROL_REG, tx_ctrl);
+}
+
+static void hns_gmac_get_id(void *mac_drv, u8 *mac_id)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ *mac_id = drv->mac_id;
+}
+
+static void hns_gmac_get_info(void *mac_drv, struct mac_info *mac_info)
+{
+ enum hns_gmac_duplex_mdoe duplex;
+ enum hns_port_mode speed;
+ u32 rx_pause;
+ u32 tx_pause;
+ u32 rx;
+ u32 tx;
+ u16 fc_tx_timer;
+ struct hns_gmac_port_mode_cfg port_mode = { GMAC_10M_MII, 0 };
+
+ hns_gmac_port_mode_get(mac_drv, &port_mode);
+ mac_info->pad_and_crc_en = port_mode.crc_add && port_mode.pad_enable;
+ mac_info->auto_neg = port_mode.an_enable;
+
+ hns_gmac_get_tx_auto_pause_frames(mac_drv, &fc_tx_timer);
+ mac_info->tx_pause_time = fc_tx_timer;
+
+ hns_gmac_get_en(mac_drv, &rx, &tx);
+ mac_info->port_en = rx && tx;
+
+ hns_gmac_get_duplex_type(mac_drv, &duplex);
+ mac_info->duplex = duplex;
+
+ hns_gmac_get_port_mode(mac_drv, &speed);
+ switch (speed) {
+ case GMAC_10M_SGMII:
+ mac_info->speed = MAC_SPEED_10;
+ break;
+ case GMAC_100M_SGMII:
+ mac_info->speed = MAC_SPEED_100;
+ break;
+ case GMAC_1000M_SGMII:
+ mac_info->speed = MAC_SPEED_1000;
+ break;
+ default:
+ mac_info->speed = 0;
+ break;
+ }
+
+ hns_gmac_get_pausefrm_cfg(mac_drv, &rx_pause, &tx_pause);
+ mac_info->rx_pause_en = rx_pause;
+ mac_info->tx_pause_en = tx_pause;
+}
+
+static void hns_gmac_autoneg_stat(void *mac_drv, u32 *enable)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ *enable = dsaf_get_dev_bit(drv, GMAC_TRANSMIT_CONTROL_REG,
+ GMAC_TX_AN_EN_B);
+}
+
+static void hns_gmac_get_link_status(void *mac_drv, u32 *link_stat)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ *link_stat = dsaf_get_dev_bit(drv, GMAC_AN_NEG_STATE_REG,
+ GMAC_AN_NEG_STAT_RX_SYNC_OK_B);
+}
+
+static void hns_gmac_get_regs(void *mac_drv, void *data)
+{
+ u32 *regs = data;
+ int i;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ /* base config registers */
+ regs[0] = dsaf_read_dev(drv, GMAC_DUPLEX_TYPE_REG);
+ regs[1] = dsaf_read_dev(drv, GMAC_FD_FC_TYPE_REG);
+ regs[2] = dsaf_read_dev(drv, GMAC_FC_TX_TIMER_REG);
+ regs[3] = dsaf_read_dev(drv, GMAC_FD_FC_ADDR_LOW_REG);
+ regs[4] = dsaf_read_dev(drv, GMAC_FD_FC_ADDR_HIGH_REG);
+ regs[5] = dsaf_read_dev(drv, GMAC_IPG_TX_TIMER_REG);
+ regs[6] = dsaf_read_dev(drv, GMAC_PAUSE_THR_REG);
+ regs[7] = dsaf_read_dev(drv, GMAC_MAX_FRM_SIZE_REG);
+ regs[8] = dsaf_read_dev(drv, GMAC_PORT_MODE_REG);
+ regs[9] = dsaf_read_dev(drv, GMAC_PORT_EN_REG);
+ regs[10] = dsaf_read_dev(drv, GMAC_PAUSE_EN_REG);
+ regs[11] = dsaf_read_dev(drv, GMAC_SHORT_RUNTS_THR_REG);
+ regs[12] = dsaf_read_dev(drv, GMAC_AN_NEG_STATE_REG);
+ regs[13] = dsaf_read_dev(drv, GMAC_TX_LOCAL_PAGE_REG);
+ regs[14] = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
+ regs[15] = dsaf_read_dev(drv, GMAC_REC_FILT_CONTROL_REG);
+ regs[16] = dsaf_read_dev(drv, GMAC_PTP_CONFIG_REG);
+
+ /* rx static registers */
+ regs[17] = dsaf_read_dev(drv, GMAC_RX_OCTETS_TOTAL_OK_REG);
+ regs[18] = dsaf_read_dev(drv, GMAC_RX_OCTETS_BAD_REG);
+ regs[19] = dsaf_read_dev(drv, GMAC_RX_UC_PKTS_REG);
+ regs[20] = dsaf_read_dev(drv, GMAC_RX_MC_PKTS_REG);
+ regs[21] = dsaf_read_dev(drv, GMAC_RX_BC_PKTS_REG);
+ regs[22] = dsaf_read_dev(drv, GMAC_RX_PKTS_64OCTETS_REG);
+ regs[23] = dsaf_read_dev(drv, GMAC_RX_PKTS_65TO127OCTETS_REG);
+ regs[24] = dsaf_read_dev(drv, GMAC_RX_PKTS_128TO255OCTETS_REG);
+ regs[25] = dsaf_read_dev(drv, GMAC_RX_PKTS_255TO511OCTETS_REG);
+ regs[26] = dsaf_read_dev(drv, GMAC_RX_PKTS_512TO1023OCTETS_REG);
+ regs[27] = dsaf_read_dev(drv, GMAC_RX_PKTS_1024TO1518OCTETS_REG);
+ regs[28] = dsaf_read_dev(drv, GMAC_RX_PKTS_1519TOMAXOCTETS_REG);
+ regs[29] = dsaf_read_dev(drv, GMAC_RX_FCS_ERRORS_REG);
+ regs[30] = dsaf_read_dev(drv, GMAC_RX_TAGGED_REG);
+ regs[31] = dsaf_read_dev(drv, GMAC_RX_DATA_ERR_REG);
+ regs[32] = dsaf_read_dev(drv, GMAC_RX_ALIGN_ERRORS_REG);
+ regs[33] = dsaf_read_dev(drv, GMAC_RX_LONG_ERRORS_REG);
+ regs[34] = dsaf_read_dev(drv, GMAC_RX_JABBER_ERRORS_REG);
+ regs[35] = dsaf_read_dev(drv, GMAC_RX_PAUSE_MACCTRL_FRAM_REG);
+ regs[36] = dsaf_read_dev(drv, GMAC_RX_UNKNOWN_MACCTRL_FRAM_REG);
+ regs[37] = dsaf_read_dev(drv, GMAC_RX_VERY_LONG_ERR_CNT_REG);
+ regs[38] = dsaf_read_dev(drv, GMAC_RX_RUNT_ERR_CNT_REG);
+ regs[39] = dsaf_read_dev(drv, GMAC_RX_SHORT_ERR_CNT_REG);
+ regs[40] = dsaf_read_dev(drv, GMAC_RX_FILT_PKT_CNT_REG);
+ regs[41] = dsaf_read_dev(drv, GMAC_RX_OCTETS_TOTAL_FILT_REG);
+
+ /* tx static registers */
+ regs[42] = dsaf_read_dev(drv, GMAC_OCTETS_TRANSMITTED_OK_REG);
+ regs[43] = dsaf_read_dev(drv, GMAC_OCTETS_TRANSMITTED_BAD_REG);
+ regs[44] = dsaf_read_dev(drv, GMAC_TX_UC_PKTS_REG);
+ regs[45] = dsaf_read_dev(drv, GMAC_TX_MC_PKTS_REG);
+ regs[46] = dsaf_read_dev(drv, GMAC_TX_BC_PKTS_REG);
+ regs[47] = dsaf_read_dev(drv, GMAC_TX_PKTS_64OCTETS_REG);
+ regs[48] = dsaf_read_dev(drv, GMAC_TX_PKTS_65TO127OCTETS_REG);
+ regs[49] = dsaf_read_dev(drv, GMAC_TX_PKTS_128TO255OCTETS_REG);
+ regs[50] = dsaf_read_dev(drv, GMAC_TX_PKTS_255TO511OCTETS_REG);
+ regs[51] = dsaf_read_dev(drv, GMAC_TX_PKTS_512TO1023OCTETS_REG);
+ regs[52] = dsaf_read_dev(drv, GMAC_TX_PKTS_1024TO1518OCTETS_REG);
+ regs[53] = dsaf_read_dev(drv, GMAC_TX_PKTS_1519TOMAXOCTETS_REG);
+ regs[54] = dsaf_read_dev(drv, GMAC_TX_EXCESSIVE_LENGTH_DROP_REG);
+ regs[55] = dsaf_read_dev(drv, GMAC_TX_UNDERRUN_REG);
+ regs[56] = dsaf_read_dev(drv, GMAC_TX_TAGGED_REG);
+ regs[57] = dsaf_read_dev(drv, GMAC_TX_CRC_ERROR_REG);
+ regs[58] = dsaf_read_dev(drv, GMAC_TX_PAUSE_FRAMES_REG);
+
+ regs[59] = dsaf_read_dev(drv, GAMC_RX_MAX_FRAME);
+ regs[60] = dsaf_read_dev(drv, GMAC_LINE_LOOP_BACK_REG);
+ regs[61] = dsaf_read_dev(drv, GMAC_CF_CRC_STRIP_REG);
+ regs[62] = dsaf_read_dev(drv, GMAC_MODE_CHANGE_EN_REG);
+ regs[63] = dsaf_read_dev(drv, GMAC_SIXTEEN_BIT_CNTR_REG);
+ regs[64] = dsaf_read_dev(drv, GMAC_LD_LINK_COUNTER_REG);
+ regs[65] = dsaf_read_dev(drv, GMAC_LOOP_REG);
+ regs[66] = dsaf_read_dev(drv, GMAC_RECV_CONTROL_REG);
+ regs[67] = dsaf_read_dev(drv, GMAC_VLAN_CODE_REG);
+ regs[68] = dsaf_read_dev(drv, GMAC_RX_OVERRUN_CNT_REG);
+ regs[69] = dsaf_read_dev(drv, GMAC_RX_LENGTHFIELD_ERR_CNT_REG);
+ regs[70] = dsaf_read_dev(drv, GMAC_RX_FAIL_COMMA_CNT_REG);
+
+ regs[71] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_0_REG);
+ regs[72] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_0_REG);
+ regs[73] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_1_REG);
+ regs[74] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_1_REG);
+ regs[75] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_2_REG);
+ regs[76] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_2_REG);
+ regs[77] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_3_REG);
+ regs[78] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_3_REG);
+ regs[79] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_4_REG);
+ regs[80] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_4_REG);
+ regs[81] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_5_REG);
+ regs[82] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_5_REG);
+ regs[83] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_MSK_0_REG);
+ regs[84] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_MSK_0_REG);
+ regs[85] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_MSK_1_REG);
+ regs[86] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_MSK_1_REG);
+ regs[87] = dsaf_read_dev(drv, GMAC_MAC_SKIP_LEN_REG);
+ regs[88] = dsaf_read_dev(drv, GMAC_TX_LOOP_PKT_PRI_REG);
+
+ /* mark end of mac regs */
+ for (i = 89; i < 96; i++)
+ regs[i] = 0xaaaaaaaa;
+}
+
+static void hns_gmac_get_stats(void *mac_drv, u64 *data)
+{
+ u32 i;
+ u64 *buf = data;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct mac_hw_stats *hw_stats = NULL;
+
+ hw_stats = &drv->mac_cb->hw_stats;
+
+ for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++) {
+ buf[i] = DSAF_STATS_READ(hw_stats,
+ g_gmac_stats_string[i].offset);
+ }
+}
+
+static void hns_gmac_get_strings(u32 stringset, u8 *data)
+{
+ char *buff = (char *)data;
+ u32 i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++) {
+ snprintf(buff, ETH_GSTRING_LEN, "%s", g_gmac_stats_string[i].desc);
+ buff = buff + ETH_GSTRING_LEN;
+ }
+}
+
+static int hns_gmac_get_sset_count(int stringset)
+{
+ if (stringset == ETH_SS_STATS)
+ return ARRAY_SIZE(g_gmac_stats_string);
+
+ return 0;
+}
+
+static int hns_gmac_get_regs_count(void)
+{
+ return ETH_GMAC_DUMP_NUM;
+}
+
+void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
+{
+ struct mac_driver *mac_drv;
+
+ mac_drv = devm_kzalloc(mac_cb->dev, sizeof(*mac_drv), GFP_KERNEL);
+ if (!mac_drv)
+ return NULL;
+
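+ /* hook the gmac-specific hardware operations into the common ops table */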
+ mac_drv->mac_init = hns_gmac_init;
+ mac_drv->mac_enable = hns_gmac_enable;
+ mac_drv->mac_disable = hns_gmac_disable;
+ mac_drv->mac_free = hns_gmac_free;
+ mac_drv->adjust_link = hns_gmac_adjust_link;
+ mac_drv->set_tx_auto_pause_frames = hns_gmac_set_tx_auto_pause_frames;
+ mac_drv->config_max_frame_length = hns_gmac_config_max_frame_length;
+ mac_drv->mac_pausefrm_cfg = hns_gmac_pause_frm_cfg;
+
+ mac_drv->mac_id = mac_param->mac_id;
+ mac_drv->mac_mode = mac_param->mac_mode;
+ mac_drv->io_base = mac_param->vaddr;
+ mac_drv->dev = mac_param->dev;
+ mac_drv->mac_cb = mac_cb;
+
+ mac_drv->set_mac_addr = hns_gmac_set_mac_addr;
+ mac_drv->set_an_mode = hns_gmac_config_an_mode;
+ mac_drv->config_loopback = hns_gmac_config_loopback;
+ mac_drv->config_pad_and_crc = hns_gmac_config_pad_and_crc;
+ mac_drv->config_half_duplex = hns_gmac_set_duplex_type;
+ mac_drv->set_rx_ignore_pause_frames = hns_gmac_set_rx_auto_pause_frames;
+ mac_drv->mac_get_id = hns_gmac_get_id;
+ mac_drv->get_info = hns_gmac_get_info;
+ mac_drv->autoneg_stat = hns_gmac_autoneg_stat;
+ mac_drv->get_pause_enable = hns_gmac_get_pausefrm_cfg;
+ mac_drv->get_link_status = hns_gmac_get_link_status;
+ mac_drv->get_regs = hns_gmac_get_regs;
+ mac_drv->get_regs_count = hns_gmac_get_regs_count;
+ mac_drv->get_ethtool_stats = hns_gmac_get_stats;
+ mac_drv->get_sset_count = hns_gmac_get_sset_count;
+ mac_drv->get_strings = hns_gmac_get_strings;
+ mac_drv->update_stats = hns_gmac_update_stats;
+
+ return (void *)mac_drv;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.h
new file mode 100644
index 000000000000..44fe3010dc6d
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _HNS_GMAC_H
+#define _HNS_GMAC_H
+
+#include "hns_dsaf_mac.h"
+
+enum hns_port_mode {
+ GMAC_10M_MII = 0,
+ GMAC_100M_MII,
+ GMAC_1000M_GMII,
+ GMAC_10M_RGMII,
+ GMAC_100M_RGMII,
+ GMAC_1000M_RGMII,
+ GMAC_10M_SGMII,
+ GMAC_100M_SGMII,
+ GMAC_1000M_SGMII,
+ GMAC_10000M_SGMII /* 10GE */
+};
+
+enum hns_gmac_duplex_mdoe {
+ GMAC_HALF_DUPLEX_MODE = 0,
+ GMAC_FULL_DUPLEX_MODE
+};
+
+struct hns_gmac_port_mode_cfg {
+ enum hns_port_mode port_mode;
+ u32 max_frm_size;
+ u32 short_runts_thr;
+ u32 pad_enable;
+ u32 crc_add;
+ u32 an_enable; /* auto-negotiation enable */
+ u32 runt_pkt_en;
+ u32 strip_pad_en;
+};
+
+#define ETH_GMAC_DUMP_NUM 96
+#endif /* _HNS_GMAC_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
new file mode 100644
index 000000000000..026b38676cba
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -0,0 +1,902 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/phy_fixed.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include "hns_dsaf_misc.h"
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_rcb.h"
+
+#define MAC_EN_FLAG_V 0xada0328
+
+static const u16 mac_phy_to_speed[] = {
+ [PHY_INTERFACE_MODE_MII] = MAC_SPEED_100,
+ [PHY_INTERFACE_MODE_GMII] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_SGMII] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_TBI] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_RMII] = MAC_SPEED_100,
+ [PHY_INTERFACE_MODE_RGMII] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_RGMII_ID] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_RGMII_RXID] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_RGMII_TXID] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_RTBI] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_XGMII] = MAC_SPEED_10000
+};
+
+static const enum mac_mode g_mac_mode_100[] = {
+ [PHY_INTERFACE_MODE_MII] = MAC_MODE_MII_100,
+ [PHY_INTERFACE_MODE_RMII] = MAC_MODE_RMII_100
+};
+
+static const enum mac_mode g_mac_mode_1000[] = {
+ [PHY_INTERFACE_MODE_GMII] = MAC_MODE_GMII_1000,
+ [PHY_INTERFACE_MODE_SGMII] = MAC_MODE_SGMII_1000,
+ [PHY_INTERFACE_MODE_TBI] = MAC_MODE_TBI_1000,
+ [PHY_INTERFACE_MODE_RGMII] = MAC_MODE_RGMII_1000,
+ [PHY_INTERFACE_MODE_RGMII_ID] = MAC_MODE_RGMII_1000,
+ [PHY_INTERFACE_MODE_RGMII_RXID] = MAC_MODE_RGMII_1000,
+ [PHY_INTERFACE_MODE_RGMII_TXID] = MAC_MODE_RGMII_1000,
+ [PHY_INTERFACE_MODE_RTBI] = MAC_MODE_RTBI_1000
+};
+
+static enum mac_mode hns_mac_dev_to_enet_if(const struct hns_mac_cb *mac_cb)
+{
+ switch (mac_cb->max_speed) {
+ case MAC_SPEED_100:
+ return g_mac_mode_100[mac_cb->phy_if];
+ case MAC_SPEED_1000:
+ return g_mac_mode_1000[mac_cb->phy_if];
+ case MAC_SPEED_10000:
+ return MAC_MODE_XGMII_10000;
+ default:
+ return MAC_MODE_MII_100;
+ }
+}
+
+static enum mac_mode hns_get_enet_interface(const struct hns_mac_cb *mac_cb)
+{
+ /* identical speed/interface mapping; reuse hns_mac_dev_to_enet_if() */
+ return hns_mac_dev_to_enet_if(mac_cb);
+}
+
+int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
+{
+ if (!mac_cb->cpld_vaddr)
+ return -ENODEV;
+
+ *sfp_prsnt = !dsaf_read_b((u8 *)mac_cb->cpld_vaddr
+ + MAC_SFP_PORT_OFFSET);
+
+ return 0;
+}
+
+void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
+{
+ struct mac_driver *mac_ctrl_drv;
+ int ret, sfp_prsnt;
+
+ mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ if (mac_ctrl_drv->get_link_status)
+ mac_ctrl_drv->get_link_status(mac_ctrl_drv, link_status);
+ else
+ *link_status = 0;
+
+ ret = hns_mac_get_sfp_prsnt(mac_cb, &sfp_prsnt);
+ if (!ret)
+ *link_status = *link_status && sfp_prsnt;
+
+ mac_cb->link = *link_status;
+}
+
+int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
+ u8 *auto_neg, u16 *speed, u8 *duplex)
+{
+ struct mac_driver *mac_ctrl_drv;
+ struct mac_info info;
+
+ mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ if (!mac_ctrl_drv->get_info)
+ return -ENODEV;
+
+ mac_ctrl_drv->get_info(mac_ctrl_drv, &info);
+ if (auto_neg)
+ *auto_neg = info.auto_neg;
+ if (speed)
+ *speed = info.speed;
+ if (duplex)
+ *duplex = info.duplex;
+
+ return 0;
+}
+
+void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
+{
+ int ret;
+ struct mac_driver *mac_ctrl_drv;
+
+ mac_ctrl_drv = (struct mac_driver *)(mac_cb->priv.mac);
+
+ mac_cb->speed = speed;
+ mac_cb->half_duplex = !duplex;
+ mac_ctrl_drv->mac_mode = hns_mac_dev_to_enet_if(mac_cb);
+
+ if (mac_ctrl_drv->adjust_link) {
+ ret = mac_ctrl_drv->adjust_link(mac_ctrl_drv,
+ (enum mac_speed)speed, duplex);
+ if (ret) {
+ dev_err(mac_cb->dev,
+ "adjust_link failed,%s mac%d ret = %#x!\n",
+ mac_cb->dsaf_dev->ae_dev.name,
+ mac_cb->mac_id, ret);
+ return;
+ }
+ }
+}
+
+/**
+ * hns_mac_get_inner_port_num - get mac table inner port number
+ * @mac_cb: mac device
+ * @vmid: vm id
+ * @port_num: port number
+ */
+static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
+ u8 vmid, u8 *port_num)
+{
+ u8 tmp_port;
+ u32 comm_idx;
+
+ if (mac_cb->dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE) {
+ if (mac_cb->mac_id != DSAF_MAX_PORT_NUM_PER_CHIP) {
+ dev_err(mac_cb->dev,
+ "input invalid,%s mac%d vmid%d !\n",
+ mac_cb->dsaf_dev->ae_dev.name,
+ mac_cb->mac_id, vmid);
+ return -EINVAL;
+ }
+ } else if (mac_cb->dsaf_dev->dsaf_mode < DSAF_MODE_MAX) {
+ if (mac_cb->mac_id >= DSAF_MAX_PORT_NUM_PER_CHIP) {
+ dev_err(mac_cb->dev,
+ "input invalid,%s mac%d vmid%d!\n",
+ mac_cb->dsaf_dev->ae_dev.name,
+ mac_cb->mac_id, vmid);
+ return -EINVAL;
+ }
+ } else {
+ dev_err(mac_cb->dev, "dsaf mode invalid,%s mac%d!\n",
+ mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id);
+ return -EINVAL;
+ }
+
+ comm_idx = hns_dsaf_get_comm_idx_by_port(mac_cb->mac_id);
+
+ if (vmid >= mac_cb->dsaf_dev->rcb_common[comm_idx]->max_vfn) {
+ dev_err(mac_cb->dev, "input invalid,%s mac%d vmid%d !\n",
+ mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vmid);
+ return -EINVAL;
+ }
+
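+ /* derive the inner port number from the vm id according to the dsaf mode */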
+ switch (mac_cb->dsaf_dev->dsaf_mode) {
+ case DSAF_MODE_ENABLE_FIX:
+ tmp_port = 0;
+ break;
+ case DSAF_MODE_DISABLE_FIX:
+ tmp_port = 0;
+ break;
+ case DSAF_MODE_ENABLE_0VM:
+ case DSAF_MODE_ENABLE_8VM:
+ case DSAF_MODE_ENABLE_16VM:
+ case DSAF_MODE_ENABLE_32VM:
+ case DSAF_MODE_ENABLE_128VM:
+ case DSAF_MODE_DISABLE_2PORT_8VM:
+ case DSAF_MODE_DISABLE_2PORT_16VM:
+ case DSAF_MODE_DISABLE_2PORT_64VM:
+ case DSAF_MODE_DISABLE_6PORT_0VM:
+ case DSAF_MODE_DISABLE_6PORT_2VM:
+ case DSAF_MODE_DISABLE_6PORT_4VM:
+ case DSAF_MODE_DISABLE_6PORT_16VM:
+ tmp_port = vmid;
+ break;
+ default:
+ dev_err(mac_cb->dev, "dsaf mode invalid,%s mac%d!\n",
+ mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id);
+ return -EINVAL;
+ }
+ tmp_port += DSAF_BASE_INNER_PORT_NUM;
+
+ *port_num = tmp_port;
+
+ return 0;
+}
+
+/**
+ * hns_mac_change_vf_addr - change vf mac address
+ * @mac_cb: mac device
+ * @vmid: vm id
+ * @addr: mac address
+ */
+int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb,
+ u32 vmid, char *addr)
+{
+ int ret;
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ struct dsaf_drv_mac_single_dest_entry mac_entry;
+ struct mac_entry_idx *old_entry;
+
+ old_entry = &mac_cb->addr_entry_idx[vmid];
+ if (dsaf_dev) {
+ memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
+ mac_entry.in_vlan_id = old_entry->vlan_id;
+ mac_entry.in_port_num = mac_cb->mac_id;
+ ret = hns_mac_get_inner_port_num(mac_cb, (u8)vmid,
+ &mac_entry.port_num);
+ if (ret)
+ return ret;
+
+ if ((old_entry->valid != 0) &&
+ (memcmp(old_entry->addr,
+ addr, sizeof(mac_entry.addr)) != 0)) {
+ ret = hns_dsaf_del_mac_entry(dsaf_dev,
+ old_entry->vlan_id,
+ mac_cb->mac_id,
+ old_entry->addr);
+ if (ret)
+ return ret;
+ }
+
+ ret = hns_dsaf_set_mac_uc_entry(dsaf_dev, &mac_entry);
+ if (ret)
+ return ret;
+ }
+
+ if ((mac_ctrl_drv->set_mac_addr) && (vmid == 0))
+ mac_ctrl_drv->set_mac_addr(mac_cb->priv.mac, addr);
+
+ memcpy(old_entry->addr, addr, sizeof(old_entry->addr));
+ old_entry->valid = 1;
+ return 0;
+}
+
+int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
+ u32 port_num, char *addr, u8 en)
+{
+ int ret;
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ struct dsaf_drv_mac_single_dest_entry mac_entry;
+
+ if (dsaf_dev && addr) {
+ memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
+ mac_entry.in_vlan_id = 0;/*vlan_id;*/
+ mac_entry.in_port_num = mac_cb->mac_id;
+ mac_entry.port_num = port_num;
+
+ if (en == DISABLE)
+ ret = hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry);
+ else
+ ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry);
+ if (ret) {
+ dev_err(dsaf_dev->dev,
+ "set mac mc port failed,%s mac%d ret = %#x!\n",
+ mac_cb->dsaf_dev->ae_dev.name,
+ mac_cb->mac_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * hns_mac_del_mac - delete a mac address from the dsaf table; the same
+ * address cannot be deleted twice
+ * @mac_cb: mac device
+ * @vfn: vf number
+ * @mac: mac address
+ * return status
+ */
+int hns_mac_del_mac(struct hns_mac_cb *mac_cb, u32 vfn, char *mac)
+{
+ struct mac_entry_idx *old_mac;
+ struct dsaf_device *dsaf_dev;
+ u32 ret;
+
+ dsaf_dev = mac_cb->dsaf_dev;
+
+ if (vfn < DSAF_MAX_VM_NUM) {
+ old_mac = &mac_cb->addr_entry_idx[vfn];
+ } else {
+ dev_err(mac_cb->dev,
+ "vf queue is too large,%s mac%d queue = %#x!\n",
+ mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vfn);
+ return -EINVAL;
+ }
+
+ if (dsaf_dev) {
+ ret = hns_dsaf_del_mac_entry(dsaf_dev, old_mac->vlan_id,
+ mac_cb->mac_id, old_mac->addr);
+ if (ret)
+ return ret;
+
+ if (memcmp(old_mac->addr, mac, sizeof(old_mac->addr)) == 0)
+ old_mac->valid = 0;
+ }
+
+ return 0;
+}
+
+static void hns_mac_param_get(struct mac_params *param,
+ struct hns_mac_cb *mac_cb)
+{
+ param->vaddr = (void *)mac_cb->vaddr;
+ param->mac_mode = hns_get_enet_interface(mac_cb);
+ memcpy(param->addr, mac_cb->addr_entry_idx[0].addr,
+ MAC_NUM_OCTETS_PER_ADDR);
+ param->mac_id = mac_cb->mac_id;
+ param->dev = mac_cb->dev;
+}
+
+/**
+ * hns_mac_port_config_bc_en - set broadcast rx&tx enable
+ * @mac_cb: mac device
+ * @port_num: port number
+ * @vlan_id: vlan id
+ * @en: enable
+ * return 0 - success, negative - fail
+ */
+static int hns_mac_port_config_bc_en(struct hns_mac_cb *mac_cb,
+ u32 port_num, u16 vlan_id, u8 en)
+{
+ int ret;
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ u8 addr[MAC_NUM_OCTETS_PER_ADDR]
+ = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ struct dsaf_drv_mac_single_dest_entry mac_entry;
+
+ /* directly return ok in debug network mode */
+ if (mac_cb->mac_type == HNAE_PORT_DEBUG)
+ return 0;
+
+ if (dsaf_dev) {
+ memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
+ mac_entry.in_vlan_id = vlan_id;
+ mac_entry.in_port_num = mac_cb->mac_id;
+ mac_entry.port_num = port_num;
+
+ if (en == DISABLE)
+ ret = hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry);
+ else
+ ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * hns_mac_vm_config_bc_en - set broadcast rx&tx enable
+ * @mac_cb: mac device
+ * @vmid: vm id
+ * @en: enable
+ * return 0 - success, negative - fail
+ */
+int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, u8 en)
+{
+ int ret;
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ u8 port_num;
+ u8 addr[MAC_NUM_OCTETS_PER_ADDR]
+ = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ struct mac_entry_idx *uc_mac_entry;
+ struct dsaf_drv_mac_single_dest_entry mac_entry;
+
+ if (mac_cb->mac_type == HNAE_PORT_DEBUG)
+ return 0;
+
+ uc_mac_entry = &mac_cb->addr_entry_idx[vmid];
+
+ if (dsaf_dev) {
+ memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
+ mac_entry.in_vlan_id = uc_mac_entry->vlan_id;
+ mac_entry.in_port_num = mac_cb->mac_id;
+ ret = hns_mac_get_inner_port_num(mac_cb, vmid, &port_num);
+ if (ret)
+ return ret;
+ mac_entry.port_num = port_num;
+
+ if (en == DISABLE)
+ ret = hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry);
+ else
+ ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry);
+ return ret;
+ }
+
+ return 0;
+}
+
+void hns_mac_reset(struct hns_mac_cb *mac_cb)
+{
+ struct mac_driver *drv;
+
+ drv = hns_mac_get_drv(mac_cb);
+
+ drv->mac_init(drv);
+
+ if (drv->config_max_frame_length)
+ drv->config_max_frame_length(drv, mac_cb->max_frm);
+
+ if (drv->set_tx_auto_pause_frames)
+ drv->set_tx_auto_pause_frames(drv, mac_cb->tx_pause_frm_time);
+
+ if (drv->set_an_mode)
+ drv->set_an_mode(drv, 1);
+
+ if (drv->mac_pausefrm_cfg) {
+ if (mac_cb->mac_type == HNAE_PORT_DEBUG)
+ drv->mac_pausefrm_cfg(drv, 0, 0);
+ else /* mac rx pause must stay disabled; dsaf pfc handles it instead */
+ drv->mac_pausefrm_cfg(drv, 0, 1);
+ }
+}
+
+int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu)
+{
+ struct mac_driver *drv = hns_mac_get_drv(mac_cb);
+ u32 buf_size = mac_cb->dsaf_dev->buf_size;
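+ /* the max frame adds the L2 header, FCS and one VLAN tag on top of the MTU */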
+ u32 new_frm = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
+ if ((new_mtu < MAC_MIN_MTU) || (new_frm > MAC_MAX_MTU) ||
+ (new_frm > HNS_RCB_RING_MAX_BD_PER_PKT * buf_size))
+ return -EINVAL;
+
+ if (!drv->config_max_frame_length)
+ return -ECHILD;
+
+ /* adjust max frame to be at least the size of a standard frame */
+ if (new_frm < (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN))
+ new_frm = (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN);
+
+ drv->config_max_frame_length(drv, new_frm);
+
+ mac_cb->max_frm = new_frm;
+
+ return 0;
+}
+
+void hns_mac_start(struct hns_mac_cb *mac_cb)
+{
+ struct mac_driver *mac_drv = hns_mac_get_drv(mac_cb);
+
+ /* for virtualization: the mac may be shared by several virtual devices */
+ if (mac_drv->mac_en_flg == MAC_EN_FLAG_V) {
+ /* already enabled; just count one more user */
+ mac_drv->virt_dev_num += 1;
+ return;
+ }
+
+ if (mac_drv->mac_enable) {
+ mac_drv->mac_enable(mac_cb->priv.mac, MAC_COMM_MODE_RX_AND_TX);
+ mac_drv->mac_en_flg = MAC_EN_FLAG_V;
+ }
+}
+
+void hns_mac_stop(struct hns_mac_cb *mac_cb)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ /* for virtualization: only disable the mac when the last user stops */
+ if (mac_ctrl_drv->virt_dev_num > 0) {
+ mac_ctrl_drv->virt_dev_num -= 1;
+ if (mac_ctrl_drv->virt_dev_num > 0)
+ return;
+ }
+
+ if (mac_ctrl_drv->mac_disable)
+ mac_ctrl_drv->mac_disable(mac_cb->priv.mac,
+ MAC_COMM_MODE_RX_AND_TX);
+
+ mac_ctrl_drv->mac_en_flg = 0;
+ mac_cb->link = 0;
+ cpld_led_reset(mac_cb);
+}
+
+/**
+ * hns_mac_get_autoneg - get autonegotiation state
+ * @mac_cb: mac control block
+ * @auto_neg: returned autonegotiation state
+ */
+void hns_mac_get_autoneg(struct hns_mac_cb *mac_cb, u32 *auto_neg)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ if (mac_ctrl_drv->autoneg_stat)
+ mac_ctrl_drv->autoneg_stat(mac_ctrl_drv, auto_neg);
+ else
+ *auto_neg = 0;
+}
+
+/**
+ * hns_mac_get_pauseparam - get rx & tx pause parameters
+ * @mac_cb: mac control block
+ * @rx_en: returned rx pause status
+ * @tx_en: returned tx pause status
+ */
+void hns_mac_get_pauseparam(struct hns_mac_cb *mac_cb, u32 *rx_en, u32 *tx_en)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ if (mac_ctrl_drv->get_pause_enable) {
+ mac_ctrl_drv->get_pause_enable(mac_ctrl_drv, rx_en, tx_en);
+ } else {
+ *rx_en = 0;
+ *tx_en = 0;
+ }
+
+ /* Due to a chip defect, the service mac's rx pause CANNOT be
+ * enabled. We report rx pause as always on (1) because the DSAF
+ * handles rx pause frames on behalf of the service mac, so rx
+ * pause is still effectively supported.
+ */
+ if (mac_cb->mac_type == HNAE_PORT_SERVICE)
+ *rx_en = 1;
+}
+
+/**
+ * hns_mac_set_autoneg - set autonegotiation
+ * @mac_cb: mac control block
+ * @enable: enable or not
+ * return 0 - success, negative - fail
+ */
+int hns_mac_set_autoneg(struct hns_mac_cb *mac_cb, u8 enable)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ if (mac_cb->phy_if == PHY_INTERFACE_MODE_XGMII && enable) {
+ dev_err(mac_cb->dev, "enable autoneg is not allowed!");
+ return -ENOTSUPP;
+ }
+
+ if (mac_ctrl_drv->set_an_mode)
+ mac_ctrl_drv->set_an_mode(mac_ctrl_drv, enable);
+
+ return 0;
+}
+
+/**
+ * hns_mac_set_pauseparam - set rx & tx pause parameters
+ * @mac_cb: mac control block
+ * @rx_en: rx enable or not
+ * @tx_en: tx enable or not
+ * return 0 - success, negative - fail
+ */
+int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ if (mac_cb->mac_type == HNAE_PORT_SERVICE) {
+ if (!rx_en) {
+ dev_err(mac_cb->dev, "disable rx_pause is not allowed!");
+ return -EINVAL;
+ }
+ } else if (mac_cb->mac_type == HNAE_PORT_DEBUG) {
+ if (tx_en || rx_en) {
+ dev_err(mac_cb->dev, "enable tx_pause or enable rx_pause are not allowed!");
+ return -EINVAL;
+ }
+ } else {
+ dev_err(mac_cb->dev, "Unsupport this operation!");
+ return -EINVAL;
+ }
+
+ if (mac_ctrl_drv->mac_pausefrm_cfg)
+ mac_ctrl_drv->mac_pausefrm_cfg(mac_ctrl_drv, rx_en, tx_en);
+
+ return 0;
+}
+
+/**
+ * hns_mac_init_ex - mac init
+ * @mac_cb: mac control block
+ * return 0 - success, negative - fail
+ */
+static int hns_mac_init_ex(struct hns_mac_cb *mac_cb)
+{
+ int ret;
+ struct mac_params param;
+ struct mac_driver *drv;
+
+ hns_dsaf_fix_mac_mode(mac_cb);
+
+ memset(&param, 0, sizeof(struct mac_params));
+ hns_mac_param_get(&param, mac_cb);
+
+ if (MAC_SPEED_FROM_MODE(param.mac_mode) < MAC_SPEED_10000)
+ drv = (struct mac_driver *)hns_gmac_config(mac_cb, &param);
+ else
+ drv = (struct mac_driver *)hns_xgmac_config(mac_cb, &param);
+
+ if (!drv)
+ return -ENOMEM;
+
+ mac_cb->priv.mac = (void *)drv;
+ hns_mac_reset(mac_cb);
+
+ hns_mac_adjust_link(mac_cb, mac_cb->speed, !mac_cb->half_duplex);
+
+ ret = hns_mac_port_config_bc_en(mac_cb, mac_cb->mac_id, 0, ENABLE);
+ if (ret)
+ goto free_mac_drv;
+
+ return 0;
+
+free_mac_drv:
+ drv->mac_free(mac_cb->priv.mac);
+ mac_cb->priv.mac = NULL;
+
+ return ret;
+}
+
+/**
+ * hns_mac_get_info - get mac information from device node
+ * @mac_cb: mac device
+ * @np: device node
+ * @mac_mode_idx: mac mode index
+ */
+static void hns_mac_get_info(struct hns_mac_cb *mac_cb,
+ struct device_node *np, u32 mac_mode_idx)
+{
+ mac_cb->link = false;
+ mac_cb->half_duplex = false;
+ mac_cb->speed = mac_phy_to_speed[mac_cb->phy_if];
+ mac_cb->max_speed = mac_cb->speed;
+
+ if (mac_cb->phy_if == PHY_INTERFACE_MODE_SGMII) {
+ mac_cb->if_support = MAC_GMAC_SUPPORTED;
+ mac_cb->if_support |= SUPPORTED_1000baseT_Full;
+ } else if (mac_cb->phy_if == PHY_INTERFACE_MODE_XGMII) {
+ mac_cb->if_support = SUPPORTED_10000baseR_FEC;
+ mac_cb->if_support |= SUPPORTED_10000baseKR_Full;
+ }
+
+ mac_cb->max_frm = MAC_DEFAULT_MTU;
+ mac_cb->tx_pause_frm_time = MAC_DEFAULT_PAUSE_TIME;
+
+ /* Get the rest of the PHY information */
+ mac_cb->phy_node = of_parse_phandle(np, "phy-handle", mac_cb->mac_id);
+ if (mac_cb->phy_node)
+ dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n",
+ mac_cb->mac_id, mac_cb->phy_node->name);
+}
+
+/**
+ * hns_mac_get_mode - get mac mode
+ * @phy_if: phy interface
+ * return 0 - gmac, 1 - xgmac, negative - fail
+ */
+static int hns_mac_get_mode(phy_interface_t phy_if)
+{
+ switch (phy_if) {
+ case PHY_INTERFACE_MODE_SGMII:
+ return MAC_GMAC_IDX;
+ case PHY_INTERFACE_MODE_XGMII:
+ return MAC_XGMAC_IDX;
+ default:
+ return -EINVAL;
+ }
+}
+
+u8 __iomem *hns_mac_get_vaddr(struct dsaf_device *dsaf_dev,
+ struct hns_mac_cb *mac_cb, u32 mac_mode_idx)
+{
+ u8 __iomem *base = dsaf_dev->io_base;
+ int mac_id = mac_cb->mac_id;
+
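+ /* service ports use per-port windows inside io_base (the GMAC and
+ * XGMAC banks are 0x20000 apart); debug ports use windows inside
+ * the serdes region
+ */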
+ if (mac_cb->mac_type == HNAE_PORT_SERVICE)
+ return base + 0x40000 + mac_id * 0x4000 -
+ mac_mode_idx * 0x20000;
+ else
+ return mac_cb->serdes_vaddr + 0x1000
+ + (mac_id - DSAF_SERVICE_PORT_NUM_PER_DSAF) * 0x100000;
+}
+
+/**
+ * hns_mac_get_cfg - get mac cfg from dtb or acpi table
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_idx: mac index
+ * return 0 - success, negative - fail
+ */
+int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, int mac_idx)
+{
+ int ret;
+ u32 mac_mode_idx;
+ struct hns_mac_cb *mac_cb = &dsaf_dev->mac_cb[mac_idx];
+
+ mac_cb->dsaf_dev = dsaf_dev;
+ mac_cb->dev = dsaf_dev->dev;
+ mac_cb->mac_id = mac_idx;
+
+ mac_cb->sys_ctl_vaddr = dsaf_dev->sc_base;
+ mac_cb->serdes_vaddr = dsaf_dev->sds_base;
+
+ if (dsaf_dev->cpld_base &&
+ mac_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF) {
+ mac_cb->cpld_vaddr = dsaf_dev->cpld_base +
+ mac_cb->mac_id * CPLD_ADDR_PORT_OFFSET;
+ cpld_led_reset(mac_cb);
+ }
+ mac_cb->sfp_prsnt = 0;
+ mac_cb->txpkt_for_led = 0;
+ mac_cb->rxpkt_for_led = 0;
+
+ if (mac_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF)
+ mac_cb->mac_type = HNAE_PORT_SERVICE;
+ else
+ mac_cb->mac_type = HNAE_PORT_DEBUG;
+
+ mac_cb->phy_if = hns_mac_get_phy_if(mac_cb);
+
+ ret = hns_mac_get_mode(mac_cb->phy_if);
+ if (ret < 0) {
+ dev_err(dsaf_dev->dev,
+ "hns_mac_get_mode failed,mac%d ret = %#x!\n",
+ mac_cb->mac_id, ret);
+ return ret;
+ }
+ mac_mode_idx = (u32)ret;
+
+ hns_mac_get_info(mac_cb, mac_cb->dev->of_node, mac_mode_idx);
+
+ mac_cb->vaddr = hns_mac_get_vaddr(dsaf_dev, mac_cb, mac_mode_idx);
+
+ return 0;
+}
+
+/**
+ * hns_mac_init - init mac
+ * @dsaf_dev: dsa fabric device struct pointer
+ * return 0 - success, negative - fail
+ */
+int hns_mac_init(struct dsaf_device *dsaf_dev)
+{
+ int i;
+ int ret;
+ size_t size;
+ struct hns_mac_cb *mac_cb;
+
+ size = sizeof(struct hns_mac_cb) * DSAF_MAX_PORT_NUM_PER_CHIP;
+ dsaf_dev->mac_cb = devm_kzalloc(dsaf_dev->dev, size, GFP_KERNEL);
+ if (!dsaf_dev->mac_cb)
+ return -ENOMEM;
+
+ for (i = 0; i < DSAF_MAX_PORT_NUM_PER_CHIP; i++) {
+ ret = hns_mac_get_cfg(dsaf_dev, i);
+ if (ret)
+ goto free_mac_cb;
+
+ mac_cb = &dsaf_dev->mac_cb[i];
+ ret = hns_mac_init_ex(mac_cb);
+ if (ret)
+ goto free_mac_cb;
+ }
+
+ return 0;
+
+free_mac_cb:
+ dsaf_dev->mac_cb = NULL;
+
+ return ret;
+}
+
+void hns_mac_uninit(struct dsaf_device *dsaf_dev)
+{
+ cpld_led_reset(dsaf_dev->mac_cb);
+ dsaf_dev->mac_cb = NULL;
+}
+
+int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb,
+ enum hnae_loop loop, int en)
+{
+ int ret;
+ struct mac_driver *drv = hns_mac_get_drv(mac_cb);
+
+ if (drv->config_loopback)
+ ret = drv->config_loopback(drv, loop, en);
+ else
+ ret = -ENOTSUPP;
+
+ return ret;
+}
+
+void hns_mac_update_stats(struct hns_mac_cb *mac_cb)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ mac_ctrl_drv->update_stats(mac_ctrl_drv);
+}
+
+void hns_mac_get_stats(struct hns_mac_cb *mac_cb, u64 *data)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ mac_ctrl_drv->get_ethtool_stats(mac_ctrl_drv, data);
+}
+
+void hns_mac_get_strings(struct hns_mac_cb *mac_cb,
+ int stringset, u8 *data)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ mac_ctrl_drv->get_strings(stringset, data);
+}
+
+int hns_mac_get_sset_count(struct hns_mac_cb *mac_cb, int stringset)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ return mac_ctrl_drv->get_sset_count(stringset);
+}
+
+int hns_mac_get_regs_count(struct hns_mac_cb *mac_cb)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ return mac_ctrl_drv->get_regs_count();
+}
+
+void hns_mac_get_regs(struct hns_mac_cb *mac_cb, void *data)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ mac_ctrl_drv->get_regs(mac_ctrl_drv, data);
+}
+
+void hns_set_led_opt(struct hns_mac_cb *mac_cb)
+{
+ int nic_data = 0;
+ int txpkts, rxpkts;
+
+ txpkts = mac_cb->txpkt_for_led - mac_cb->hw_stats.tx_good_pkts;
+ rxpkts = mac_cb->rxpkt_for_led - mac_cb->hw_stats.rx_good_pkts;
+ if (txpkts || rxpkts)
+ nic_data = 1;
+ else
+ nic_data = 0;
+ mac_cb->txpkt_for_led = mac_cb->hw_stats.tx_good_pkts;
+ mac_cb->rxpkt_for_led = mac_cb->hw_stats.rx_good_pkts;
+ hns_cpld_set_led(mac_cb, (int)mac_cb->link,
+ mac_cb->speed, nic_data);
+}
+
+int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
+ enum hnae_led_state status)
+{
+ if (!mac_cb || !mac_cb->cpld_vaddr)
+ return 0;
+
+ return cpld_set_led_id(mac_cb, status);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
new file mode 100644
index 000000000000..7da95a7581f9
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -0,0 +1,456 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _HNS_DSAF_MAC_H
+#define _HNS_DSAF_MAC_H
+
+#include <linux/phy.h>
+#include <linux/kernel.h>
+#include <linux/if_vlan.h>
+#include "hns_dsaf_main.h"
+
+struct dsaf_device;
+
+#define MAC_GMAC_SUPPORTED \
+ (SUPPORTED_10baseT_Half \
+ | SUPPORTED_10baseT_Full \
+ | SUPPORTED_100baseT_Half \
+ | SUPPORTED_100baseT_Full \
+ | SUPPORTED_Autoneg)
+
+#define MAC_DEFAULT_MTU (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
+#define MAC_MAX_MTU 9600
+#define MAC_MIN_MTU 68
+
+#define MAC_DEFAULT_PAUSE_TIME 0xff
+
+#define MAC_GMAC_IDX 0
+#define MAC_XGMAC_IDX 1
+
+#define ETH_STATIC_REG 1
+#define ETH_DUMP_REG 5
+/* check mac addr broadcast */
+#define MAC_IS_BROADCAST(p) ((*(p) == 0xff) && (*((p) + 1) == 0xff) && \
+ (*((p) + 2) == 0xff) && (*((p) + 3) == 0xff) && \
+ (*((p) + 4) == 0xff) && (*((p) + 5) == 0xff))
+
+/* check mac addr is 01-00-5e-xx-xx-xx*/
+#define MAC_IS_L3_MULTICAST(p) ((*((p) + 0) == 0x01) && \
+ (*((p) + 1) == 0x00) && \
+ (*((p) + 2) == 0x5e))
+
+/* check whether the mac addr is all zeros */
+#define MAC_IS_ALL_ZEROS(p) ((*(p) == 0) && (*((p) + 1) == 0) && \
+ (*((p) + 2) == 0) && (*((p) + 3) == 0) && \
+ (*((p) + 4) == 0) && (*((p) + 5) == 0))
+
+/*check mac addr multicast*/
+#define MAC_IS_MULTICAST(p) ((*((u8 *)((p) + 0)) & 0x01) ? (1) : (0))
+
+/**< Number of octets (8-bit bytes) in an ethernet address */
+#define MAC_NUM_OCTETS_PER_ADDR 6
+
+struct mac_priv {
+ void *mac;
+};
+
+/* net speed */
+enum mac_speed {
+ MAC_SPEED_10 = 10, /**< 10 Mbps */
+ MAC_SPEED_100 = 100, /**< 100 Mbps */
+ MAC_SPEED_1000 = 1000, /**< 1000 Mbps = 1 Gbps */
+ MAC_SPEED_10000 = 10000 /**< 10000 Mbps = 10 Gbps */
+};
+
+/*mac interface keyword */
+enum mac_intf {
+ MAC_IF_NONE = 0x00000000, /**< interface not valid */
+ MAC_IF_MII = 0x00010000, /**< MII interface */
+ MAC_IF_RMII = 0x00020000, /**< RMII interface */
+ MAC_IF_SMII = 0x00030000, /**< SMII interface */
+ MAC_IF_GMII = 0x00040000, /**< GMII interface */
+ MAC_IF_RGMII = 0x00050000, /**< RGMII interface */
+ MAC_IF_TBI = 0x00060000, /**< TBI interface */
+ MAC_IF_RTBI = 0x00070000, /**< RTBI interface */
+ MAC_IF_SGMII = 0x00080000, /**< SGMII interface */
+ MAC_IF_XGMII = 0x00090000, /**< XGMII interface */
+ MAC_IF_QSGMII = 0x000a0000 /**< QSGMII interface */
+};
+
+/*mac mode */
+enum mac_mode {
+ /**< Invalid Ethernet mode */
+ MAC_MODE_INVALID = 0,
+ /**< 10 Mbps MII */
+ MAC_MODE_MII_10 = (MAC_IF_MII | MAC_SPEED_10),
+ /**< 100 Mbps MII */
+ MAC_MODE_MII_100 = (MAC_IF_MII | MAC_SPEED_100),
+ /**< 10 Mbps RMII */
+ MAC_MODE_RMII_10 = (MAC_IF_RMII | MAC_SPEED_10),
+ /**< 100 Mbps RMII */
+ MAC_MODE_RMII_100 = (MAC_IF_RMII | MAC_SPEED_100),
+ /**< 10 Mbps SMII */
+ MAC_MODE_SMII_10 = (MAC_IF_SMII | MAC_SPEED_10),
+ /**< 100 Mbps SMII */
+ MAC_MODE_SMII_100 = (MAC_IF_SMII | MAC_SPEED_100),
+ /**< 1000 Mbps GMII */
+ MAC_MODE_GMII_1000 = (MAC_IF_GMII | MAC_SPEED_1000),
+ /**< 10 Mbps RGMII */
+ MAC_MODE_RGMII_10 = (MAC_IF_RGMII | MAC_SPEED_10),
+ /**< 100 Mbps RGMII */
+ MAC_MODE_RGMII_100 = (MAC_IF_RGMII | MAC_SPEED_100),
+ /**< 1000 Mbps RGMII */
+ MAC_MODE_RGMII_1000 = (MAC_IF_RGMII | MAC_SPEED_1000),
+ /**< 1000 Mbps TBI */
+ MAC_MODE_TBI_1000 = (MAC_IF_TBI | MAC_SPEED_1000),
+ /**< 1000 Mbps RTBI */
+ MAC_MODE_RTBI_1000 = (MAC_IF_RTBI | MAC_SPEED_1000),
+ /**< 10 Mbps SGMII */
+ MAC_MODE_SGMII_10 = (MAC_IF_SGMII | MAC_SPEED_10),
+ /**< 100 Mbps SGMII */
+ MAC_MODE_SGMII_100 = (MAC_IF_SGMII | MAC_SPEED_100),
+ /**< 1000 Mbps SGMII */
+ MAC_MODE_SGMII_1000 = (MAC_IF_SGMII | MAC_SPEED_1000),
+ /**< 10000 Mbps XGMII */
+ MAC_MODE_XGMII_10000 = (MAC_IF_XGMII | MAC_SPEED_10000),
+ /**< 1000 Mbps QSGMII */
+ MAC_MODE_QSGMII_1000 = (MAC_IF_QSGMII | MAC_SPEED_1000)
+};
+
+/*mac communicate mode*/
+enum mac_commom_mode {
+ MAC_COMM_MODE_NONE = 0, /**< No transmit/receive communication */
+ MAC_COMM_MODE_RX = 1, /**< Only receive communication */
+ MAC_COMM_MODE_TX = 2, /**< Only transmit communication */
+ MAC_COMM_MODE_RX_AND_TX = 3 /**< Both tx and rx communication */
+};
+
+/*mac statistics */
+struct mac_statistics {
+ u64 stat_pkts64; /* r-10G tr-DT 64 byte frame counter */
+ u64 stat_pkts65to127; /* r-10G 65 to 127 byte frame counter */
+ u64 stat_pkts128to255; /* r-10G 128 to 255 byte frame counter */
+ u64 stat_pkts256to511; /*r-10G 256 to 511 byte frame counter */
+ u64 stat_pkts512to1023;/* r-10G 512 to 1023 byte frame counter */
+ u64 stat_pkts1024to1518; /* r-10G 1024 to 1518 byte frame counter */
+ u64 stat_pkts1519to1522; /* r-10G 1519 to 1522 byte good frame count*/
+ /* Total number of packets that were less than 64 octets */
+ /* long with a wrong CRC.*/
+ u64 stat_fragments;
+ /* Total number of packets longer than valid maximum length octets */
+ u64 stat_jabbers;
+ /* number of dropped packets due to internal errors of */
+ /* the MAC Client. */
+ u64 stat_drop_events;
+ /* Incremented when frames of correct length but with */
+ /* CRC error are received.*/
+ u64 stat_crc_align_errors;
+ /* Total number of packets that were less than 64 octets */
+ /* long with a good CRC.*/
+ u64 stat_undersize_pkts;
+ u64 stat_oversize_pkts; /**< T,B.D*/
+
+ u64 stat_rx_pause; /**< Pause MAC Control received */
+ u64 stat_tx_pause; /**< Pause MAC Control sent */
+
+ u64 in_octets; /**< Total number of bytes received. */
+ u64 in_pkts; /* Total number of packets received. */
+ u64 in_mcast_pkts; /* Total number of multicast frames received */
+ u64 in_bcast_pkts; /* Total number of broadcast frames received */
+ /* Frames received, but discarded due to */
+ /* problems within the MAC RX. */
+ u64 in_discards;
+ u64 in_errors; /* Number of frames received with error: */
+ /* - FIFO Overflow Error */
+ /* - CRC Error */
+ /* - Frame Too Long Error */
+ /* - Alignment Error */
+ u64 out_octets; /* Total number of bytes sent. */
+ u64 out_pkts; /**< Total number of packets sent. */
+ u64 out_mcast_pkts; /* Total number of multicast frames sent */
+ u64 out_bcast_pkts; /* Total number of broadcast frames sent */
+ /* Frames received, but discarded due to problems within */
+ /* the MAC TX N/A!.*/
+ u64 out_discards;
+ u64 out_errors; /*Number of frames transmitted with error: */
+ /* - FIFO Overflow Error */
+ /* - FIFO Underflow Error */
+ /* - Other */
+};
+
+/* mac parameter struct; the mac gets its params from the nic or dsaf at init */
+struct mac_params {
+ char addr[MAC_NUM_OCTETS_PER_ADDR];
+ void *vaddr; /*virtual address*/
+ struct device *dev;
+ u8 mac_id;
+ /**< Ethernet operation mode (MAC-PHY interface and speed) */
+ enum mac_mode mac_mode;
+};
+
+struct mac_info {
+ u16 speed; /* The forced speed (lower bits) in Mbps.
+ * Please use ethtool_cmd_speed()/_set() to access it.
+ */
+ u8 duplex; /* Duplex, half or full */
+ u8 auto_neg; /* Enable or disable autonegotiation */
+ enum hnae_loop loop_mode;
+ u8 tx_pause_en;
+ u8 tx_pause_time;
+ u8 rx_pause_en;
+ u8 pad_and_crc_en;
+ u8 promiscuous_en;
+ u8 port_en; /*port enable*/
+};
+
+struct mac_entry_idx {
+ u8 addr[MAC_NUM_OCTETS_PER_ADDR];
+ u16 vlan_id:12;
+ u16 valid:1;
+ u16 qos:3;
+};
+
+struct mac_hw_stats {
+ u64 rx_good_pkts; /* only for xgmac */
+ u64 rx_good_bytes;
+ u64 rx_total_pkts; /* only for xgmac */
+ u64 rx_total_bytes; /* only for xgmac */
+ u64 rx_bad_bytes; /* only for gmac */
+ u64 rx_uc_pkts;
+ u64 rx_mc_pkts;
+ u64 rx_bc_pkts;
+ u64 rx_fragment_err; /* only for xgmac */
+ u64 rx_undersize; /* only for xgmac */
+ u64 rx_under_min;
+ u64 rx_minto64; /* only for gmac */
+ u64 rx_64bytes;
+ u64 rx_65to127;
+ u64 rx_128to255;
+ u64 rx_256to511;
+ u64 rx_512to1023;
+ u64 rx_1024to1518;
+ u64 rx_1519tomax;
+ u64 rx_1519tomax_good; /* only for xgmac */
+ u64 rx_oversize;
+ u64 rx_jabber_err;
+ u64 rx_fcs_err;
+ u64 rx_vlan_pkts; /* only for gmac */
+ u64 rx_data_err; /* only for gmac */
+ u64 rx_align_err; /* only for gmac */
+ u64 rx_long_err; /* only for gmac */
+ u64 rx_pfc_tc0;
+ u64 rx_pfc_tc1; /* only for xgmac */
+ u64 rx_pfc_tc2; /* only for xgmac */
+ u64 rx_pfc_tc3; /* only for xgmac */
+ u64 rx_pfc_tc4; /* only for xgmac */
+ u64 rx_pfc_tc5; /* only for xgmac */
+ u64 rx_pfc_tc6; /* only for xgmac */
+ u64 rx_pfc_tc7; /* only for xgmac */
+ u64 rx_unknown_ctrl;
+ u64 rx_filter_pkts; /* only for gmac */
+ u64 rx_filter_bytes; /* only for gmac */
+ u64 rx_fifo_overrun_err;/* only for gmac */
+ u64 rx_len_err; /* only for gmac */
+ u64 rx_comma_err; /* only for gmac */
+ u64 rx_symbol_err; /* only for xgmac */
+ u64 tx_good_to_sw; /* only for xgmac */
+ u64 tx_bad_to_sw; /* only for xgmac */
+ u64 rx_1731_pkts; /* only for xgmac */
+
+ u64 tx_good_bytes;
+ u64 tx_good_pkts; /* only for xgmac */
+ u64 tx_total_bytes; /* only for xgmac */
+ u64 tx_total_pkts; /* only for xgmac */
+ u64 tx_bad_bytes; /* only for gmac */
+ u64 tx_bad_pkts; /* only for xgmac */
+ u64 tx_uc_pkts;
+ u64 tx_mc_pkts;
+ u64 tx_bc_pkts;
+ u64 tx_undersize; /* only for xgmac */
+ u64 tx_fragment_err; /* only for xgmac */
+ u64 tx_under_min_pkts; /* only for gmac */
+ u64 tx_64bytes;
+ u64 tx_65to127;
+ u64 tx_128to255;
+ u64 tx_256to511;
+ u64 tx_512to1023;
+ u64 tx_1024to1518;
+ u64 tx_1519tomax;
+ u64 tx_1519tomax_good; /* only for xgmac */
+ u64 tx_oversize; /* only for xgmac */
+ u64 tx_jabber_err;
+ u64 tx_underrun_err; /* only for gmac */
+ u64 tx_vlan; /* only for gmac */
+ u64 tx_crc_err; /* only for gmac */
+ u64 tx_pfc_tc0;
+ u64 tx_pfc_tc1; /* only for xgmac */
+ u64 tx_pfc_tc2; /* only for xgmac */
+ u64 tx_pfc_tc3; /* only for xgmac */
+ u64 tx_pfc_tc4; /* only for xgmac */
+ u64 tx_pfc_tc5; /* only for xgmac */
+ u64 tx_pfc_tc6; /* only for xgmac */
+ u64 tx_pfc_tc7; /* only for xgmac */
+ u64 tx_ctrl; /* only for xgmac */
+ u64 tx_1731_pkts; /* only for xgmac */
+ u64 tx_1588_pkts; /* only for xgmac */
+ u64 rx_good_from_sw; /* only for xgmac */
+ u64 rx_bad_from_sw; /* only for xgmac */
+};
+
+struct hns_mac_cb {
+ struct device *dev;
+ struct dsaf_device *dsaf_dev;
+ struct mac_priv priv;
+ u8 __iomem *vaddr;
+ u8 __iomem *cpld_vaddr;
+ u8 __iomem *sys_ctl_vaddr;
+ u8 __iomem *serdes_vaddr;
+ struct mac_entry_idx addr_entry_idx[DSAF_MAX_VM_NUM];
+ u8 sfp_prsnt;
+ u8 cpld_led_value;
+ u8 mac_id;
+
+ u8 link;
+ u8 half_duplex;
+ u16 speed;
+ u16 max_speed;
+ u16 max_frm;
+ u16 tx_pause_frm_time;
+ u32 if_support;
+ u64 txpkt_for_led;
+ u64 rxpkt_for_led;
+ enum hnae_port_type mac_type;
+ phy_interface_t phy_if;
+ enum hnae_loop loop_mode;
+
+ struct device_node *phy_node;
+
+ struct mac_hw_stats hw_stats;
+};
+
+struct mac_driver {
+ /*init Mac when init nic or dsaf*/
+ void (*mac_init)(void *mac_drv);
+ /*remove mac when remove nic or dsaf*/
+ void (*mac_free)(void *mac_drv);
+ /*enable mac when enable nic or dsaf*/
+ void (*mac_enable)(void *mac_drv, enum mac_commom_mode mode);
+ /*disable mac when disable nic or dsaf*/
+ void (*mac_disable)(void *mac_drv, enum mac_commom_mode mode);
+ /* config mac address*/
+ void (*set_mac_addr)(void *mac_drv, char *mac_addr);
+ /*adjust mac mode of port,include speed and duplex*/
+ int (*adjust_link)(void *mac_drv, enum mac_speed speed,
+ u32 full_duplex);
+ /* config auto-negotiation mode of port */
+ void (*set_an_mode)(void *mac_drv, u8 enable);
+ /* config loopback mode */
+ int (*config_loopback)(void *mac_drv, enum hnae_loop loop_mode,
+ u8 enable);
+ /* config mtu*/
+ void (*config_max_frame_length)(void *mac_drv, u16 newval);
+ /*config PAD and CRC enable */
+ void (*config_pad_and_crc)(void *mac_drv, u8 newval);
+ /* config duplex mode*/
+ void (*config_half_duplex)(void *mac_drv, u8 newval);
+ /* config tx pause time; if pause_time is zero, tx pause is disabled */
+ void (*set_tx_auto_pause_frames)(void *mac_drv, u16 pause_time);
+ /*config rx pause enable*/
+ void (*set_rx_ignore_pause_frames)(void *mac_drv, u32 enable);
+ /* config rx mode for promiscuous*/
+ int (*set_promiscuous)(void *mac_drv, u8 enable);
+ /* get mac id */
+ void (*mac_get_id)(void *mac_drv, u8 *mac_id);
+ void (*mac_pausefrm_cfg)(void *mac_drv, u32 rx_en, u32 tx_en);
+
+ void (*autoneg_stat)(void *mac_drv, u32 *enable);
+ int (*set_pause_enable)(void *mac_drv, u32 rx_en, u32 tx_en);
+ void (*get_pause_enable)(void *mac_drv, u32 *rx_en, u32 *tx_en);
+ void (*get_link_status)(void *mac_drv, u32 *link_stat);
+ /* get the important regs */
+ void (*get_regs)(void *mac_drv, void *data);
+ int (*get_regs_count)(void);
+ /* get string names for the ethtool statistics */
+ void (*get_strings)(u32 stringset, u8 *data);
+ /* get the number of strings */
+ int (*get_sset_count)(int stringset);
+
+ /* get the statistics via ethtool */
+ void (*get_ethtool_stats)(void *mac_drv, u64 *data);
+
+ /* get mac information */
+ void (*get_info)(void *mac_drv, struct mac_info *mac_info);
+
+ void (*update_stats)(void *mac_drv);
+
+ enum mac_mode mac_mode;
+ u8 mac_id;
+ struct hns_mac_cb *mac_cb;
+ void __iomem *io_base;
+ unsigned int mac_en_flg; /* guard against enabling the mac twice */
+ unsigned int virt_dev_num;
+ struct device *dev;
+};
+
+struct mac_stats_string {
+ char desc[64];
+ unsigned long offset;
+};
+
+#define MAC_MAKE_MODE(interface, speed) (enum mac_mode)((interface) | (speed))
+#define MAC_INTERFACE_FROM_MODE(mode) (enum mac_intf)((mode) & 0xFFFF0000)
+#define MAC_SPEED_FROM_MODE(mode) (enum mac_speed)((mode) & 0x0000FFFF)
+#define MAC_STATS_FIELD_OFF(field) (offsetof(struct mac_hw_stats, field))
+
+static inline struct mac_driver *hns_mac_get_drv(
+ const struct hns_mac_cb *mac_cb)
+{
+ return (struct mac_driver *)(mac_cb->priv.mac);
+}
+
+void *hns_gmac_config(struct hns_mac_cb *mac_cb,
+ struct mac_params *mac_param);
+void *hns_xgmac_config(struct hns_mac_cb *mac_cb,
+ struct mac_params *mac_param);
+
+int hns_mac_init(struct dsaf_device *dsaf_dev);
+void mac_adjust_link(struct net_device *net_dev);
+void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status);
+int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr);
+int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
+ u32 port_num, char *addr, u8 en);
+int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vm, u8 en);
+void hns_mac_start(struct hns_mac_cb *mac_cb);
+void hns_mac_stop(struct hns_mac_cb *mac_cb);
+int hns_mac_del_mac(struct hns_mac_cb *mac_cb, u32 vfn, char *mac);
+void hns_mac_uninit(struct dsaf_device *dsaf_dev);
+void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex);
+void hns_mac_reset(struct hns_mac_cb *mac_cb);
+void hns_mac_get_autoneg(struct hns_mac_cb *mac_cb, u32 *auto_neg);
+void hns_mac_get_pauseparam(struct hns_mac_cb *mac_cb, u32 *rx_en, u32 *tx_en);
+int hns_mac_set_autoneg(struct hns_mac_cb *mac_cb, u8 enable);
+int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en);
+int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu);
+int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
+ u8 *auto_neg, u16 *speed, u8 *duplex);
+phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb);
+int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en);
+int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb,
+ enum hnae_loop loop, int en);
+void hns_mac_update_stats(struct hns_mac_cb *mac_cb);
+void hns_mac_get_stats(struct hns_mac_cb *mac_cb, u64 *data);
+void hns_mac_get_strings(struct hns_mac_cb *mac_cb, int stringset, u8 *data);
+int hns_mac_get_sset_count(struct hns_mac_cb *mac_cb, int stringset);
+void hns_mac_get_regs(struct hns_mac_cb *mac_cb, void *data);
+int hns_mac_get_regs_count(struct hns_mac_cb *mac_cb);
+void hns_set_led_opt(struct hns_mac_cb *mac_cb);
+int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
+ enum hnae_led_state status);
+#endif /* _HNS_DSAF_MAC_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
new file mode 100644
index 000000000000..2a98eba660c0
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -0,0 +1,2474 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/device.h>
+#include <linux/vmalloc.h>
+
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_rcb.h"
+#include "hns_dsaf_ppe.h"
+#include "hns_dsaf_mac.h"
+
+const char *g_dsaf_mode_match[DSAF_MODE_MAX] = {
+ [DSAF_MODE_DISABLE_2PORT_64VM] = "2port-64vf",
+ [DSAF_MODE_DISABLE_6PORT_0VM] = "6port-16rss",
+ [DSAF_MODE_DISABLE_6PORT_16VM] = "6port-16vf",
+};
+
+int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
+{
+ int ret, i;
+ u32 desc_num;
+ u32 buf_size;
+ const char *name, *mode_str;
+ struct device_node *np = dsaf_dev->dev->of_node;
+
+ if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v2"))
+ dsaf_dev->dsaf_ver = AE_VERSION_2;
+ else
+ dsaf_dev->dsaf_ver = AE_VERSION_1;
+
+ ret = of_property_read_string(np, "dsa_name", &name);
+ if (ret) {
+ dev_err(dsaf_dev->dev, "get dsaf name fail, ret=%d!\n", ret);
+ return ret;
+ }
+ strncpy(dsaf_dev->ae_dev.name, name, AE_NAME_SIZE);
+ dsaf_dev->ae_dev.name[AE_NAME_SIZE - 1] = '\0';
+
+ ret = of_property_read_string(np, "mode", &mode_str);
+ if (ret) {
+ dev_err(dsaf_dev->dev, "get dsaf mode fail, ret=%d!\n", ret);
+ return ret;
+ }
+ for (i = 0; i < DSAF_MODE_MAX; i++) {
+ if (g_dsaf_mode_match[i] &&
+ !strcmp(mode_str, g_dsaf_mode_match[i]))
+ break;
+ }
+ if (i >= DSAF_MODE_MAX ||
+ i == DSAF_MODE_INVALID || i == DSAF_MODE_ENABLE) {
+ dev_err(dsaf_dev->dev,
+ "%s prs mode str fail!\n", dsaf_dev->ae_dev.name);
+ return -EINVAL;
+ }
+ dsaf_dev->dsaf_mode = (enum dsaf_mode)i;
+
+ if (dsaf_dev->dsaf_mode > DSAF_MODE_ENABLE)
+ dsaf_dev->dsaf_en = HRD_DSAF_NO_DSAF_MODE;
+ else
+ dsaf_dev->dsaf_en = HRD_DSAF_MODE;
+
+ if ((i == DSAF_MODE_ENABLE_16VM) ||
+ (i == DSAF_MODE_DISABLE_2PORT_8VM) ||
+ (i == DSAF_MODE_DISABLE_6PORT_2VM))
+ dsaf_dev->dsaf_tc_mode = HRD_DSAF_8TC_MODE;
+ else
+ dsaf_dev->dsaf_tc_mode = HRD_DSAF_4TC_MODE;
+
+ dsaf_dev->sc_base = of_iomap(np, 0);
+ if (!dsaf_dev->sc_base) {
+ dev_err(dsaf_dev->dev,
+ "%s of_iomap 0 fail!\n", dsaf_dev->ae_dev.name);
+ ret = -ENOMEM;
+ goto unmap_base_addr;
+ }
+
+ dsaf_dev->sds_base = of_iomap(np, 1);
+ if (!dsaf_dev->sds_base) {
+ dev_err(dsaf_dev->dev,
+ "%s of_iomap 1 fail!\n", dsaf_dev->ae_dev.name);
+ ret = -ENOMEM;
+ goto unmap_base_addr;
+ }
+
+ dsaf_dev->ppe_base = of_iomap(np, 2);
+ if (!dsaf_dev->ppe_base) {
+ dev_err(dsaf_dev->dev,
+ "%s of_iomap 2 fail!\n", dsaf_dev->ae_dev.name);
+ ret = -ENOMEM;
+ goto unmap_base_addr;
+ }
+
+ dsaf_dev->io_base = of_iomap(np, 3);
+ if (!dsaf_dev->io_base) {
+ dev_err(dsaf_dev->dev,
+ "%s of_iomap 3 fail!\n", dsaf_dev->ae_dev.name);
+ ret = -ENOMEM;
+ goto unmap_base_addr;
+ }
+
+ dsaf_dev->cpld_base = of_iomap(np, 4);
+ if (!dsaf_dev->cpld_base)
+ dev_dbg(dsaf_dev->dev, "NO CPLD ADDR");
+
+ ret = of_property_read_u32(np, "desc-num", &desc_num);
+ if (ret < 0 || desc_num < HNS_DSAF_MIN_DESC_CNT ||
+ desc_num > HNS_DSAF_MAX_DESC_CNT) {
+ dev_err(dsaf_dev->dev, "get desc-num(%d) fail, ret=%d!\n",
+ desc_num, ret);
+ if (!ret)
+ ret = -EINVAL;
+ goto unmap_base_addr;
+ }
+ dsaf_dev->desc_num = desc_num;
+
+ ret = of_property_read_u32(np, "buf-size", &buf_size);
+ if (ret < 0) {
+ dev_err(dsaf_dev->dev,
+ "get buf-size fail, ret=%d!\r\n", ret);
+ goto unmap_base_addr;
+ }
+ dsaf_dev->buf_size = buf_size;
+
+ dsaf_dev->buf_size_type = hns_rcb_buf_size2type(buf_size);
+ if (dsaf_dev->buf_size_type < 0) {
+ dev_err(dsaf_dev->dev,
+ "buf_size(%d) is wrong!\n", buf_size);
+ ret = -EINVAL;
+ goto unmap_base_addr;
+ }
+
+ if (!dma_set_mask_and_coherent(dsaf_dev->dev, DMA_BIT_MASK(64)))
+ dev_dbg(dsaf_dev->dev, "set mask to 64bit\n");
+ else
+ dev_err(dsaf_dev->dev, "set mask to 64bit fail!\n");
+
+ return 0;
+
+unmap_base_addr:
+ if (dsaf_dev->io_base)
+ iounmap(dsaf_dev->io_base);
+ if (dsaf_dev->ppe_base)
+ iounmap(dsaf_dev->ppe_base);
+ if (dsaf_dev->sds_base)
+ iounmap(dsaf_dev->sds_base);
+ if (dsaf_dev->sc_base)
+ iounmap(dsaf_dev->sc_base);
+ if (dsaf_dev->cpld_base)
+ iounmap(dsaf_dev->cpld_base);
+ return ret;
+}
+
+static void hns_dsaf_free_cfg(struct dsaf_device *dsaf_dev)
+{
+ if (dsaf_dev->io_base)
+ iounmap(dsaf_dev->io_base);
+
+ if (dsaf_dev->ppe_base)
+ iounmap(dsaf_dev->ppe_base);
+
+ if (dsaf_dev->sds_base)
+ iounmap(dsaf_dev->sds_base);
+
+ if (dsaf_dev->sc_base)
+ iounmap(dsaf_dev->sc_base);
+
+ if (dsaf_dev->cpld_base)
+ iounmap(dsaf_dev->cpld_base);
+}
+
+/**
+ * hns_dsaf_sbm_link_sram_init_en - config dsaf_sbm_init_en
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_sbm_link_sram_init_en(struct dsaf_device *dsaf_dev)
+{
+ dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_SBM_INIT_S, 1);
+}
+
+/**
+ * hns_dsaf_reg_cnt_clr_ce - config hns_dsaf_reg_cnt_clr_ce
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @reg_cnt_clr_ce: config value
+ */
+static void
+hns_dsaf_reg_cnt_clr_ce(struct dsaf_device *dsaf_dev, u32 reg_cnt_clr_ce)
+{
+ dsaf_set_dev_bit(dsaf_dev, DSAF_DSA_REG_CNT_CLR_CE_REG,
+ DSAF_CNT_CLR_CE_S, reg_cnt_clr_ce);
+}
+
+/**
+ * hns_dsaf_ppe_qid_cfg - config ppe qid
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @qid_cfg: qid value written for every channel
+ */
+static void
+hns_dsaf_ppe_qid_cfg(struct dsaf_device *dsaf_dev, u32 qid_cfg)
+{
+ u32 i;
+
+ for (i = 0; i < DSAF_COMM_CHN; i++) {
+ dsaf_set_dev_field(dsaf_dev,
+ DSAF_PPE_QID_CFG_0_REG + 0x0004 * i,
+ DSAF_PPE_QID_CFG_M, DSAF_PPE_QID_CFG_S,
+ qid_cfg);
+ }
+}
+
+static void hns_dsaf_mix_def_qid_cfg(struct dsaf_device *dsaf_dev)
+{
+ u16 max_q_per_vf, max_vfn;
+ u32 q_id, q_num_per_port;
+ u32 i;
+
+ hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode,
+ HNS_DSAF_COMM_SERVICE_NW_IDX,
+ &max_vfn, &max_q_per_vf);
+ q_num_per_port = max_vfn * max_q_per_vf;
+
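+ /* give each service port a default queue id: port i gets the first
+ * queue of its own block of q_num_per_port queues
+ */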
+ for (i = 0, q_id = 0; i < DSAF_SERVICE_NW_NUM; i++) {
+ dsaf_set_dev_field(dsaf_dev,
+ DSAF_MIX_DEF_QID_0_REG + 0x0004 * i,
+ 0xff, 0, q_id);
+ q_id += q_num_per_port;
+ }
+}
+
+/**
+ * hns_dsaf_sw_port_type_cfg - cfg sw type
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @port_type: sw port type to set for every port
+ */
+static void hns_dsaf_sw_port_type_cfg(struct dsaf_device *dsaf_dev,
+ enum dsaf_sw_port_type port_type)
+{
+ u32 i;
+
+ for (i = 0; i < DSAF_SW_PORT_NUM; i++) {
+ dsaf_set_dev_field(dsaf_dev,
+ DSAF_SW_PORT_TYPE_0_REG + 0x0004 * i,
+ DSAF_SW_PORT_TYPE_M, DSAF_SW_PORT_TYPE_S,
+ port_type);
+ }
+}
+
+/**
+ * hns_dsaf_stp_port_type_cfg - cfg stp type
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @port_type: stp port type to set for every port
+ */
+static void hns_dsaf_stp_port_type_cfg(struct dsaf_device *dsaf_dev,
+ enum dsaf_stp_port_type port_type)
+{
+ u32 i;
+
+ for (i = 0; i < DSAF_COMM_CHN; i++) {
+ dsaf_set_dev_field(dsaf_dev,
+ DSAF_STP_PORT_TYPE_0_REG + 0x0004 * i,
+ DSAF_STP_PORT_TYPE_M, DSAF_STP_PORT_TYPE_S,
+ port_type);
+ }
+}
+
+/**
+ * hns_dsaf_sbm_cfg - config sbm
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_sbm_cfg(struct dsaf_device *dsaf_dev)
+{
+ u32 o_sbm_cfg;
+ u32 i;
+
+ for (i = 0; i < DSAF_SBM_NUM; i++) {
+ o_sbm_cfg = dsaf_read_dev(dsaf_dev,
+ DSAF_SBM_CFG_REG_0_REG + 0x80 * i);
+ dsaf_set_bit(o_sbm_cfg, DSAF_SBM_CFG_EN_S, 1);
+ dsaf_set_bit(o_sbm_cfg, DSAF_SBM_CFG_SHCUT_EN_S, 0);
+ dsaf_write_dev(dsaf_dev,
+ DSAF_SBM_CFG_REG_0_REG + 0x80 * i, o_sbm_cfg);
+ }
+}
+
+/**
+ * hns_dsaf_sbm_cfg_mib_en - config sbm
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static int hns_dsaf_sbm_cfg_mib_en(struct dsaf_device *dsaf_dev)
+{
+ u32 sbm_cfg_mib_en;
+ u32 i;
+ u32 reg;
+ u32 read_cnt;
+
+ for (i = 0; i < DSAF_SBM_NUM; i++) {
+ reg = DSAF_SBM_CFG_REG_0_REG + 0x80 * i;
+ dsaf_set_dev_bit(dsaf_dev, reg, DSAF_SBM_CFG_MIB_EN_S, 1);
+ }
+
+ /* wait for all sbm enables to finish */
+ for (i = 0; i < DSAF_SBM_NUM; i++) {
+ read_cnt = 0;
+ reg = DSAF_SBM_CFG_REG_0_REG + 0x80 * i;
+ do {
+ udelay(1);
+ sbm_cfg_mib_en = dsaf_get_dev_bit(
+ dsaf_dev, reg, DSAF_SBM_CFG_MIB_EN_S);
+ read_cnt++;
+ } while (sbm_cfg_mib_en == 0 &&
+ read_cnt < DSAF_CFG_READ_CNT);
+
+ if (sbm_cfg_mib_en == 0) {
+ dev_err(dsaf_dev->dev,
+ "sbm_cfg_mib_en fail,%s,sbm_num=%d\n",
+ dsaf_dev->ae_dev.name, i);
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_sbm_bp_wl_cfg - config sbm
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev)
+{
+ u32 o_sbm_bp_cfg0;
+ u32 o_sbm_bp_cfg1;
+ u32 o_sbm_bp_cfg2;
+ u32 o_sbm_bp_cfg3;
+ u32 reg;
+ u32 i;
+
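+ /* program the per-channel sbm buffer thresholds: the XGE ports first,
+ * then the PPE and RoCEE channels
+ */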
+ /* XGE */
+ for (i = 0; i < DSAF_XGE_NUM; i++) {
+ reg = DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg0 = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg0, DSAF_SBM_CFG0_COM_MAX_BUF_NUM_M,
+ DSAF_SBM_CFG0_COM_MAX_BUF_NUM_S, 512);
+ dsaf_set_field(o_sbm_bp_cfg0, DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_M,
+ DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_S, 0);
+ dsaf_set_field(o_sbm_bp_cfg0, DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_M,
+ DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_S, 0);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg0);
+
+ reg = DSAF_SBM_BP_CFG_1_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg1 = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg1, DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_M,
+ DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_S, 0);
+ dsaf_set_field(o_sbm_bp_cfg1, DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_M,
+ DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_S, 0);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg1);
+
+ reg = DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg2 = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_SET_BUF_NUM_M,
+ DSAF_SBM_CFG2_SET_BUF_NUM_S, 104);
+ dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_RESET_BUF_NUM_M,
+ DSAF_SBM_CFG2_RESET_BUF_NUM_S, 128);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg2);
+
+ reg = DSAF_SBM_BP_CFG_3_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg3 = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg3,
+ DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_M,
+ DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 110);
+ dsaf_set_field(o_sbm_bp_cfg3,
+ DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M,
+ DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 160);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg3);
+
+ /* thresholds for the case where pfc is not enabled */
+ reg = DSAF_SBM_BP_CFG_4_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg3 = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg3,
+ DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_M,
+ DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 128);
+ dsaf_set_field(o_sbm_bp_cfg3,
+ DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M,
+ DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 192);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg3);
+ }
+
+ /* PPE */
+ for (i = 0; i < DSAF_COMM_CHN; i++) {
+ reg = DSAF_SBM_BP_CFG_2_PPE_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg2 = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_SET_BUF_NUM_M,
+ DSAF_SBM_CFG2_SET_BUF_NUM_S, 10);
+ dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_RESET_BUF_NUM_M,
+ DSAF_SBM_CFG2_RESET_BUF_NUM_S, 12);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg2);
+ }
+
+ /* RoCEE */
+ for (i = 0; i < DSAF_COMM_CHN; i++) {
+ reg = DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg2 = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_SET_BUF_NUM_M,
+ DSAF_SBM_CFG2_SET_BUF_NUM_S, 2);
+ dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_RESET_BUF_NUM_M,
+ DSAF_SBM_CFG2_RESET_BUF_NUM_S, 4);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg2);
+ }
+}
+
+/**
+ * hns_dsaf_voq_bp_all_thrd_cfg - voq
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_voq_bp_all_thrd_cfg(struct dsaf_device *dsaf_dev)
+{
+ u32 voq_bp_all_thrd;
+ u32 i;
+
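+ /* the XGE voqs use higher up/down back-pressure thresholds than the
+ * remaining voqs
+ */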
+ for (i = 0; i < DSAF_VOQ_NUM; i++) {
+ voq_bp_all_thrd = dsaf_read_dev(
+ dsaf_dev, DSAF_VOQ_BP_ALL_THRD_0_REG + 0x40 * i);
+ if (i < DSAF_XGE_NUM) {
+ dsaf_set_field(voq_bp_all_thrd,
+ DSAF_VOQ_BP_ALL_DOWNTHRD_M,
+ DSAF_VOQ_BP_ALL_DOWNTHRD_S, 930);
+ dsaf_set_field(voq_bp_all_thrd,
+ DSAF_VOQ_BP_ALL_UPTHRD_M,
+ DSAF_VOQ_BP_ALL_UPTHRD_S, 950);
+ } else {
+ dsaf_set_field(voq_bp_all_thrd,
+ DSAF_VOQ_BP_ALL_DOWNTHRD_M,
+ DSAF_VOQ_BP_ALL_DOWNTHRD_S, 220);
+ dsaf_set_field(voq_bp_all_thrd,
+ DSAF_VOQ_BP_ALL_UPTHRD_M,
+ DSAF_VOQ_BP_ALL_UPTHRD_S, 230);
+ }
+ dsaf_write_dev(
+ dsaf_dev, DSAF_VOQ_BP_ALL_THRD_0_REG + 0x40 * i,
+ voq_bp_all_thrd);
+ }
+}
+
+/**
+ * hns_dsaf_tbl_tcam_data_cfg - tbl
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @ptbl_tcam_data: tcam data to write
+ */
+static void hns_dsaf_tbl_tcam_data_cfg(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_tbl_tcam_data *ptbl_tcam_data)
+{
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_LOW_0_REG,
+ ptbl_tcam_data->tbl_tcam_data_low);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_HIGH_0_REG,
+ ptbl_tcam_data->tbl_tcam_data_high);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_mcast_cfg - write the staged tcam mcast cfg
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mcast: mcast cfg to write
+ */
+static void hns_dsaf_tbl_tcam_mcast_cfg(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_tbl_tcam_mcast_cfg *mcast)
+{
+ u32 mcast_cfg4;
+
+ mcast_cfg4 = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
+ dsaf_set_bit(mcast_cfg4, DSAF_TBL_MCAST_CFG4_ITEM_VLD_S,
+ mcast->tbl_mcast_item_vld);
+ dsaf_set_bit(mcast_cfg4, DSAF_TBL_MCAST_CFG4_OLD_EN_S,
+ mcast->tbl_mcast_old_en);
+ dsaf_set_field(mcast_cfg4, DSAF_TBL_MCAST_CFG4_VM128_112_M,
+ DSAF_TBL_MCAST_CFG4_VM128_112_S,
+ mcast->tbl_mcast_port_msk[4]);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG, mcast_cfg4);
+
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG,
+ mcast->tbl_mcast_port_msk[3]);
+
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG,
+ mcast->tbl_mcast_port_msk[2]);
+
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG,
+ mcast->tbl_mcast_port_msk[1]);
+
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG,
+ mcast->tbl_mcast_port_msk[0]);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_ucast_cfg - write the staged tcam ucast cfg
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @tbl_tcam_ucast: ucast cfg to write
+ */
+static void hns_dsaf_tbl_tcam_ucast_cfg(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_tbl_tcam_ucast_cfg *tbl_tcam_ucast)
+{
+ u32 ucast_cfg1;
+
+ ucast_cfg1 = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
+ dsaf_set_bit(ucast_cfg1, DSAF_TBL_UCAST_CFG1_MAC_DISCARD_S,
+ tbl_tcam_ucast->tbl_ucast_mac_discard);
+ dsaf_set_bit(ucast_cfg1, DSAF_TBL_UCAST_CFG1_ITEM_VLD_S,
+ tbl_tcam_ucast->tbl_ucast_item_vld);
+ dsaf_set_bit(ucast_cfg1, DSAF_TBL_UCAST_CFG1_OLD_EN_S,
+ tbl_tcam_ucast->tbl_ucast_old_en);
+ dsaf_set_bit(ucast_cfg1, DSAF_TBL_UCAST_CFG1_DVC_S,
+ tbl_tcam_ucast->tbl_ucast_dvc);
+ dsaf_set_field(ucast_cfg1, DSAF_TBL_UCAST_CFG1_OUT_PORT_M,
+ DSAF_TBL_UCAST_CFG1_OUT_PORT_S,
+ tbl_tcam_ucast->tbl_ucast_out_port);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_UCAST_CFG_0_REG, ucast_cfg1);
+}
+
+/**
+ * hns_dsaf_tbl_line_cfg - write the staged line table cfg
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @tbl_lin: line cfg to write
+ */
+static void hns_dsaf_tbl_line_cfg(struct dsaf_device *dsaf_dev,
+ struct dsaf_tbl_line_cfg *tbl_lin)
+{
+ u32 tbl_line;
+
+ tbl_line = dsaf_read_dev(dsaf_dev, DSAF_TBL_LIN_CFG_0_REG);
+ dsaf_set_bit(tbl_line, DSAF_TBL_LINE_CFG_MAC_DISCARD_S,
+ tbl_lin->tbl_line_mac_discard);
+ dsaf_set_bit(tbl_line, DSAF_TBL_LINE_CFG_DVC_S,
+ tbl_lin->tbl_line_dvc);
+ dsaf_set_field(tbl_line, DSAF_TBL_LINE_CFG_OUT_PORT_M,
+ DSAF_TBL_LINE_CFG_OUT_PORT_S,
+ tbl_lin->tbl_line_out_port);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_LIN_CFG_0_REG, tbl_line);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_mcast_pul - pulse the mcast table write strobe
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_tbl_tcam_mcast_pul(struct dsaf_device *dsaf_dev)
+{
+ u32 o_tbl_pul;
+
+ o_tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG);
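+ /* pulse the mcast-valid bit (write 1 then 0) so the staged mcast cfg
+ * is taken over into the table
+ */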
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_MCAST_VLD_S, 1);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_MCAST_VLD_S, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+}
+
+/**
+ * hns_dsaf_tbl_line_pul - pulse the line table write strobe
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_tbl_line_pul(struct dsaf_device *dsaf_dev)
+{
+ u32 tbl_pul;
+
+ tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG);
+ dsaf_set_bit(tbl_pul, DSAF_TBL_PUL_LINE_VLD_S, 1);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, tbl_pul);
+ dsaf_set_bit(tbl_pul, DSAF_TBL_PUL_LINE_VLD_S, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, tbl_pul);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_data_mcast_pul - pulse the tcam data and mcast write strobes
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_tbl_tcam_data_mcast_pul(
+ struct dsaf_device *dsaf_dev)
+{
+ u32 o_tbl_pul;
+
+ o_tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_DATA_VLD_S, 1);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_MCAST_VLD_S, 1);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_DATA_VLD_S, 0);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_MCAST_VLD_S, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_data_ucast_pul - pulse the tcam data and ucast write strobes
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_tbl_tcam_data_ucast_pul(
+ struct dsaf_device *dsaf_dev)
+{
+ u32 o_tbl_pul;
+
+ o_tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_DATA_VLD_S, 1);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_UCAST_VLD_S, 1);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_DATA_VLD_S, 0);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_UCAST_VLD_S, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+}
+
+void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en)
+{
+ dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_MIX_MODE_S, !!en);
+}
+
+/**
+ * hns_dsaf_tbl_stat_en - enable table lookup statistics
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_tbl_stat_en(struct dsaf_device *dsaf_dev)
+{
+ u32 o_tbl_ctrl;
+
+ o_tbl_ctrl = dsaf_read_dev(dsaf_dev, DSAF_TBL_DFX_CTRL_0_REG);
+ dsaf_set_bit(o_tbl_ctrl, DSAF_TBL_DFX_LINE_LKUP_NUM_EN_S, 1);
+ dsaf_set_bit(o_tbl_ctrl, DSAF_TBL_DFX_UC_LKUP_NUM_EN_S, 1);
+ dsaf_set_bit(o_tbl_ctrl, DSAF_TBL_DFX_MC_LKUP_NUM_EN_S, 1);
+ dsaf_set_bit(o_tbl_ctrl, DSAF_TBL_DFX_BC_LKUP_NUM_EN_S, 1);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_DFX_CTRL_0_REG, o_tbl_ctrl);
+}
+
+/**
+ * hns_dsaf_rocee_bp_en - rocee back press enable
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_rocee_bp_en(struct dsaf_device *dsaf_dev)
+{
+ dsaf_set_dev_bit(dsaf_dev, DSAF_XGE_CTRL_SIG_CFG_0_REG,
+ DSAF_FC_XGE_TX_PAUSE_S, 1);
+}
+
+/* set msk for dsaf exception irq*/
+static void hns_dsaf_int_xge_msk_set(struct dsaf_device *dsaf_dev,
+ u32 chnn_num, u32 mask_set)
+{
+ dsaf_write_dev(dsaf_dev,
+ DSAF_XGE_INT_MSK_0_REG + 0x4 * chnn_num, mask_set);
+}
+
+static void hns_dsaf_int_ppe_msk_set(struct dsaf_device *dsaf_dev,
+ u32 chnn_num, u32 msk_set)
+{
+ dsaf_write_dev(dsaf_dev,
+ DSAF_PPE_INT_MSK_0_REG + 0x4 * chnn_num, msk_set);
+}
+
+static void hns_dsaf_int_rocee_msk_set(struct dsaf_device *dsaf_dev,
+ u32 chnn, u32 msk_set)
+{
+ dsaf_write_dev(dsaf_dev,
+ DSAF_ROCEE_INT_MSK_0_REG + 0x4 * chnn, msk_set);
+}
+
+static void
+hns_dsaf_int_tbl_msk_set(struct dsaf_device *dsaf_dev, u32 msk_set)
+{
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_INT_MSK_0_REG, msk_set);
+}
+
+/* clr dsaf exception irq*/
+static void hns_dsaf_int_xge_src_clr(struct dsaf_device *dsaf_dev,
+ u32 chnn_num, u32 int_src)
+{
+ dsaf_write_dev(dsaf_dev,
+ DSAF_XGE_INT_SRC_0_REG + 0x4 * chnn_num, int_src);
+}
+
+static void hns_dsaf_int_ppe_src_clr(struct dsaf_device *dsaf_dev,
+ u32 chnn, u32 int_src)
+{
+ dsaf_write_dev(dsaf_dev,
+ DSAF_PPE_INT_SRC_0_REG + 0x4 * chnn, int_src);
+}
+
+static void hns_dsaf_int_rocee_src_clr(struct dsaf_device *dsaf_dev,
+ u32 chnn, u32 int_src)
+{
+ dsaf_write_dev(dsaf_dev,
+ DSAF_ROCEE_INT_SRC_0_REG + 0x4 * chnn, int_src);
+}
+
+static void hns_dsaf_int_tbl_src_clr(struct dsaf_device *dsaf_dev,
+ u32 int_src)
+{
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_INT_SRC_0_REG, int_src);
+}
+
+/**
+ * hns_dsaf_single_line_tbl_cfg - write one line table entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: line table index
+ * @ptbl_line: line cfg to write
+ */
+static void hns_dsaf_single_line_tbl_cfg(
+ struct dsaf_device *dsaf_dev,
+ u32 address, struct dsaf_tbl_line_cfg *ptbl_line)
+{
+ /*Write Addr*/
+ hns_dsaf_tbl_line_addr_cfg(dsaf_dev, address);
+
+ /*Write Line*/
+ hns_dsaf_tbl_line_cfg(dsaf_dev, ptbl_line);
+
+ /*Write Pulse*/
+ hns_dsaf_tbl_line_pul(dsaf_dev);
+}
+
+/**
+ * hns_dsaf_tcam_uc_cfg - write one tcam unicast entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: tcam entry index
+ * @ptbl_tcam_data: tcam key data
+ * @ptbl_tcam_ucast: ucast cfg to write
+ */
+static void hns_dsaf_tcam_uc_cfg(
+ struct dsaf_device *dsaf_dev, u32 address,
+ struct dsaf_tbl_tcam_data *ptbl_tcam_data,
+ struct dsaf_tbl_tcam_ucast_cfg *ptbl_tcam_ucast)
+{
+ /*Write Addr*/
+ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+ /*Write Tcam Data*/
+ hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, ptbl_tcam_data);
+ /*Write Tcam Ucast*/
+ hns_dsaf_tbl_tcam_ucast_cfg(dsaf_dev, ptbl_tcam_ucast);
+ /*Write Pulse*/
+ hns_dsaf_tbl_tcam_data_ucast_pul(dsaf_dev);
+}
+
+/**
+ * hns_dsaf_tcam_mc_cfg - write one tcam multicast entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: tcam entry index
+ * @ptbl_tcam_data: tcam key data
+ * @ptbl_tcam_mcast: mcast cfg to write
+ */
+static void hns_dsaf_tcam_mc_cfg(
+ struct dsaf_device *dsaf_dev, u32 address,
+ struct dsaf_tbl_tcam_data *ptbl_tcam_data,
+ struct dsaf_tbl_tcam_mcast_cfg *ptbl_tcam_mcast)
+{
+ /*Write Addr*/
+ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+ /*Write Tcam Data*/
+ hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, ptbl_tcam_data);
+ /*Write Tcam Mcast*/
+ hns_dsaf_tbl_tcam_mcast_cfg(dsaf_dev, ptbl_tcam_mcast);
+ /*Write Pulse*/
+ hns_dsaf_tbl_tcam_data_mcast_pul(dsaf_dev);
+}
+
+/**
+ * hns_dsaf_tcam_mc_invld - invalidate one tcam multicast entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: tcam entry index
+ */
+static void hns_dsaf_tcam_mc_invld(struct dsaf_device *dsaf_dev, u32 address)
+{
+ /*Write Addr*/
+ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+
+ /*write tcam mcast*/
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG, 0);
+
+ /*Write Pulse*/
+ hns_dsaf_tbl_tcam_mcast_pul(dsaf_dev);
+}
+
+/**
+ * hns_dsaf_tcam_uc_get - read one tcam unicast entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: tcam entry index
+ * @ptbl_tcam_data: buffer for the tcam key data
+ * @ptbl_tcam_ucast: buffer for the ucast cfg
+ */
+static void hns_dsaf_tcam_uc_get(
+ struct dsaf_device *dsaf_dev, u32 address,
+ struct dsaf_tbl_tcam_data *ptbl_tcam_data,
+ struct dsaf_tbl_tcam_ucast_cfg *ptbl_tcam_ucast)
+{
+ u32 tcam_read_data0;
+ u32 tcam_read_data4;
+
+ /*Write Addr*/
+ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+
+ /*read tcam item pulse*/
+ hns_dsaf_tbl_tcam_load_pul(dsaf_dev);
+
+ /*read tcam data*/
+ ptbl_tcam_data->tbl_tcam_data_high
+ = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
+ ptbl_tcam_data->tbl_tcam_data_low
+ = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+
+ /*read tcam mcast*/
+ tcam_read_data0 = dsaf_read_dev(dsaf_dev,
+ DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
+ tcam_read_data4 = dsaf_read_dev(dsaf_dev,
+ DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
+
+ ptbl_tcam_ucast->tbl_ucast_item_vld
+ = dsaf_get_bit(tcam_read_data4,
+ DSAF_TBL_MCAST_CFG4_ITEM_VLD_S);
+ ptbl_tcam_ucast->tbl_ucast_old_en
+ = dsaf_get_bit(tcam_read_data4, DSAF_TBL_MCAST_CFG4_OLD_EN_S);
+ ptbl_tcam_ucast->tbl_ucast_mac_discard
+ = dsaf_get_bit(tcam_read_data0,
+ DSAF_TBL_UCAST_CFG1_MAC_DISCARD_S);
+ ptbl_tcam_ucast->tbl_ucast_out_port
+ = dsaf_get_field(tcam_read_data0,
+ DSAF_TBL_UCAST_CFG1_OUT_PORT_M,
+ DSAF_TBL_UCAST_CFG1_OUT_PORT_S);
+ ptbl_tcam_ucast->tbl_ucast_dvc
+ = dsaf_get_bit(tcam_read_data0, DSAF_TBL_UCAST_CFG1_DVC_S);
+}
+
+/**
+ * hns_dsaf_tcam_mc_get - read one tcam multicast entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: tcam entry index
+ * @ptbl_tcam_data: buffer for the tcam key data
+ * @ptbl_tcam_mcast: buffer for the mcast cfg
+ */
+static void hns_dsaf_tcam_mc_get(
+ struct dsaf_device *dsaf_dev, u32 address,
+ struct dsaf_tbl_tcam_data *ptbl_tcam_data,
+ struct dsaf_tbl_tcam_mcast_cfg *ptbl_tcam_mcast)
+{
+ u32 data_tmp;
+
+ /*Write Addr*/
+ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+
+ /*read tcam item pulse*/
+ hns_dsaf_tbl_tcam_load_pul(dsaf_dev);
+
+ /*read tcam data*/
+ ptbl_tcam_data->tbl_tcam_data_high =
+ dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
+ ptbl_tcam_data->tbl_tcam_data_low =
+ dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+
+ /*read tcam mcast*/
+ ptbl_tcam_mcast->tbl_mcast_port_msk[0] =
+ dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
+ ptbl_tcam_mcast->tbl_mcast_port_msk[1] =
+ dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
+ ptbl_tcam_mcast->tbl_mcast_port_msk[2] =
+ dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
+ ptbl_tcam_mcast->tbl_mcast_port_msk[3] =
+ dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
+
+ data_tmp = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
+ ptbl_tcam_mcast->tbl_mcast_item_vld =
+ dsaf_get_bit(data_tmp, DSAF_TBL_MCAST_CFG4_ITEM_VLD_S);
+ ptbl_tcam_mcast->tbl_mcast_old_en =
+ dsaf_get_bit(data_tmp, DSAF_TBL_MCAST_CFG4_OLD_EN_S);
+ ptbl_tcam_mcast->tbl_mcast_port_msk[4] =
+ dsaf_get_field(data_tmp, DSAF_TBL_MCAST_CFG4_VM128_112_M,
+ DSAF_TBL_MCAST_CFG4_VM128_112_S);
+}
+
+/**
+ * hns_dsaf_tbl_line_init - initialize the line table
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_tbl_line_init(struct dsaf_device *dsaf_dev)
+{
+ u32 i;
+ /* by default, set every line table entry to discard */
+ struct dsaf_tbl_line_cfg tbl_line[] = {{1, 0, 0} };
+
+ for (i = 0; i < DSAF_LINE_SUM; i++)
+ hns_dsaf_single_line_tbl_cfg(dsaf_dev, i, tbl_line);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_init - initialize the tcam table
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_tbl_tcam_init(struct dsaf_device *dsaf_dev)
+{
+ u32 i;
+ struct dsaf_tbl_tcam_data tcam_data[] = {{0, 0} };
+ struct dsaf_tbl_tcam_ucast_cfg tcam_ucast[] = {{0, 0, 0, 0, 0} };
+
+ /*tcam tbl*/
+ for (i = 0; i < DSAF_TCAM_SUM; i++)
+ hns_dsaf_tcam_uc_cfg(dsaf_dev, i, tcam_data, tcam_ucast);
+}
+
+/**
+ * hns_dsaf_pfc_en_cfg - dsaf pfc pause cfg
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_id: mac (port) id
+ * @en: enable or disable pfc for this port
+ */
+static void hns_dsaf_pfc_en_cfg(struct dsaf_device *dsaf_dev,
+ int mac_id, int en)
+{
+ if (!en)
+ dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, 0);
+ else
+ dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, 0xff);
+}
+
+/**
+ * hns_dsaf_comm_init - common dsaf register init
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev)
+{
+ u32 i;
+ u32 o_dsaf_cfg;
+
+ o_dsaf_cfg = dsaf_read_dev(dsaf_dev, DSAF_CFG_0_REG);
+ dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_EN_S, dsaf_dev->dsaf_en);
+ dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_TC_MODE_S, dsaf_dev->dsaf_tc_mode);
+ dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_CRC_EN_S, 0);
+ dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_MIX_MODE_S, 0);
+ dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_LOCA_ADDR_EN_S, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_CFG_0_REG, o_dsaf_cfg);
+
+ hns_dsaf_reg_cnt_clr_ce(dsaf_dev, 1);
+ hns_dsaf_stp_port_type_cfg(dsaf_dev, DSAF_STP_PORT_TYPE_FORWARD);
+
+ /* set 22 queue per tx ppe engine, only used in switch mode */
+ hns_dsaf_ppe_qid_cfg(dsaf_dev, DSAF_DEFAUTL_QUEUE_NUM_PER_PPE);
+
+ /* set promisc def queue id */
+ hns_dsaf_mix_def_qid_cfg(dsaf_dev);
+
+ /* in non-switch mode, set all ports to access mode */
+ hns_dsaf_sw_port_type_cfg(dsaf_dev, DSAF_SW_PORT_TYPE_NON_VLAN);
+
+ /* set dsaf pfc to 0 for parsing rx pause */
+ for (i = 0; i < DSAF_COMM_CHN; i++)
+ hns_dsaf_pfc_en_cfg(dsaf_dev, i, 0);
+
+ /*msk and clr exception irqs */
+ for (i = 0; i < DSAF_COMM_CHN; i++) {
+ hns_dsaf_int_xge_src_clr(dsaf_dev, i, 0xfffffffful);
+ hns_dsaf_int_ppe_src_clr(dsaf_dev, i, 0xfffffffful);
+ hns_dsaf_int_rocee_src_clr(dsaf_dev, i, 0xfffffffful);
+
+ hns_dsaf_int_xge_msk_set(dsaf_dev, i, 0xfffffffful);
+ hns_dsaf_int_ppe_msk_set(dsaf_dev, i, 0xfffffffful);
+ hns_dsaf_int_rocee_msk_set(dsaf_dev, i, 0xfffffffful);
+ }
+ hns_dsaf_int_tbl_src_clr(dsaf_dev, 0xfffffffful);
+ hns_dsaf_int_tbl_msk_set(dsaf_dev, 0xfffffffful);
+}
+
+/**
+ * hns_dsaf_inode_init - initialize the inodes
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_inode_init(struct dsaf_device *dsaf_dev)
+{
+ u32 reg;
+ u32 tc_cfg;
+ u32 i;
+
+ if (dsaf_dev->dsaf_tc_mode == HRD_DSAF_4TC_MODE)
+ tc_cfg = HNS_DSAF_I4TC_CFG;
+ else
+ tc_cfg = HNS_DSAF_I8TC_CFG;
+
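+ /* each inode's input port number cycles over the XGE ports; every
+ * inode gets the same pri/tc configuration
+ */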
+ for (i = 0; i < DSAF_INODE_NUM; i++) {
+ reg = DSAF_INODE_IN_PORT_NUM_0_REG + 0x80 * i;
+ dsaf_set_dev_field(dsaf_dev, reg, DSAF_INODE_IN_PORT_NUM_M,
+ DSAF_INODE_IN_PORT_NUM_S, i % DSAF_XGE_NUM);
+
+ reg = DSAF_INODE_PRI_TC_CFG_0_REG + 0x80 * i;
+ dsaf_write_dev(dsaf_dev, reg, tc_cfg);
+ }
+}
+
+/**
+ * hns_dsaf_sbm_init - initialize the sbm
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static int hns_dsaf_sbm_init(struct dsaf_device *dsaf_dev)
+{
+ u32 flag;
+ u32 cnt = 0;
+ int ret;
+
+ hns_dsaf_sbm_bp_wl_cfg(dsaf_dev);
+
+ /* enable sbm channel, disable sbm channel shortcut function */
+ hns_dsaf_sbm_cfg(dsaf_dev);
+
+ /* enable sbm mib */
+ ret = hns_dsaf_sbm_cfg_mib_en(dsaf_dev);
+ if (ret) {
+ dev_err(dsaf_dev->dev,
+ "hns_dsaf_sbm_cfg_mib_en fail,%s, ret=%d\n",
+ dsaf_dev->ae_dev.name, ret);
+ return ret;
+ }
+
+ /* enable sbm initial link sram */
+ hns_dsaf_sbm_link_sram_init_en(dsaf_dev);
+
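+ /* poll until the sram init-done flag is set, giving up after
+ * DSAF_CFG_READ_CNT reads
+ */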
+ do {
+ usleep_range(200, 210);
+ flag = dsaf_read_dev(dsaf_dev, DSAF_SRAM_INIT_OVER_0_REG);
+ cnt++;
+ } while (flag != DSAF_SRAM_INIT_FINISH_FLAG && cnt < DSAF_CFG_READ_CNT);
+
+ if (flag != DSAF_SRAM_INIT_FINISH_FLAG) {
+ dev_err(dsaf_dev->dev,
+ "hns_dsaf_sbm_init fail %s, flag=%d, cnt=%d\n",
+ dsaf_dev->ae_dev.name, flag, cnt);
+ return -ENODEV;
+ }
+
+ hns_dsaf_rocee_bp_en(dsaf_dev);
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_tbl_init - initialize the dsaf tables
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_tbl_init(struct dsaf_device *dsaf_dev)
+{
+ hns_dsaf_tbl_stat_en(dsaf_dev);
+
+ hns_dsaf_tbl_tcam_init(dsaf_dev);
+ hns_dsaf_tbl_line_init(dsaf_dev);
+}
+
+/**
+ * hns_dsaf_voq_init - initialize the voq
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_voq_init(struct dsaf_device *dsaf_dev)
+{
+ hns_dsaf_voq_bp_all_thrd_cfg(dsaf_dev);
+}
+
+/**
+ * hns_dsaf_init_hw - init dsa fabric hardware
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static int hns_dsaf_init_hw(struct dsaf_device *dsaf_dev)
+{
+ int ret;
+
+ dev_dbg(dsaf_dev->dev,
+ "hns_dsaf_init_hw begin %s !\n", dsaf_dev->ae_dev.name);
+
+ hns_dsaf_rst(dsaf_dev, 0);
+ mdelay(10);
+ hns_dsaf_rst(dsaf_dev, 1);
+
+ hns_dsaf_comm_init(dsaf_dev);
+
+ /*init XBAR_INODE*/
+ hns_dsaf_inode_init(dsaf_dev);
+
+ /*init SBM*/
+ ret = hns_dsaf_sbm_init(dsaf_dev);
+ if (ret)
+ return ret;
+
+ /*init TBL*/
+ hns_dsaf_tbl_init(dsaf_dev);
+
+ /*init VOQ*/
+ hns_dsaf_voq_init(dsaf_dev);
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_remove_hw - uninit dsa fabric hardware
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_remove_hw(struct dsaf_device *dsaf_dev)
+{
+ /*reset*/
+ hns_dsaf_rst(dsaf_dev, 0);
+}
+
+/**
+ * hns_dsaf_init - init dsa fabric
+ * @dsaf_dev: dsa fabric device struct pointer
+ * return 0 - success, negative - fail
+ */
+static int hns_dsaf_init(struct dsaf_device *dsaf_dev)
+{
+ struct dsaf_drv_priv *priv =
+ (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+ u32 i;
+ int ret;
+
+ ret = hns_dsaf_init_hw(dsaf_dev);
+ if (ret)
+ return ret;
+
+ /* malloc mem for tcam mac key(vlan+mac) */
+ priv->soft_mac_tbl = vzalloc(sizeof(*priv->soft_mac_tbl)
+ * DSAF_TCAM_SUM);
+ if (!priv->soft_mac_tbl) {
+ ret = -ENOMEM;
+ goto remove_hw;
+ }
+
+ /* mark all entries invalid */
+ for (i = 0; i < DSAF_TCAM_SUM; i++)
+ (priv->soft_mac_tbl + i)->index = DSAF_INVALID_ENTRY_IDX;
+
+ return 0;
+
+remove_hw:
+ hns_dsaf_remove_hw(dsaf_dev);
+ return ret;
+}
+
+/**
+ * hns_dsaf_free - free dsa fabric
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_free(struct dsaf_device *dsaf_dev)
+{
+ struct dsaf_drv_priv *priv =
+ (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+
+ hns_dsaf_remove_hw(dsaf_dev);
+
+ /* free all mac mem */
+ vfree(priv->soft_mac_tbl);
+ priv->soft_mac_tbl = NULL;
+}
+
+/**
+ * hns_dsaf_find_soft_mac_entry - find dsa fabric soft entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_key: tcam key struct pointer
+ */
+static u16 hns_dsaf_find_soft_mac_entry(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_tbl_tcam_key *mac_key)
+{
+ struct dsaf_drv_priv *priv =
+ (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+ u32 i;
+
+ soft_mac_entry = priv->soft_mac_tbl;
+ for (i = 0; i < DSAF_TCAM_SUM; i++) {
+ /* valid entry with a matching key? */
+ if ((soft_mac_entry->index != DSAF_INVALID_ENTRY_IDX) &&
+ (soft_mac_entry->tcam_key.high.val == mac_key->high.val) &&
+ (soft_mac_entry->tcam_key.low.val == mac_key->low.val))
+ /* return find result --soft index */
+ return soft_mac_entry->index;
+
+ soft_mac_entry++;
+ }
+ return DSAF_INVALID_ENTRY_IDX;
+}
+
+/**
+ * hns_dsaf_find_empty_mac_entry - search dsa fabric soft empty-entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static u16 hns_dsaf_find_empty_mac_entry(struct dsaf_device *dsaf_dev)
+{
+ struct dsaf_drv_priv *priv =
+ (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+ u32 i;
+
+ soft_mac_entry = priv->soft_mac_tbl;
+ for (i = 0; i < DSAF_TCAM_SUM; i++) {
+ /* an invalid entry is free for reuse */
+ if (soft_mac_entry->index == DSAF_INVALID_ENTRY_IDX)
+ /* return find result --soft index */
+ return i;
+
+ soft_mac_entry++;
+ }
+ return DSAF_INVALID_ENTRY_IDX;
+}
+
+/**
+ * hns_dsaf_set_mac_key - set mac key
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_key: tcam key pointer
+ * @vlan_id: vlan id
+ * @in_port_num: input port num
+ * @addr: mac addr
+ */
+static void hns_dsaf_set_mac_key(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_tbl_tcam_key *mac_key, u16 vlan_id, u8 in_port_num,
+ u8 *addr)
+{
+ u8 port;
+
+ if (dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE)
+ /*DSAF mode : in port id fixed 0*/
+ port = 0;
+ else
+ /*non-dsaf mode*/
+ port = in_port_num;
+
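+ /* the tcam lookup key is the destination mac address together with
+ * the vlan id and the ingress port
+ */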
+ mac_key->high.bits.mac_0 = addr[0];
+ mac_key->high.bits.mac_1 = addr[1];
+ mac_key->high.bits.mac_2 = addr[2];
+ mac_key->high.bits.mac_3 = addr[3];
+ mac_key->low.bits.mac_4 = addr[4];
+ mac_key->low.bits.mac_5 = addr[5];
+ mac_key->low.bits.vlan = vlan_id;
+ mac_key->low.bits.port = port;
+}
+
+/**
+ * hns_dsaf_set_mac_uc_entry - set mac uc-entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: uc-mac entry
+ */
+int hns_dsaf_set_mac_uc_entry(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry)
+{
+ u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+ struct dsaf_tbl_tcam_ucast_cfg mac_data;
+ struct dsaf_drv_priv *priv =
+ (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+
+ /* mac addr check */
+ if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
+ MAC_IS_BROADCAST(mac_entry->addr) ||
+ MAC_IS_MULTICAST(mac_entry->addr)) {
+ dev_err(dsaf_dev->dev,
+ "set_uc %s Mac %02x:%02x:%02x:%02x:%02x:%02x err!\n",
+ dsaf_dev->ae_dev.name, mac_entry->addr[0],
+ mac_entry->addr[1], mac_entry->addr[2],
+ mac_entry->addr[3], mac_entry->addr[4],
+ mac_entry->addr[5]);
+ return -EINVAL;
+ }
+
+ /* config key */
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, mac_entry->in_vlan_id,
+ mac_entry->in_port_num, mac_entry->addr);
+
+ /* does the entry already exist? */
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /* no matching entry, find an empty one */
+ entry_index = hns_dsaf_find_empty_mac_entry(dsaf_dev);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /* no empty entry left, return error */
+ dev_err(dsaf_dev->dev,
+ "set_uc_entry failed, %s Mac key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name,
+ mac_key.high.val, mac_key.low.val);
+ return -EINVAL;
+ }
+ }
+
+ dev_dbg(dsaf_dev->dev,
+ "set_uc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val, entry_index);
+
+ /* config hardware entry */
+ mac_data.tbl_ucast_item_vld = 1;
+ mac_data.tbl_ucast_mac_discard = 0;
+ mac_data.tbl_ucast_old_en = 0;
+ /* default config dvc to 0 */
+ mac_data.tbl_ucast_dvc = 0;
+ mac_data.tbl_ucast_out_port = mac_entry->port_num;
+ hns_dsaf_tcam_uc_cfg(
+ dsaf_dev, entry_index,
+ (struct dsaf_tbl_tcam_data *)(&mac_key), &mac_data);
+
+ /* config software entry */
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = entry_index;
+ soft_mac_entry->tcam_key.high.val = mac_key.high.val;
+ soft_mac_entry->tcam_key.low.val = mac_key.low.val;
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_set_mac_mc_entry - set mac mc-entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: mc-mac entry
+ */
+int hns_dsaf_set_mac_mc_entry(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_multi_dest_entry *mac_entry)
+{
+ u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+ struct dsaf_tbl_tcam_mcast_cfg mac_data;
+ struct dsaf_drv_priv *priv =
+ (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+ struct dsaf_drv_tbl_tcam_key tmp_mac_key;
+
+ /* mac addr check */
+ if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
+ dev_err(dsaf_dev->dev,
+ "set uc %s Mac %02x:%02x:%02x:%02x:%02x:%02x err!\n",
+ dsaf_dev->ae_dev.name, mac_entry->addr[0],
+ mac_entry->addr[1], mac_entry->addr[2],
+ mac_entry->addr[3],
+ mac_entry->addr[4], mac_entry->addr[5]);
+ return -EINVAL;
+ }
+
+ /*config key */
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key,
+ mac_entry->in_vlan_id,
+ mac_entry->in_port_num, mac_entry->addr);
+
+ /* does the entry already exist? */
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /* no matching entry, find an empty one */
+ entry_index = hns_dsaf_find_empty_mac_entry(dsaf_dev);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /* no empty entry left, return error */
+ dev_err(dsaf_dev->dev,
+ "set_uc_entry failed, %s Mac key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name,
+ mac_key.high.val, mac_key.low.val);
+ return -EINVAL;
+ }
+
+ /* config hardware entry */
+ memset(mac_data.tbl_mcast_port_msk,
+ 0, sizeof(mac_data.tbl_mcast_port_msk));
+ } else {
+ /* config hardware entry */
+ hns_dsaf_tcam_mc_get(
+ dsaf_dev, entry_index,
+ (struct dsaf_tbl_tcam_data *)(&tmp_mac_key), &mac_data);
+ }
+ mac_data.tbl_mcast_old_en = 0;
+ mac_data.tbl_mcast_item_vld = 1;
+ dsaf_set_field(mac_data.tbl_mcast_port_msk[0],
+ 0x3F, 0, mac_entry->port_mask[0]);
+
+ dev_dbg(dsaf_dev->dev,
+ "set_uc_entry, %s key(%#x:%#x) entry_index%d\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val, entry_index);
+
+ hns_dsaf_tcam_mc_cfg(
+ dsaf_dev, entry_index,
+ (struct dsaf_tbl_tcam_data *)(&mac_key), &mac_data);
+
+ /* config software entry */
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = entry_index;
+ soft_mac_entry->tcam_key.high.val = mac_key.high.val;
+ soft_mac_entry->tcam_key.low.val = mac_key.low.val;
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_add_mac_mc_port - add mac mc-port
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: mc-mac entry
+ */
+int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry)
+{
+ u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+ struct dsaf_tbl_tcam_mcast_cfg mac_data;
+ struct dsaf_drv_priv *priv =
+ (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+ struct dsaf_drv_tbl_tcam_key tmp_mac_key;
+ int mskid;
+
+ /* check mac addr */
+ if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
+ dev_err(dsaf_dev->dev,
+ "set_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x!\n",
+ mac_entry->addr[0], mac_entry->addr[1],
+ mac_entry->addr[2], mac_entry->addr[3],
+ mac_entry->addr[4], mac_entry->addr[5]);
+ return -EINVAL;
+ }
+
+ /*config key */
+ hns_dsaf_set_mac_key(
+ dsaf_dev, &mac_key, mac_entry->in_vlan_id,
+ mac_entry->in_port_num, mac_entry->addr);
+
+ memset(&mac_data, 0, sizeof(struct dsaf_tbl_tcam_mcast_cfg));
+
+ /*check exist? */
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /* no matching entry, find an empty one */
+ entry_index = hns_dsaf_find_empty_mac_entry(dsaf_dev);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /* no empty entry left, return error */
+ dev_err(dsaf_dev->dev,
+ "set_uc_entry failed, %s Mac key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val);
+ return -EINVAL;
+ }
+ } else {
+ /* entry exists, add this port to it */
+ hns_dsaf_tcam_mc_get(
+ dsaf_dev, entry_index,
+ (struct dsaf_tbl_tcam_data *)(&tmp_mac_key), &mac_data);
+ }
+ /* config hardware entry */
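+ /* map the output port to its bit in the mcast port mask: the service
+ * (network) ports occupy the low bits and the inner ports follow them
+ */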
+ if (mac_entry->port_num < DSAF_SERVICE_NW_NUM) {
+ mskid = mac_entry->port_num;
+ } else if (mac_entry->port_num >= DSAF_BASE_INNER_PORT_NUM) {
+ mskid = mac_entry->port_num -
+ DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
+ } else {
+ dev_err(dsaf_dev->dev,
+ "%s,pnum(%d)error,key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name, mac_entry->port_num,
+ mac_key.high.val, mac_key.low.val);
+ return -EINVAL;
+ }
+ dsaf_set_bit(mac_data.tbl_mcast_port_msk[mskid / 32], mskid % 32, 1);
+ mac_data.tbl_mcast_old_en = 0;
+ mac_data.tbl_mcast_item_vld = 1;
+
+ dev_dbg(dsaf_dev->dev,
+ "set_uc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val, entry_index);
+
+ hns_dsaf_tcam_mc_cfg(
+ dsaf_dev, entry_index,
+ (struct dsaf_tbl_tcam_data *)(&mac_key), &mac_data);
+
+ /*config software entry */
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = entry_index;
+ soft_mac_entry->tcam_key.high.val = mac_key.high.val;
+ soft_mac_entry->tcam_key.low.val = mac_key.low.val;
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_del_mac_entry - del mac entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @vlan_id: vlan id
+ * @in_port_num: input port num
+ * @addr : mac addr
+ */
+int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id,
+ u8 in_port_num, u8 *addr)
+{
+ u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+ struct dsaf_drv_priv *priv =
+ (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+
+ /*check mac addr */
+ if (MAC_IS_ALL_ZEROS(addr) || MAC_IS_BROADCAST(addr)) {
+ dev_err(dsaf_dev->dev,
+ "del_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x!\n",
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+ return -EINVAL;
+ }
+
+ /*config key */
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, vlan_id, in_port_num, addr);
+
+ /* does the entry exist? */
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /*not exist, error */
+ dev_err(dsaf_dev->dev,
+ "del_mac_entry failed, %s Mac key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name,
+ mac_key.high.val, mac_key.low.val);
+ return -EINVAL;
+ }
+ dev_dbg(dsaf_dev->dev,
+ "del_mac_entry, %s Mac key(%#x:%#x) entry_index%d\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val, entry_index);
+
+ /* do the delete operation */
+ hns_dsaf_tcam_mc_invld(dsaf_dev, entry_index);
+
+ /* delete the soft entry */
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_del_mac_mc_port - del mac mc-port
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: mac entry
+ */
+int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry)
+{
+ u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+ struct dsaf_drv_priv *priv =
+ (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+ u16 vlan_id;
+ u8 in_port_num;
+ struct dsaf_tbl_tcam_mcast_cfg mac_data;
+ struct dsaf_drv_tbl_tcam_key tmp_mac_key;
+ int mskid;
+ const u8 empty_msk[sizeof(mac_data.tbl_mcast_port_msk)] = {0};
+
+ if (!mac_entry) {
+ dev_err(dsaf_dev->dev,
+ "hns_dsaf_del_mac_mc_port mac_entry is NULL\n");
+ return -EINVAL;
+ }
+
+ /*get key info*/
+ vlan_id = mac_entry->in_vlan_id;
+ in_port_num = mac_entry->in_port_num;
+
+ /*check mac addr */
+ if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
+ dev_err(dsaf_dev->dev,
+ "del_port failed, addr %02x:%02x:%02x:%02x:%02x:%02x!\n",
+ mac_entry->addr[0], mac_entry->addr[1],
+ mac_entry->addr[2], mac_entry->addr[3],
+ mac_entry->addr[4], mac_entry->addr[5]);
+ return -EINVAL;
+ }
+
+ /*config key */
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, vlan_id, in_port_num,
+ mac_entry->addr);
+
+ /* does the entry exist? */
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /*find none */
+ dev_err(dsaf_dev->dev,
+ "find_soft_mac_entry failed, %s Mac key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name,
+ mac_key.high.val, mac_key.low.val);
+ return -EINVAL;
+ }
+
+ dev_dbg(dsaf_dev->dev,
+ "del_mac_mc_port, %s key(%#x:%#x) index%d\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val, entry_index);
+
+ /*read entry*/
+ hns_dsaf_tcam_mc_get(
+ dsaf_dev, entry_index,
+ (struct dsaf_tbl_tcam_data *)(&tmp_mac_key), &mac_data);
+
+ /*del the port*/
+ if (mac_entry->port_num < DSAF_SERVICE_NW_NUM) {
+ mskid = mac_entry->port_num;
+ } else if (mac_entry->port_num >= DSAF_BASE_INNER_PORT_NUM) {
+ mskid = mac_entry->port_num -
+ DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
+ } else {
+ dev_err(dsaf_dev->dev,
+ "%s,pnum(%d)error,key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name, mac_entry->port_num,
+ mac_key.high.val, mac_key.low.val);
+ return -EINVAL;
+ }
+ dsaf_set_bit(mac_data.tbl_mcast_port_msk[mskid / 32], mskid % 32, 0);
+
+ /* if no ports remain in the mask, delete the whole entry */
+ if (!memcmp(mac_data.tbl_mcast_port_msk, empty_msk,
+ sizeof(mac_data.tbl_mcast_port_msk))) {
+ hns_dsaf_tcam_mc_invld(dsaf_dev, entry_index);
+
+ /* del soft entry */
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
+ } else { /* ports remain, just remove this port and update the entry */
+ hns_dsaf_tcam_mc_cfg(
+ dsaf_dev, entry_index,
+ (struct dsaf_tbl_tcam_data *)(&mac_key), &mac_data);
+ }
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_get_mac_uc_entry - get mac uc entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: mac entry
+ */
+int hns_dsaf_get_mac_uc_entry(struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry)
+{
+ u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+
+ struct dsaf_tbl_tcam_ucast_cfg mac_data;
+
+ /* check macaddr */
+ if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
+ MAC_IS_BROADCAST(mac_entry->addr)) {
+ dev_err(dsaf_dev->dev,
+ "get_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x\n",
+ mac_entry->addr[0], mac_entry->addr[1],
+ mac_entry->addr[2], mac_entry->addr[3],
+ mac_entry->addr[4], mac_entry->addr[5]);
+ return -EINVAL;
+ }
+
+ /*config key */
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, mac_entry->in_vlan_id,
+ mac_entry->in_port_num, mac_entry->addr);
+
+ /*check exist? */
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /*find none, error */
+ dev_err(dsaf_dev->dev,
+ "get_uc_entry failed, %s Mac key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name,
+ mac_key.high.val, mac_key.low.val);
+ return -EINVAL;
+ }
+ dev_dbg(dsaf_dev->dev,
+ "get_uc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val, entry_index);
+
+ /*read entry*/
+ hns_dsaf_tcam_uc_get(dsaf_dev, entry_index,
+ (struct dsaf_tbl_tcam_data *)&mac_key, &mac_data);
+ mac_entry->port_num = mac_data.tbl_ucast_out_port;
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_get_mac_mc_entry - get mac mc entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: mac entry
+ */
+int hns_dsaf_get_mac_mc_entry(struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_multi_dest_entry *mac_entry)
+{
+ u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+
+ struct dsaf_tbl_tcam_mcast_cfg mac_data;
+
+ /*check mac addr */
+ if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
+ MAC_IS_BROADCAST(mac_entry->addr)) {
+ dev_err(dsaf_dev->dev,
+ "get_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x\n",
+ mac_entry->addr[0], mac_entry->addr[1],
+ mac_entry->addr[2], mac_entry->addr[3],
+ mac_entry->addr[4], mac_entry->addr[5]);
+ return -EINVAL;
+ }
+
+ /*config key */
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, mac_entry->in_vlan_id,
+ mac_entry->in_port_num, mac_entry->addr);
+
+ /*check exist? */
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /* find none, error */
+ dev_err(dsaf_dev->dev,
+ "get_mac_uc_entry failed, %s Mac key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val);
+ return -EINVAL;
+ }
+ dev_dbg(dsaf_dev->dev,
+ "get_mac_uc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val, entry_index);
+
+ /*read entry */
+ hns_dsaf_tcam_mc_get(dsaf_dev, entry_index,
+ (struct dsaf_tbl_tcam_data *)&mac_key, &mac_data);
+
+ mac_entry->port_mask[0] = mac_data.tbl_mcast_port_msk[0] & 0x3F;
+ return 0;
+}
+
+/**
+ * hns_dsaf_get_mac_entry_by_index - get mac entry by tab index
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @entry_index: tab entry index
+ * @mac_entry: mac entry
+ */
+int hns_dsaf_get_mac_entry_by_index(
+ struct dsaf_device *dsaf_dev,
+ u16 entry_index, struct dsaf_drv_mac_multi_dest_entry *mac_entry)
+{
+ struct dsaf_drv_tbl_tcam_key mac_key;
+
+ struct dsaf_tbl_tcam_mcast_cfg mac_data;
+ struct dsaf_tbl_tcam_ucast_cfg mac_uc_data;
+ char mac_addr[MAC_NUM_OCTETS_PER_ADDR] = {0};
+
+ if (entry_index >= DSAF_TCAM_SUM) {
+ /* index out of range, error */
+ dev_err(dsaf_dev->dev, "get_uc_entry failed, %s\n",
+ dsaf_dev->ae_dev.name);
+ return -EINVAL;
+ }
+
+ /* mc entry, do read opt */
+ hns_dsaf_tcam_mc_get(dsaf_dev, entry_index,
+ (struct dsaf_tbl_tcam_data *)&mac_key, &mac_data);
+
+ mac_entry->port_mask[0] = mac_data.tbl_mcast_port_msk[0] & 0x3F;
+
+ /* get mac addr */
+ mac_addr[0] = mac_key.high.bits.mac_0;
+ mac_addr[1] = mac_key.high.bits.mac_1;
+ mac_addr[2] = mac_key.high.bits.mac_2;
+ mac_addr[3] = mac_key.high.bits.mac_3;
+ mac_addr[4] = mac_key.low.bits.mac_4;
+ mac_addr[5] = mac_key.low.bits.mac_5;
+ /* is it a mc or uc address? */
+ if (MAC_IS_MULTICAST((u8 *)mac_addr) ||
+ MAC_IS_L3_MULTICAST((u8 *)mac_addr)) {
+ /* multicast: the port mask was already read above, nothing more to do */
+ } else {
+ /*is not mc, just uc... */
+ hns_dsaf_tcam_uc_get(dsaf_dev, entry_index,
+ (struct dsaf_tbl_tcam_data *)&mac_key,
+ &mac_uc_data);
+ mac_entry->port_mask[0] = (1 << mac_uc_data.tbl_ucast_out_port);
+ }
+
+ return 0;
+}
+
+static struct dsaf_device *hns_dsaf_alloc_dev(struct device *dev,
+ size_t sizeof_priv)
+{
+ struct dsaf_device *dsaf_dev;
+
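+ /* allocate the dsaf device with the caller's private area appended
+ * directly after the struct
+ */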
+ dsaf_dev = devm_kzalloc(dev,
+ sizeof(*dsaf_dev) + sizeof_priv, GFP_KERNEL);
+ if (unlikely(!dsaf_dev)) {
+ dsaf_dev = ERR_PTR(-ENOMEM);
+ } else {
+ dsaf_dev->dev = dev;
+ dev_set_drvdata(dev, dsaf_dev);
+ }
+
+ return dsaf_dev;
+}
+
+/**
+ * hns_dsaf_free_dev - free dev mem
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_free_dev(struct dsaf_device *dsaf_dev)
+{
+ (void)dev_set_drvdata(dsaf_dev->dev, NULL);
+}
+
+/**
+ * hns_dsaf_pfc_unit_cnt - set pfc unit count
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_id: mac (port) id
+ * @rate: port rate mode
+ */
+static void hns_dsaf_pfc_unit_cnt(struct dsaf_device *dsaf_dev, int mac_id,
+ enum dsaf_port_rate_mode rate)
+{
+ u32 unit_cnt;
+
+ switch (rate) {
+ case DSAF_PORT_RATE_10000:
+ unit_cnt = HNS_DSAF_PFC_UNIT_CNT_FOR_XGE;
+ break;
+ case DSAF_PORT_RATE_1000:
+ unit_cnt = HNS_DSAF_PFC_UNIT_CNT_FOR_GE_1000;
+ break;
+ case DSAF_PORT_RATE_2500:
+ unit_cnt = HNS_DSAF_PFC_UNIT_CNT_FOR_GE_1000;
+ break;
+ default:
+ unit_cnt = HNS_DSAF_PFC_UNIT_CNT_FOR_XGE;
+ }
+
+ dsaf_set_dev_field(dsaf_dev,
+ (DSAF_PFC_UNIT_CNT_0_REG + 0x4 * (u64)mac_id),
+ DSAF_PFC_UNINT_CNT_M, DSAF_PFC_UNINT_CNT_S,
+ unit_cnt);
+}
+
+/**
+ * hns_dsaf_port_work_rate_cfg - set the xge/ge work mode for a port
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_id: mac (port) id
+ * @rate_mode: port rate mode
+ */
+void hns_dsaf_port_work_rate_cfg(struct dsaf_device *dsaf_dev, int mac_id,
+ enum dsaf_port_rate_mode rate_mode)
+{
+ u32 port_work_mode;
+
+ port_work_mode = dsaf_read_dev(
+ dsaf_dev, DSAF_XGE_GE_WORK_MODE_0_REG + 0x4 * (u64)mac_id);
+
+ if (rate_mode == DSAF_PORT_RATE_10000)
+ dsaf_set_bit(port_work_mode, DSAF_XGE_GE_WORK_MODE_S, 1);
+ else
+ dsaf_set_bit(port_work_mode, DSAF_XGE_GE_WORK_MODE_S, 0);
+
+ dsaf_write_dev(dsaf_dev,
+ DSAF_XGE_GE_WORK_MODE_0_REG + 0x4 * (u64)mac_id,
+ port_work_mode);
+
+ hns_dsaf_pfc_unit_cnt(dsaf_dev, mac_id, rate_mode);
+}
+
+/**
+ * hns_dsaf_fix_mac_mode - dsaf modify mac mode
+ * @mac_cb: mac control block
+ */
+void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb)
+{
+ enum dsaf_port_rate_mode mode;
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ int mac_id = mac_cb->mac_id;
+
+ if (mac_cb->mac_type != HNAE_PORT_SERVICE)
+ return;
+ if (mac_cb->phy_if == PHY_INTERFACE_MODE_XGMII)
+ mode = DSAF_PORT_RATE_10000;
+ else
+ mode = DSAF_PORT_RATE_1000;
+
+ hns_dsaf_port_work_rate_cfg(dsaf_dev, mac_id, mode);
+}
+
+void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
+{
+ struct dsaf_hw_stats *hw_stats
+ = &dsaf_dev->hw_stats[node_num];
+
+ hw_stats->pad_drop += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_PAD_DISCARD_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->man_pkts += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_FINAL_IN_MAN_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->rx_pkts += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_FINAL_IN_PKT_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->rx_pkt_id += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_SBM_PID_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->rx_pause_frame += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->release_buf_num += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_SBM_RELS_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->sbm_drop += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_SBM_DROP_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->crc_false += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_CRC_FALSE_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->bp_drop += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_BP_DISCARD_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->rslt_drop += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_RSLT_DISCARD_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->local_addr_false += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG + 0x80 * (u64)node_num);
+
+ hw_stats->vlan_drop += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 0x80 * (u64)node_num);
+ hw_stats->stp_drop += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_IN_DATA_STP_DISC_0_REG + 0x80 * (u64)node_num);
+
+ hw_stats->tx_pkts += dsaf_read_dev(dsaf_dev,
+ DSAF_XOD_RCVPKT_CNT_0_REG + 0x90 * (u64)node_num);
+}
+
+/**
+ * hns_dsaf_get_regs - dump dsaf regs
+ * @ddev: dsaf device
+ * @port: port index
+ * @data: buffer for the register values
+ */
+void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
+{
+ u32 i = 0;
+ u32 j;
+ u32 *p = data;
+
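+ /* the offsets into p[] below are fixed; they have to match the size
+ * of the regs buffer the caller provides
+ */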
+ /* dsaf common registers */
+ p[0] = dsaf_read_dev(ddev, DSAF_SRAM_INIT_OVER_0_REG);
+ p[1] = dsaf_read_dev(ddev, DSAF_CFG_0_REG);
+ p[2] = dsaf_read_dev(ddev, DSAF_ECC_ERR_INVERT_0_REG);
+ p[3] = dsaf_read_dev(ddev, DSAF_ABNORMAL_TIMEOUT_0_REG);
+ p[4] = dsaf_read_dev(ddev, DSAF_FSM_TIMEOUT_0_REG);
+ p[5] = dsaf_read_dev(ddev, DSAF_DSA_REG_CNT_CLR_CE_REG);
+ p[6] = dsaf_read_dev(ddev, DSAF_DSA_SBM_INF_FIFO_THRD_REG);
+ p[7] = dsaf_read_dev(ddev, DSAF_DSA_SRAM_1BIT_ECC_SEL_REG);
+ p[8] = dsaf_read_dev(ddev, DSAF_DSA_SRAM_1BIT_ECC_CNT_REG);
+
+ p[9] = dsaf_read_dev(ddev, DSAF_PFC_EN_0_REG + port * 4);
+ p[10] = dsaf_read_dev(ddev, DSAF_PFC_UNIT_CNT_0_REG + port * 4);
+ p[11] = dsaf_read_dev(ddev, DSAF_XGE_INT_MSK_0_REG + port * 4);
+ p[12] = dsaf_read_dev(ddev, DSAF_XGE_INT_SRC_0_REG + port * 4);
+ p[13] = dsaf_read_dev(ddev, DSAF_XGE_INT_STS_0_REG + port * 4);
+ p[14] = dsaf_read_dev(ddev, DSAF_XGE_INT_MSK_0_REG + port * 4);
+ p[15] = dsaf_read_dev(ddev, DSAF_PPE_INT_MSK_0_REG + port * 4);
+ p[16] = dsaf_read_dev(ddev, DSAF_ROCEE_INT_MSK_0_REG + port * 4);
+ p[17] = dsaf_read_dev(ddev, DSAF_XGE_INT_SRC_0_REG + port * 4);
+ p[18] = dsaf_read_dev(ddev, DSAF_PPE_INT_SRC_0_REG + port * 4);
+ p[19] = dsaf_read_dev(ddev, DSAF_ROCEE_INT_SRC_0_REG + port * 4);
+ p[20] = dsaf_read_dev(ddev, DSAF_XGE_INT_STS_0_REG + port * 4);
+ p[21] = dsaf_read_dev(ddev, DSAF_PPE_INT_STS_0_REG + port * 4);
+ p[22] = dsaf_read_dev(ddev, DSAF_ROCEE_INT_STS_0_REG + port * 4);
+ p[23] = dsaf_read_dev(ddev, DSAF_PPE_QID_CFG_0_REG + port * 4);
+
+ for (i = 0; i < DSAF_SW_PORT_NUM; i++)
+ p[24 + i] = dsaf_read_dev(ddev,
+ DSAF_SW_PORT_TYPE_0_REG + i * 4);
+
+ p[32] = dsaf_read_dev(ddev, DSAF_MIX_DEF_QID_0_REG + port * 4);
+
+ for (i = 0; i < DSAF_SW_PORT_NUM; i++)
+ p[33 + i] = dsaf_read_dev(ddev,
+ DSAF_PORT_DEF_VLAN_0_REG + i * 4);
+
+ for (i = 0; i < DSAF_TOTAL_QUEUE_NUM; i++)
+ p[41 + i] = dsaf_read_dev(ddev,
+ DSAF_VM_DEF_VLAN_0_REG + i * 4);
+
+ /* dsaf inode registers */
+ p[170] = dsaf_read_dev(ddev, DSAF_INODE_CUT_THROUGH_CFG_0_REG);
+
+ p[171] = dsaf_read_dev(ddev,
+ DSAF_INODE_ECC_ERR_ADDR_0_REG + port * 0x80);
+
+ for (i = 0; i < DSAF_INODE_NUM / DSAF_COMM_CHN; i++) {
+ j = i * DSAF_COMM_CHN + port;
+ p[172 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_IN_PORT_NUM_0_REG + j * 0x80);
+ p[175 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_PRI_TC_CFG_0_REG + j * 0x80);
+ p[178 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_BP_STATUS_0_REG + j * 0x80);
+ p[181 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_PAD_DISCARD_NUM_0_REG + j * 0x80);
+ p[184 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_FINAL_IN_MAN_NUM_0_REG + j * 0x80);
+ p[187 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_FINAL_IN_PKT_NUM_0_REG + j * 0x80);
+ p[190 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_SBM_PID_NUM_0_REG + j * 0x80);
+ p[193 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG + j * 0x80);
+ p[196 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_SBM_RELS_NUM_0_REG + j * 0x80);
+ p[199 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_SBM_DROP_NUM_0_REG + j * 0x80);
+ p[202 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_CRC_FALSE_NUM_0_REG + j * 0x80);
+ p[205 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_BP_DISCARD_NUM_0_REG + j * 0x80);
+ p[208 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_RSLT_DISCARD_NUM_0_REG + j * 0x80);
+ p[211 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG + j * 0x80);
+ p[214 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_VOQ_OVER_NUM_0_REG + j * 0x80);
+ p[217 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_BD_SAVE_STATUS_0_REG + j * 4);
+ p[220 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_BD_ORDER_STATUS_0_REG + j * 4);
+ p[223 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + j * 4);
+ p[224 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_IN_DATA_STP_DISC_0_REG + j * 4);
+ }
+
+ p[227] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4);
+
+ for (i = 0; i < DSAF_INODE_NUM / DSAF_COMM_CHN; i++) {
+ j = i * DSAF_COMM_CHN + port;
+ p[228 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_VC0_IN_PKT_NUM_0_REG + j * 4);
+ }
+
+ p[231] = dsaf_read_dev(ddev,
+ DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 4);
+
+ /* dsaf sbm registers */
+ for (i = 0; i < DSAF_SBM_NUM / DSAF_COMM_CHN; i++) {
+ j = i * DSAF_COMM_CHN + port;
+ p[232 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_CFG_REG_0_REG + j * 0x80);
+ p[235 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + j * 0x80);
+ p[238 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CFG_1_REG_0_REG + j * 0x80);
+ p[241 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + j * 0x80);
+ p[244 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_FREE_CNT_0_0_REG + j * 0x80);
+ p[245 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_FREE_CNT_1_0_REG + j * 0x80);
+ p[248 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CNT_0_0_REG + j * 0x80);
+ p[251 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CNT_1_0_REG + j * 0x80);
+ p[254 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CNT_2_0_REG + j * 0x80);
+ p[257 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CNT_3_0_REG + j * 0x80);
+ p[260 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_INER_ST_0_REG + j * 0x80);
+ p[263 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_MIB_REQ_FAILED_TC_0_REG + j * 0x80);
+ p[266 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_CNT_0_REG + j * 0x80);
+ p[269 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_DROP_CNT_0_REG + j * 0x80);
+ p[272 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_INF_OUTPORT_CNT_0_REG + j * 0x80);
+ p[275 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC0_CNT_0_REG + j * 0x80);
+ p[278 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC1_CNT_0_REG + j * 0x80);
+ p[281 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC2_CNT_0_REG + j * 0x80);
+ p[284 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC3_CNT_0_REG + j * 0x80);
+ p[287 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC4_CNT_0_REG + j * 0x80);
+ p[290 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC5_CNT_0_REG + j * 0x80);
+ p[293 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC6_CNT_0_REG + j * 0x80);
+ p[296 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC7_CNT_0_REG + j * 0x80);
+ p[299 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_REQ_CNT_0_REG + j * 0x80);
+ p[302 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_RELS_CNT_0_REG + j * 0x80);
+ p[305 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CFG_3_REG_0_REG + j * 0x80);
+ p[308 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CFG_4_REG_0_REG + j * 0x80);
+ }
+
+ /* dsaf onode registers */
+ for (i = 0; i < DSAF_XOD_NUM; i++) {
+ p[311 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + j * 0x90);
+ p[319 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + j * 0x90);
+ p[327 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + j * 0x90);
+ p[335 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + j * 0x90);
+ p[343 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + j * 0x90);
+ p[351 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_ETS_TOKEN_CFG_0_REG + j * 0x90);
+ }
+
+ p[359] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
+ p[360] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90);
+ p[361] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90);
+
+ for (i = 0; i < DSAF_XOD_BIG_NUM / DSAF_COMM_CHN; i++) {
+ j = i * DSAF_COMM_CHN + port;
+ p[362 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_GNT_L_0_REG + j * 0x90);
+ p[365 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_GNT_H_0_REG + j * 0x90);
+ p[368 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_CONNECT_STATE_0_REG + j * 0x90);
+ p[371 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVPKT_CNT_0_REG + j * 0x90);
+ p[374 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVTC0_CNT_0_REG + j * 0x90);
+ p[377 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVTC1_CNT_0_REG + j * 0x90);
+ p[380 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVTC2_CNT_0_REG + j * 0x90);
+ p[383 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVTC3_CNT_0_REG + j * 0x90);
+ p[386 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVVC0_CNT_0_REG + j * 0x90);
+ p[389 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVVC1_CNT_0_REG + j * 0x90);
+ }
+
+ p[392] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN0_CNT_0_REG + port * 0x90);
+ p[393] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN1_CNT_0_REG + port * 0x90);
+ p[394] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN2_CNT_0_REG + port * 0x90);
+ p[395] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN3_CNT_0_REG + port * 0x90);
+ p[396] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN4_CNT_0_REG + port * 0x90);
+ p[397] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN5_CNT_0_REG + port * 0x90);
+ p[398] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN6_CNT_0_REG + port * 0x90);
+ p[399] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN7_CNT_0_REG + port * 0x90);
+ p[400] = dsaf_read_dev(ddev,
+ DSAF_XOD_PPE_RCVIN0_CNT_0_REG + port * 0x90);
+ p[401] = dsaf_read_dev(ddev,
+ DSAF_XOD_PPE_RCVIN1_CNT_0_REG + port * 0x90);
+ p[402] = dsaf_read_dev(ddev,
+ DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG + port * 0x90);
+ p[403] = dsaf_read_dev(ddev,
+ DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG + port * 0x90);
+ p[404] = dsaf_read_dev(ddev,
+ DSAF_XOD_FIFO_STATUS_0_REG + port * 0x90);
+
+ /* dsaf voq registers */
+ for (i = 0; i < DSAF_VOQ_NUM / DSAF_COMM_CHN; i++) {
+ j = (i * DSAF_COMM_CHN + port) * 0x90;
+ p[405 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_ECC_INVERT_EN_0_REG + j);
+ p[408 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_SRAM_PKT_NUM_0_REG + j);
+ p[411 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j);
+ p[414 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_OUT_PKT_NUM_0_REG + j);
+ p[417 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_ECC_ERR_ADDR_0_REG + j);
+ p[420 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j);
+ p[423 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j);
+ p[426 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_XGE_XOD_REQ_0_0_REG + j);
+ p[429 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_XGE_XOD_REQ_1_0_REG + j);
+ p[432 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_PPE_XOD_REQ_0_REG + j);
+ p[435 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_ROCEE_XOD_REQ_0_REG + j);
+ p[438 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_BP_ALL_THRD_0_REG + j);
+ }
+
+ /* dsaf tbl registers */
+ p[441] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG);
+ p[442] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG);
+ p[443] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG);
+ p[444] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG);
+ p[445] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG);
+ p[446] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG);
+ p[447] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG);
+ p[448] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG);
+ p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
+ p[450] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG);
+ p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG);
+ p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG);
+ p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG);
+ p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
+ p[455] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG);
+ p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+ p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
+ p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
+ p[459] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
+ p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
+ p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
+ p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
+ p[463] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG);
+
+ for (i = 0; i < DSAF_SW_PORT_NUM; i++) {
+ j = i * 0x8;
+ p[464 + 2 * i] = dsaf_read_dev(ddev,
+ DSAF_TBL_DA0_MIS_INFO1_0_REG + j);
+ p[465 + 2 * i] = dsaf_read_dev(ddev,
+ DSAF_TBL_DA0_MIS_INFO0_0_REG + j);
+ }
+
+ p[480] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG);
+ p[481] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG);
+ p[482] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG);
+ p[483] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG);
+ p[484] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG);
+ p[485] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG);
+ p[486] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG);
+ p[487] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG);
+ p[488] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG);
+ p[489] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG);
+ p[490] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG);
+ p[491] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG);
+
+ /* dsaf other registers */
+ p[492] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4);
+ p[493] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4);
+ p[494] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4);
+ p[495] = dsaf_read_dev(ddev,
+ DSAF_XGE_APP_RX_LINK_UP_0_REG + port * 0x4);
+ p[496] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
+ p[497] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
+
+ /* mark end of dsaf regs */
+ for (i = 498; i < 504; i++)
+ p[i] = 0xdddddddd;
+}
+
+static char *hns_dsaf_get_node_stats_strings(char *data, int node)
+{
+ char *buff = data;
+
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_pad_drop_pkts", node);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_manage_pkts", node);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkts", node);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkt_id", node);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pause_frame", node);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_release_buf_num", node);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_sbm_drop_pkts", node);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_crc_false_pkts", node);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_bp_drop_pkts", node);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_lookup_rslt_drop_pkts", node);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_local_rslt_fail_pkts", node);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_vlan_drop_pkts", node);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_stp_drop_pkts", node);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "onnod%d_tx_pkts", node);
+ buff = buff + ETH_GSTRING_LEN;
+
+ return buff;
+}
+
+static u64 *hns_dsaf_get_node_stats(struct dsaf_device *ddev, u64 *data,
+ int node_num)
+{
+ u64 *p = data;
+ struct dsaf_hw_stats *hw_stats = &ddev->hw_stats[node_num];
+
+ p[0] = hw_stats->pad_drop;
+ p[1] = hw_stats->man_pkts;
+ p[2] = hw_stats->rx_pkts;
+ p[3] = hw_stats->rx_pkt_id;
+ p[4] = hw_stats->rx_pause_frame;
+ p[5] = hw_stats->release_buf_num;
+ p[6] = hw_stats->sbm_drop;
+ p[7] = hw_stats->crc_false;
+ p[8] = hw_stats->bp_drop;
+ p[9] = hw_stats->rslt_drop;
+ p[10] = hw_stats->local_addr_false;
+ p[11] = hw_stats->vlan_drop;
+ p[12] = hw_stats->stp_drop;
+ p[13] = hw_stats->tx_pkts;
+
+ return &p[14];
+}
+
+/**
+ * hns_dsaf_get_stats - get dsaf statistics
+ * @ddev: dsaf device
+ * @data: buffer for the statistic values
+ * @port: port number
+ */
+void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port)
+{
+ u64 *p = data;
+ int node_num = port;
+
+ /* for ge/xge node info */
+ p = hns_dsaf_get_node_stats(ddev, p, node_num);
+
+ /* for ppe node info */
+ node_num = port + DSAF_PPE_INODE_BASE;
+ (void)hns_dsaf_get_node_stats(ddev, p, node_num);
+}
+
+/**
+ * hns_dsaf_get_sset_count - get dsaf string set count
+ * @stringset: type of values in data
+ * return: number of dsaf stat strings
+ */
+int hns_dsaf_get_sset_count(int stringset)
+{
+ if (stringset == ETH_SS_STATS)
+ return DSAF_STATIC_NUM;
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_get_strings - get dsaf string set
+ * @stringset: string set index
+ * @data: buffer for the string names
+ * @port: port index
+ */
+void hns_dsaf_get_strings(int stringset, u8 *data, int port)
+{
+ char *buff = (char *)data;
+ int node = port;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ /* for ge/xge node info */
+ buff = hns_dsaf_get_node_stats_strings(buff, node);
+
+ /* for ppe node info */
+ node = port + DSAF_PPE_INODE_BASE;
+ (void)hns_dsaf_get_node_stats_strings(buff, node);
+}
+
+/**
+ * hns_dsaf_get_regs_count - get dsaf register count
+ * return: number of dsaf registers
+ */
+int hns_dsaf_get_regs_count(void)
+{
+ return DSAF_DUMP_REGS_NUM;
+}
+
+/**
+ * hns_dsaf_probe - probe dsaf device
+ * @pdev: dsaf platform device
+ * return: 0 on success, negative on failure
+ */
+static int hns_dsaf_probe(struct platform_device *pdev)
+{
+ struct dsaf_device *dsaf_dev;
+ int ret;
+
+ dsaf_dev = hns_dsaf_alloc_dev(&pdev->dev, sizeof(struct dsaf_drv_priv));
+ if (IS_ERR(dsaf_dev)) {
+ ret = PTR_ERR(dsaf_dev);
+ dev_err(&pdev->dev,
+ "dsaf_probe dsaf_alloc_dev failed, ret = %#x!\n", ret);
+ return ret;
+ }
+
+ ret = hns_dsaf_get_cfg(dsaf_dev);
+ if (ret)
+ goto free_dev;
+
+ ret = hns_dsaf_init(dsaf_dev);
+ if (ret)
+ goto free_cfg;
+
+ ret = hns_mac_init(dsaf_dev);
+ if (ret)
+ goto uninit_dsaf;
+
+ ret = hns_ppe_init(dsaf_dev);
+ if (ret)
+ goto uninit_mac;
+
+ ret = hns_dsaf_ae_init(dsaf_dev);
+ if (ret)
+ goto uninit_ppe;
+
+ return 0;
+
+uninit_ppe:
+ hns_ppe_uninit(dsaf_dev);
+
+uninit_mac:
+ hns_mac_uninit(dsaf_dev);
+
+uninit_dsaf:
+ hns_dsaf_free(dsaf_dev);
+
+free_cfg:
+ hns_dsaf_free_cfg(dsaf_dev);
+
+free_dev:
+ hns_dsaf_free_dev(dsaf_dev);
+
+ return ret;
+}
+
+/**
+ * hns_dsaf_remove - remove dsaf device
+ * @pdev: dsaf platform device
+ */
+static int hns_dsaf_remove(struct platform_device *pdev)
+{
+ struct dsaf_device *dsaf_dev = dev_get_drvdata(&pdev->dev);
+
+ hns_dsaf_ae_uninit(dsaf_dev);
+
+ hns_ppe_uninit(dsaf_dev);
+
+ hns_mac_uninit(dsaf_dev);
+
+ hns_dsaf_free(dsaf_dev);
+
+ hns_dsaf_free_cfg(dsaf_dev);
+
+ hns_dsaf_free_dev(dsaf_dev);
+
+ return 0;
+}
+
+static const struct of_device_id g_dsaf_match[] = {
+ {.compatible = "hisilicon,hns-dsaf-v1"},
+ {.compatible = "hisilicon,hns-dsaf-v2"},
+ {}
+};
+
+static struct platform_driver g_dsaf_driver = {
+ .probe = hns_dsaf_probe,
+ .remove = hns_dsaf_remove,
+ .driver = {
+ .name = DSAF_DRV_NAME,
+ .of_match_table = g_dsaf_match,
+ },
+};
+
+module_platform_driver(g_dsaf_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
+MODULE_DESCRIPTION("HNS DSAF driver");
+MODULE_VERSION(DSAF_MOD_VERSION);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
new file mode 100644
index 000000000000..b2b93484995c
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -0,0 +1,428 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __HNS_DSAF_MAIN_H
+#define __HNS_DSAF_MAIN_H
+#include "hnae.h"
+
+#include "hns_dsaf_reg.h"
+#include "hns_dsaf_mac.h"
+
+struct hns_mac_cb;
+
+#define DSAF_DRV_NAME "hns_dsaf"
+#define DSAF_MOD_VERSION "v1.0"
+
+#define ENABLE (0x1)
+#define DISABLE (0x0)
+
+#define HNS_DSAF_DEBUG_NW_REG_OFFSET (0x100000)
+
+#define DSAF_BASE_INNER_PORT_NUM (127) /* mac tbl qid*/
+
+#define DSAF_MAX_CHIP_NUM (2) /*max 2 chips */
+
+#define DSAF_DEFAUTL_QUEUE_NUM_PER_PPE (22)
+
+#define HNS_DSAF_MAX_DESC_CNT (1024)
+#define HNS_DSAF_MIN_DESC_CNT (16)
+
+#define DSAF_INVALID_ENTRY_IDX (0xffff)
+
+#define DSAF_CFG_READ_CNT (30)
+#define DSAF_SRAM_INIT_FINISH_FLAG (0xff)
+
+#define MAC_NUM_OCTETS_PER_ADDR 6
+
+#define DSAF_DUMP_REGS_NUM 504
+#define DSAF_STATIC_NUM 28
+
+#define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
+
+enum hal_dsaf_mode {
+ HRD_DSAF_NO_DSAF_MODE = 0x0,
+ HRD_DSAF_MODE = 0x1,
+};
+
+enum hal_dsaf_tc_mode {
+ HRD_DSAF_4TC_MODE = 0X0,
+ HRD_DSAF_8TC_MODE = 0X1,
+};
+
+struct dsaf_vm_def_vlan {
+ u32 vm_def_vlan_id;
+ u32 vm_def_vlan_cfi;
+ u32 vm_def_vlan_pri;
+};
+
+struct dsaf_tbl_tcam_data {
+ u32 tbl_tcam_data_high;
+ u32 tbl_tcam_data_low;
+};
+
+#define DSAF_PORT_MSK_NUM \
+ ((DSAF_TOTAL_QUEUE_NUM + DSAF_SERVICE_NW_NUM - 1) / 32 + 1)
+struct dsaf_tbl_tcam_mcast_cfg {
+ u8 tbl_mcast_old_en;
+ u8 tbl_mcast_item_vld;
+ u32 tbl_mcast_port_msk[DSAF_PORT_MSK_NUM];
+};
+
+struct dsaf_tbl_tcam_ucast_cfg {
+ u32 tbl_ucast_old_en;
+ u32 tbl_ucast_item_vld;
+ u32 tbl_ucast_mac_discard;
+ u32 tbl_ucast_dvc;
+ u32 tbl_ucast_out_port;
+};
+
+struct dsaf_tbl_line_cfg {
+ u32 tbl_line_mac_discard;
+ u32 tbl_line_dvc;
+ u32 tbl_line_out_port;
+};
+
+enum dsaf_port_rate_mode {
+ DSAF_PORT_RATE_1000 = 0,
+ DSAF_PORT_RATE_2500,
+ DSAF_PORT_RATE_10000
+};
+
+enum dsaf_stp_port_type {
+ DSAF_STP_PORT_TYPE_DISCARD = 0,
+ DSAF_STP_PORT_TYPE_BLOCK = 1,
+ DSAF_STP_PORT_TYPE_LISTEN = 2,
+ DSAF_STP_PORT_TYPE_LEARN = 3,
+ DSAF_STP_PORT_TYPE_FORWARD = 4
+};
+
+enum dsaf_sw_port_type {
+ DSAF_SW_PORT_TYPE_NON_VLAN = 0,
+ DSAF_SW_PORT_TYPE_ACCESS = 1,
+ DSAF_SW_PORT_TYPE_TRUNK = 2,
+};
+
+#define DSAF_SUB_BASE_SIZE (0x10000)
+
+/* dsaf mode define */
+enum dsaf_mode {
+ DSAF_MODE_INVALID = 0, /**< Invalid dsaf mode */
+ DSAF_MODE_ENABLE_FIX, /**< en DSAF-mode, fixed to queue*/
+ DSAF_MODE_ENABLE_0VM, /**< en DSAF-mode, support 0 VM */
+ DSAF_MODE_ENABLE_8VM, /**< en DSAF-mode, support 8 VM */
+ DSAF_MODE_ENABLE_16VM, /**< en DSAF-mode, support 16 VM */
+ DSAF_MODE_ENABLE_32VM, /**< en DSAF-mode, support 32 VM */
+ DSAF_MODE_ENABLE_128VM, /**< en DSAF-mode, support 128 VM */
+ DSAF_MODE_ENABLE, /**< modes above enable DSAF mode*/
+ DSAF_MODE_DISABLE_FIX, /**< non-dsaf, fixed to queue*/
+ DSAF_MODE_DISABLE_2PORT_8VM, /**< non-dsaf, 2port 8VM */
+ DSAF_MODE_DISABLE_2PORT_16VM, /**< non-dsaf, 2port 16VM */
+ DSAF_MODE_DISABLE_2PORT_64VM, /**< non-dsaf, 2port 64VM */
+ DSAF_MODE_DISABLE_6PORT_0VM, /**< non-dsaf, 6port 0VM */
+ DSAF_MODE_DISABLE_6PORT_2VM, /**< non-dsaf, 6port 2VM */
+ DSAF_MODE_DISABLE_6PORT_4VM, /**< non-dsaf, 6port 4VM */
+ DSAF_MODE_DISABLE_6PORT_16VM, /**< non-dsaf, 6port 16VM */
+ DSAF_MODE_MAX /**< the last one, used as the mode count */
+};
+
+#define DSAF_DEST_PORT_NUM 256 /* DSAF max port num */
+#define DSAF_WORD_BIT_CNT 32 /* the num bit of word */
+
+/*mac entry, mc or uc entry*/
+struct dsaf_drv_mac_single_dest_entry {
+ /* mac addr, match the entry*/
+ u8 addr[MAC_NUM_OCTETS_PER_ADDR];
+ u16 in_vlan_id; /* value of VlanId */
+
+ /* the valid input port number; fixed to 0 in dsaf mode, */
+ /* in non-dsaf mode it is the port for which the entry is valid */
+ u8 in_port_num;
+
+ u8 port_num; /*output port num*/
+ u8 rsv[6];
+};
+
+/*only mc entry*/
+struct dsaf_drv_mac_multi_dest_entry {
+ /* mac addr, match the entry*/
+ u8 addr[MAC_NUM_OCTETS_PER_ADDR];
+ u16 in_vlan_id;
+ /* output port mask for this mac address, */
+ /* bits 0-5 map to ports 0-5 (one valid bit per port) */
+ u32 port_mask[DSAF_DEST_PORT_NUM / DSAF_WORD_BIT_CNT];
+
+ /* the valid input port number; fixed to 0 in dsaf mode, */
+ /* in non-dsaf mode it is the port for which the entry is valid */
+ u8 in_port_num;
+ u8 rsv[7];
+};
+
+struct dsaf_hw_stats {
+ u64 pad_drop;
+ u64 man_pkts;
+ u64 rx_pkts;
+ u64 rx_pkt_id;
+ u64 rx_pause_frame;
+ u64 release_buf_num;
+ u64 sbm_drop;
+ u64 crc_false;
+ u64 bp_drop;
+ u64 rslt_drop;
+ u64 local_addr_false;
+ u64 vlan_drop;
+ u64 stp_drop;
+ u64 tx_pkts;
+};
+
+struct hnae_vf_cb {
+ u8 port_index;
+ struct hns_mac_cb *mac_cb;
+ struct dsaf_device *dsaf_dev;
+ struct hnae_handle ae_handle; /* must be the last member */
+};
+
+struct dsaf_int_xge_src {
+ u32 xid_xge_ecc_err_int_src;
+ u32 xid_xge_fsm_timout_int_src;
+ u32 sbm_xge_lnk_fsm_timout_int_src;
+ u32 sbm_xge_lnk_ecc_2bit_int_src;
+ u32 sbm_xge_mib_req_failed_int_src;
+ u32 sbm_xge_mib_req_fsm_timout_int_src;
+ u32 sbm_xge_mib_rels_fsm_timout_int_src;
+ u32 sbm_xge_sram_ecc_2bit_int_src;
+ u32 sbm_xge_mib_buf_sum_err_int_src;
+ u32 sbm_xge_mib_req_extra_int_src;
+ u32 sbm_xge_mib_rels_extra_int_src;
+ u32 voq_xge_start_to_over_0_int_src;
+ u32 voq_xge_start_to_over_1_int_src;
+ u32 voq_xge_ecc_err_int_src;
+};
+
+struct dsaf_int_ppe_src {
+ u32 xid_ppe_fsm_timout_int_src;
+ u32 sbm_ppe_lnk_fsm_timout_int_src;
+ u32 sbm_ppe_lnk_ecc_2bit_int_src;
+ u32 sbm_ppe_mib_req_failed_int_src;
+ u32 sbm_ppe_mib_req_fsm_timout_int_src;
+ u32 sbm_ppe_mib_rels_fsm_timout_int_src;
+ u32 sbm_ppe_sram_ecc_2bit_int_src;
+ u32 sbm_ppe_mib_buf_sum_err_int_src;
+ u32 sbm_ppe_mib_req_extra_int_src;
+ u32 sbm_ppe_mib_rels_extra_int_src;
+ u32 voq_ppe_start_to_over_0_int_src;
+ u32 voq_ppe_ecc_err_int_src;
+ u32 xod_ppe_fifo_rd_empty_int_src;
+ u32 xod_ppe_fifo_wr_full_int_src;
+};
+
+struct dsaf_int_rocee_src {
+ u32 xid_rocee_fsm_timout_int_src;
+ u32 sbm_rocee_lnk_fsm_timout_int_src;
+ u32 sbm_rocee_lnk_ecc_2bit_int_src;
+ u32 sbm_rocee_mib_req_failed_int_src;
+ u32 sbm_rocee_mib_req_fsm_timout_int_src;
+ u32 sbm_rocee_mib_rels_fsm_timout_int_src;
+ u32 sbm_rocee_sram_ecc_2bit_int_src;
+ u32 sbm_rocee_mib_buf_sum_err_int_src;
+ u32 sbm_rocee_mib_req_extra_int_src;
+ u32 sbm_rocee_mib_rels_extra_int_src;
+ u32 voq_rocee_start_to_over_0_int_src;
+ u32 voq_rocee_ecc_err_int_src;
+};
+
+struct dsaf_int_tbl_src {
+ u32 tbl_da0_mis_src;
+ u32 tbl_da1_mis_src;
+ u32 tbl_da2_mis_src;
+ u32 tbl_da3_mis_src;
+ u32 tbl_da4_mis_src;
+ u32 tbl_da5_mis_src;
+ u32 tbl_da6_mis_src;
+ u32 tbl_da7_mis_src;
+ u32 tbl_sa_mis_src;
+ u32 tbl_old_sech_end_src;
+ u32 lram_ecc_err1_src;
+ u32 lram_ecc_err2_src;
+ u32 tram_ecc_err1_src;
+ u32 tram_ecc_err2_src;
+ u32 tbl_ucast_bcast_xge0_src;
+ u32 tbl_ucast_bcast_xge1_src;
+ u32 tbl_ucast_bcast_xge2_src;
+ u32 tbl_ucast_bcast_xge3_src;
+ u32 tbl_ucast_bcast_xge4_src;
+ u32 tbl_ucast_bcast_xge5_src;
+ u32 tbl_ucast_bcast_ppe_src;
+ u32 tbl_ucast_bcast_rocee_src;
+};
+
+struct dsaf_int_stat {
+ struct dsaf_int_xge_src dsaf_int_xge_stat[DSAF_COMM_CHN];
+ struct dsaf_int_ppe_src dsaf_int_ppe_stat[DSAF_COMM_CHN];
+ struct dsaf_int_rocee_src dsaf_int_rocee_stat[DSAF_COMM_CHN];
+ struct dsaf_int_tbl_src dsaf_int_tbl_stat[1];
+
+};
+
+/* dsaf device structure; the mac devices attach to it (mac -> dsaf) */
+struct dsaf_device {
+ struct device *dev;
+ struct hnae_ae_dev ae_dev;
+
+ void *priv;
+
+ int virq[DSAF_IRQ_NUM];
+
+ u8 __iomem *sc_base;
+ u8 __iomem *sds_base;
+ u8 __iomem *ppe_base;
+ u8 __iomem *io_base;
+ u8 __iomem *cpld_base;
+
+ u32 desc_num; /* desc num per queue*/
+ u32 buf_size; /* ring buffer size */
+ int buf_size_type; /* ring buffer size-type */
+ enum dsaf_mode dsaf_mode; /* dsaf mode */
+ enum hal_dsaf_mode dsaf_en;
+ enum hal_dsaf_tc_mode dsaf_tc_mode;
+ u32 dsaf_ver;
+
+ struct ppe_common_cb *ppe_common[DSAF_COMM_DEV_NUM];
+ struct rcb_common_cb *rcb_common[DSAF_COMM_DEV_NUM];
+ struct hns_mac_cb *mac_cb;
+
+ struct dsaf_hw_stats hw_stats[DSAF_NODE_NUM];
+ struct dsaf_int_stat int_stat;
+};
+
+static inline void *hns_dsaf_dev_priv(const struct dsaf_device *dsaf_dev)
+{
+ return (void *)((u8 *)dsaf_dev + sizeof(*dsaf_dev));
+}
+
+struct dsaf_drv_tbl_tcam_key {
+ union {
+ struct {
+ u8 mac_3;
+ u8 mac_2;
+ u8 mac_1;
+ u8 mac_0;
+ } bits;
+
+ u32 val;
+ } high;
+ union {
+ struct {
+ u32 port:4; /* port id, */
+ /* dsaf-mode fixed 0, non-dsaf-mode port id*/
+ u32 vlan:12; /* vlan id */
+ u32 mac_5:8;
+ u32 mac_4:8;
+ } bits;
+
+ u32 val;
+ } low;
+};
+
+struct dsaf_drv_soft_mac_tbl {
+ struct dsaf_drv_tbl_tcam_key tcam_key;
+ u16 index; /*the entry's index in tcam tab*/
+};
+
+struct dsaf_drv_priv {
+ /* soft table of mac keys, mirroring the hardware table */
+ struct dsaf_drv_soft_mac_tbl *soft_mac_tbl;
+};
+
+static inline void hns_dsaf_tbl_tcam_addr_cfg(struct dsaf_device *dsaf_dev,
+ u32 tab_tcam_addr)
+{
+ dsaf_set_dev_field(dsaf_dev, DSAF_TBL_TCAM_ADDR_0_REG,
+ DSAF_TBL_TCAM_ADDR_M, DSAF_TBL_TCAM_ADDR_S,
+ tab_tcam_addr);
+}
+
+static inline void hns_dsaf_tbl_tcam_load_pul(struct dsaf_device *dsaf_dev)
+{
+ u32 o_tbl_pul;
+
+ o_tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_LOAD_S, 1);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_LOAD_S, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+}
+
+static inline void hns_dsaf_tbl_line_addr_cfg(struct dsaf_device *dsaf_dev,
+ u32 tab_line_addr)
+{
+ dsaf_set_dev_field(dsaf_dev, DSAF_TBL_LINE_ADDR_0_REG,
+ DSAF_TBL_LINE_ADDR_M, DSAF_TBL_LINE_ADDR_S,
+ tab_line_addr);
+}
+
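+/* map a dsaf port to its common (comm) index: ports below DSAF_COMM_CHN
+ * and the port equal to DSAF_MAX_PORT_NUM_PER_CHIP share comm index 0,
+ * the remaining ports each get a per-port index starting at 1.
+ */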
+static inline int hns_dsaf_get_comm_idx_by_port(int port)
+{
+ if ((port < DSAF_COMM_CHN) || (port == DSAF_MAX_PORT_NUM_PER_CHIP))
+ return 0;
+ else
+ return (port - DSAF_COMM_CHN + 1);
+}
+
+static inline struct hnae_vf_cb *hns_ae_get_vf_cb(
+ struct hnae_handle *handle)
+{
+ return container_of(handle, struct hnae_vf_cb, ae_handle);
+}
+
+int hns_dsaf_set_mac_uc_entry(struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry);
+int hns_dsaf_set_mac_mc_entry(struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_multi_dest_entry *mac_entry);
+int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry);
+int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id,
+ u8 in_port_num, u8 *addr);
+int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry);
+int hns_dsaf_get_mac_uc_entry(struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry);
+int hns_dsaf_get_mac_mc_entry(struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_multi_dest_entry *mac_entry);
+int hns_dsaf_get_mac_entry_by_index(
+ struct dsaf_device *dsaf_dev,
+ u16 entry_index,
+ struct dsaf_drv_mac_multi_dest_entry *mac_entry);
+
+void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val);
+
+void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val);
+
+void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val);
+
+void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb);
+
+int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev);
+void hns_dsaf_ae_uninit(struct dsaf_device *dsaf_dev);
+
+void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val);
+void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val);
+void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
+ u32 port, u32 val);
+
+void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 inode_num);
+
+int hns_dsaf_get_sset_count(int stringset);
+void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port);
+void hns_dsaf_get_strings(int stringset, u8 *data, int port);
+
+void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data);
+int hns_dsaf_get_regs_count(void);
+void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en);
+
+#endif /* __HNS_DSAF_MAIN_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
new file mode 100644
index 000000000000..523e9b83d304
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -0,0 +1,310 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "hns_dsaf_misc.h"
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_reg.h"
+#include "hns_dsaf_ppe.h"
+
+void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
+ u16 speed, int data)
+{
+ int speed_reg = 0;
+ u8 value;
+
+ if (!mac_cb) {
+ pr_err("sfp_led_opt mac_dev is null!\n");
+ return;
+ }
+ if (!mac_cb->cpld_vaddr) {
+ dev_err(mac_cb->dev, "mac_id=%d, cpld_vaddr is null !\n",
+ mac_cb->mac_id);
+ return;
+ }
+
+ if (speed == MAC_SPEED_10000)
+ speed_reg = 1;
+
+ value = mac_cb->cpld_led_value;
+
+ if (link_status) {
+ dsaf_set_bit(value, DSAF_LED_LINK_B, link_status);
+ dsaf_set_field(value, DSAF_LED_SPEED_M,
+ DSAF_LED_SPEED_S, speed_reg);
+ dsaf_set_bit(value, DSAF_LED_DATA_B, data);
+
+ if (value != mac_cb->cpld_led_value) {
+ dsaf_write_b(mac_cb->cpld_vaddr, value);
+ mac_cb->cpld_led_value = value;
+ }
+ } else {
+ dsaf_write_b(mac_cb->cpld_vaddr, CPLD_LED_DEFAULT_VALUE);
+ mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE;
+ }
+}
+
+void cpld_led_reset(struct hns_mac_cb *mac_cb)
+{
+ if (!mac_cb || !mac_cb->cpld_vaddr)
+ return;
+
+ dsaf_write_b(mac_cb->cpld_vaddr, CPLD_LED_DEFAULT_VALUE);
+ mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE;
+}
+
+int cpld_set_led_id(struct hns_mac_cb *mac_cb,
+ enum hnae_led_state status)
+{
+ switch (status) {
+ case HNAE_LED_ACTIVE:
+ mac_cb->cpld_led_value = dsaf_read_b(mac_cb->cpld_vaddr);
+ dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
+ CPLD_LED_ON_VALUE);
+ dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value);
+ return 2;
+ case HNAE_LED_INACTIVE:
+ dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
+ CPLD_LED_DEFAULT_VALUE);
+ dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define RESET_REQ_OR_DREQ 1
+
+void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val)
+{
+ u32 xbar_reg_addr;
+ u32 nt_reg_addr;
+
+ if (!val) {
+ xbar_reg_addr = DSAF_SUB_SC_XBAR_RESET_REQ_REG;
+ nt_reg_addr = DSAF_SUB_SC_NT_RESET_REQ_REG;
+ } else {
+ xbar_reg_addr = DSAF_SUB_SC_XBAR_RESET_DREQ_REG;
+ nt_reg_addr = DSAF_SUB_SC_NT_RESET_DREQ_REG;
+ }
+
+ dsaf_write_reg(dsaf_dev->sc_base, xbar_reg_addr,
+ RESET_REQ_OR_DREQ);
+ dsaf_write_reg(dsaf_dev->sc_base, nt_reg_addr,
+ RESET_REQ_OR_DREQ);
+}
+
+void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
+{
+ u32 reg_val = 0;
+ u32 reg_addr;
+
+ if (port >= DSAF_XGE_NUM)
+ return;
+
+ reg_val |= RESET_REQ_OR_DREQ;
+ reg_val |= 0x2082082 << port;
+
+ if (val == 0)
+ reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG;
+ else
+ reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG;
+
+ dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+}
+
+void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
+ u32 port, u32 val)
+{
+ u32 reg_val = 0;
+ u32 reg_addr;
+
+ if (port >= DSAF_XGE_NUM)
+ return;
+
+ reg_val |= XGMAC_TRX_CORE_SRST_M << port;
+
+ if (val == 0)
+ reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG;
+ else
+ reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG;
+
+ dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+}
+
+void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
+{
+ u32 reg_val_1;
+ u32 reg_val_2;
+
+ if (port >= DSAF_GE_NUM)
+ return;
+
+ if (port < DSAF_SERVICE_NW_NUM) {
+ reg_val_1 = 0x1 << port;
+ reg_val_2 = 0x1041041 << port;
+
+ if (val == 0) {
+ dsaf_write_reg(dsaf_dev->sc_base,
+ DSAF_SUB_SC_GE_RESET_REQ1_REG,
+ reg_val_1);
+
+ dsaf_write_reg(dsaf_dev->sc_base,
+ DSAF_SUB_SC_GE_RESET_REQ0_REG,
+ reg_val_2);
+ } else {
+ dsaf_write_reg(dsaf_dev->sc_base,
+ DSAF_SUB_SC_GE_RESET_DREQ0_REG,
+ reg_val_2);
+
+ dsaf_write_reg(dsaf_dev->sc_base,
+ DSAF_SUB_SC_GE_RESET_DREQ1_REG,
+ reg_val_1);
+ }
+ } else {
+ reg_val_1 = 0x15540 << (port - 6);
+ reg_val_2 = 0x100 << (port - 6);
+
+ if (val == 0) {
+ dsaf_write_reg(dsaf_dev->sc_base,
+ DSAF_SUB_SC_GE_RESET_REQ1_REG,
+ reg_val_1);
+
+ dsaf_write_reg(dsaf_dev->sc_base,
+ DSAF_SUB_SC_PPE_RESET_REQ_REG,
+ reg_val_2);
+ } else {
+ dsaf_write_reg(dsaf_dev->sc_base,
+ DSAF_SUB_SC_GE_RESET_DREQ1_REG,
+ reg_val_1);
+
+ dsaf_write_reg(dsaf_dev->sc_base,
+ DSAF_SUB_SC_PPE_RESET_DREQ_REG,
+ reg_val_2);
+ }
+ }
+}
+
+void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
+{
+ u32 reg_val = 0;
+ u32 reg_addr;
+
+ reg_val |= RESET_REQ_OR_DREQ << port;
+
+ if (val == 0)
+ reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG;
+ else
+ reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG;
+
+ dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+}
+
+void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val)
+{
+ int comm_index = ppe_common->comm_index;
+ struct dsaf_device *dsaf_dev = ppe_common->dsaf_dev;
+ u32 reg_val;
+ u32 reg_addr;
+
+ if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+ reg_val = RESET_REQ_OR_DREQ;
+ if (val == 0)
+ reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG;
+ else
+ reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG;
+
+ } else {
+ reg_val = 0x100 << (comm_index - 1);
+
+ if (val == 0)
+ reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG;
+ else
+ reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG;
+ }
+
+ dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+}
+
+/**
+ * hns_mac_get_phy_if - get the phy interface from the serdes mode
+ * @mac_cb: mac control block
+ * return: phy interface
+ */
+phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb)
+{
+ u32 hilink3_mode;
+ u32 hilink4_mode;
+ void __iomem *sys_ctl_vaddr = mac_cb->sys_ctl_vaddr;
+ int dev_id = mac_cb->mac_id;
+ phy_interface_t phy_if = PHY_INTERFACE_MODE_NA;
+
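+ /* macs 0-3 follow the hilink4 mode and macs 4-5 the hilink3 mode:
+ * a mode value of 0 selects SGMII, anything else XGMII; all other
+ * macs default to SGMII.
+ */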
+ hilink3_mode = dsaf_read_reg(sys_ctl_vaddr, HNS_MAC_HILINK3_REG);
+ hilink4_mode = dsaf_read_reg(sys_ctl_vaddr, HNS_MAC_HILINK4_REG);
+ if (dev_id >= 0 && dev_id <= 3) {
+ if (hilink4_mode == 0)
+ phy_if = PHY_INTERFACE_MODE_SGMII;
+ else
+ phy_if = PHY_INTERFACE_MODE_XGMII;
+ } else if (dev_id >= 4 && dev_id <= 5) {
+ if (hilink3_mode == 0)
+ phy_if = PHY_INTERFACE_MODE_SGMII;
+ else
+ phy_if = PHY_INTERFACE_MODE_XGMII;
+ } else {
+ phy_if = PHY_INTERFACE_MODE_SGMII;
+ }
+
+ dev_dbg(mac_cb->dev,
+ "hilink3_mode=%d, hilink4_mode=%d dev_id=%d, phy_if=%d\n",
+ hilink3_mode, hilink4_mode, dev_id, phy_if);
+ return phy_if;
+}
+
+/**
+ * hns_mac_config_sds_loopback - set loopback for the serdes
+ * @mac_cb: mac control block
+ * @en: enable (1) or disable (0) loopback
+ * return: 0 on success
+ */
+int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en)
+{
+ /* port 0-3 hilink4 base is serdes_vaddr + 0x00280000
+ * port 4-7 hilink3 base is serdes_vaddr + 0x00200000
+ */
+ u8 *base_addr = (u8 *)mac_cb->serdes_vaddr +
+ (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000);
+ const u8 lane_id[] = {
+ 0, /* mac 0 -> lane 0 */
+ 1, /* mac 1 -> lane 1 */
+ 2, /* mac 2 -> lane 2 */
+ 3, /* mac 3 -> lane 3 */
+ 2, /* mac 4 -> lane 2 */
+ 3, /* mac 5 -> lane 3 */
+ 0, /* mac 6 -> lane 0 */
+ 1 /* mac 7 -> lane 1 */
+ };
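+/* offset of the serdes RX CSR for a given lane/register; the hardware
+ * appears to address these registers in 16-bit units, hence the final
+ * multiply by 2 to get a byte offset (an assumption based on the layout)
+ */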
+#define RX_CSR(lane, reg) ((0x4080 + (reg) * 0x0002 + (lane) * 0x0200) * 2)
+ u64 reg_offset = RX_CSR(lane_id[mac_cb->mac_id], 0);
+
+ int sfp_prsnt;
+ int ret = hns_mac_get_sfp_prsnt(mac_cb, &sfp_prsnt);
+
+ if (!mac_cb->phy_node) {
+ if (ret)
+ pr_info("please confirm sfp is present or not\n");
+ else if (!sfp_prsnt)
+ pr_info("no sfp in this eth\n");
+ }
+
+ dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, !!en);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h
new file mode 100644
index 000000000000..419f07aa9734
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _HNS_DSAF_MISC_H
+#define _HNS_DSAF_MISC_H
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include "hns_dsaf_mac.h"
+
+#define CPLD_ADDR_PORT_OFFSET 0x4
+
+#define HS_LED_ON 0xE
+#define HS_LED_OFF 0xF
+
+#define CPLD_LED_ON_VALUE 1
+#define CPLD_LED_DEFAULT_VALUE 0
+
+#define MAC_SFP_PORT_OFFSET 0x2
+
+#define DSAF_LED_SPEED_S 0
+#define DSAF_LED_SPEED_M (0x3 << DSAF_LED_SPEED_S)
+
+#define DSAF_LED_LINK_B 2
+#define DSAF_LED_DATA_B 4
+#define DSAF_LED_ANCHOR_B 5
+
+void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
+ u16 speed, int data);
+void cpld_led_reset(struct hns_mac_cb *mac_cb);
+int cpld_set_led_id(struct hns_mac_cb *mac_cb,
+ enum hnae_led_state status);
+int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
new file mode 100644
index 000000000000..67f33f185a44
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -0,0 +1,583 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+#include "hns_dsaf_ppe.h"
+
+static void __iomem *hns_ppe_common_get_ioaddr(
+ struct ppe_common_cb *ppe_common)
+{
+ void __iomem *base_addr;
+
+ int idx = ppe_common->comm_index;
+
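+ /* the service ppe common block sits in the ppe register space;
+ * each debug ppe common block sits in its own sds register space
+ */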
+ if (idx == HNS_DSAF_COMM_SERVICE_NW_IDX)
+ base_addr = ppe_common->dsaf_dev->ppe_base
+ + PPE_COMMON_REG_OFFSET;
+ else
+ base_addr = ppe_common->dsaf_dev->sds_base
+ + (idx - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET
+ + PPE_COMMON_REG_OFFSET;
+
+ return base_addr;
+}
+
+/**
+ * hns_ppe_common_get_cfg - get ppe common config
+ * @dsaf_dev: dsaf device
+ * @comm_index: common index
+ * return: 0 on success, negative on failure
+ */
+int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index)
+{
+ struct ppe_common_cb *ppe_common;
+ int ppe_num;
+
+ if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+ ppe_num = HNS_PPE_SERVICE_NW_ENGINE_NUM;
+ else
+ ppe_num = HNS_PPE_DEBUG_NW_ENGINE_NUM;
+
+ ppe_common = devm_kzalloc(dsaf_dev->dev, sizeof(*ppe_common) +
+ ppe_num * sizeof(struct hns_ppe_cb), GFP_KERNEL);
+ if (!ppe_common)
+ return -ENOMEM;
+
+ ppe_common->ppe_num = ppe_num;
+ ppe_common->dsaf_dev = dsaf_dev;
+ ppe_common->comm_index = comm_index;
+ if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+ ppe_common->ppe_mode = PPE_COMMON_MODE_SERVICE;
+ else
+ ppe_common->ppe_mode = PPE_COMMON_MODE_DEBUG;
+ ppe_common->dev = dsaf_dev->dev;
+
+ ppe_common->io_base = hns_ppe_common_get_ioaddr(ppe_common);
+
+ dsaf_dev->ppe_common[comm_index] = ppe_common;
+
+ return 0;
+}
+
+void hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
+{
+ dsaf_dev->ppe_common[comm_index] = NULL;
+}
+
+static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
+ int ppe_idx)
+{
+ void __iomem *base_addr;
+ int common_idx = ppe_common->comm_index;
+
+ if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) {
+ base_addr = ppe_common->dsaf_dev->ppe_base +
+ ppe_idx * PPE_REG_OFFSET;
+
+ } else {
+ base_addr = ppe_common->dsaf_dev->sds_base +
+ (common_idx - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET;
+ }
+
+ return base_addr;
+}
+
+static int hns_ppe_get_port(struct ppe_common_cb *ppe_common, int idx)
+{
+ int port;
+
+ if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE)
+ port = idx;
+ else
+ port = HNS_PPE_SERVICE_NW_ENGINE_NUM
+ + ppe_common->comm_index - 1;
+
+ return port;
+}
+
+static void hns_ppe_get_cfg(struct ppe_common_cb *ppe_common)
+{
+ u32 i;
+ struct hns_ppe_cb *ppe_cb;
+ u32 ppe_num = ppe_common->ppe_num;
+
+ for (i = 0; i < ppe_num; i++) {
+ ppe_cb = &ppe_common->ppe_cb[i];
+ ppe_cb->dev = ppe_common->dev;
+ ppe_cb->next = NULL;
+ ppe_cb->ppe_common_cb = ppe_common;
+ ppe_cb->index = i;
+ ppe_cb->port = hns_ppe_get_port(ppe_common, i);
+ ppe_cb->io_base = hns_ppe_get_iobase(ppe_common, i);
+ ppe_cb->virq = 0;
+ }
+}
+
+static void hns_ppe_cnt_clr_ce(struct hns_ppe_cb *ppe_cb)
+{
+ dsaf_set_dev_bit(ppe_cb, PPE_TNL_0_5_CNT_CLR_CE_REG,
+ PPE_CNT_CLR_CE_B, 1);
+}
+
+/**
+ * hns_ppe_checksum_hw - set ppe checksum calculation
+ * @ppe_cb: ppe control block
+ * @value: value to write
+ */
+static void hns_ppe_checksum_hw(struct hns_ppe_cb *ppe_cb, u32 value)
+{
+ dsaf_set_dev_field(ppe_cb, PPE_CFG_PRO_CHECK_EN_REG,
+ 0xfffffff, 0, value);
+}
+
+static void hns_ppe_set_qid_mode(struct ppe_common_cb *ppe_common,
+ enum ppe_qid_mode qid_mode)
+{
+ dsaf_set_dev_field(ppe_common, PPE_COM_CFG_QID_MODE_REG,
+ PPE_CFG_QID_MODE_CF_QID_MODE_M,
+ PPE_CFG_QID_MODE_CF_QID_MODE_S, qid_mode);
+}
+
+/**
+ * hns_ppe_set_qid - set ppe qid
+ * @ppe_common: ppe common device
+ * @qid: queue id
+ */
+static void hns_ppe_set_qid(struct ppe_common_cb *ppe_common, u32 qid)
+{
+ u32 qid_mod = dsaf_read_dev(ppe_common, PPE_COM_CFG_QID_MODE_REG);
+
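+ /* only program the default qid if it has not been set already */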
+ if (!dsaf_get_field(qid_mod, PPE_CFG_QID_MODE_DEF_QID_M,
+ PPE_CFG_QID_MODE_DEF_QID_S)) {
+ dsaf_set_field(qid_mod, PPE_CFG_QID_MODE_DEF_QID_M,
+ PPE_CFG_QID_MODE_DEF_QID_S, qid);
+ dsaf_write_dev(ppe_common, PPE_COM_CFG_QID_MODE_REG, qid_mod);
+ }
+}
+
+/**
+ * hns_ppe_set_port_mode - set port mode
+ * @ppe_cb: ppe control block
+ * @mode: port mode
+ */
+static void hns_ppe_set_port_mode(struct hns_ppe_cb *ppe_cb,
+ enum ppe_port_mode mode)
+{
+ dsaf_write_dev(ppe_cb, PPE_CFG_XGE_MODE_REG, mode);
+}
+
+/**
+ * hns_ppe_common_init_hw - init ppe common device
+ * @ppe_common: ppe common device
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_ppe_common_init_hw(struct ppe_common_cb *ppe_common)
+{
+ enum ppe_qid_mode qid_mode;
+ enum dsaf_mode dsaf_mode = ppe_common->dsaf_dev->dsaf_mode;
+
+ hns_ppe_com_srst(ppe_common, 0);
+ mdelay(100);
+ hns_ppe_com_srst(ppe_common, 1);
+ mdelay(100);
+
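+ /* only the service ppe derives its queue-id mode from the dsaf
+ * working mode; the debug ppe is left untouched here
+ */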
+ if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) {
+ switch (dsaf_mode) {
+ case DSAF_MODE_ENABLE_FIX:
+ case DSAF_MODE_DISABLE_FIX:
+ qid_mode = PPE_QID_MODE0;
+ hns_ppe_set_qid(ppe_common, 0);
+ break;
+ case DSAF_MODE_ENABLE_0VM:
+ case DSAF_MODE_DISABLE_2PORT_64VM:
+ qid_mode = PPE_QID_MODE3;
+ break;
+ case DSAF_MODE_ENABLE_8VM:
+ case DSAF_MODE_DISABLE_2PORT_16VM:
+ qid_mode = PPE_QID_MODE4;
+ break;
+ case DSAF_MODE_ENABLE_16VM:
+ case DSAF_MODE_DISABLE_6PORT_0VM:
+ qid_mode = PPE_QID_MODE5;
+ break;
+ case DSAF_MODE_ENABLE_32VM:
+ case DSAF_MODE_DISABLE_6PORT_16VM:
+ qid_mode = PPE_QID_MODE2;
+ break;
+ case DSAF_MODE_ENABLE_128VM:
+ case DSAF_MODE_DISABLE_6PORT_4VM:
+ qid_mode = PPE_QID_MODE1;
+ break;
+ case DSAF_MODE_DISABLE_2PORT_8VM:
+ qid_mode = PPE_QID_MODE7;
+ break;
+ case DSAF_MODE_DISABLE_6PORT_2VM:
+ qid_mode = PPE_QID_MODE6;
+ break;
+ default:
+ dev_err(ppe_common->dev,
+ "get ppe queue mode failed! dsaf_mode=%d\n",
+ dsaf_mode);
+ return -EINVAL;
+ }
+ hns_ppe_set_qid_mode(ppe_common, qid_mode);
+ }
+
+ dsaf_set_dev_bit(ppe_common, PPE_COM_COMMON_CNT_CLR_CE_REG,
+ PPE_COMMON_CNT_CLR_CE_B, 1);
+
+ return 0;
+}
+
+/* clear the ppe exception irq status and enable/disable the irqs */
+static void hns_ppe_exc_irq_en(struct hns_ppe_cb *ppe_cb, int en)
+{
+ u32 clr_value = 0xfffffffful;
+ u32 msk_value = en ? 0xfffffffful : 0; /* 1 is enable, 0 is disable */
+ u32 vld_msk = 0;
+
+ /* only bits 0, 1 and 7 are valid */
+ dsaf_set_bit(vld_msk, 0, 1);
+ dsaf_set_bit(vld_msk, 1, 1);
+ dsaf_set_bit(vld_msk, 7, 1);
+
+ /* clear the interrupt status */
+ dsaf_write_dev(ppe_cb, PPE_RINT_REG, clr_value);
+
+ /* keep the reserved bits cleared in the interrupt enable mask */
+ dsaf_write_dev(ppe_cb, PPE_INTEN_REG, msk_value & vld_msk);
+}
+
+/**
+ * hns_ppe_init_hw - init ppe
+ * @ppe_cb: ppe control block
+ */
+static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
+{
+ struct ppe_common_cb *ppe_common_cb = ppe_cb->ppe_common_cb;
+ u32 port = ppe_cb->port;
+ struct dsaf_device *dsaf_dev = ppe_common_cb->dsaf_dev;
+
+ hns_ppe_srst_by_port(dsaf_dev, port, 0);
+ mdelay(10);
+ hns_ppe_srst_by_port(dsaf_dev, port, 1);
+
+ /* clr and msk except irq*/
+ hns_ppe_exc_irq_en(ppe_cb, 0);
+
+ if (ppe_common_cb->ppe_mode == PPE_COMMON_MODE_DEBUG)
+ hns_ppe_set_port_mode(ppe_cb, PPE_MODE_GE);
+ else
+ hns_ppe_set_port_mode(ppe_cb, PPE_MODE_XGE);
+ hns_ppe_checksum_hw(ppe_cb, 0xffffffff);
+ hns_ppe_cnt_clr_ce(ppe_cb);
+}
+
+/**
+ * hns_ppe_uninit_hw - uninit ppe
+ * @ppe_cb: ppe control block
+ */
+static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb)
+{
+ u32 port;
+
+ if (ppe_cb->ppe_common_cb) {
+ port = ppe_cb->index;
+ hns_ppe_srst_by_port(ppe_cb->ppe_common_cb->dsaf_dev, port, 0);
+ }
+}
+
+void hns_ppe_uninit_ex(struct ppe_common_cb *ppe_common)
+{
+ u32 i;
+
+ for (i = 0; i < ppe_common->ppe_num; i++) {
+ hns_ppe_uninit_hw(&ppe_common->ppe_cb[i]);
+ memset(&ppe_common->ppe_cb[i], 0, sizeof(struct hns_ppe_cb));
+ }
+}
+
+void hns_ppe_uninit(struct dsaf_device *dsaf_dev)
+{
+ u32 i;
+
+ for (i = 0; i < HNS_PPE_COM_NUM; i++) {
+ if (dsaf_dev->ppe_common[i])
+ hns_ppe_uninit_ex(dsaf_dev->ppe_common[i]);
+ hns_rcb_common_free_cfg(dsaf_dev, i);
+ hns_ppe_common_free_cfg(dsaf_dev, i);
+ }
+}
+
+/**
+ * hns_ppe_reset_common - reinit the ppe/rcb hw
+ * @dsaf_dev: dsaf device
+ * @ppe_common_index: ppe common index
+ */
+void hns_ppe_reset_common(struct dsaf_device *dsaf_dev, u8 ppe_common_index)
+{
+ u32 i;
+ int ret;
+ struct ppe_common_cb *ppe_common;
+
+ ppe_common = dsaf_dev->ppe_common[ppe_common_index];
+ ret = hns_ppe_common_init_hw(ppe_common);
+ if (ret)
+ return;
+
+ ret = hns_rcb_common_init_hw(dsaf_dev->rcb_common[ppe_common_index]);
+ if (ret)
+ return;
+
+ for (i = 0; i < ppe_common->ppe_num; i++)
+ hns_ppe_init_hw(&ppe_common->ppe_cb[i]);
+
+ hns_rcb_common_init_commit_hw(dsaf_dev->rcb_common[ppe_common_index]);
+}
+
+void hns_ppe_update_stats(struct hns_ppe_cb *ppe_cb)
+{
+ struct hns_ppe_hw_stats *hw_stats = &ppe_cb->hw_stats;
+
+ hw_stats->rx_pkts_from_sw
+ += dsaf_read_dev(ppe_cb, PPE_HIS_RX_SW_PKT_CNT_REG);
+ hw_stats->rx_pkts
+ += dsaf_read_dev(ppe_cb, PPE_HIS_RX_WR_BD_OK_PKT_CNT_REG);
+ hw_stats->rx_drop_no_bd
+ += dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_NO_BUF_CNT_REG);
+ hw_stats->rx_alloc_buf_fail
+ += dsaf_read_dev(ppe_cb, PPE_HIS_RX_APP_BUF_FAIL_CNT_REG);
+ hw_stats->rx_alloc_buf_wait
+ += dsaf_read_dev(ppe_cb, PPE_HIS_RX_APP_BUF_WAIT_CNT_REG);
+ hw_stats->rx_drop_no_buf
+ += dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_DROP_FUL_CNT_REG);
+ hw_stats->rx_err_fifo_full
+ += dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_DROP_PRT_CNT_REG);
+
+ hw_stats->tx_bd_form_rcb
+ += dsaf_read_dev(ppe_cb, PPE_HIS_TX_BD_CNT_REG);
+ hw_stats->tx_pkts_from_rcb
+ += dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_CNT_REG);
+ hw_stats->tx_pkts
+ += dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_OK_CNT_REG);
+ hw_stats->tx_err_fifo_empty
+ += dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_EPT_CNT_REG);
+ hw_stats->tx_err_checksum
+ += dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_CS_FAIL_CNT_REG);
+}
+
+int hns_ppe_get_sset_count(int stringset)
+{
+ if (stringset == ETH_SS_STATS)
+ return ETH_PPE_STATIC_NUM;
+ return 0;
+}
+
+int hns_ppe_get_regs_count(void)
+{
+ return ETH_PPE_DUMP_NUM;
+}
+
+/**
+ * hns_ppe_get_strings - get ppe statistics strings
+ * @ppe_cb: ppe control block
+ * @stringset: string set type
+ * @data: output string
+ */
+void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 *data)
+{
+ char *buff = (char *)data;
+ int index = ppe_cb->index;
+
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_sw_pkt", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_pkt_ok", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_drop_pkt_no_bd", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_alloc_buf_fail", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_alloc_buf_wait", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_pkt_drop_no_buf", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_pkt_err_fifo_full", index);
+ buff = buff + ETH_GSTRING_LEN;
+
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_bd", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt_ok", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt_err_fifo_empty", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt_err_csum_fail", index);
+}
+
+void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data)
+{
+ u64 *regs_buff = data;
+ struct hns_ppe_hw_stats *hw_stats = &ppe_cb->hw_stats;
+
+ regs_buff[0] = hw_stats->rx_pkts_from_sw;
+ regs_buff[1] = hw_stats->rx_pkts;
+ regs_buff[2] = hw_stats->rx_drop_no_bd;
+ regs_buff[3] = hw_stats->rx_alloc_buf_fail;
+ regs_buff[4] = hw_stats->rx_alloc_buf_wait;
+ regs_buff[5] = hw_stats->rx_drop_no_buf;
+ regs_buff[6] = hw_stats->rx_err_fifo_full;
+
+ regs_buff[7] = hw_stats->tx_bd_form_rcb;
+ regs_buff[8] = hw_stats->tx_pkts_from_rcb;
+ regs_buff[9] = hw_stats->tx_pkts;
+ regs_buff[10] = hw_stats->tx_err_fifo_empty;
+ regs_buff[11] = hw_stats->tx_err_checksum;
+}
+
+/**
+ * hns_ppe_init - init ppe device
+ * @dsaf_dev: dsaf device
+ * return: 0 on success, negative on failure
+ */
+int hns_ppe_init(struct dsaf_device *dsaf_dev)
+{
+ int i, k;
+ int ret;
+
+ for (i = 0; i < HNS_PPE_COM_NUM; i++) {
+ ret = hns_ppe_common_get_cfg(dsaf_dev, i);
+ if (ret)
+ goto get_ppe_cfg_fail;
+
+ ret = hns_rcb_common_get_cfg(dsaf_dev, i);
+ if (ret)
+ goto get_rcb_cfg_fail;
+
+ hns_ppe_get_cfg(dsaf_dev->ppe_common[i]);
+
+ hns_rcb_get_cfg(dsaf_dev->rcb_common[i]);
+ }
+
+ for (i = 0; i < HNS_PPE_COM_NUM; i++)
+ hns_ppe_reset_common(dsaf_dev, i);
+
+ return 0;
+
+get_rcb_cfg_fail:
+ hns_ppe_common_free_cfg(dsaf_dev, i);
+get_ppe_cfg_fail:
+ for (k = i - 1; k >= 0; k--) {
+ hns_rcb_common_free_cfg(dsaf_dev, k);
+ hns_ppe_common_free_cfg(dsaf_dev, k);
+ }
+ return ret;
+}
+
+void hns_ppe_get_regs(struct hns_ppe_cb *ppe_cb, void *data)
+{
+ struct ppe_common_cb *ppe_common = ppe_cb->ppe_common_cb;
+ u32 *regs = data;
+ u32 i;
+ u32 offset;
+
+ /* ppe common registers */
+ regs[0] = dsaf_read_dev(ppe_common, PPE_COM_CFG_QID_MODE_REG);
+ regs[1] = dsaf_read_dev(ppe_common, PPE_COM_INTEN_REG);
+ regs[2] = dsaf_read_dev(ppe_common, PPE_COM_RINT_REG);
+ regs[3] = dsaf_read_dev(ppe_common, PPE_COM_INTSTS_REG);
+ regs[4] = dsaf_read_dev(ppe_common, PPE_COM_COMMON_CNT_CLR_CE_REG);
+
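+ /* per-queue rx drop/ok and tx err/ok counters, laid out as four
+ * consecutive blocks of DSAF_TOTAL_QUEUE_NUM entries from regs[5]
+ */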
+ for (i = 0; i < DSAF_TOTAL_QUEUE_NUM; i++) {
+ offset = PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG + 0x4 * i;
+ regs[5 + i] = dsaf_read_dev(ppe_common, offset);
+ offset = PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG + 0x4 * i;
+ regs[5 + i + DSAF_TOTAL_QUEUE_NUM]
+ = dsaf_read_dev(ppe_common, offset);
+ offset = PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG + 0x4 * i;
+ regs[5 + i + DSAF_TOTAL_QUEUE_NUM * 2]
+ = dsaf_read_dev(ppe_common, offset);
+ offset = PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG + 0x4 * i;
+ regs[5 + i + DSAF_TOTAL_QUEUE_NUM * 3]
+ = dsaf_read_dev(ppe_common, offset);
+ }
+
+ /* mark end of ppe regs */
+ for (i = 521; i < 524; i++)
+ regs[i] = 0xeeeeeeee;
+
+ /* ppe channel registers */
+ regs[525] = dsaf_read_dev(ppe_cb, PPE_CFG_TX_FIFO_THRSLD_REG);
+ regs[526] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_FIFO_THRSLD_REG);
+ regs[527] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_FIFO_PAUSE_THRSLD_REG);
+ regs[528] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_FIFO_SW_BP_THRSLD_REG);
+ regs[529] = dsaf_read_dev(ppe_cb, PPE_CFG_PAUSE_IDLE_CNT_REG);
+ regs[530] = dsaf_read_dev(ppe_cb, PPE_CFG_BUS_CTRL_REG);
+ regs[531] = dsaf_read_dev(ppe_cb, PPE_CFG_TNL_TO_BE_RST_REG);
+ regs[532] = dsaf_read_dev(ppe_cb, PPE_CURR_TNL_CAN_RST_REG);
+
+ regs[533] = dsaf_read_dev(ppe_cb, PPE_CFG_XGE_MODE_REG);
+ regs[534] = dsaf_read_dev(ppe_cb, PPE_CFG_MAX_FRAME_LEN_REG);
+ regs[535] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_PKT_MODE_REG);
+ regs[536] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_VLAN_TAG_REG);
+ regs[537] = dsaf_read_dev(ppe_cb, PPE_CFG_TAG_GEN_REG);
+ regs[538] = dsaf_read_dev(ppe_cb, PPE_CFG_PARSE_TAG_REG);
+ regs[539] = dsaf_read_dev(ppe_cb, PPE_CFG_PRO_CHECK_EN_REG);
+
+ regs[540] = dsaf_read_dev(ppe_cb, PPE_INTEN_REG);
+ regs[541] = dsaf_read_dev(ppe_cb, PPE_RINT_REG);
+ regs[542] = dsaf_read_dev(ppe_cb, PPE_INTSTS_REG);
+ regs[543] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_PKT_INT_REG);
+
+ regs[544] = dsaf_read_dev(ppe_cb, PPE_CFG_HEAT_DECT_TIME0_REG);
+ regs[545] = dsaf_read_dev(ppe_cb, PPE_CFG_HEAT_DECT_TIME1_REG);
+
+ /* ppe static */
+ regs[546] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_SW_PKT_CNT_REG);
+ regs[547] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_WR_BD_OK_PKT_CNT_REG);
+ regs[548] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_NO_BUF_CNT_REG);
+ regs[549] = dsaf_read_dev(ppe_cb, PPE_HIS_TX_BD_CNT_REG);
+ regs[550] = dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_CNT_REG);
+ regs[551] = dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_OK_CNT_REG);
+ regs[552] = dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_EPT_CNT_REG);
+ regs[553] = dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_CS_FAIL_CNT_REG);
+ regs[554] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_APP_BUF_FAIL_CNT_REG);
+ regs[555] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_APP_BUF_WAIT_CNT_REG);
+ regs[556] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_DROP_FUL_CNT_REG);
+ regs[557] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_DROP_PRT_CNT_REG);
+
+ regs[558] = dsaf_read_dev(ppe_cb, PPE_TNL_0_5_CNT_CLR_CE_REG);
+ regs[559] = dsaf_read_dev(ppe_cb, PPE_CFG_AXI_DBG_REG);
+ regs[560] = dsaf_read_dev(ppe_cb, PPE_HIS_PRO_ERR_REG);
+ regs[561] = dsaf_read_dev(ppe_cb, PPE_HIS_TNL_FIFO_ERR_REG);
+ regs[562] = dsaf_read_dev(ppe_cb, PPE_CURR_CFF_DATA_NUM_REG);
+ regs[563] = dsaf_read_dev(ppe_cb, PPE_CURR_RX_ST_REG);
+ regs[564] = dsaf_read_dev(ppe_cb, PPE_CURR_TX_ST_REG);
+ regs[565] = dsaf_read_dev(ppe_cb, PPE_CURR_RX_FIFO0_REG);
+ regs[566] = dsaf_read_dev(ppe_cb, PPE_CURR_RX_FIFO1_REG);
+ regs[567] = dsaf_read_dev(ppe_cb, PPE_CURR_TX_FIFO0_REG);
+ regs[568] = dsaf_read_dev(ppe_cb, PPE_CURR_TX_FIFO1_REG);
+ regs[569] = dsaf_read_dev(ppe_cb, PPE_ECO0_REG);
+ regs[570] = dsaf_read_dev(ppe_cb, PPE_ECO1_REG);
+ regs[571] = dsaf_read_dev(ppe_cb, PPE_ECO2_REG);
+
+ /* mark end of ppe regs */
+ for (i = 572; i < 576; i++)
+ regs[i] = 0xeeeeeeee;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
new file mode 100644
index 000000000000..4894f9a0d39f
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _HNS_DSAF_PPE_H
+#define _HNS_DSAF_PPE_H
+
+#include <linux/platform_device.h>
+
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_rcb.h"
+
+#define HNS_PPE_SERVICE_NW_ENGINE_NUM DSAF_COMM_CHN
+#define HNS_PPE_DEBUG_NW_ENGINE_NUM 1
+#define HNS_PPE_COM_NUM DSAF_COMM_DEV_NUM
+
+#define PPE_COMMON_REG_OFFSET 0x70000
+#define PPE_REG_OFFSET 0x10000
+
+#define ETH_PPE_DUMP_NUM 576
+#define ETH_PPE_STATIC_NUM 12
+enum ppe_qid_mode {
+ PPE_QID_MODE0 = 0, /* fixed queue id mode */
+ PPE_QID_MODE1, /* switch:128VM non switch:6Port/4VM/4TC */
+ PPE_QID_MODE2, /* switch:32VM/4TC non switch:6Port/16VM */
+ PPE_QID_MODE3, /* switch:4TC/8TAG non switch:2Port/64VM */
+ PPE_QID_MODE4, /* switch:8VM/16TAG non switch:2Port/16VM/4TC */
+ PPE_QID_MODE5, /* non switch:6Port/16TAG */
+ PPE_QID_MODE6, /* non switch:6Port/2VM/8TC */
+ PPE_QID_MODE7, /* non switch:2Port/8VM/8TC */
+};
+
+enum ppe_port_mode {
+ PPE_MODE_GE = 0,
+ PPE_MODE_XGE,
+};
+
+enum ppe_common_mode {
+ PPE_COMMON_MODE_DEBUG = 0,
+ PPE_COMMON_MODE_SERVICE,
+ PPE_COMMON_MODE_MAX
+};
+
+struct hns_ppe_hw_stats {
+ u64 rx_pkts_from_sw;
+ u64 rx_pkts;
+ u64 rx_drop_no_bd;
+ u64 rx_alloc_buf_fail;
+ u64 rx_alloc_buf_wait;
+ u64 rx_drop_no_buf;
+ u64 rx_err_fifo_full;
+ u64 tx_bd_form_rcb;
+ u64 tx_pkts_from_rcb;
+ u64 tx_pkts;
+ u64 tx_err_fifo_empty;
+ u64 tx_err_checksum;
+};
+
+struct hns_ppe_cb {
+ struct device *dev;
+ struct hns_ppe_cb *next; /* pointer to next ppe device */
+ struct ppe_common_cb *ppe_common_cb; /* belong to */
+ struct hns_ppe_hw_stats hw_stats;
+
+ u8 index; /* index in a ppe common device */
+ u8 port; /* port id in dsaf */
+ void __iomem *io_base;
+ int virq;
+};
+
+struct ppe_common_cb {
+ struct device *dev;
+ struct dsaf_device *dsaf_dev;
+ void __iomem *io_base;
+
+ enum ppe_common_mode ppe_mode;
+
+ u8 comm_index; /*ppe_common index*/
+
+ u32 ppe_num;
+ struct hns_ppe_cb ppe_cb[0];
+
+};
+
+int hns_ppe_init(struct dsaf_device *dsaf_dev);
+
+void hns_ppe_uninit(struct dsaf_device *dsaf_dev);
+
+void hns_ppe_reset_common(struct dsaf_device *dsaf_dev, u8 ppe_common_index);
+
+void hns_ppe_update_stats(struct hns_ppe_cb *ppe_cb);
+
+int hns_ppe_get_sset_count(int stringset);
+int hns_ppe_get_regs_count(void);
+void hns_ppe_get_regs(struct hns_ppe_cb *ppe_cb, void *data);
+
+void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 *data);
+void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data);
+#endif /* _HNS_DSAF_PPE_H */
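
struct ppe_common_cb above ends with a zero-length array, so a single allocation holds the common block followed by ppe_num per-engine control blocks. The sketch below shows the sizing arithmetic with a stripped-down stand-in struct; it is illustrative only (the real driver allocates with devm_kzalloc() and uses the GNU [0] form rather than calloc() and a C99 flexible array member):

/* Minimal sketch of the flexible-array allocation pattern used by
 * struct ppe_common_cb above: one block for the header plus num
 * per-engine entries.  The types here are hypothetical stand-ins.
 */
#include <stdio.h>
#include <stdlib.h>

struct engine_cb {
	int index;
};

struct common_cb {
	unsigned int num;
	struct engine_cb cb[];		/* flexible array member */
};

static struct common_cb *common_alloc(unsigned int num)
{
	struct common_cb *c = calloc(1, sizeof(*c) + num * sizeof(struct engine_cb));

	if (c)
		c->num = num;
	return c;
}

int main(void)
{
	struct common_cb *c = common_alloc(6);	/* e.g. HNS_PPE_SERVICE_NW_ENGINE_NUM */

	if (!c)
		return 1;
	printf("allocated %zu bytes for %u engines\n",
	       sizeof(*c) + c->num * sizeof(struct engine_cb), c->num);
	free(c);
	return 0;
}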
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
new file mode 100644
index 000000000000..4db32c62f062
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -0,0 +1,1021 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/cdev.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <asm/cacheflush.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_ppe.h"
+#include "hns_dsaf_rcb.h"
+
+#define RCB_COMMON_REG_OFFSET 0x80000
+#define TX_RING 0
+#define RX_RING 1
+
+#define RCB_RESET_WAIT_TIMES 30
+#define RCB_RESET_TRY_TIMES 10
+
+/**
+ *hns_rcb_wait_fbd_clean - wait until the pending buffer descriptors (fbd) are drained
+ *@qs: ring struct pointer array
+ *@q_num: number of queues in the array
+ *@flag: tx or rx flag
+ */
+void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
+{
+ int i, wait_cnt;
+ u32 fbd_num;
+
+ for (wait_cnt = i = 0; i < q_num; wait_cnt++) {
+ usleep_range(200, 300);
+ fbd_num = 0;
+ if (flag & RCB_INT_FLAG_TX)
+ fbd_num += dsaf_read_dev(qs[i],
+ RCB_RING_TX_RING_FBDNUM_REG);
+ if (flag & RCB_INT_FLAG_RX)
+ fbd_num += dsaf_read_dev(qs[i],
+ RCB_RING_RX_RING_FBDNUM_REG);
+ if (!fbd_num)
+ i++;
+ if (wait_cnt >= 10000)
+ break;
+ }
+
+ if (i < q_num)
+ dev_err(qs[i]->handle->owner_dev,
+ "queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num);
+}
+
+/**
+ *hns_rcb_reset_ring_hw - ring reset
+ *@q: ring struct pointer
+ */
+void hns_rcb_reset_ring_hw(struct hnae_queue *q)
+{
+ u32 wait_cnt;
+ u32 try_cnt = 0;
+ u32 could_ret;
+
+ u32 tx_fbd_num;
+
+ while (try_cnt++ < RCB_RESET_TRY_TIMES) {
+ usleep_range(100, 200);
+ tx_fbd_num = dsaf_read_dev(q, RCB_RING_TX_RING_FBDNUM_REG);
+ if (tx_fbd_num)
+ continue;
+
+ dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, 0);
+
+ dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);
+
+ msleep(20);
+ could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);
+
+ wait_cnt = 0;
+ while (!could_ret && (wait_cnt < RCB_RESET_WAIT_TIMES)) {
+ dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);
+
+ dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);
+
+ msleep(20);
+ could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);
+
+ wait_cnt++;
+ }
+
+ dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);
+
+ if (could_ret)
+ break;
+ }
+
+ if (try_cnt >= RCB_RESET_TRY_TIMES)
+ dev_err(q->dev->dev, "port%d reset ring fail\n",
+ hns_ae_get_vf_cb(q->handle)->port_index);
+}
+
+/**
+ *hns_rcb_int_ctrl_hw - rcb irq enable control
+ *@q: hnae queue struct pointer
+ *@flag: ring flag, tx or rx
+ *@mask: interrupt mask value, reduced to 0 or 1 before being written
+ */
+void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
+{
+ u32 int_mask_en = !!mask;
+
+ if (flag & RCB_INT_FLAG_TX) {
+ dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);
+ dsaf_write_dev(q, RCB_RING_INTMSK_TX_OVERTIME_REG,
+ int_mask_en);
+ }
+
+ if (flag & RCB_INT_FLAG_RX) {
+ dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
+ dsaf_write_dev(q, RCB_RING_INTMSK_RX_OVERTIME_REG,
+ int_mask_en);
+ }
+}
+
+void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag)
+{
+ u32 clr = 1;
+
+ if (flag & RCB_INT_FLAG_TX) {
+ dsaf_write_dev(q, RCB_RING_INTSTS_TX_RING_REG, clr);
+ dsaf_write_dev(q, RCB_RING_INTSTS_TX_OVERTIME_REG, clr);
+ }
+
+ if (flag & RCB_INT_FLAG_RX) {
+ dsaf_write_dev(q, RCB_RING_INTSTS_RX_RING_REG, clr);
+ dsaf_write_dev(q, RCB_RING_INTSTS_RX_OVERTIME_REG, clr);
+ }
+}
+
+/**
+ *hns_rcb_ring_enable_hw - enable or disable ring prefetch
+ *@q: hnae queue of the ring
+ *@val: non-zero to enable, zero to disable
+ */
+void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
+{
+ dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val);
+}
+
+void hns_rcb_start(struct hnae_queue *q, u32 val)
+{
+ hns_rcb_ring_enable_hw(q, val);
+}
+
+/**
+ *hns_rcb_common_init_commit_hw - commit the rcb common initialization to hardware
+ *@rcb_common: rcb common device
+ */
+void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
+{
+ wmb(); /* Sync point before breakpoint */
+ dsaf_write_dev(rcb_common, RCB_COM_CFG_SYS_FSH_REG, 1);
+ wmb(); /* Sync point after breakpoint */
+}
+
+/**
+ *hns_rcb_ring_init - init rcb ring
+ *@ring_pair: ring pair control block
+ *@ring_type: ring type, RX_RING or TX_RING
+ */
+static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
+{
+ struct hnae_queue *q = &ring_pair->q;
+ struct rcb_common_cb *rcb_common = ring_pair->rcb_common;
+ u32 bd_size_type = rcb_common->dsaf_dev->buf_size_type;
+ struct hnae_ring *ring =
+ (ring_type == RX_RING) ? &q->rx_ring : &q->tx_ring;
+ dma_addr_t dma = ring->desc_dma_addr;
+
+ if (ring_type == RX_RING) {
+ dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_L_REG,
+ (u32)dma);
+ dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG,
+ (u32)((dma >> 31) >> 1));
+ dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
+ bd_size_type);
+ dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
+ ring_pair->port_id_in_dsa);
+ dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
+ ring_pair->port_id_in_dsa);
+ } else {
+ dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG,
+ (u32)dma);
+ dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG,
+ (u32)((dma >> 31) >> 1));
+ dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
+ bd_size_type);
+ dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
+ ring_pair->port_id_in_dsa);
+ dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
+ ring_pair->port_id_in_dsa);
+ }
+}
+
+/**
+ *hns_rcb_init_hw - init rcb hardware
+ *@ring: rcb ring
+ */
+void hns_rcb_init_hw(struct ring_pair_cb *ring)
+{
+ hns_rcb_ring_init(ring, RX_RING);
+ hns_rcb_ring_init(ring, TX_RING);
+}
+
+/**
+ *hns_rcb_set_port_desc_cnt - set rcb port descriptor number
+ *@rcb_common: rcb_common device
+ *@port_idx:port index
+ *@desc_cnt:BD num
+ */
+static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common,
+ u32 port_idx, u32 desc_cnt)
+{
+ if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM)
+ port_idx = 0;
+
+ dsaf_write_dev(rcb_common, RCB_CFG_BD_NUM_REG + port_idx * 4,
+ desc_cnt);
+}
+
+/**
+ *hns_rcb_set_port_coalesced_frames - set rcb port coalesced frames
+ *@rcb_common: rcb_common device
+ *@port_idx:port index
+ *@coalesced_frames: BD num for coalesced frames
+ *return 0 - success, -EINVAL - coalesced_frames out of range
+ */
+static int hns_rcb_set_port_coalesced_frames(struct rcb_common_cb *rcb_common,
+ u32 port_idx,
+ u32 coalesced_frames)
+{
+ if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM)
+ port_idx = 0;
+ if (coalesced_frames >= rcb_common->desc_num ||
+ coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES)
+ return -EINVAL;
+
+ dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
+ coalesced_frames);
+ return 0;
+}
+
+/**
+ *hns_rcb_get_port_coalesced_frames - get rcb port coalesced frames
+ *@rcb_common: rcb_common device
+ *@port_idx: port index
+ * return coalesced frames value
+ */
+static u32 hns_rcb_get_port_coalesced_frames(struct rcb_common_cb *rcb_common,
+ u32 port_idx)
+{
+ if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM)
+ port_idx = 0;
+
+ return dsaf_read_dev(rcb_common,
+ RCB_CFG_PKTLINE_REG + port_idx * 4);
+}
+
+/**
+ *hns_rcb_set_timeout - set rcb coalesced time_out
+ *@rcb_common: rcb_common device
+ *@timeout: time for coalesced time_out
+ */
+static void hns_rcb_set_timeout(struct rcb_common_cb *rcb_common,
+ u32 timeout)
+{
+ dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG, timeout);
+}
+
+static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
+{
+ if (rcb_common->comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+ return HNS_RCB_SERVICE_NW_ENGINE_NUM;
+ else
+ return HNS_RCB_DEBUG_NW_ENGINE_NUM;
+}
+
+/* clear rcb comm exception irq */
+static void hns_rcb_comm_exc_irq_en(
+ struct rcb_common_cb *rcb_common, int en)
+{
+ u32 clr_vlue = 0xfffffffful;
+ u32 msk_vlue = en ? 0 : 0xfffffffful;
+
+ /* clr int*/
+ dsaf_write_dev(rcb_common, RCB_COM_INTSTS_ECC_ERR_REG, clr_vlue);
+
+ dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_RING_STS, clr_vlue);
+
+ dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_BD_RINT_STS, clr_vlue);
+
+ dsaf_write_dev(rcb_common, RCB_COM_RINT_TX_PKT_REG, clr_vlue);
+ dsaf_write_dev(rcb_common, RCB_COM_AXI_ERR_STS, clr_vlue);
+
+ /*en msk*/
+ dsaf_write_dev(rcb_common, RCB_COM_INTMASK_ECC_ERR_REG, msk_vlue);
+
+ dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_RING, msk_vlue);
+
+ /* tx bd does not need cacheline, so mask sf_txring_fbd_intmask (bit 1) */
+ dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_BD, msk_vlue | 2);
+
+ dsaf_write_dev(rcb_common, RCB_COM_INTMSK_TX_PKT_REG, msk_vlue);
+ dsaf_write_dev(rcb_common, RCB_COM_AXI_WR_ERR_INTMASK, msk_vlue);
+}
+
+/**
+ *hns_rcb_common_init_hw - init rcb common hardware
+ *@rcb_common: rcb_common device
+ *return 0 - success, negative - fail
+ */
+int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common)
+{
+ u32 reg_val;
+ int i;
+ int port_num = hns_rcb_common_get_port_num(rcb_common);
+
+ hns_rcb_comm_exc_irq_en(rcb_common, 0);
+
+ reg_val = dsaf_read_dev(rcb_common, RCB_COM_CFG_INIT_FLAG_REG);
+ if (0x1 != (reg_val & 0x1)) {
+ dev_err(rcb_common->dsaf_dev->dev,
+ "RCB_COM_CFG_INIT_FLAG_REG reg = 0x%x\n", reg_val);
+ return -EBUSY;
+ }
+
+ for (i = 0; i < port_num; i++) {
+ hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num);
+ (void)hns_rcb_set_port_coalesced_frames(
+ rcb_common, i, rcb_common->coalesced_frames);
+ }
+ hns_rcb_set_timeout(rcb_common, rcb_common->timeout);
+
+ dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG,
+ HNS_RCB_COMMON_ENDIAN);
+
+ return 0;
+}
+
+int hns_rcb_buf_size2type(u32 buf_size)
+{
+ int bd_size_type;
+
+ switch (buf_size) {
+ case 512:
+ bd_size_type = HNS_BD_SIZE_512_TYPE;
+ break;
+ case 1024:
+ bd_size_type = HNS_BD_SIZE_1024_TYPE;
+ break;
+ case 2048:
+ bd_size_type = HNS_BD_SIZE_2048_TYPE;
+ break;
+ case 4096:
+ bd_size_type = HNS_BD_SIZE_4096_TYPE;
+ break;
+ default:
+ bd_size_type = -EINVAL;
+ }
+
+ return bd_size_type;
+}
+
+static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
+{
+ struct hnae_ring *ring;
+ struct rcb_common_cb *rcb_common;
+ struct ring_pair_cb *ring_pair_cb;
+ u32 buf_size;
+ u16 desc_num;
+ int irq_idx;
+
+ ring_pair_cb = container_of(q, struct ring_pair_cb, q);
+ if (ring_type == RX_RING) {
+ ring = &q->rx_ring;
+ ring->io_base = ring_pair_cb->q.io_base;
+ irq_idx = HNS_RCB_IRQ_IDX_RX;
+ } else {
+ ring = &q->tx_ring;
+ ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base +
+ HNS_RCB_TX_REG_OFFSET;
+ irq_idx = HNS_RCB_IRQ_IDX_TX;
+ }
+
+ rcb_common = ring_pair_cb->rcb_common;
+ buf_size = rcb_common->dsaf_dev->buf_size;
+ desc_num = rcb_common->dsaf_dev->desc_num;
+
+ ring->desc = NULL;
+ ring->desc_cb = NULL;
+
+ ring->irq = ring_pair_cb->virq[irq_idx];
+ ring->desc_dma_addr = 0;
+
+ ring->buf_size = buf_size;
+ ring->desc_num = desc_num;
+ ring->max_desc_num_per_pkt = HNS_RCB_RING_MAX_BD_PER_PKT;
+ ring->max_raw_data_sz_per_desc = HNS_RCB_MAX_PKT_SIZE;
+ ring->max_pkt_size = HNS_RCB_MAX_PKT_SIZE;
+ ring->next_to_use = 0;
+ ring->next_to_clean = 0;
+}
+
+static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
+{
+ ring_pair_cb->q.handle = NULL;
+
+ hns_rcb_ring_get_cfg(&ring_pair_cb->q, RX_RING);
+ hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING);
+}
+
+static int hns_rcb_get_port(struct rcb_common_cb *rcb_common, int ring_idx)
+{
+ int comm_index = rcb_common->comm_index;
+ int port;
+ int q_num;
+
+ if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+ q_num = (int)rcb_common->max_q_per_vf * rcb_common->max_vfn;
+ port = ring_idx / q_num;
+ } else {
+ port = HNS_RCB_SERVICE_NW_ENGINE_NUM + comm_index - 1;
+ }
+
+ return port;
+}
+
+static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common)
+{
+ int comm_index = rcb_common->comm_index;
+
+ if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+ return HNS_SERVICE_RING_IRQ_IDX;
+ else
+ return HNS_DEBUG_RING_IRQ_IDX + (comm_index - 1) * 2;
+}
+
+#define RCB_COMM_BASE_TO_RING_BASE(base, ringid)\
+ ((base) + 0x10000 + HNS_RCB_REG_OFFSET * (ringid))
+/**
+ *hns_rcb_get_cfg - get rcb config
+ *@rcb_common: rcb common device
+ */
+void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
+{
+ struct ring_pair_cb *ring_pair_cb;
+ u32 i;
+ u32 ring_num = rcb_common->ring_num;
+ int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common);
+ struct device_node *np = rcb_common->dsaf_dev->dev->of_node;
+
+ for (i = 0; i < ring_num; i++) {
+ ring_pair_cb = &rcb_common->ring_pair_cb[i];
+ ring_pair_cb->rcb_common = rcb_common;
+ ring_pair_cb->dev = rcb_common->dsaf_dev->dev;
+ ring_pair_cb->index = i;
+ ring_pair_cb->q.io_base =
+ RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i);
+ ring_pair_cb->port_id_in_dsa = hns_rcb_get_port(rcb_common, i);
+ ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX]
+ = irq_of_parse_and_map(np, base_irq_idx + i * 2);
+ ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX]
+ = irq_of_parse_and_map(np, base_irq_idx + i * 2 + 1);
+ ring_pair_cb->q.phy_base =
+ RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i);
+ hns_rcb_ring_pair_get_cfg(ring_pair_cb);
+ }
+}
+
+/**
+ *hns_rcb_get_coalesced_frames - get rcb port coalesced frames
+ *@dsaf_dev: dsa fabric device
+ *@port: port index
+ *return coalesced_frames
+ */
+u32 hns_rcb_get_coalesced_frames(struct dsaf_device *dsaf_dev, int port)
+{
+ int comm_index = hns_dsaf_get_comm_idx_by_port(port);
+ struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
+
+ return hns_rcb_get_port_coalesced_frames(rcb_comm, port);
+}
+
+/**
+ *hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out
+ *@dsaf_dev: dsa fabric device
+ *@comm_index: rcb common device index
+ *return time_out
+ */
+u32 hns_rcb_get_coalesce_usecs(struct dsaf_device *dsaf_dev, int comm_index)
+{
+ struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
+
+ return rcb_comm->timeout;
+}
+
+/**
+ *hns_rcb_set_coalesce_usecs - set rcb port coalesced time_out
+ *@dsaf_dev: dsa fabric device
+ *@port: port index
+ *@timeout: time for coalesced time_out
+ */
+void hns_rcb_set_coalesce_usecs(struct dsaf_device *dsaf_dev,
+ int port, u32 timeout)
+{
+ int comm_index = hns_dsaf_get_comm_idx_by_port(port);
+ struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
+
+ if (rcb_comm->timeout == timeout)
+ return;
+
+ if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+ dev_err(dsaf_dev->dev,
+ "error: coalesce_usecs setting is not supported!\n");
+ return;
+ }
+ rcb_comm->timeout = timeout;
+ hns_rcb_set_timeout(rcb_comm, rcb_comm->timeout);
+}
+
+/**
+ *hns_rcb_set_coalesced_frames - set rcb coalesced frames
+ *@dsaf_dev: dsa fabric device
+ *@port: port index
+ *@coalesced_frames: BD num for coalesced frames
+ *Return 0 on success, negative on failure
+ */
+int hns_rcb_set_coalesced_frames(struct dsaf_device *dsaf_dev,
+ int port, u32 coalesced_frames)
+{
+ int comm_index = hns_dsaf_get_comm_idx_by_port(port);
+ struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
+ u32 coalesced_reg_val;
+ int ret;
+
+ coalesced_reg_val = hns_rcb_get_port_coalesced_frames(rcb_comm, port);
+
+ if (coalesced_reg_val == coalesced_frames)
+ return 0;
+
+ if (coalesced_frames >= HNS_RCB_MIN_COALESCED_FRAMES) {
+ ret = hns_rcb_set_port_coalesced_frames(rcb_comm, port,
+ coalesced_frames);
+ return ret;
+ } else {
+ return -EINVAL;
+ }
+}
+
+/**
+ *hns_rcb_get_queue_mode - get max VM number and max ring number per VM
+ * according to dsaf mode
+ *@dsaf_mode: dsaf mode
+ *@comm_index: rcb common device index
+ *@max_vfn: max vfn number
+ *@max_q_per_vf: max ring number per vm
+ */
+void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index,
+ u16 *max_vfn, u16 *max_q_per_vf)
+{
+ if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+ switch (dsaf_mode) {
+ case DSAF_MODE_DISABLE_6PORT_0VM:
+ *max_vfn = 1;
+ *max_q_per_vf = 16;
+ break;
+ case DSAF_MODE_DISABLE_FIX:
+ *max_vfn = 1;
+ *max_q_per_vf = 1;
+ break;
+ case DSAF_MODE_DISABLE_2PORT_64VM:
+ *max_vfn = 64;
+ *max_q_per_vf = 1;
+ break;
+ case DSAF_MODE_DISABLE_6PORT_16VM:
+ *max_vfn = 16;
+ *max_q_per_vf = 1;
+ break;
+ default:
+ *max_vfn = 1;
+ *max_q_per_vf = 16;
+ break;
+ }
+ } else {
+ *max_vfn = 1;
+ *max_q_per_vf = 1;
+ }
+}
+
+int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev, int comm_index)
+{
+ if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+ switch (dsaf_dev->dsaf_mode) {
+ case DSAF_MODE_ENABLE_FIX:
+ return 1;
+
+ case DSAF_MODE_DISABLE_FIX:
+ return 6;
+
+ case DSAF_MODE_ENABLE_0VM:
+ return 32;
+
+ case DSAF_MODE_DISABLE_6PORT_0VM:
+ case DSAF_MODE_ENABLE_16VM:
+ case DSAF_MODE_DISABLE_6PORT_2VM:
+ case DSAF_MODE_DISABLE_6PORT_16VM:
+ case DSAF_MODE_DISABLE_6PORT_4VM:
+ case DSAF_MODE_ENABLE_8VM:
+ return 96;
+
+ case DSAF_MODE_DISABLE_2PORT_16VM:
+ case DSAF_MODE_DISABLE_2PORT_8VM:
+ case DSAF_MODE_ENABLE_32VM:
+ case DSAF_MODE_DISABLE_2PORT_64VM:
+ case DSAF_MODE_ENABLE_128VM:
+ return 128;
+
+ default:
+ dev_warn(dsaf_dev->dev,
+ "get ring num fail, use default! dsaf_mode=%d\n",
+ dsaf_dev->dsaf_mode);
+ return 128;
+ }
+ } else {
+ return 1;
+ }
+}
+
+void __iomem *hns_rcb_common_get_vaddr(struct dsaf_device *dsaf_dev,
+ int comm_index)
+{
+ void __iomem *base_addr;
+
+ if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+ base_addr = dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET;
+ else
+ base_addr = dsaf_dev->sds_base
+ + (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET
+ + RCB_COMMON_REG_OFFSET;
+
+ return base_addr;
+}
+
+static phys_addr_t hns_rcb_common_get_paddr(struct dsaf_device *dsaf_dev,
+ int comm_index)
+{
+ struct device_node *np = dsaf_dev->dev->of_node;
+ phys_addr_t phy_addr;
+ const __be32 *tmp_addr;
+ u64 addr_offset = 0;
+ u64 size = 0;
+ int index = 0;
+
+ if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+ index = 2;
+ addr_offset = RCB_COMMON_REG_OFFSET;
+ } else {
+ index = 1;
+ addr_offset = (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET +
+ RCB_COMMON_REG_OFFSET;
+ }
+ tmp_addr = of_get_address(np, index, &size, NULL);
+ phy_addr = of_translate_address(np, tmp_addr);
+ return phy_addr + addr_offset;
+}
+
+int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
+ int comm_index)
+{
+ struct rcb_common_cb *rcb_common;
+ enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
+ u16 max_vfn;
+ u16 max_q_per_vf;
+ int ring_num = hns_rcb_get_ring_num(dsaf_dev, comm_index);
+
+ rcb_common =
+ devm_kzalloc(dsaf_dev->dev, sizeof(*rcb_common) +
+ ring_num * sizeof(struct ring_pair_cb), GFP_KERNEL);
+ if (!rcb_common) {
+ dev_err(dsaf_dev->dev, "rcb common devm_kzalloc fail!\n");
+ return -ENOMEM;
+ }
+ rcb_common->comm_index = comm_index;
+ rcb_common->ring_num = ring_num;
+ rcb_common->dsaf_dev = dsaf_dev;
+
+ rcb_common->desc_num = dsaf_dev->desc_num;
+ rcb_common->coalesced_frames = HNS_RCB_DEF_COALESCED_FRAMES;
+ rcb_common->timeout = HNS_RCB_MAX_TIME_OUT;
+
+ hns_rcb_get_queue_mode(dsaf_mode, comm_index, &max_vfn, &max_q_per_vf);
+ rcb_common->max_vfn = max_vfn;
+ rcb_common->max_q_per_vf = max_q_per_vf;
+
+ rcb_common->io_base = hns_rcb_common_get_vaddr(dsaf_dev, comm_index);
+ rcb_common->phy_base = hns_rcb_common_get_paddr(dsaf_dev, comm_index);
+
+ dsaf_dev->rcb_common[comm_index] = rcb_common;
+ return 0;
+}
+
+void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev,
+ u32 comm_index)
+{
+ dsaf_dev->rcb_common[comm_index] = NULL;
+}
+
+void hns_rcb_update_stats(struct hnae_queue *queue)
+{
+ struct ring_pair_cb *ring =
+ container_of(queue, struct ring_pair_cb, q);
+ struct dsaf_device *dsaf_dev = ring->rcb_common->dsaf_dev;
+ struct ppe_common_cb *ppe_common
+ = dsaf_dev->ppe_common[ring->rcb_common->comm_index];
+ struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;
+
+ hw_stats->rx_pkts += dsaf_read_dev(queue,
+ RCB_RING_RX_RING_PKTNUM_RECORD_REG);
+ dsaf_write_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG, 0x1);
+
+ hw_stats->ppe_rx_ok_pkts += dsaf_read_dev(ppe_common,
+ PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG + 4 * ring->index);
+ hw_stats->ppe_rx_drop_pkts += dsaf_read_dev(ppe_common,
+ PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG + 4 * ring->index);
+
+ hw_stats->tx_pkts += dsaf_read_dev(queue,
+ RCB_RING_TX_RING_PKTNUM_RECORD_REG);
+ dsaf_write_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG, 0x1);
+
+ hw_stats->ppe_tx_ok_pkts += dsaf_read_dev(ppe_common,
+ PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG + 4 * ring->index);
+ hw_stats->ppe_tx_drop_pkts += dsaf_read_dev(ppe_common,
+ PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG + 4 * ring->index);
+}
+
+/**
+ *hns_rcb_get_stats - get rcb statistics
+ *@queue: hnae queue of the ring
+ *@data: statistic values
+ */
+void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)
+{
+ u64 *regs_buff = data;
+ struct ring_pair_cb *ring =
+ container_of(queue, struct ring_pair_cb, q);
+ struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;
+
+ regs_buff[0] = hw_stats->tx_pkts;
+ regs_buff[1] = hw_stats->ppe_tx_ok_pkts;
+ regs_buff[2] = hw_stats->ppe_tx_drop_pkts;
+ regs_buff[3] =
+ dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);
+
+ regs_buff[4] = queue->tx_ring.stats.tx_pkts;
+ regs_buff[5] = queue->tx_ring.stats.tx_bytes;
+ regs_buff[6] = queue->tx_ring.stats.tx_err_cnt;
+ regs_buff[7] = queue->tx_ring.stats.io_err_cnt;
+ regs_buff[8] = queue->tx_ring.stats.sw_err_cnt;
+ regs_buff[9] = queue->tx_ring.stats.seg_pkt_cnt;
+ regs_buff[10] = queue->tx_ring.stats.restart_queue;
+ regs_buff[11] = queue->tx_ring.stats.tx_busy;
+
+ regs_buff[12] = hw_stats->rx_pkts;
+ regs_buff[13] = hw_stats->ppe_rx_ok_pkts;
+ regs_buff[14] = hw_stats->ppe_rx_drop_pkts;
+ regs_buff[15] =
+ dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);
+
+ regs_buff[16] = queue->rx_ring.stats.rx_pkts;
+ regs_buff[17] = queue->rx_ring.stats.rx_bytes;
+ regs_buff[18] = queue->rx_ring.stats.rx_err_cnt;
+ regs_buff[19] = queue->rx_ring.stats.io_err_cnt;
+ regs_buff[20] = queue->rx_ring.stats.sw_err_cnt;
+ regs_buff[21] = queue->rx_ring.stats.seg_pkt_cnt;
+ regs_buff[22] = queue->rx_ring.stats.reuse_pg_cnt;
+ regs_buff[23] = queue->rx_ring.stats.err_pkt_len;
+ regs_buff[24] = queue->rx_ring.stats.non_vld_descs;
+ regs_buff[25] = queue->rx_ring.stats.err_bd_num;
+ regs_buff[26] = queue->rx_ring.stats.l2_err;
+ regs_buff[27] = queue->rx_ring.stats.l3l4_csum_err;
+}
+
+/**
+ *hns_rcb_get_ring_sset_count - rcb string set count
+ *@stringset:ethtool cmd
+ *return rcb ring string set count
+ */
+int hns_rcb_get_ring_sset_count(int stringset)
+{
+ if (stringset == ETH_SS_STATS)
+ return HNS_RING_STATIC_REG_NUM;
+
+ return 0;
+}
+
+/**
+ *hns_rcb_get_common_regs_count - rcb common regs count
+ *return regs count
+ */
+int hns_rcb_get_common_regs_count(void)
+{
+ return HNS_RCB_COMMON_DUMP_REG_NUM;
+}
+
+/**
+ *hns_rcb_get_ring_regs_count - rcb ring regs count
+ *return regs count
+ */
+int hns_rcb_get_ring_regs_count(void)
+{
+ return HNS_RCB_RING_DUMP_REG_NUM;
+}
+
+/**
+ *hns_rcb_get_strings - get rcb string set
+ *@stringset:string set index
+ *@data:strings name value
+ *@index:queue index
+ */
+void hns_rcb_get_strings(int stringset, u8 *data, int index)
+{
+ char *buff = (char *)data;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_rcb_pkt_num", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_tx_pkt_num", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_drop_pkt_num", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_fbd_num", index);
+ buff = buff + ETH_GSTRING_LEN;
+
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_pkt_num", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_bytes", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_err_cnt", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_io_err", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_sw_err", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_seg_pkt", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_restart_queue", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_tx_busy", index);
+ buff = buff + ETH_GSTRING_LEN;
+
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_rcb_pkt_num", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_pkt_num", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_drop_pkt_num", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_fbd_num", index);
+ buff = buff + ETH_GSTRING_LEN;
+
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_pkt_num", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bytes", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_err_cnt", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_io_err", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_sw_err", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_seg_pkt", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_reuse_pg", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_len_err", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_non_vld_desc_err", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bd_num_err", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l2_err", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l3l4csum_err", index);
+}
+
+void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
+{
+ u32 *regs = data;
+ u32 i = 0;
+
+ /*rcb common registers */
+ regs[0] = dsaf_read_dev(rcb_com, RCB_COM_CFG_ENDIAN_REG);
+ regs[1] = dsaf_read_dev(rcb_com, RCB_COM_CFG_SYS_FSH_REG);
+ regs[2] = dsaf_read_dev(rcb_com, RCB_COM_CFG_INIT_FLAG_REG);
+
+ regs[3] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_REG);
+ regs[4] = dsaf_read_dev(rcb_com, RCB_COM_CFG_RINVLD_REG);
+ regs[5] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FNA_REG);
+ regs[6] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FA_REG);
+ regs[7] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_TC_BP_REG);
+ regs[8] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PPE_TNL_CLKEN_REG);
+
+ regs[9] = dsaf_read_dev(rcb_com, RCB_COM_INTMSK_TX_PKT_REG);
+ regs[10] = dsaf_read_dev(rcb_com, RCB_COM_RINT_TX_PKT_REG);
+ regs[11] = dsaf_read_dev(rcb_com, RCB_COM_INTMASK_ECC_ERR_REG);
+ regs[12] = dsaf_read_dev(rcb_com, RCB_COM_INTSTS_ECC_ERR_REG);
+ regs[13] = dsaf_read_dev(rcb_com, RCB_COM_EBD_SRAM_ERR_REG);
+ regs[14] = dsaf_read_dev(rcb_com, RCB_COM_RXRING_ERR_REG);
+ regs[15] = dsaf_read_dev(rcb_com, RCB_COM_TXRING_ERR_REG);
+ regs[16] = dsaf_read_dev(rcb_com, RCB_COM_TX_FBD_ERR_REG);
+ regs[17] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK_EN_REG);
+ regs[18] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK0_REG);
+ regs[19] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK1_REG);
+ regs[20] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK2_REG);
+ regs[21] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK3_REG);
+ regs[22] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK4_REG);
+ regs[23] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK5_REG);
+ regs[24] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR0_REG);
+ regs[25] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR3_REG);
+ regs[26] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR4_REG);
+ regs[27] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR5_REG);
+
+ regs[28] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_RING);
+ regs[29] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING_STS);
+ regs[30] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING);
+ regs[31] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_BD);
+ regs[32] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_BD_RINT_STS);
+ regs[33] = dsaf_read_dev(rcb_com, RCB_COM_RCB_RD_BD_BUSY);
+ regs[34] = dsaf_read_dev(rcb_com, RCB_COM_RCB_FBD_CRT_EN);
+ regs[35] = dsaf_read_dev(rcb_com, RCB_COM_AXI_WR_ERR_INTMASK);
+ regs[36] = dsaf_read_dev(rcb_com, RCB_COM_AXI_ERR_STS);
+ regs[37] = dsaf_read_dev(rcb_com, RCB_COM_CHK_TX_FBD_NUM_REG);
+
+ /* rcb common entry registers */
+ for (i = 0; i < 16; i++) { /* total 16 model registers */
+ regs[38 + i]
+ = dsaf_read_dev(rcb_com, RCB_CFG_BD_NUM_REG + 4 * i);
+ regs[54 + i]
+ = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i);
+ }
+
+ regs[70] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_REG);
+ regs[71] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
+ regs[72] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);
+
+ /* mark end of rcb common regs */
+ for (i = 73; i < 80; i++)
+ regs[i] = 0xcccccccc;
+}
+
+void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data)
+{
+ u32 *regs = data;
+ struct ring_pair_cb *ring_pair
+ = container_of(queue, struct ring_pair_cb, q);
+ u32 i = 0;
+
+ /*rcb ring registers */
+ regs[0] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_L_REG);
+ regs[1] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_H_REG);
+ regs[2] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_NUM_REG);
+ regs[3] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_LEN_REG);
+ regs[4] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTLINE_REG);
+ regs[5] = dsaf_read_dev(queue, RCB_RING_RX_RING_TAIL_REG);
+ regs[6] = dsaf_read_dev(queue, RCB_RING_RX_RING_HEAD_REG);
+ regs[7] = dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);
+ regs[8] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG);
+
+ regs[9] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_L_REG);
+ regs[10] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_H_REG);
+ regs[11] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_NUM_REG);
+ regs[12] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_LEN_REG);
+ regs[13] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTLINE_REG);
+ regs[15] = dsaf_read_dev(queue, RCB_RING_TX_RING_TAIL_REG);
+ regs[16] = dsaf_read_dev(queue, RCB_RING_TX_RING_HEAD_REG);
+ regs[17] = dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);
+ regs[18] = dsaf_read_dev(queue, RCB_RING_TX_RING_OFFSET_REG);
+ regs[19] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG);
+
+ regs[20] = dsaf_read_dev(queue, RCB_RING_PREFETCH_EN_REG);
+ regs[21] = dsaf_read_dev(queue, RCB_RING_CFG_VF_NUM_REG);
+ regs[22] = dsaf_read_dev(queue, RCB_RING_ASID_REG);
+ regs[23] = dsaf_read_dev(queue, RCB_RING_RX_VM_REG);
+ regs[24] = dsaf_read_dev(queue, RCB_RING_T0_BE_RST);
+ regs[25] = dsaf_read_dev(queue, RCB_RING_COULD_BE_RST);
+ regs[26] = dsaf_read_dev(queue, RCB_RING_WRR_WEIGHT_REG);
+
+ regs[27] = dsaf_read_dev(queue, RCB_RING_INTMSK_RXWL_REG);
+ regs[28] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_RING_REG);
+ regs[29] = dsaf_read_dev(queue, RCB_RING_INTMSK_TXWL_REG);
+ regs[30] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_RING_REG);
+ regs[31] = dsaf_read_dev(queue, RCB_RING_INTMSK_RX_OVERTIME_REG);
+ regs[32] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_OVERTIME_REG);
+ regs[33] = dsaf_read_dev(queue, RCB_RING_INTMSK_TX_OVERTIME_REG);
+ regs[34] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_OVERTIME_REG);
+
+ /* mark end of ring regs */
+ for (i = 35; i < 40; i++)
+ regs[i] = 0xcccccc00 + ring_pair->index;
+}
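
hns_rcb_ring_init() above programs a ring's 64-bit descriptor DMA base as two 32-bit register writes, computing the high half as (u32)((dma >> 31) >> 1) so the shift stays defined even when dma_addr_t is only 32 bits wide (a plain dma >> 32 would be undefined behaviour in that configuration). A small standalone sketch of the split and its reconstruction, using a hypothetical address value:

/* Illustrative sketch (not part of the patch) of how hns_rcb_ring_init()
 * splits a descriptor ring's DMA base across the BASEADDR_L/H registers.
 */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint64_t dma = 0x000000012345f000ULL;	/* hypothetical ring base */
	uint32_t lo = (uint32_t)dma;
	uint32_t hi = (uint32_t)((dma >> 31) >> 1);	/* defined even for 32-bit dma_addr_t */
	uint64_t rebuilt = ((uint64_t)hi << 32) | lo;

	printf("lo=0x%08x hi=0x%08x rebuilt=0x%016" PRIx64 "\n", lo, hi, rebuilt);
	return rebuilt == dma ? 0 : 1;
}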
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
new file mode 100644
index 000000000000..3a2afe2dd8bb
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _HNS_DSAF_RCB_H
+#define _HNS_DSAF_RCB_H
+
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+
+#include "hnae.h"
+#include "hns_dsaf_main.h"
+
+struct rcb_common_cb;
+
+#define HNS_RCB_IRQ_NUM_PER_QUEUE 2
+#define HNS_RCB_IRQ_IDX_TX 0
+#define HNS_RCB_IRQ_IDX_RX 1
+#define HNS_RCB_TX_REG_OFFSET 0x40
+
+#define HNS_RCB_SERVICE_NW_ENGINE_NUM DSAF_COMM_CHN
+#define HNS_RCB_DEBUG_NW_ENGINE_NUM 1
+#define HNS_RCB_RING_MAX_BD_PER_PKT 3
+#define HNS_RCB_MAX_PKT_SIZE MAC_MAX_MTU
+
+#define HNS_RCB_RING_MAX_PENDING_BD 1024
+#define HNS_RCB_RING_MIN_PENDING_BD 16
+
+#define HNS_RCB_REG_OFFSET 0x10000
+
+#define HNS_RCB_MAX_COALESCED_FRAMES 1023
+#define HNS_RCB_MIN_COALESCED_FRAMES 1
+#define HNS_RCB_DEF_COALESCED_FRAMES 50
+#define HNS_RCB_MAX_TIME_OUT 0x500
+
+#define HNS_RCB_COMMON_ENDIAN 1
+
+#define HNS_BD_SIZE_512_TYPE 0
+#define HNS_BD_SIZE_1024_TYPE 1
+#define HNS_BD_SIZE_2048_TYPE 2
+#define HNS_BD_SIZE_4096_TYPE 3
+
+#define HNS_RCB_COMMON_DUMP_REG_NUM 80
+#define HNS_RCB_RING_DUMP_REG_NUM 40
+#define HNS_RING_STATIC_REG_NUM 28
+
+#define HNS_DUMP_REG_NUM 500
+#define HNS_STATIC_REG_NUM 12
+
+enum rcb_int_flag {
+ RCB_INT_FLAG_TX = 0x1,
+ RCB_INT_FLAG_RX = (0x1 << 1),
+ RCB_INT_FLAG_MAX = (0x1 << 2), /*must be the last element */
+};
+
+struct hns_ring_hw_stats {
+ u64 tx_pkts;
+ u64 ppe_tx_ok_pkts;
+ u64 ppe_tx_drop_pkts;
+ u64 rx_pkts;
+ u64 ppe_rx_ok_pkts;
+ u64 ppe_rx_drop_pkts;
+};
+
+struct ring_pair_cb {
+ struct rcb_common_cb *rcb_common; /* ring belongs to */
+ struct device *dev; /*device for DMA mapping */
+ struct hnae_queue q;
+
+ u16 index; /* global index in a rcb common device */
+ u16 buf_size;
+
+ int virq[HNS_RCB_IRQ_NUM_PER_QUEUE];
+
+ u8 port_id_in_dsa;
+ u8 used_by_vf;
+
+ struct hns_ring_hw_stats hw_stats;
+};
+
+struct rcb_common_cb {
+ u8 __iomem *io_base;
+ phys_addr_t phy_base;
+ struct dsaf_device *dsaf_dev;
+ u16 max_vfn;
+ u16 max_q_per_vf;
+
+ u8 comm_index;
+ u32 ring_num;
+ u32 coalesced_frames; /* frames threshold of rx interrupt */
+ u32 timeout; /* time threshold of rx interrupt */
+ u32 desc_num; /* desc num per queue*/
+
+ struct ring_pair_cb ring_pair_cb[0];
+};
+
+int hns_rcb_buf_size2type(u32 buf_size);
+
+int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index);
+void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index);
+int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common);
+void hns_rcb_start(struct hnae_queue *q, u32 val);
+void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common);
+void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common);
+void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index,
+ u16 *max_vfn, u16 *max_q_per_vf);
+
+void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val);
+void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag);
+void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 enable);
+void hns_rcb_init_hw(struct ring_pair_cb *ring);
+void hns_rcb_reset_ring_hw(struct hnae_queue *q);
+void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
+
+u32 hns_rcb_get_coalesced_frames(struct dsaf_device *dsaf_dev, int comm_index);
+u32 hns_rcb_get_coalesce_usecs(struct dsaf_device *dsaf_dev, int comm_index);
+void hns_rcb_set_coalesce_usecs(struct dsaf_device *dsaf_dev,
+ int comm_index, u32 timeout);
+int hns_rcb_set_coalesced_frames(struct dsaf_device *dsaf_dev,
+ int comm_index, u32 coalesce_frames);
+void hns_rcb_update_stats(struct hnae_queue *queue);
+
+void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data);
+
+void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_common, void *data);
+
+int hns_rcb_get_ring_sset_count(int stringset);
+int hns_rcb_get_common_regs_count(void);
+int hns_rcb_get_ring_regs_count(void);
+
+void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data);
+
+void hns_rcb_get_strings(int stringset, u8 *data, int index);
+#endif /* _HNS_DSAF_RCB_H */
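
The coalescing limits in this header bound what hns_rcb_set_coalesced_frames() will accept: the new rx-frames value must be at least HNS_RCB_MIN_COALESCED_FRAMES and must stay below both the queue's descriptor count and HNS_RCB_MAX_COALESCED_FRAMES before it is written to RCB_CFG_PKTLINE_REG. The standalone sketch below mirrors that range check; desc_num and the test values are hypothetical:

/* Standalone sketch of the range check applied to a coalesced-frames
 * value (see hns_rcb_set_port_coalesced_frames() in hns_dsaf_rcb.c).
 * Limits are taken from this header; desc_num is hypothetical.
 */
#include <stdio.h>

#define HNS_RCB_MAX_COALESCED_FRAMES	1023
#define HNS_RCB_MIN_COALESCED_FRAMES	1

static int coalesced_frames_valid(unsigned int frames, unsigned int desc_num)
{
	if (frames < HNS_RCB_MIN_COALESCED_FRAMES)
		return 0;
	if (frames >= desc_num || frames > HNS_RCB_MAX_COALESCED_FRAMES)
		return 0;
	return 1;
}

int main(void)
{
	printf("%d %d %d\n",
	       coalesced_frames_valid(50, 1024),	/* default value, ok */
	       coalesced_frames_valid(0, 1024),		/* below minimum */
	       coalesced_frames_valid(2048, 1024));	/* above both limits */
	return 0;
}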
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
new file mode 100644
index 000000000000..b475e1bf2e6f
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -0,0 +1,972 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _DSAF_REG_H_
+#define _DSAF_REG_H_
+
+#define HNS_GE_FIFO_ERR_INTNUM 8
+#define HNS_XGE_ERR_INTNUM 6
+#define HNS_RCB_COMM_ERR_INTNUM 12
+#define HNS_PPE_TNL_ERR_INTNUM 8
+#define HNS_DSAF_EVENT_INTNUM 21
+#define HNS_DEBUG_RING_INTNUM 4
+#define HNS_SERVICE_RING_INTNUM 256
+
+#define HNS_DEBUG_RING_IRQ_IDX (HNS_GE_FIFO_ERR_INTNUM + HNS_XGE_ERR_INTNUM +\
+ HNS_RCB_COMM_ERR_INTNUM + HNS_PPE_TNL_ERR_INTNUM +\
+ HNS_DSAF_EVENT_INTNUM)
+#define HNS_SERVICE_RING_IRQ_IDX (HNS_DEBUG_RING_IRQ_IDX +\
+ HNS_DEBUG_RING_INTNUM)
+
+#define DSAF_IRQ_NUM 18
+
+#define DSAF_MAX_PORT_NUM_PER_CHIP 8
+#define DSAF_SERVICE_PORT_NUM_PER_DSAF 6
+#define DSAF_MAX_VM_NUM 128
+
+#define DSAF_COMM_DEV_NUM 3
+#define DSAF_PPE_INODE_BASE 6
+#define HNS_DSAF_COMM_SERVICE_NW_IDX 0
+#define DSAF_DEBUG_NW_NUM 2
+#define DSAF_SERVICE_NW_NUM 6
+#define DSAF_COMM_CHN DSAF_SERVICE_NW_NUM
+#define DSAF_GE_NUM ((DSAF_SERVICE_NW_NUM) + (DSAF_DEBUG_NW_NUM))
+#define DSAF_PORT_NUM ((DSAF_SERVICE_NW_NUM) + (DSAF_DEBUG_NW_NUM))
+#define DSAF_XGE_NUM DSAF_SERVICE_NW_NUM
+#define DSAF_NODE_NUM 18
+#define DSAF_XOD_BIG_NUM DSAF_NODE_NUM
+#define DSAF_SBM_NUM DSAF_NODE_NUM
+#define DSAF_VOQ_NUM DSAF_NODE_NUM
+#define DSAF_INODE_NUM DSAF_NODE_NUM
+#define DSAF_XOD_NUM 8
+#define DSAF_TBL_NUM 8
+#define DSAF_SW_PORT_NUM 8
+#define DSAF_TOTAL_QUEUE_NUM 129
+
+#define DSAF_TCAM_SUM 512
+#define DSAF_LINE_SUM (2048 * 14)
+
+#define DSAF_SUB_SC_NT_SRAM_CLK_SEL_REG 0x100
+#define DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG 0x180
+#define DSAF_SUB_SC_HILINK3_CRG_CTRL1_REG 0x184
+#define DSAF_SUB_SC_HILINK3_CRG_CTRL2_REG 0x188
+#define DSAF_SUB_SC_HILINK3_CRG_CTRL3_REG 0x18C
+#define DSAF_SUB_SC_HILINK4_CRG_CTRL0_REG 0x190
+#define DSAF_SUB_SC_HILINK4_CRG_CTRL1_REG 0x194
+#define DSAF_SUB_SC_DSAF_CLK_EN_REG 0x300
+#define DSAF_SUB_SC_DSAF_CLK_DIS_REG 0x304
+#define DSAF_SUB_SC_NT_CLK_EN_REG 0x308
+#define DSAF_SUB_SC_NT_CLK_DIS_REG 0x30C
+#define DSAF_SUB_SC_XGE_CLK_EN_REG 0x310
+#define DSAF_SUB_SC_XGE_CLK_DIS_REG 0x314
+#define DSAF_SUB_SC_GE_CLK_EN_REG 0x318
+#define DSAF_SUB_SC_GE_CLK_DIS_REG 0x31C
+#define DSAF_SUB_SC_PPE_CLK_EN_REG 0x320
+#define DSAF_SUB_SC_PPE_CLK_DIS_REG 0x324
+#define DSAF_SUB_SC_RCB_PPE_COM_CLK_EN_REG 0x350
+#define DSAF_SUB_SC_RCB_PPE_COM_CLK_DIS_REG 0x354
+#define DSAF_SUB_SC_XBAR_RESET_REQ_REG 0xA00
+#define DSAF_SUB_SC_XBAR_RESET_DREQ_REG 0xA04
+#define DSAF_SUB_SC_NT_RESET_REQ_REG 0xA08
+#define DSAF_SUB_SC_NT_RESET_DREQ_REG 0xA0C
+#define DSAF_SUB_SC_XGE_RESET_REQ_REG 0xA10
+#define DSAF_SUB_SC_XGE_RESET_DREQ_REG 0xA14
+#define DSAF_SUB_SC_GE_RESET_REQ0_REG 0xA18
+#define DSAF_SUB_SC_GE_RESET_DREQ0_REG 0xA1C
+#define DSAF_SUB_SC_GE_RESET_REQ1_REG 0xA20
+#define DSAF_SUB_SC_GE_RESET_DREQ1_REG 0xA24
+#define DSAF_SUB_SC_PPE_RESET_REQ_REG 0xA48
+#define DSAF_SUB_SC_PPE_RESET_DREQ_REG 0xA4C
+#define DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG 0xA88
+#define DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG 0xA8C
+#define DSAF_SUB_SC_LIGHT_MODULE_DETECT_EN_REG 0x2060
+#define DSAF_SUB_SC_TCAM_MBIST_EN_REG 0x2300
+#define DSAF_SUB_SC_DSAF_CLK_ST_REG 0x5300
+#define DSAF_SUB_SC_NT_CLK_ST_REG 0x5304
+#define DSAF_SUB_SC_XGE_CLK_ST_REG 0x5308
+#define DSAF_SUB_SC_GE_CLK_ST_REG 0x530C
+#define DSAF_SUB_SC_PPE_CLK_ST_REG 0x5310
+#define DSAF_SUB_SC_ROCEE_CLK_ST_REG 0x5314
+#define DSAF_SUB_SC_CPU_CLK_ST_REG 0x5318
+#define DSAF_SUB_SC_RCB_PPE_COM_CLK_ST_REG 0x5328
+#define DSAF_SUB_SC_XBAR_RESET_ST_REG 0x5A00
+#define DSAF_SUB_SC_NT_RESET_ST_REG 0x5A04
+#define DSAF_SUB_SC_XGE_RESET_ST_REG 0x5A08
+#define DSAF_SUB_SC_GE_RESET_ST0_REG 0x5A0C
+#define DSAF_SUB_SC_GE_RESET_ST1_REG 0x5A10
+#define DSAF_SUB_SC_PPE_RESET_ST_REG 0x5A24
+#define DSAF_SUB_SC_RCB_PPE_COM_RESET_ST_REG 0x5A44
+
+/* serdes offset */
+#define HNS_MAC_HILINK3_REG DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG
+#define HNS_MAC_HILINK4_REG DSAF_SUB_SC_HILINK4_CRG_CTRL0_REG
+#define HNS_MAC_LANE0_CTLEDFE_REG 0x000BFFCCULL
+#define HNS_MAC_LANE1_CTLEDFE_REG 0x000BFFBCULL
+#define HNS_MAC_LANE2_CTLEDFE_REG 0x000BFFACULL
+#define HNS_MAC_LANE3_CTLEDFE_REG 0x000BFF9CULL
+#define HNS_MAC_LANE0_STATE_REG 0x000BFFD4ULL
+#define HNS_MAC_LANE1_STATE_REG 0x000BFFC4ULL
+#define HNS_MAC_LANE2_STATE_REG 0x000BFFB4ULL
+#define HNS_MAC_LANE3_STATE_REG 0x000BFFA4ULL
+
+#define HILINK_RESET_TIMOUT 10000
+
+#define DSAF_SRAM_INIT_OVER_0_REG 0x0
+#define DSAF_CFG_0_REG 0x4
+#define DSAF_ECC_ERR_INVERT_0_REG 0x8
+#define DSAF_ABNORMAL_TIMEOUT_0_REG 0x1C
+#define DSAF_FSM_TIMEOUT_0_REG 0x20
+#define DSAF_DSA_REG_CNT_CLR_CE_REG 0x2C
+#define DSAF_DSA_SBM_INF_FIFO_THRD_REG 0x30
+#define DSAF_DSA_SRAM_1BIT_ECC_SEL_REG 0x34
+#define DSAF_DSA_SRAM_1BIT_ECC_CNT_REG 0x38
+#define DSAF_PFC_EN_0_REG 0x50
+#define DSAF_PFC_UNIT_CNT_0_REG 0x70
+#define DSAF_XGE_INT_MSK_0_REG 0x100
+#define DSAF_PPE_INT_MSK_0_REG 0x120
+#define DSAF_ROCEE_INT_MSK_0_REG 0x140
+#define DSAF_XGE_INT_SRC_0_REG 0x160
+#define DSAF_PPE_INT_SRC_0_REG 0x180
+#define DSAF_ROCEE_INT_SRC_0_REG 0x1A0
+#define DSAF_XGE_INT_STS_0_REG 0x1C0
+#define DSAF_PPE_INT_STS_0_REG 0x1E0
+#define DSAF_ROCEE_INT_STS_0_REG 0x200
+#define DSAF_PPE_QID_CFG_0_REG 0x300
+#define DSAF_SW_PORT_TYPE_0_REG 0x320
+#define DSAF_STP_PORT_TYPE_0_REG 0x340
+#define DSAF_MIX_DEF_QID_0_REG 0x360
+#define DSAF_PORT_DEF_VLAN_0_REG 0x380
+#define DSAF_VM_DEF_VLAN_0_REG 0x400
+
+#define DSAF_INODE_CUT_THROUGH_CFG_0_REG 0x1000
+#define DSAF_INODE_ECC_INVERT_EN_0_REG 0x1008
+#define DSAF_INODE_ECC_ERR_ADDR_0_REG 0x100C
+#define DSAF_INODE_IN_PORT_NUM_0_REG 0x1018
+#define DSAF_INODE_PRI_TC_CFG_0_REG 0x101C
+#define DSAF_INODE_BP_STATUS_0_REG 0x1020
+#define DSAF_INODE_PAD_DISCARD_NUM_0_REG 0x1028
+#define DSAF_INODE_FINAL_IN_MAN_NUM_0_REG 0x102C
+#define DSAF_INODE_FINAL_IN_PKT_NUM_0_REG 0x1030
+#define DSAF_INODE_SBM_PID_NUM_0_REG 0x1038
+#define DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG 0x103C
+#define DSAF_INODE_SBM_RELS_NUM_0_REG 0x104C
+#define DSAF_INODE_SBM_DROP_NUM_0_REG 0x1050
+#define DSAF_INODE_CRC_FALSE_NUM_0_REG 0x1054
+#define DSAF_INODE_BP_DISCARD_NUM_0_REG 0x1058
+#define DSAF_INODE_RSLT_DISCARD_NUM_0_REG 0x105C
+#define DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG 0x1060
+#define DSAF_INODE_VOQ_OVER_NUM_0_REG 0x1068
+#define DSAF_INODE_BD_SAVE_STATUS_0_REG 0x1900
+#define DSAF_INODE_BD_ORDER_STATUS_0_REG 0x1950
+#define DSAF_INODE_SW_VLAN_TAG_DISC_0_REG 0x1A00
+#define DSAF_INODE_IN_DATA_STP_DISC_0_REG 0x1A50
+#define DSAF_INODE_GE_FC_EN_0_REG 0x1B00
+#define DSAF_INODE_VC0_IN_PKT_NUM_0_REG 0x1B50
+#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG 0x1C00
+
+#define DSAF_SBM_CFG_REG_0_REG 0x2000
+#define DSAF_SBM_BP_CFG_0_XGE_REG_0_REG 0x2004
+#define DSAF_SBM_BP_CFG_0_PPE_REG_0_REG 0x2304
+#define DSAF_SBM_BP_CFG_0_ROCEE_REG_0_REG 0x2604
+#define DSAF_SBM_BP_CFG_1_REG_0_REG 0x2008
+#define DSAF_SBM_BP_CFG_2_XGE_REG_0_REG 0x200C
+#define DSAF_SBM_BP_CFG_2_PPE_REG_0_REG 0x230C
+#define DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x260C
+#define DSAF_SBM_FREE_CNT_0_0_REG 0x2010
+#define DSAF_SBM_FREE_CNT_1_0_REG 0x2014
+#define DSAF_SBM_BP_CNT_0_0_REG 0x2018
+#define DSAF_SBM_BP_CNT_1_0_REG 0x201C
+#define DSAF_SBM_BP_CNT_2_0_REG 0x2020
+#define DSAF_SBM_BP_CNT_3_0_REG 0x2024
+#define DSAF_SBM_INER_ST_0_REG 0x2028
+#define DSAF_SBM_MIB_REQ_FAILED_TC_0_REG 0x202C
+#define DSAF_SBM_LNK_INPORT_CNT_0_REG 0x2030
+#define DSAF_SBM_LNK_DROP_CNT_0_REG 0x2034
+#define DSAF_SBM_INF_OUTPORT_CNT_0_REG 0x2038
+#define DSAF_SBM_LNK_INPORT_TC0_CNT_0_REG 0x203C
+#define DSAF_SBM_LNK_INPORT_TC1_CNT_0_REG 0x2040
+#define DSAF_SBM_LNK_INPORT_TC2_CNT_0_REG 0x2044
+#define DSAF_SBM_LNK_INPORT_TC3_CNT_0_REG 0x2048
+#define DSAF_SBM_LNK_INPORT_TC4_CNT_0_REG 0x204C
+#define DSAF_SBM_LNK_INPORT_TC5_CNT_0_REG 0x2050
+#define DSAF_SBM_LNK_INPORT_TC6_CNT_0_REG 0x2054
+#define DSAF_SBM_LNK_INPORT_TC7_CNT_0_REG 0x2058
+#define DSAF_SBM_LNK_REQ_CNT_0_REG 0x205C
+#define DSAF_SBM_LNK_RELS_CNT_0_REG 0x2060
+#define DSAF_SBM_BP_CFG_3_REG_0_REG 0x2068
+#define DSAF_SBM_BP_CFG_4_REG_0_REG 0x206C
+
+#define DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG 0x3000
+#define DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG 0x3004
+#define DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG 0x3008
+#define DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG 0x300C
+#define DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG 0x3010
+#define DSAF_XOD_ETS_TOKEN_CFG_0_REG 0x3014
+#define DSAF_XOD_PFS_CFG_0_0_REG 0x3018
+#define DSAF_XOD_PFS_CFG_1_0_REG 0x301C
+#define DSAF_XOD_PFS_CFG_2_0_REG 0x3020
+#define DSAF_XOD_GNT_L_0_REG 0x3024
+#define DSAF_XOD_GNT_H_0_REG 0x3028
+#define DSAF_XOD_CONNECT_STATE_0_REG 0x302C
+#define DSAF_XOD_RCVPKT_CNT_0_REG 0x3030
+#define DSAF_XOD_RCVTC0_CNT_0_REG 0x3034
+#define DSAF_XOD_RCVTC1_CNT_0_REG 0x3038
+#define DSAF_XOD_RCVTC2_CNT_0_REG 0x303C
+#define DSAF_XOD_RCVTC3_CNT_0_REG 0x3040
+#define DSAF_XOD_RCVVC0_CNT_0_REG 0x3044
+#define DSAF_XOD_RCVVC1_CNT_0_REG 0x3048
+#define DSAF_XOD_XGE_RCVIN0_CNT_0_REG 0x304C
+#define DSAF_XOD_XGE_RCVIN1_CNT_0_REG 0x3050
+#define DSAF_XOD_XGE_RCVIN2_CNT_0_REG 0x3054
+#define DSAF_XOD_XGE_RCVIN3_CNT_0_REG 0x3058
+#define DSAF_XOD_XGE_RCVIN4_CNT_0_REG 0x305C
+#define DSAF_XOD_XGE_RCVIN5_CNT_0_REG 0x3060
+#define DSAF_XOD_XGE_RCVIN6_CNT_0_REG 0x3064
+#define DSAF_XOD_XGE_RCVIN7_CNT_0_REG 0x3068
+#define DSAF_XOD_PPE_RCVIN0_CNT_0_REG 0x306C
+#define DSAF_XOD_PPE_RCVIN1_CNT_0_REG 0x3070
+#define DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG 0x3074
+#define DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG 0x3078
+#define DSAF_XOD_FIFO_STATUS_0_REG 0x307C
+
+#define DSAF_VOQ_ECC_INVERT_EN_0_REG 0x4004
+#define DSAF_VOQ_SRAM_PKT_NUM_0_REG 0x4008
+#define DSAF_VOQ_IN_PKT_NUM_0_REG 0x400C
+#define DSAF_VOQ_OUT_PKT_NUM_0_REG 0x4010
+#define DSAF_VOQ_ECC_ERR_ADDR_0_REG 0x4014
+#define DSAF_VOQ_BP_STATUS_0_REG 0x4018
+#define DSAF_VOQ_SPUP_IDLE_0_REG 0x401C
+#define DSAF_VOQ_XGE_XOD_REQ_0_0_REG 0x4024
+#define DSAF_VOQ_XGE_XOD_REQ_1_0_REG 0x4028
+#define DSAF_VOQ_PPE_XOD_REQ_0_REG 0x402C
+#define DSAF_VOQ_ROCEE_XOD_REQ_0_REG 0x4030
+#define DSAF_VOQ_BP_ALL_THRD_0_REG 0x4034
+
+#define DSAF_TBL_CTRL_0_REG 0x5000
+#define DSAF_TBL_INT_MSK_0_REG 0x5004
+#define DSAF_TBL_INT_SRC_0_REG 0x5008
+#define DSAF_TBL_INT_STS_0_REG 0x5100
+#define DSAF_TBL_TCAM_ADDR_0_REG 0x500C
+#define DSAF_TBL_LINE_ADDR_0_REG 0x5010
+#define DSAF_TBL_TCAM_HIGH_0_REG 0x5014
+#define DSAF_TBL_TCAM_LOW_0_REG 0x5018
+#define DSAF_TBL_TCAM_MCAST_CFG_4_0_REG 0x501C
+#define DSAF_TBL_TCAM_MCAST_CFG_3_0_REG 0x5020
+#define DSAF_TBL_TCAM_MCAST_CFG_2_0_REG 0x5024
+#define DSAF_TBL_TCAM_MCAST_CFG_1_0_REG 0x5028
+#define DSAF_TBL_TCAM_MCAST_CFG_0_0_REG 0x502C
+#define DSAF_TBL_TCAM_UCAST_CFG_0_REG 0x5030
+#define DSAF_TBL_LIN_CFG_0_REG 0x5034
+#define DSAF_TBL_TCAM_RDATA_HIGH_0_REG 0x5038
+#define DSAF_TBL_TCAM_RDATA_LOW_0_REG 0x503C
+#define DSAF_TBL_TCAM_RAM_RDATA4_0_REG 0x5040
+#define DSAF_TBL_TCAM_RAM_RDATA3_0_REG 0x5044
+#define DSAF_TBL_TCAM_RAM_RDATA2_0_REG 0x5048
+#define DSAF_TBL_TCAM_RAM_RDATA1_0_REG 0x504C
+#define DSAF_TBL_TCAM_RAM_RDATA0_0_REG 0x5050
+#define DSAF_TBL_LIN_RDATA_0_REG 0x5054
+#define DSAF_TBL_DA0_MIS_INFO1_0_REG 0x5058
+#define DSAF_TBL_DA0_MIS_INFO0_0_REG 0x505C
+#define DSAF_TBL_SA_MIS_INFO2_0_REG 0x5104
+#define DSAF_TBL_SA_MIS_INFO1_0_REG 0x5098
+#define DSAF_TBL_SA_MIS_INFO0_0_REG 0x509C
+#define DSAF_TBL_PUL_0_REG 0x50A0
+#define DSAF_TBL_OLD_RSLT_0_REG 0x50A4
+#define DSAF_TBL_OLD_SCAN_VAL_0_REG 0x50A8
+#define DSAF_TBL_DFX_CTRL_0_REG 0x50AC
+#define DSAF_TBL_DFX_STAT_0_REG 0x50B0
+#define DSAF_TBL_DFX_STAT_2_0_REG 0x5108
+#define DSAF_TBL_LKUP_NUM_I_0_REG 0x50C0
+#define DSAF_TBL_LKUP_NUM_O_0_REG 0x50E0
+#define DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG 0x510C
+
+#define DSAF_INODE_FIFO_WL_0_REG 0x6000
+#define DSAF_ONODE_FIFO_WL_0_REG 0x6020
+#define DSAF_XGE_GE_WORK_MODE_0_REG 0x6040
+#define DSAF_XGE_APP_RX_LINK_UP_0_REG 0x6080
+#define DSAF_NETPORT_CTRL_SIG_0_REG 0x60A0
+#define DSAF_XGE_CTRL_SIG_CFG_0_REG 0x60C0
+
+#define PPE_COM_CFG_QID_MODE_REG 0x0
+#define PPE_COM_INTEN_REG 0x110
+#define PPE_COM_RINT_REG 0x114
+#define PPE_COM_INTSTS_REG 0x118
+#define PPE_COM_COMMON_CNT_CLR_CE_REG 0x1120
+#define PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG 0x300
+#define PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG 0x600
+#define PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG 0x900
+#define PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG 0xC00
+
+#define PPE_CFG_TX_FIFO_THRSLD_REG 0x0
+#define PPE_CFG_RX_FIFO_THRSLD_REG 0x4
+#define PPE_CFG_RX_FIFO_PAUSE_THRSLD_REG 0x8
+#define PPE_CFG_RX_FIFO_SW_BP_THRSLD_REG 0xC
+#define PPE_CFG_PAUSE_IDLE_CNT_REG 0x10
+#define PPE_CFG_BUS_CTRL_REG 0x40
+#define PPE_CFG_TNL_TO_BE_RST_REG 0x48
+#define PPE_CURR_TNL_CAN_RST_REG 0x4C
+#define PPE_CFG_XGE_MODE_REG 0x80
+#define PPE_CFG_MAX_FRAME_LEN_REG 0x84
+#define PPE_CFG_RX_PKT_MODE_REG 0x88
+#define PPE_CFG_RX_VLAN_TAG_REG 0x8C
+#define PPE_CFG_TAG_GEN_REG 0x90
+#define PPE_CFG_PARSE_TAG_REG 0x94
+#define PPE_CFG_PRO_CHECK_EN_REG 0x98
+#define PPE_INTEN_REG 0x100
+#define PPE_RINT_REG 0x104
+#define PPE_INTSTS_REG 0x108
+#define PPE_CFG_RX_PKT_INT_REG 0x140
+#define PPE_CFG_HEAT_DECT_TIME0_REG 0x144
+#define PPE_CFG_HEAT_DECT_TIME1_REG 0x148
+#define PPE_HIS_RX_SW_PKT_CNT_REG 0x200
+#define PPE_HIS_RX_WR_BD_OK_PKT_CNT_REG 0x204
+#define PPE_HIS_RX_PKT_NO_BUF_CNT_REG 0x208
+#define PPE_HIS_TX_BD_CNT_REG 0x20C
+#define PPE_HIS_TX_PKT_CNT_REG 0x210
+#define PPE_HIS_TX_PKT_OK_CNT_REG 0x214
+#define PPE_HIS_TX_PKT_EPT_CNT_REG 0x218
+#define PPE_HIS_TX_PKT_CS_FAIL_CNT_REG 0x21C
+#define PPE_HIS_RX_APP_BUF_FAIL_CNT_REG 0x220
+#define PPE_HIS_RX_APP_BUF_WAIT_CNT_REG 0x224
+#define PPE_HIS_RX_PKT_DROP_FUL_CNT_REG 0x228
+#define PPE_HIS_RX_PKT_DROP_PRT_CNT_REG 0x22C
+#define PPE_TNL_0_5_CNT_CLR_CE_REG 0x300
+#define PPE_CFG_AXI_DBG_REG 0x304
+#define PPE_HIS_PRO_ERR_REG 0x308
+#define PPE_HIS_TNL_FIFO_ERR_REG 0x30C
+#define PPE_CURR_CFF_DATA_NUM_REG 0x310
+#define PPE_CURR_RX_ST_REG 0x314
+#define PPE_CURR_TX_ST_REG 0x318
+#define PPE_CURR_RX_FIFO0_REG 0x31C
+#define PPE_CURR_RX_FIFO1_REG 0x320
+#define PPE_CURR_TX_FIFO0_REG 0x324
+#define PPE_CURR_TX_FIFO1_REG 0x328
+#define PPE_ECO0_REG 0x32C
+#define PPE_ECO1_REG 0x330
+#define PPE_ECO2_REG 0x334
+
+#define RCB_COM_CFG_ENDIAN_REG 0x0
+#define RCB_COM_CFG_SYS_FSH_REG 0xC
+#define RCB_COM_CFG_INIT_FLAG_REG 0x10
+#define RCB_COM_CFG_PKT_REG 0x30
+#define RCB_COM_CFG_RINVLD_REG 0x34
+#define RCB_COM_CFG_FNA_REG 0x38
+#define RCB_COM_CFG_FA_REG 0x3C
+#define RCB_COM_CFG_PKT_TC_BP_REG 0x40
+#define RCB_COM_CFG_PPE_TNL_CLKEN_REG 0x44
+
+#define RCB_COM_INTMSK_TX_PKT_REG 0x3A0
+#define RCB_COM_RINT_TX_PKT_REG 0x3A8
+#define RCB_COM_INTMASK_ECC_ERR_REG 0x400
+#define RCB_COM_INTSTS_ECC_ERR_REG 0x408
+#define RCB_COM_EBD_SRAM_ERR_REG 0x410
+#define RCB_COM_RXRING_ERR_REG 0x41C
+#define RCB_COM_TXRING_ERR_REG 0x420
+#define RCB_COM_TX_FBD_ERR_REG 0x424
+#define RCB_SRAM_ECC_CHK_EN_REG 0x428
+#define RCB_SRAM_ECC_CHK0_REG 0x42C
+#define RCB_SRAM_ECC_CHK1_REG 0x430
+#define RCB_SRAM_ECC_CHK2_REG 0x434
+#define RCB_SRAM_ECC_CHK3_REG 0x438
+#define RCB_SRAM_ECC_CHK4_REG 0x43c
+#define RCB_SRAM_ECC_CHK5_REG 0x440
+#define RCB_ECC_ERR_ADDR0_REG 0x450
+#define RCB_ECC_ERR_ADDR3_REG 0x45C
+#define RCB_ECC_ERR_ADDR4_REG 0x460
+#define RCB_ECC_ERR_ADDR5_REG 0x464
+
+#define RCB_COM_SF_CFG_INTMASK_RING 0x480
+#define RCB_COM_SF_CFG_RING_STS 0x484
+#define RCB_COM_SF_CFG_RING 0x488
+#define RCB_COM_SF_CFG_INTMASK_BD 0x48C
+#define RCB_COM_SF_CFG_BD_RINT_STS 0x470
+#define RCB_COM_RCB_RD_BD_BUSY 0x490
+#define RCB_COM_RCB_FBD_CRT_EN 0x494
+#define RCB_COM_AXI_WR_ERR_INTMASK 0x498
+#define RCB_COM_AXI_ERR_STS 0x49C
+#define RCB_COM_CHK_TX_FBD_NUM_REG 0x4a0
+
+#define RCB_CFG_BD_NUM_REG 0x9000
+#define RCB_CFG_PKTLINE_REG 0x9050
+
+#define RCB_CFG_OVERTIME_REG 0x9300
+#define RCB_CFG_PKTLINE_INT_NUM_REG 0x9304
+#define RCB_CFG_OVERTIME_INT_NUM_REG 0x9308
+
+#define RCB_RING_RX_RING_BASEADDR_L_REG 0x00000
+#define RCB_RING_RX_RING_BASEADDR_H_REG 0x00004
+#define RCB_RING_RX_RING_BD_NUM_REG 0x00008
+#define RCB_RING_RX_RING_BD_LEN_REG 0x0000C
+#define RCB_RING_RX_RING_PKTLINE_REG 0x00010
+#define RCB_RING_RX_RING_TAIL_REG 0x00018
+#define RCB_RING_RX_RING_HEAD_REG 0x0001C
+#define RCB_RING_RX_RING_FBDNUM_REG 0x00020
+#define RCB_RING_RX_RING_PKTNUM_RECORD_REG 0x0002C
+
+#define RCB_RING_TX_RING_BASEADDR_L_REG 0x00040
+#define RCB_RING_TX_RING_BASEADDR_H_REG 0x00044
+#define RCB_RING_TX_RING_BD_NUM_REG 0x00048
+#define RCB_RING_TX_RING_BD_LEN_REG 0x0004C
+#define RCB_RING_TX_RING_PKTLINE_REG 0x00050
+#define RCB_RING_TX_RING_TAIL_REG 0x00058
+#define RCB_RING_TX_RING_HEAD_REG 0x0005C
+#define RCB_RING_TX_RING_FBDNUM_REG 0x00060
+#define RCB_RING_TX_RING_OFFSET_REG 0x00064
+#define RCB_RING_TX_RING_PKTNUM_RECORD_REG 0x0006C
+
+#define RCB_RING_PREFETCH_EN_REG 0x0007C
+#define RCB_RING_CFG_VF_NUM_REG 0x00080
+#define RCB_RING_ASID_REG 0x0008C
+#define RCB_RING_RX_VM_REG 0x00090
+#define RCB_RING_T0_BE_RST 0x00094
+#define RCB_RING_COULD_BE_RST 0x00098
+#define RCB_RING_WRR_WEIGHT_REG 0x0009c
+
+#define RCB_RING_INTMSK_RXWL_REG 0x000A0
+#define RCB_RING_INTSTS_RX_RING_REG 0x000A4
+#define RCB_RING_INTMSK_TXWL_REG 0x000AC
+#define RCB_RING_INTSTS_TX_RING_REG 0x000B0
+#define RCB_RING_INTMSK_RX_OVERTIME_REG 0x000B8
+#define RCB_RING_INTSTS_RX_OVERTIME_REG 0x000BC
+#define RCB_RING_INTMSK_TX_OVERTIME_REG 0x000C4
+#define RCB_RING_INTSTS_TX_OVERTIME_REG 0x000C8
+
+#define GMAC_DUPLEX_TYPE_REG 0x0008UL
+#define GMAC_FD_FC_TYPE_REG 0x000CUL
+#define GMAC_FC_TX_TIMER_REG 0x001CUL
+#define GMAC_FD_FC_ADDR_LOW_REG 0x0020UL
+#define GMAC_FD_FC_ADDR_HIGH_REG 0x0024UL
+#define GMAC_IPG_TX_TIMER_REG 0x0030UL
+#define GMAC_PAUSE_THR_REG 0x0038UL
+#define GMAC_MAX_FRM_SIZE_REG 0x003CUL
+#define GMAC_PORT_MODE_REG 0x0040UL
+#define GMAC_PORT_EN_REG 0x0044UL
+#define GMAC_PAUSE_EN_REG 0x0048UL
+#define GMAC_SHORT_RUNTS_THR_REG 0x0050UL
+#define GMAC_AN_NEG_STATE_REG 0x0058UL
+#define GMAC_TX_LOCAL_PAGE_REG 0x005CUL
+#define GMAC_TRANSMIT_CONTROL_REG 0x0060UL
+#define GMAC_REC_FILT_CONTROL_REG 0x0064UL
+#define GMAC_PTP_CONFIG_REG 0x0074UL
+
+#define GMAC_RX_OCTETS_TOTAL_OK_REG 0x0080UL
+#define GMAC_RX_OCTETS_BAD_REG 0x0084UL
+#define GMAC_RX_UC_PKTS_REG 0x0088UL
+#define GMAC_RX_MC_PKTS_REG 0x008CUL
+#define GMAC_RX_BC_PKTS_REG 0x0090UL
+#define GMAC_RX_PKTS_64OCTETS_REG 0x0094UL
+#define GMAC_RX_PKTS_65TO127OCTETS_REG 0x0098UL
+#define GMAC_RX_PKTS_128TO255OCTETS_REG 0x009CUL
+#define GMAC_RX_PKTS_255TO511OCTETS_REG 0x00A0UL
+#define GMAC_RX_PKTS_512TO1023OCTETS_REG 0x00A4UL
+#define GMAC_RX_PKTS_1024TO1518OCTETS_REG 0x00A8UL
+#define GMAC_RX_PKTS_1519TOMAXOCTETS_REG 0x00ACUL
+#define GMAC_RX_FCS_ERRORS_REG 0x00B0UL
+#define GMAC_RX_TAGGED_REG 0x00B4UL
+#define GMAC_RX_DATA_ERR_REG 0x00B8UL
+#define GMAC_RX_ALIGN_ERRORS_REG 0x00BCUL
+#define GMAC_RX_LONG_ERRORS_REG 0x00C0UL
+#define GMAC_RX_JABBER_ERRORS_REG 0x00C4UL
+#define GMAC_RX_PAUSE_MACCTRL_FRAM_REG 0x00C8UL
+#define GMAC_RX_UNKNOWN_MACCTRL_FRAM_REG 0x00CCUL
+#define GMAC_RX_VERY_LONG_ERR_CNT_REG 0x00D0UL
+#define GMAC_RX_RUNT_ERR_CNT_REG 0x00D4UL
+#define GMAC_RX_SHORT_ERR_CNT_REG 0x00D8UL
+#define GMAC_RX_FILT_PKT_CNT_REG 0x00E8UL
+#define GMAC_RX_OCTETS_TOTAL_FILT_REG 0x00ECUL
+#define GMAC_OCTETS_TRANSMITTED_OK_REG 0x0100UL
+#define GMAC_OCTETS_TRANSMITTED_BAD_REG 0x0104UL
+#define GMAC_TX_UC_PKTS_REG 0x0108UL
+#define GMAC_TX_MC_PKTS_REG 0x010CUL
+#define GMAC_TX_BC_PKTS_REG 0x0110UL
+#define GMAC_TX_PKTS_64OCTETS_REG 0x0114UL
+#define GMAC_TX_PKTS_65TO127OCTETS_REG 0x0118UL
+#define GMAC_TX_PKTS_128TO255OCTETS_REG 0x011CUL
+#define GMAC_TX_PKTS_255TO511OCTETS_REG 0x0120UL
+#define GMAC_TX_PKTS_512TO1023OCTETS_REG 0x0124UL
+#define GMAC_TX_PKTS_1024TO1518OCTETS_REG 0x0128UL
+#define GMAC_TX_PKTS_1519TOMAXOCTETS_REG 0x012CUL
+#define GMAC_TX_EXCESSIVE_LENGTH_DROP_REG 0x014CUL
+#define GMAC_TX_UNDERRUN_REG 0x0150UL
+#define GMAC_TX_TAGGED_REG 0x0154UL
+#define GMAC_TX_CRC_ERROR_REG 0x0158UL
+#define GMAC_TX_PAUSE_FRAMES_REG 0x015CUL
+#define GAMC_RX_MAX_FRAME 0x0170UL
+#define GMAC_LINE_LOOP_BACK_REG 0x01A8UL
+#define GMAC_CF_CRC_STRIP_REG 0x01B0UL
+#define GMAC_MODE_CHANGE_EN_REG 0x01B4UL
+#define GMAC_SIXTEEN_BIT_CNTR_REG 0x01CCUL
+#define GMAC_LD_LINK_COUNTER_REG 0x01D0UL
+#define GMAC_LOOP_REG 0x01DCUL
+#define GMAC_RECV_CONTROL_REG 0x01E0UL
+#define GMAC_VLAN_CODE_REG 0x01E8UL
+#define GMAC_RX_OVERRUN_CNT_REG 0x01ECUL
+#define GMAC_RX_LENGTHFIELD_ERR_CNT_REG 0x01F4UL
+#define GMAC_RX_FAIL_COMMA_CNT_REG 0x01F8UL
+#define GMAC_STATION_ADDR_LOW_0_REG 0x0200UL
+#define GMAC_STATION_ADDR_HIGH_0_REG 0x0204UL
+#define GMAC_STATION_ADDR_LOW_1_REG 0x0208UL
+#define GMAC_STATION_ADDR_HIGH_1_REG 0x020CUL
+#define GMAC_STATION_ADDR_LOW_2_REG 0x0210UL
+#define GMAC_STATION_ADDR_HIGH_2_REG 0x0214UL
+#define GMAC_STATION_ADDR_LOW_3_REG 0x0218UL
+#define GMAC_STATION_ADDR_HIGH_3_REG 0x021CUL
+#define GMAC_STATION_ADDR_LOW_4_REG 0x0220UL
+#define GMAC_STATION_ADDR_HIGH_4_REG 0x0224UL
+#define GMAC_STATION_ADDR_LOW_5_REG 0x0228UL
+#define GMAC_STATION_ADDR_HIGH_5_REG 0x022CUL
+#define GMAC_STATION_ADDR_LOW_MSK_0_REG 0x0230UL
+#define GMAC_STATION_ADDR_HIGH_MSK_0_REG 0x0234UL
+#define GMAC_STATION_ADDR_LOW_MSK_1_REG 0x0238UL
+#define GMAC_STATION_ADDR_HIGH_MSK_1_REG 0x023CUL
+#define GMAC_MAC_SKIP_LEN_REG 0x0240UL
+#define GMAC_TX_LOOP_PKT_PRI_REG 0x0378UL
+
+#define XGMAC_INT_STATUS_REG 0x0
+#define XGMAC_INT_ENABLE_REG 0x4
+#define XGMAC_INT_SET_REG 0x8
+#define XGMAC_IERR_U_INFO_REG 0xC
+#define XGMAC_OVF_INFO_REG 0x10
+#define XGMAC_OVF_CNT_REG 0x14
+#define XGMAC_PORT_MODE_REG 0x40
+#define XGMAC_CLK_ENABLE_REG 0x44
+#define XGMAC_RESET_REG 0x48
+#define XGMAC_LINK_CONTROL_REG 0x50
+#define XGMAC_LINK_STATUS_REG 0x54
+#define XGMAC_SPARE_REG 0xC0
+#define XGMAC_SPARE_CNT_REG 0xC4
+
+#define XGMAC_MAC_ENABLE_REG 0x100
+#define XGMAC_MAC_CONTROL_REG 0x104
+#define XGMAC_MAC_IPG_REG 0x120
+#define XGMAC_MAC_MSG_CRC_EN_REG 0x124
+#define XGMAC_MAC_MSG_IMG_REG 0x128
+#define XGMAC_MAC_MSG_FC_CFG_REG 0x12C
+#define XGMAC_MAC_MSG_TC_CFG_REG 0x130
+#define XGMAC_MAC_PAD_SIZE_REG 0x134
+#define XGMAC_MAC_MIN_PKT_SIZE_REG 0x138
+#define XGMAC_MAC_MAX_PKT_SIZE_REG 0x13C
+#define XGMAC_MAC_PAUSE_CTRL_REG 0x160
+#define XGMAC_MAC_PAUSE_TIME_REG 0x164
+#define XGMAC_MAC_PAUSE_GAP_REG 0x168
+#define XGMAC_MAC_PAUSE_LOCAL_MAC_H_REG 0x16C
+#define XGMAC_MAC_PAUSE_LOCAL_MAC_L_REG 0x170
+#define XGMAC_MAC_PAUSE_PEER_MAC_H_REG 0x174
+#define XGMAC_MAC_PAUSE_PEER_MAC_L_REG 0x178
+#define XGMAC_MAC_PFC_PRI_EN_REG 0x17C
+#define XGMAC_MAC_1588_CTRL_REG 0x180
+#define XGMAC_MAC_1588_TX_PORT_DLY_REG 0x184
+#define XGMAC_MAC_1588_RX_PORT_DLY_REG 0x188
+#define XGMAC_MAC_1588_ASYM_DLY_REG 0x18C
+#define XGMAC_MAC_1588_ADJUST_CFG_REG 0x190
+#define XGMAC_MAC_Y1731_ETH_TYPE_REG 0x194
+#define XGMAC_MAC_MIB_CONTROL_REG 0x198
+#define XGMAC_MAC_WAN_RATE_ADJUST_REG 0x19C
+#define XGMAC_MAC_TX_ERR_MARK_REG 0x1A0
+#define XGMAC_MAC_TX_LF_RF_CONTROL_REG 0x1A4
+#define XGMAC_MAC_RX_LF_RF_STATUS_REG 0x1A8
+#define XGMAC_MAC_TX_RUNT_PKT_CNT_REG 0x1C0
+#define XGMAC_MAC_RX_RUNT_PKT_CNT_REG 0x1C4
+#define XGMAC_MAC_RX_PREAM_ERR_PKT_CNT_REG 0x1C8
+#define XGMAC_MAC_TX_LF_RF_TERM_PKT_CNT_REG 0x1CC
+#define XGMAC_MAC_TX_SN_MISMATCH_PKT_CNT_REG 0x1D0
+#define XGMAC_MAC_RX_ERR_MSG_CNT_REG 0x1D4
+#define XGMAC_MAC_RX_ERR_EFD_CNT_REG 0x1D8
+#define XGMAC_MAC_ERR_INFO_REG 0x1DC
+#define XGMAC_MAC_DBG_INFO_REG 0x1E0
+
+#define XGMAC_PCS_BASER_SYNC_THD_REG 0x330
+#define XGMAC_PCS_STATUS1_REG 0x404
+#define XGMAC_PCS_BASER_STATUS1_REG 0x410
+#define XGMAC_PCS_BASER_STATUS2_REG 0x414
+#define XGMAC_PCS_BASER_SEEDA_0_REG 0x420
+#define XGMAC_PCS_BASER_SEEDA_1_REG 0x424
+#define XGMAC_PCS_BASER_SEEDB_0_REG 0x428
+#define XGMAC_PCS_BASER_SEEDB_1_REG 0x42C
+#define XGMAC_PCS_BASER_TEST_CONTROL_REG 0x430
+#define XGMAC_PCS_BASER_TEST_ERR_CNT_REG 0x434
+#define XGMAC_PCS_DBG_INFO_REG 0x4C0
+#define XGMAC_PCS_DBG_INFO1_REG 0x4C4
+#define XGMAC_PCS_DBG_INFO2_REG 0x4C8
+#define XGMAC_PCS_DBG_INFO3_REG 0x4CC
+
+#define XGMAC_PMA_ENABLE_REG 0x700
+#define XGMAC_PMA_CONTROL_REG 0x704
+#define XGMAC_PMA_SIGNAL_STATUS_REG 0x708
+#define XGMAC_PMA_DBG_INFO_REG 0x70C
+#define XGMAC_PMA_FEC_ABILITY_REG 0x740
+#define XGMAC_PMA_FEC_CONTROL_REG 0x744
+#define XGMAC_PMA_FEC_CORR_BLOCK_CNT__REG 0x750
+#define XGMAC_PMA_FEC_UNCORR_BLOCK_CNT__REG 0x760
+
+#define XGMAC_TX_PKTS_FRAGMENT 0x0000
+#define XGMAC_TX_PKTS_UNDERSIZE 0x0008
+#define XGMAC_TX_PKTS_UNDERMIN 0x0010
+#define XGMAC_TX_PKTS_64OCTETS 0x0018
+#define XGMAC_TX_PKTS_65TO127OCTETS 0x0020
+#define XGMAC_TX_PKTS_128TO255OCTETS 0x0028
+#define XGMAC_TX_PKTS_256TO511OCTETS 0x0030
+#define XGMAC_TX_PKTS_512TO1023OCTETS 0x0038
+#define XGMAC_TX_PKTS_1024TO1518OCTETS 0x0040
+#define XGMAC_TX_PKTS_1519TOMAXOCTETS 0x0048
+#define XGMAC_TX_PKTS_1519TOMAXOCTETSOK 0x0050
+#define XGMAC_TX_PKTS_OVERSIZE 0x0058
+#define XGMAC_TX_PKTS_JABBER 0x0060
+#define XGMAC_TX_GOODPKTS 0x0068
+#define XGMAC_TX_GOODOCTETS 0x0070
+#define XGMAC_TX_TOTAL_PKTS 0x0078
+#define XGMAC_TX_TOTALOCTETS 0x0080
+#define XGMAC_TX_UNICASTPKTS 0x0088
+#define XGMAC_TX_MULTICASTPKTS 0x0090
+#define XGMAC_TX_BROADCASTPKTS 0x0098
+#define XGMAC_TX_PRI0PAUSEPKTS 0x00a0
+#define XGMAC_TX_PRI1PAUSEPKTS 0x00a8
+#define XGMAC_TX_PRI2PAUSEPKTS 0x00b0
+#define XGMAC_TX_PRI3PAUSEPKTS 0x00b8
+#define XGMAC_TX_PRI4PAUSEPKTS 0x00c0
+#define XGMAC_TX_PRI5PAUSEPKTS 0x00c8
+#define XGMAC_TX_PRI6PAUSEPKTS 0x00d0
+#define XGMAC_TX_PRI7PAUSEPKTS 0x00d8
+#define XGMAC_TX_MACCTRLPKTS 0x00e0
+#define XGMAC_TX_1731PKTS 0x00e8
+#define XGMAC_TX_1588PKTS 0x00f0
+#define XGMAC_RX_FROMAPPGOODPKTS 0x00f8
+#define XGMAC_RX_FROMAPPBADPKTS 0x0100
+#define XGMAC_TX_ERRALLPKTS 0x0108
+
+#define XGMAC_RX_PKTS_FRAGMENT 0x0110
+#define XGMAC_RX_PKTSUNDERSIZE 0x0118
+#define XGMAC_RX_PKTS_UNDERMIN 0x0120
+#define XGMAC_RX_PKTS_64OCTETS 0x0128
+#define XGMAC_RX_PKTS_65TO127OCTETS 0x0130
+#define XGMAC_RX_PKTS_128TO255OCTETS 0x0138
+#define XGMAC_RX_PKTS_256TO511OCTETS 0x0140
+#define XGMAC_RX_PKTS_512TO1023OCTETS 0x0148
+#define XGMAC_RX_PKTS_1024TO1518OCTETS 0x0150
+#define XGMAC_RX_PKTS_1519TOMAXOCTETS 0x0158
+#define XGMAC_RX_PKTS_1519TOMAXOCTETSOK 0x0160
+#define XGMAC_RX_PKTS_OVERSIZE 0x0168
+#define XGMAC_RX_PKTS_JABBER 0x0170
+#define XGMAC_RX_GOODPKTS 0x0178
+#define XGMAC_RX_GOODOCTETS 0x0180
+#define XGMAC_RX_TOTAL_PKTS 0x0188
+#define XGMAC_RX_TOTALOCTETS 0x0190
+#define XGMAC_RX_UNICASTPKTS 0x0198
+#define XGMAC_RX_MULTICASTPKTS 0x01a0
+#define XGMAC_RX_BROADCASTPKTS 0x01a8
+#define XGMAC_RX_PRI0PAUSEPKTS 0x01b0
+#define XGMAC_RX_PRI1PAUSEPKTS 0x01b8
+#define XGMAC_RX_PRI2PAUSEPKTS 0x01c0
+#define XGMAC_RX_PRI3PAUSEPKTS 0x01c8
+#define XGMAC_RX_PRI4PAUSEPKTS 0x01d0
+#define XGMAC_RX_PRI5PAUSEPKTS 0x01d8
+#define XGMAC_RX_PRI6PAUSEPKTS 0x01e0
+#define XGMAC_RX_PRI7PAUSEPKTS 0x01e8
+#define XGMAC_RX_MACCTRLPKTS 0x01f0
+#define XGMAC_TX_SENDAPPGOODPKTS 0x01f8
+#define XGMAC_TX_SENDAPPBADPKTS 0x0200
+#define XGMAC_RX_1731PKTS 0x0208
+#define XGMAC_RX_SYMBOLERRPKTS 0x0210
+#define XGMAC_RX_FCSERRPKTS 0x0218
+
+#define XGMAC_TRX_CORE_SRST_M 0x2080
+
+#define DSAF_CFG_EN_S 0
+#define DSAF_CFG_TC_MODE_S 1
+#define DSAF_CFG_CRC_EN_S 2
+#define DSAF_CFG_SBM_INIT_S 3
+#define DSAF_CFG_MIX_MODE_S 4
+#define DSAF_CFG_STP_MODE_S 5
+#define DSAF_CFG_LOCA_ADDR_EN_S 6
+
+#define DSAF_CNT_CLR_CE_S 0
+#define DSAF_SNAP_EN_S 1
+
+#define HNS_DSAF_PFC_UNIT_CNT_FOR_XGE 41
+#define HNS_DSAF_PFC_UNIT_CNT_FOR_GE_1000 410
+#define HNS_DSAF_PFC_UNIT_CNT_FOR_GE_2500 103
+
+#define DSAF_PFC_UNINT_CNT_M ((1ULL << 9) - 1)
+#define DSAF_PFC_UNINT_CNT_S 0
+
+#define DSAF_PPE_QID_CFG_M 0xFF
+#define DSAF_PPE_QID_CFG_S 0
+
+#define DSAF_SW_PORT_TYPE_M 3
+#define DSAF_SW_PORT_TYPE_S 0
+
+#define DSAF_STP_PORT_TYPE_M 7
+#define DSAF_STP_PORT_TYPE_S 0
+
+#define DSAF_INODE_IN_PORT_NUM_M 7
+#define DSAF_INODE_IN_PORT_NUM_S 0
+
+#define HNS_DSAF_I4TC_CFG 0x18688688
+#define HNS_DSAF_I8TC_CFG 0x18FAC688
+
+#define DSAF_SBM_CFG_SHCUT_EN_S 0
+#define DSAF_SBM_CFG_EN_S 1
+#define DSAF_SBM_CFG_MIB_EN_S 2
+#define DSAF_SBM_CFG_ECC_INVERT_EN_S 3
+
+#define DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_S 0
+#define DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_M (((1ULL << 10) - 1) << 0)
+#define DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_S 10
+#define DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_M (((1ULL << 10) - 1) << 10)
+#define DSAF_SBM_CFG0_COM_MAX_BUF_NUM_S 20
+#define DSAF_SBM_CFG0_COM_MAX_BUF_NUM_M (((1ULL << 11) - 1) << 20)
+
+#define DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_S 0
+#define DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_M (((1ULL << 10) - 1) << 0)
+#define DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_S 10
+#define DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_M (((1ULL << 10) - 1) << 10)
+
+#define DSAF_SBM_CFG2_SET_BUF_NUM_S 0
+#define DSAF_SBM_CFG2_SET_BUF_NUM_M (((1ULL << 10) - 1) << 0)
+#define DSAF_SBM_CFG2_RESET_BUF_NUM_S 10
+#define DSAF_SBM_CFG2_RESET_BUF_NUM_M (((1ULL << 10) - 1) << 10)
+
+#define DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_S 0
+#define DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_M (((1ULL << 10) - 1) << 0)
+#define DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S 10
+#define DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M (((1ULL << 10) - 1) << 10)
+
+#define DSAF_TBL_TCAM_ADDR_S 0
+#define DSAF_TBL_TCAM_ADDR_M ((1ULL << 9) - 1)
+
+#define DSAF_TBL_LINE_ADDR_S 0
+#define DSAF_TBL_LINE_ADDR_M ((1ULL << 15) - 1)
+
+#define DSAF_TBL_MCAST_CFG4_VM128_112_S 0
+#define DSAF_TBL_MCAST_CFG4_VM128_112_M (((1ULL << 7) - 1) << 0)
+#define DSAF_TBL_MCAST_CFG4_ITEM_VLD_S 7
+#define DSAF_TBL_MCAST_CFG4_OLD_EN_S 8
+
+#define DSAF_TBL_MCAST_CFG0_XGE5_0_S 0
+#define DSAF_TBL_MCAST_CFG0_XGE5_0_M (((1ULL << 6) - 1) << 0)
+#define DSAF_TBL_MCAST_CFG0_VM25_0_S 6
+#define DSAF_TBL_MCAST_CFG0_VM25_0_M (((1ULL << 26) - 1) << 6)
+
+#define DSAF_TBL_UCAST_CFG1_OUT_PORT_S 0
+#define DSAF_TBL_UCAST_CFG1_OUT_PORT_M (((1ULL << 8) - 1) << 0)
+#define DSAF_TBL_UCAST_CFG1_DVC_S 8
+#define DSAF_TBL_UCAST_CFG1_MAC_DISCARD_S 9
+#define DSAF_TBL_UCAST_CFG1_ITEM_VLD_S 10
+#define DSAF_TBL_UCAST_CFG1_OLD_EN_S 11
+
+#define DSAF_TBL_LINE_CFG_OUT_PORT_S 0
+#define DSAF_TBL_LINE_CFG_OUT_PORT_M (((1ULL << 8) - 1) << 0)
+#define DSAF_TBL_LINE_CFG_DVC_S 8
+#define DSAF_TBL_LINE_CFG_MAC_DISCARD_S 9
+
+#define DSAF_TBL_PUL_OLD_RSLT_RE_S 0
+#define DSAF_TBL_PUL_MCAST_VLD_S 1
+#define DSAF_TBL_PUL_TCAM_DATA_VLD_S 2
+#define DSAF_TBL_PUL_UCAST_VLD_S 3
+#define DSAF_TBL_PUL_LINE_VLD_S 4
+#define DSAF_TBL_PUL_TCAM_LOAD_S 5
+#define DSAF_TBL_PUL_LINE_LOAD_S 6
+
+#define DSAF_TBL_DFX_LINE_LKUP_NUM_EN_S 0
+#define DSAF_TBL_DFX_UC_LKUP_NUM_EN_S 1
+#define DSAF_TBL_DFX_MC_LKUP_NUM_EN_S 2
+#define DSAF_TBL_DFX_BC_LKUP_NUM_EN_S 3
+#define DSAF_TBL_DFX_RAM_ERR_INJECT_EN_S 4
+
+#define DSAF_VOQ_BP_ALL_DOWNTHRD_S 0
+#define DSAF_VOQ_BP_ALL_DOWNTHRD_M (((1ULL << 10) - 1) << 0)
+#define DSAF_VOQ_BP_ALL_UPTHRD_S 10
+#define DSAF_VOQ_BP_ALL_UPTHRD_M (((1ULL << 10) - 1) << 10)
+
+#define DSAF_XGE_GE_WORK_MODE_S 0
+#define DSAF_XGE_GE_LOOPBACK_S 1
+
+#define DSAF_FC_XGE_TX_PAUSE_S 0
+#define DSAF_REGS_XGE_CNT_CAR_S 1
+
+#define PPE_CFG_QID_MODE_DEF_QID_S 0
+#define PPE_CFG_QID_MODE_DEF_QID_M (0xff << PPE_CFG_QID_MODE_DEF_QID_S)
+
+#define PPE_CFG_QID_MODE_CF_QID_MODE_S 8
+#define PPE_CFG_QID_MODE_CF_QID_MODE_M (0x7 << PPE_CFG_QID_MODE_CF_QID_MODE_S)
+
+#define PPE_CNT_CLR_CE_B 0
+#define PPE_CNT_CLR_SNAP_EN_B 1
+
+#define PPE_COMMON_CNT_CLR_CE_B 0
+#define PPE_COMMON_CNT_CLR_SNAP_EN_B 1
+
+#define GMAC_DUPLEX_TYPE_B 0
+
+#define GMAC_FC_TX_TIMER_S 0
+#define GMAC_FC_TX_TIMER_M 0xffff
+
+#define GMAC_MAX_FRM_SIZE_S 0
+#define GMAC_MAX_FRM_SIZE_M 0xffff
+
+#define GMAC_PORT_MODE_S 0
+#define GMAC_PORT_MODE_M 0xf
+
+#define GMAC_RGMII_1000M_DELAY_B 4
+#define GMAC_MII_TX_EDGE_SEL_B 5
+#define GMAC_FIFO_ERR_AUTO_RST_B 6
+#define GMAC_DBG_CLK_LOS_MSK_B 7
+
+#define GMAC_PORT_RX_EN_B 1
+#define GMAC_PORT_TX_EN_B 2
+
+#define GMAC_PAUSE_EN_RX_FDFC_B 0
+#define GMAC_PAUSE_EN_TX_FDFC_B 1
+#define GMAC_PAUSE_EN_TX_HDFC_B 2
+
+#define GMAC_SHORT_RUNTS_THR_S 0
+#define GMAC_SHORT_RUNTS_THR_M 0x1f
+
+#define GMAC_AN_NEG_STAT_FD_B 5
+#define GMAC_AN_NEG_STAT_HD_B 6
+#define GMAC_AN_NEG_STAT_RF1_DUPLIEX_B 12
+#define GMAC_AN_NEG_STAT_RF2_B 13
+
+#define GMAC_AN_NEG_STAT_NP_LNK_OK_B 15
+#define GMAC_AN_NEG_STAT_RX_SYNC_OK_B 20
+#define GMAC_AN_NEG_STAT_AN_DONE_B 21
+
+#define GMAC_AN_NEG_STAT_PS_S 7
+#define GMAC_AN_NEG_STAT_PS_M (0x3 << GMAC_AN_NEG_STAT_PS_S)
+
+#define GMAC_AN_NEG_STAT_SPEED_S 10
+#define GMAC_AN_NEG_STAT_SPEED_M (0x3 << GMAC_AN_NEG_STAT_SPEED_S)
+
+#define GMAC_TX_AN_EN_B 5
+#define GMAC_TX_CRC_ADD_B 6
+#define GMAC_TX_PAD_EN_B 7
+
+#define GMAC_LINE_LOOPBACK_B 0
+
+#define GMAC_LP_REG_CF_EXT_DRV_LP_B 1
+#define GMAC_LP_REG_CF2MI_LP_EN_B 2
+
+#define GMAC_MODE_CHANGE_EB_B 0
+
+#define GMAC_RECV_CTRL_STRIP_PAD_EN_B 3
+#define GMAC_RECV_CTRL_RUNT_PKT_EN_B 4
+
+#define GMAC_TX_LOOP_PKT_HIG_PRI_B 0
+#define GMAC_TX_LOOP_PKT_EN_B 1
+
+#define XGMAC_PORT_MODE_TX_S 0x0
+#define XGMAC_PORT_MODE_TX_M (0x3 << XGMAC_PORT_MODE_TX_S)
+#define XGMAC_PORT_MODE_TX_40G_B 0x3
+#define XGMAC_PORT_MODE_RX_S 0x4
+#define XGMAC_PORT_MODE_RX_M (0x3 << XGMAC_PORT_MODE_RX_S)
+#define XGMAC_PORT_MODE_RX_40G_B 0x7
+
+#define XGMAC_ENABLE_TX_B 0
+#define XGMAC_ENABLE_RX_B 1
+
+#define XGMAC_CTL_TX_FCS_B 0
+#define XGMAC_CTL_TX_PAD_B 1
+#define XGMAC_CTL_TX_PREAMBLE_TRANS_B 3
+#define XGMAC_CTL_TX_UNDER_MIN_ERR_B 4
+#define XGMAC_CTL_TX_TRUNCATE_B 5
+#define XGMAC_CTL_TX_1588_B 8
+#define XGMAC_CTL_TX_1731_B 9
+#define XGMAC_CTL_TX_PFC_B 10
+#define XGMAC_CTL_RX_FCS_B 16
+#define XGMAC_CTL_RX_FCS_STRIP_B 17
+#define XGMAC_CTL_RX_PREAMBLE_TRANS_B 19
+#define XGMAC_CTL_RX_UNDER_MIN_ERR_B 20
+#define XGMAC_CTL_RX_TRUNCATE_B 21
+#define XGMAC_CTL_RX_1588_B 24
+#define XGMAC_CTL_RX_1731_B 25
+#define XGMAC_CTL_RX_PFC_B 26
+
+#define XGMAC_PMA_FEC_CTL_TX_B 0
+#define XGMAC_PMA_FEC_CTL_RX_B 1
+#define XGMAC_PMA_FEC_CTL_ERR_EN 2
+#define XGMAC_PMA_FEC_CTL_ERR_SH 3
+
+#define XGMAC_PAUSE_CTL_TX_B 0
+#define XGMAC_PAUSE_CTL_RX_B 1
+#define XGMAC_PAUSE_CTL_RSP_MODE_B 2
+#define XGMAC_PAUSE_CTL_TX_XOFF_B 3
+
+static inline void dsaf_write_reg(void *base, u32 reg, u32 value)
+{
+ u8 __iomem *reg_addr = ACCESS_ONCE(base);
+
+ writel(value, reg_addr + reg);
+}
+
+#define dsaf_write_dev(a, reg, value) \
+ dsaf_write_reg((a)->io_base, (reg), (value))
+
+static inline u32 dsaf_read_reg(u8 *base, u32 reg)
+{
+ u8 __iomem *reg_addr = ACCESS_ONCE(base);
+
+ return readl(reg_addr + reg);
+}
+
+#define dsaf_read_dev(a, reg) \
+ dsaf_read_reg((a)->io_base, (reg))
+
+#define dsaf_set_field(origin, mask, shift, val) \
+ do { \
+ (origin) &= (~(mask)); \
+ (origin) |= (((val) << (shift)) & (mask)); \
+ } while (0)
+
+#define dsaf_set_bit(origin, shift, val) \
+ dsaf_set_field((origin), (1ull << (shift)), (shift), (val))
+
+static inline void dsaf_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
+ u32 val)
+{
+ u32 origin = dsaf_read_reg(base, reg);
+
+ dsaf_set_field(origin, mask, shift, val);
+ dsaf_write_reg(base, reg, origin);
+}
+
+#define dsaf_set_dev_field(dev, reg, mask, shift, val) \
+ dsaf_set_reg_field((dev)->io_base, (reg), (mask), (shift), (val))
+
+#define dsaf_set_dev_bit(dev, reg, bit, val) \
+ dsaf_set_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit), (val))
+
+#define dsaf_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
+
+#define dsaf_get_bit(origin, shift) \
+ dsaf_get_field((origin), (1ull << (shift)), (shift))
+
+static inline u32 dsaf_get_reg_field(void *base, u32 reg, u32 mask, u32 shift)
+{
+ u32 origin;
+
+ origin = dsaf_read_reg(base, reg);
+ return dsaf_get_field(origin, mask, shift);
+}
+
+#define dsaf_get_dev_field(dev, reg, mask, shift) \
+ dsaf_get_reg_field((dev)->io_base, (reg), (mask), (shift))
+
+#define dsaf_get_dev_bit(dev, reg, bit) \
+ dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit))
+
+#define dsaf_write_b(addr, data)\
+ writeb((data), (__iomem unsigned char *)(addr))
+#define dsaf_read_b(addr)\
+ readb((__iomem unsigned char *)(addr))
+
+#define hns_mac_reg_read64(drv, offset) \
+ readq((__iomem void *)(((u8 *)(drv)->io_base + 0xc00 + (offset))))
+
+#endif /* _DSAF_REG_H */
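The dsaf_set_field()/dsaf_get_field() helpers above are plain mask-and-shift read-modify-write operations with no kernel dependencies. As a minimal, self-contained sketch of the same pattern (the two macros are restated so the snippet builds outside the tree; the register value and field layout are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* restated from the header above: pure bit operations */
#define dsaf_set_field(origin, mask, shift, val) \
	do { \
		(origin) &= ~(mask); \
		(origin) |= (((val) << (shift)) & (mask)); \
	} while (0)

#define dsaf_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))

int main(void)
{
	uint32_t reg = 0x0000ab12;	/* hypothetical register content */

	/* program an 8-bit field at bit 0 and a 3-bit field at bit 8 */
	dsaf_set_field(reg, 0xffu << 0, 0, 0x34);
	dsaf_set_field(reg, 0x7u << 8, 8, 0x2);

	printf("reg = 0x%08x, low field = 0x%02x\n",
	       reg, (unsigned int)dsaf_get_field(reg, 0xffu << 0, 0));
	return 0;
}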
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
new file mode 100644
index 000000000000..802d55457f19
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -0,0 +1,837 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <asm-generic/io-64-nonatomic-hi-lo.h>
+#include <linux/of_mdio.h>
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_xgmac.h"
+#include "hns_dsaf_reg.h"
+
+static const struct mac_stats_string g_xgmac_stats_string[] = {
+ {"xgmac_tx_bad_pkts_minto64", MAC_STATS_FIELD_OFF(tx_fragment_err)},
+ {"xgmac_tx_good_pkts_minto64", MAC_STATS_FIELD_OFF(tx_undersize)},
+ {"xgmac_tx_total_pkts_minto64", MAC_STATS_FIELD_OFF(tx_under_min_pkts)},
+ {"xgmac_tx_pkts_64", MAC_STATS_FIELD_OFF(tx_64bytes)},
+ {"xgmac_tx_pkts_65to127", MAC_STATS_FIELD_OFF(tx_65to127)},
+ {"xgmac_tx_pkts_128to255", MAC_STATS_FIELD_OFF(tx_128to255)},
+ {"xgmac_tx_pkts_256to511", MAC_STATS_FIELD_OFF(tx_256to511)},
+ {"xgmac_tx_pkts_512to1023", MAC_STATS_FIELD_OFF(tx_512to1023)},
+ {"xgmac_tx_pkts_1024to1518", MAC_STATS_FIELD_OFF(tx_1024to1518)},
+ {"xgmac_tx_pkts_1519tomax", MAC_STATS_FIELD_OFF(tx_1519tomax)},
+ {"xgmac_tx_good_pkts_1519tomax",
+ MAC_STATS_FIELD_OFF(tx_1519tomax_good)},
+ {"xgmac_tx_good_pkts_untralmax", MAC_STATS_FIELD_OFF(tx_oversize)},
+ {"xgmac_tx_bad_pkts_untralmax", MAC_STATS_FIELD_OFF(tx_jabber_err)},
+ {"xgmac_tx_good_pkts_all", MAC_STATS_FIELD_OFF(tx_good_pkts)},
+ {"xgmac_tx_good_byte_all", MAC_STATS_FIELD_OFF(tx_good_bytes)},
+ {"xgmac_tx_total_pkt", MAC_STATS_FIELD_OFF(tx_total_pkts)},
+ {"xgmac_tx_total_byt", MAC_STATS_FIELD_OFF(tx_total_bytes)},
+ {"xgmac_tx_uc_pkt", MAC_STATS_FIELD_OFF(tx_uc_pkts)},
+ {"xgmac_tx_mc_pkt", MAC_STATS_FIELD_OFF(tx_mc_pkts)},
+ {"xgmac_tx_bc_pkt", MAC_STATS_FIELD_OFF(tx_bc_pkts)},
+ {"xgmac_tx_pause_frame_num", MAC_STATS_FIELD_OFF(tx_pfc_tc0)},
+ {"xgmac_tx_pfc_per_1pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc1)},
+ {"xgmac_tx_pfc_per_2pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc2)},
+ {"xgmac_tx_pfc_per_3pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc3)},
+ {"xgmac_tx_pfc_per_4pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc4)},
+ {"xgmac_tx_pfc_per_5pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc5)},
+ {"xgmac_tx_pfc_per_6pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc6)},
+ {"xgmac_tx_pfc_per_7pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc7)},
+ {"xgmac_tx_mac_ctrol_frame", MAC_STATS_FIELD_OFF(tx_ctrl)},
+ {"xgmac_tx_1731_pkts", MAC_STATS_FIELD_OFF(tx_1731_pkts)},
+ {"xgmac_tx_1588_pkts", MAC_STATS_FIELD_OFF(tx_1588_pkts)},
+ {"xgmac_rx_good_pkt_from_dsaf", MAC_STATS_FIELD_OFF(rx_good_from_sw)},
+ {"xgmac_rx_bad_pkt_from_dsaf", MAC_STATS_FIELD_OFF(rx_bad_from_sw)},
+ {"xgmac_tx_bad_pkt_64tomax", MAC_STATS_FIELD_OFF(tx_bad_pkts)},
+
+ {"xgmac_rx_bad_pkts_minto64", MAC_STATS_FIELD_OFF(rx_fragment_err)},
+ {"xgmac_rx_good_pkts_minto64", MAC_STATS_FIELD_OFF(rx_undersize)},
+ {"xgmac_rx_total_pkts_minto64", MAC_STATS_FIELD_OFF(rx_under_min)},
+ {"xgmac_rx_pkt_64", MAC_STATS_FIELD_OFF(rx_64bytes)},
+ {"xgmac_rx_pkt_65to127", MAC_STATS_FIELD_OFF(rx_65to127)},
+ {"xgmac_rx_pkt_128to255", MAC_STATS_FIELD_OFF(rx_128to255)},
+ {"xgmac_rx_pkt_256to511", MAC_STATS_FIELD_OFF(rx_256to511)},
+ {"xgmac_rx_pkt_512to1023", MAC_STATS_FIELD_OFF(rx_512to1023)},
+ {"xgmac_rx_pkt_1024to1518", MAC_STATS_FIELD_OFF(rx_1024to1518)},
+ {"xgmac_rx_pkt_1519tomax", MAC_STATS_FIELD_OFF(rx_1519tomax)},
+ {"xgmac_rx_good_pkt_1519tomax", MAC_STATS_FIELD_OFF(rx_1519tomax_good)},
+ {"xgmac_rx_good_pkt_untramax", MAC_STATS_FIELD_OFF(rx_oversize)},
+ {"xgmac_rx_bad_pkt_untramax", MAC_STATS_FIELD_OFF(rx_jabber_err)},
+ {"xgmac_rx_good_pkt", MAC_STATS_FIELD_OFF(rx_good_pkts)},
+ {"xgmac_rx_good_byt", MAC_STATS_FIELD_OFF(rx_good_bytes)},
+ {"xgmac_rx_pkt", MAC_STATS_FIELD_OFF(rx_total_pkts)},
+ {"xgmac_rx_byt", MAC_STATS_FIELD_OFF(rx_total_bytes)},
+ {"xgmac_rx_uc_pkt", MAC_STATS_FIELD_OFF(rx_uc_pkts)},
+ {"xgmac_rx_mc_pkt", MAC_STATS_FIELD_OFF(rx_mc_pkts)},
+ {"xgmac_rx_bc_pkt", MAC_STATS_FIELD_OFF(rx_bc_pkts)},
+ {"xgmac_rx_pause_frame_num", MAC_STATS_FIELD_OFF(rx_pfc_tc0)},
+ {"xgmac_rx_pfc_per_1pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc1)},
+ {"xgmac_rx_pfc_per_2pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc2)},
+ {"xgmac_rx_pfc_per_3pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc3)},
+ {"xgmac_rx_pfc_per_4pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc4)},
+ {"xgmac_rx_pfc_per_5pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc5)},
+ {"xgmac_rx_pfc_per_6pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc6)},
+ {"xgmac_rx_pfc_per_7pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc7)},
+ {"xgmac_rx_mac_control", MAC_STATS_FIELD_OFF(rx_unknown_ctrl)},
+ {"xgmac_tx_good_pkt_todsaf", MAC_STATS_FIELD_OFF(tx_good_to_sw)},
+ {"xgmac_tx_bad_pkt_todsaf", MAC_STATS_FIELD_OFF(tx_bad_to_sw)},
+ {"xgmac_rx_1731_pkt", MAC_STATS_FIELD_OFF(rx_1731_pkts)},
+ {"xgmac_rx_symbol_err_pkt", MAC_STATS_FIELD_OFF(rx_symbol_err)},
+ {"xgmac_rx_fcs_pkt", MAC_STATS_FIELD_OFF(rx_fcs_err)}
+};
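g_xgmac_stats_string pairs each ethtool string with an offset into the hardware-stats structure. MAC_STATS_FIELD_OFF() and DSAF_STATS_READ() are defined elsewhere in the series, but the pattern is presumably offsetof-based; a small standalone sketch of that table-driven lookup, with a toy structure standing in for struct mac_hw_stats:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* toy structure standing in for struct mac_hw_stats */
struct toy_stats {
	uint64_t tx_good_pkts;
	uint64_t rx_good_pkts;
};

struct stats_string {
	const char *desc;
	unsigned long offset;
};

/* offsetof-based table, mirroring the MAC_STATS_FIELD_OFF() idea */
static const struct stats_string toy_strings[] = {
	{ "toy_tx_good_pkts", offsetof(struct toy_stats, tx_good_pkts) },
	{ "toy_rx_good_pkts", offsetof(struct toy_stats, rx_good_pkts) },
};

int main(void)
{
	struct toy_stats stats = { .tx_good_pkts = 42, .rx_good_pkts = 7 };
	size_t i;

	for (i = 0; i < sizeof(toy_strings) / sizeof(toy_strings[0]); i++) {
		/* the DSAF_STATS_READ() equivalent: base pointer plus offset */
		uint64_t val = *(const uint64_t *)((const char *)&stats +
						   toy_strings[i].offset);

		printf("%s: %llu\n", toy_strings[i].desc,
		       (unsigned long long)val);
	}
	return 0;
}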
+
+/**
+ *hns_xgmac_tx_enable - xgmac port tx enable
+ *@drv: mac driver
+ *@value: 1 to enable, 0 to disable
+ */
+static void hns_xgmac_tx_enable(struct mac_driver *drv, u32 value)
+{
+ dsaf_set_dev_bit(drv, XGMAC_MAC_ENABLE_REG, XGMAC_ENABLE_TX_B, !!value);
+}
+
+/**
+ *hns_xgmac_rx_enable - xgmac port rx enable
+ *@drv: mac driver
+ *@value: 1 to enable, 0 to disable
+ */
+static void hns_xgmac_rx_enable(struct mac_driver *drv, u32 value)
+{
+ dsaf_set_dev_bit(drv, XGMAC_MAC_ENABLE_REG, XGMAC_ENABLE_RX_B, !!value);
+}
+
+/**
+ *hns_xgmac_enable - enable xgmac port
+ *@mac_drv: mac driver
+ *@mode: mode of mac port
+ */
+static void hns_xgmac_enable(void *mac_drv, enum mac_commom_mode mode)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct dsaf_device *dsaf_dev
+ = (struct dsaf_device *)dev_get_drvdata(drv->dev);
+ u32 port = drv->mac_id;
+
+ hns_dsaf_xge_core_srst_by_port(dsaf_dev, port, 1);
+ mdelay(10);
+
+ /* enable XGE RX/TX */
+ if (mode == MAC_COMM_MODE_TX) {
+ hns_xgmac_tx_enable(drv, 1);
+ } else if (mode == MAC_COMM_MODE_RX) {
+ hns_xgmac_rx_enable(drv, 1);
+ } else if (mode == MAC_COMM_MODE_RX_AND_TX) {
+ hns_xgmac_tx_enable(drv, 1);
+ hns_xgmac_rx_enable(drv, 1);
+ } else {
+ dev_err(drv->dev, "error mac mode:%d\n", mode);
+ }
+}
+
+/**
+ *hns_xgmac_disable - disable xgmac port
+ *@mac_drv: mac driver
+ *@mode: mode of mac port
+ */
+static void hns_xgmac_disable(void *mac_drv, enum mac_commom_mode mode)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct dsaf_device *dsaf_dev
+ = (struct dsaf_device *)dev_get_drvdata(drv->dev);
+ u32 port = drv->mac_id;
+
+ if (mode == MAC_COMM_MODE_TX) {
+ hns_xgmac_tx_enable(drv, 0);
+ } else if (mode == MAC_COMM_MODE_RX) {
+ hns_xgmac_rx_enable(drv, 0);
+ } else if (mode == MAC_COMM_MODE_RX_AND_TX) {
+ hns_xgmac_tx_enable(drv, 0);
+ hns_xgmac_rx_enable(drv, 0);
+ }
+
+ mdelay(10);
+ hns_dsaf_xge_core_srst_by_port(dsaf_dev, port, 0);
+}
+
+/**
+ *hns_xgmac_pma_fec_enable - xgmac PMA FEC enable
+ *@drv: mac driver
+ *@tx_value: enable FEC on the tx path
+ *@rx_value: enable FEC on the rx path
+ */
+static void hns_xgmac_pma_fec_enable(struct mac_driver *drv, u32 tx_value,
+ u32 rx_value)
+{
+ u32 origin = dsaf_read_dev(drv, XGMAC_PMA_FEC_CONTROL_REG);
+
+ dsaf_set_bit(origin, XGMAC_PMA_FEC_CTL_TX_B, !!tx_value);
+ dsaf_set_bit(origin, XGMAC_PMA_FEC_CTL_RX_B, !!rx_value);
+ dsaf_write_dev(drv, XGMAC_PMA_FEC_CONTROL_REG, origin);
+}
+
+/* clear pending exception IRQs for the XGE port, then enable or disable them */
+static void hns_xgmac_exc_irq_en(struct mac_driver *drv, u32 en)
+{
+ u32 clr_value = 0xfffffffful;
+ u32 msk_value = en ? 0xfffffffful : 0; /* 1 enables, 0 disables */
+
+ dsaf_write_dev(drv, XGMAC_INT_STATUS_REG, clr_value);
+ dsaf_write_dev(drv, XGMAC_INT_ENABLE_REG, msk_value);
+}
+
+/**
+ *hns_xgmac_init - initialize XGE
+ *@mac_drv: mac driver
+ */
+static void hns_xgmac_init(void *mac_drv)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct dsaf_device *dsaf_dev
+ = (struct dsaf_device *)dev_get_drvdata(drv->dev);
+ u32 port = drv->mac_id;
+
+ hns_dsaf_xge_srst_by_port(dsaf_dev, port, 0);
+ mdelay(100);
+ hns_dsaf_xge_srst_by_port(dsaf_dev, port, 1);
+
+ mdelay(100);
+ hns_xgmac_exc_irq_en(drv, 0);
+
+ hns_xgmac_pma_fec_enable(drv, 0x0, 0x0);
+
+ hns_xgmac_disable(mac_drv, MAC_COMM_MODE_RX_AND_TX);
+}
+
+/**
+ *hns_xgmac_config_pad_and_crc - set xgmac pad and crc enable at the same time
+ *@mac_drv: mac driver
+ *@newval: enable of pad and crc
+ */
+static void hns_xgmac_config_pad_and_crc(void *mac_drv, u8 newval)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ u32 origin = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG);
+
+ dsaf_set_bit(origin, XGMAC_CTL_TX_PAD_B, !!newval);
+ dsaf_set_bit(origin, XGMAC_CTL_TX_FCS_B, !!newval);
+ dsaf_set_bit(origin, XGMAC_CTL_RX_FCS_B, !!newval);
+ dsaf_write_dev(drv, XGMAC_MAC_CONTROL_REG, origin);
+}
+
+/**
+ *hns_xgmac_pausefrm_cfg - configure pause frame parameters of the xgmac
+ *@mac_drv: mac driver
+ *@rx_en: enable reception of pause frames
+ *@tx_en: enable transmission of pause frames
+ */
+static void hns_xgmac_pausefrm_cfg(void *mac_drv, u32 rx_en, u32 tx_en)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ u32 origin = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG);
+
+ dsaf_set_bit(origin, XGMAC_PAUSE_CTL_TX_B, !!tx_en);
+ dsaf_set_bit(origin, XGMAC_PAUSE_CTL_RX_B, !!rx_en);
+ dsaf_write_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG, origin);
+}
+
+static void hns_xgmac_set_pausefrm_mac_addr(void *mac_drv, char *mac_addr)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ u32 high_val = mac_addr[1] | (mac_addr[0] << 8);
+ u32 low_val = mac_addr[5] | (mac_addr[4] << 8)
+ | (mac_addr[3] << 16) | (mac_addr[2] << 24);
+ dsaf_write_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_L_REG, low_val);
+ dsaf_write_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_H_REG, high_val);
+}
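hns_xgmac_set_pausefrm_mac_addr() splits the station address across the two pause-address registers, most significant bytes in the high word. A tiny sketch of that packing for a hypothetical address 00:11:22:33:44:55 (not taken from the driver):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	/* same byte placement as the driver code above */
	uint32_t high = mac[1] | (mac[0] << 8);
	uint32_t low = mac[5] | (mac[4] << 8) | (mac[3] << 16) | (mac[2] << 24);

	/* prints high = 0x00000011, low = 0x22334455 */
	printf("high = 0x%08x, low = 0x%08x\n", high, low);
	return 0;
}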
+
+/**
+ *hns_xgmac_set_rx_ignore_pause_frames - configure rx pause handling of the xgmac
+ *@mac_drv: mac driver
+ *@enable: enable handling of received pause frames
+ */
+static void hns_xgmac_set_rx_ignore_pause_frames(void *mac_drv, u32 enable)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_set_dev_bit(drv, XGMAC_MAC_PAUSE_CTRL_REG,
+ XGMAC_PAUSE_CTL_RX_B, !!enable);
+}
+
+/**
+ *hns_xgmac_set_tx_auto_pause_frames - configure automatic tx pause frames
+ *@mac_drv: mac driver
+ *@enable: pause time to program; zero disables tx pause frames
+ */
+static void hns_xgmac_set_tx_auto_pause_frames(void *mac_drv, u16 enable)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_set_dev_bit(drv, XGMAC_MAC_PAUSE_CTRL_REG,
+ XGMAC_PAUSE_CTL_TX_B, !!enable);
+
+ /* if enable is not zero, set the tx pause time */
+ if (enable)
+ dsaf_write_dev(drv, XGMAC_MAC_PAUSE_TIME_REG, enable);
+}
+
+/**
+ *hns_xgmac_get_id - get xgmac port id
+ *@mac_drv: mac driver
+ *@mac_id: returned xgmac port id
+ */
+static void hns_xgmac_get_id(void *mac_drv, u8 *mac_id)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ *mac_id = drv->mac_id;
+}
+
+/**
+ *hns_xgmac_config_max_frame_length - set xgmac max frame length
+ *@mac_drv: mac driver
+ *@newval:xgmac max frame length
+ */
+static void hns_xgmac_config_max_frame_length(void *mac_drv, u16 newval)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_write_dev(drv, XGMAC_MAC_MAX_PKT_SIZE_REG, newval);
+}
+
+void hns_xgmac_update_stats(void *mac_drv)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct mac_hw_stats *hw_stats = &drv->mac_cb->hw_stats;
+
+ /* TX */
+ hw_stats->tx_fragment_err
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_FRAGMENT);
+ hw_stats->tx_undersize
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERSIZE);
+ hw_stats->tx_under_min_pkts
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERMIN);
+ hw_stats->tx_64bytes = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_64OCTETS);
+ hw_stats->tx_65to127
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_65TO127OCTETS);
+ hw_stats->tx_128to255
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_128TO255OCTETS);
+ hw_stats->tx_256to511
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_256TO511OCTETS);
+ hw_stats->tx_512to1023
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_512TO1023OCTETS);
+ hw_stats->tx_1024to1518
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1024TO1518OCTETS);
+ hw_stats->tx_1519tomax
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETS);
+ hw_stats->tx_1519tomax_good
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETSOK);
+ hw_stats->tx_oversize = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_OVERSIZE);
+ hw_stats->tx_jabber_err = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_JABBER);
+ hw_stats->tx_good_pkts = hns_mac_reg_read64(drv, XGMAC_TX_GOODPKTS);
+ hw_stats->tx_good_bytes = hns_mac_reg_read64(drv, XGMAC_TX_GOODOCTETS);
+ hw_stats->tx_total_pkts = hns_mac_reg_read64(drv, XGMAC_TX_TOTAL_PKTS);
+ hw_stats->tx_total_bytes
+ = hns_mac_reg_read64(drv, XGMAC_TX_TOTALOCTETS);
+ hw_stats->tx_uc_pkts = hns_mac_reg_read64(drv, XGMAC_TX_UNICASTPKTS);
+ hw_stats->tx_mc_pkts = hns_mac_reg_read64(drv, XGMAC_TX_MULTICASTPKTS);
+ hw_stats->tx_bc_pkts = hns_mac_reg_read64(drv, XGMAC_TX_BROADCASTPKTS);
+ hw_stats->tx_pfc_tc0 = hns_mac_reg_read64(drv, XGMAC_TX_PRI0PAUSEPKTS);
+ hw_stats->tx_pfc_tc1 = hns_mac_reg_read64(drv, XGMAC_TX_PRI1PAUSEPKTS);
+ hw_stats->tx_pfc_tc2 = hns_mac_reg_read64(drv, XGMAC_TX_PRI2PAUSEPKTS);
+ hw_stats->tx_pfc_tc3 = hns_mac_reg_read64(drv, XGMAC_TX_PRI3PAUSEPKTS);
+ hw_stats->tx_pfc_tc4 = hns_mac_reg_read64(drv, XGMAC_TX_PRI4PAUSEPKTS);
+ hw_stats->tx_pfc_tc5 = hns_mac_reg_read64(drv, XGMAC_TX_PRI5PAUSEPKTS);
+ hw_stats->tx_pfc_tc6 = hns_mac_reg_read64(drv, XGMAC_TX_PRI6PAUSEPKTS);
+ hw_stats->tx_pfc_tc7 = hns_mac_reg_read64(drv, XGMAC_TX_PRI7PAUSEPKTS);
+ hw_stats->tx_ctrl = hns_mac_reg_read64(drv, XGMAC_TX_MACCTRLPKTS);
+ hw_stats->tx_1731_pkts = hns_mac_reg_read64(drv, XGMAC_TX_1731PKTS);
+ hw_stats->tx_1588_pkts = hns_mac_reg_read64(drv, XGMAC_TX_1588PKTS);
+ hw_stats->rx_good_from_sw
+ = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPGOODPKTS);
+ hw_stats->rx_bad_from_sw
+ = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPBADPKTS);
+ hw_stats->tx_bad_pkts = hns_mac_reg_read64(drv, XGMAC_TX_ERRALLPKTS);
+
+ /* RX */
+ hw_stats->rx_fragment_err
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_FRAGMENT);
+ hw_stats->rx_undersize
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTSUNDERSIZE);
+ hw_stats->rx_under_min
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_UNDERMIN);
+ hw_stats->rx_64bytes = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_64OCTETS);
+ hw_stats->rx_65to127
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_65TO127OCTETS);
+ hw_stats->rx_128to255
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_128TO255OCTETS);
+ hw_stats->rx_256to511
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_256TO511OCTETS);
+ hw_stats->rx_512to1023
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_512TO1023OCTETS);
+ hw_stats->rx_1024to1518
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1024TO1518OCTETS);
+ hw_stats->rx_1519tomax
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETS);
+ hw_stats->rx_1519tomax_good
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETSOK);
+ hw_stats->rx_oversize = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_OVERSIZE);
+ hw_stats->rx_jabber_err = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_JABBER);
+ hw_stats->rx_good_pkts = hns_mac_reg_read64(drv, XGMAC_RX_GOODPKTS);
+ hw_stats->rx_good_bytes = hns_mac_reg_read64(drv, XGMAC_RX_GOODOCTETS);
+ hw_stats->rx_total_pkts = hns_mac_reg_read64(drv, XGMAC_RX_TOTAL_PKTS);
+ hw_stats->rx_total_bytes
+ = hns_mac_reg_read64(drv, XGMAC_RX_TOTALOCTETS);
+ hw_stats->rx_uc_pkts = hns_mac_reg_read64(drv, XGMAC_RX_UNICASTPKTS);
+ hw_stats->rx_mc_pkts = hns_mac_reg_read64(drv, XGMAC_RX_MULTICASTPKTS);
+ hw_stats->rx_bc_pkts = hns_mac_reg_read64(drv, XGMAC_RX_BROADCASTPKTS);
+ hw_stats->rx_pfc_tc0 = hns_mac_reg_read64(drv, XGMAC_RX_PRI0PAUSEPKTS);
+ hw_stats->rx_pfc_tc1 = hns_mac_reg_read64(drv, XGMAC_RX_PRI1PAUSEPKTS);
+ hw_stats->rx_pfc_tc2 = hns_mac_reg_read64(drv, XGMAC_RX_PRI2PAUSEPKTS);
+ hw_stats->rx_pfc_tc3 = hns_mac_reg_read64(drv, XGMAC_RX_PRI3PAUSEPKTS);
+ hw_stats->rx_pfc_tc4 = hns_mac_reg_read64(drv, XGMAC_RX_PRI4PAUSEPKTS);
+ hw_stats->rx_pfc_tc5 = hns_mac_reg_read64(drv, XGMAC_RX_PRI5PAUSEPKTS);
+ hw_stats->rx_pfc_tc6 = hns_mac_reg_read64(drv, XGMAC_RX_PRI6PAUSEPKTS);
+ hw_stats->rx_pfc_tc7 = hns_mac_reg_read64(drv, XGMAC_RX_PRI7PAUSEPKTS);
+
+ hw_stats->rx_unknown_ctrl
+ = hns_mac_reg_read64(drv, XGMAC_RX_MACCTRLPKTS);
+ hw_stats->tx_good_to_sw
+ = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPGOODPKTS);
+ hw_stats->tx_bad_to_sw
+ = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPBADPKTS);
+ hw_stats->rx_1731_pkts = hns_mac_reg_read64(drv, XGMAC_RX_1731PKTS);
+ hw_stats->rx_symbol_err
+ = hns_mac_reg_read64(drv, XGMAC_RX_SYMBOLERRPKTS);
+ hw_stats->rx_fcs_err = hns_mac_reg_read64(drv, XGMAC_RX_FCSERRPKTS);
+}
+
+/**
+ *hns_xgmac_free - free xgmac driver
+ *@mac_drv: mac driver
+ */
+static void hns_xgmac_free(void *mac_drv)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct dsaf_device *dsaf_dev
+ = (struct dsaf_device *)dev_get_drvdata(drv->dev);
+
+ u32 mac_id = drv->mac_id;
+
+ hns_dsaf_xge_srst_by_port(dsaf_dev, mac_id, 0);
+}
+
+/**
+ *hns_xgmac_get_info - get xgmac information
+ *@mac_drv: mac driver
+ *@mac_info:mac information
+ */
+static void hns_xgmac_get_info(void *mac_drv, struct mac_info *mac_info)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ u32 pause_time, pause_ctrl, port_mode, ctrl_val;
+
+ ctrl_val = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG);
+ mac_info->pad_and_crc_en = dsaf_get_bit(ctrl_val, XGMAC_CTL_TX_PAD_B);
+ mac_info->auto_neg = 0;
+
+ pause_time = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_TIME_REG);
+ mac_info->tx_pause_time = pause_time;
+
+ port_mode = dsaf_read_dev(drv, XGMAC_PORT_MODE_REG);
+ mac_info->port_en = dsaf_get_field(port_mode, XGMAC_PORT_MODE_TX_M,
+ XGMAC_PORT_MODE_TX_S) &&
+ dsaf_get_field(port_mode, XGMAC_PORT_MODE_RX_M,
+ XGMAC_PORT_MODE_RX_S);
+ mac_info->duplex = 1;
+ mac_info->speed = MAC_SPEED_10000;
+
+ pause_ctrl = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG);
+ mac_info->rx_pause_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_RX_B);
+ mac_info->tx_pause_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_TX_B);
+}
+
+/**
+ *hns_xgmac_get_pausefrm_cfg - get xgmac pause param
+ *@mac_drv: mac driver
+ *@rx_en:xgmac rx pause enable
+ *@tx_en:xgmac tx pause enable
+ */
+static void hns_xgmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_en, u32 *tx_en)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ u32 pause_ctrl;
+
+ pause_ctrl = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG);
+ *rx_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_RX_B);
+ *tx_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_TX_B);
+}
+
+/**
+ *hns_xgmac_get_link_status - get xgmac link status
+ *@mac_drv: mac driver
+ *@link_stat: xgmac link stat
+ */
+static void hns_xgmac_get_link_status(void *mac_drv, u32 *link_stat)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ *link_stat = dsaf_read_dev(drv, XGMAC_LINK_STATUS_REG);
+}
+
+/**
+ *hns_xgmac_get_regs - dump xgmac regs
+ *@mac_drv: mac driver
+ *@data: buffer that receives the register values
+ */
+static void hns_xgmac_get_regs(void *mac_drv, void *data)
+{
+ u32 i = 0;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ u32 *regs = data;
+ u64 qtmp;
+
+ /* base config registers */
+ regs[0] = dsaf_read_dev(drv, XGMAC_INT_STATUS_REG);
+ regs[1] = dsaf_read_dev(drv, XGMAC_INT_ENABLE_REG);
+ regs[2] = dsaf_read_dev(drv, XGMAC_INT_SET_REG);
+ regs[3] = dsaf_read_dev(drv, XGMAC_IERR_U_INFO_REG);
+ regs[4] = dsaf_read_dev(drv, XGMAC_OVF_INFO_REG);
+ regs[5] = dsaf_read_dev(drv, XGMAC_OVF_CNT_REG);
+ regs[6] = dsaf_read_dev(drv, XGMAC_PORT_MODE_REG);
+ regs[7] = dsaf_read_dev(drv, XGMAC_CLK_ENABLE_REG);
+ regs[8] = dsaf_read_dev(drv, XGMAC_RESET_REG);
+ regs[9] = dsaf_read_dev(drv, XGMAC_LINK_CONTROL_REG);
+ regs[10] = dsaf_read_dev(drv, XGMAC_LINK_STATUS_REG);
+
+ regs[11] = dsaf_read_dev(drv, XGMAC_SPARE_REG);
+ regs[12] = dsaf_read_dev(drv, XGMAC_SPARE_CNT_REG);
+ regs[13] = dsaf_read_dev(drv, XGMAC_MAC_ENABLE_REG);
+ regs[14] = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG);
+ regs[15] = dsaf_read_dev(drv, XGMAC_MAC_IPG_REG);
+ regs[16] = dsaf_read_dev(drv, XGMAC_MAC_MSG_CRC_EN_REG);
+ regs[17] = dsaf_read_dev(drv, XGMAC_MAC_MSG_IMG_REG);
+ regs[18] = dsaf_read_dev(drv, XGMAC_MAC_MSG_FC_CFG_REG);
+ regs[19] = dsaf_read_dev(drv, XGMAC_MAC_MSG_TC_CFG_REG);
+ regs[20] = dsaf_read_dev(drv, XGMAC_MAC_PAD_SIZE_REG);
+ regs[21] = dsaf_read_dev(drv, XGMAC_MAC_MIN_PKT_SIZE_REG);
+ regs[22] = dsaf_read_dev(drv, XGMAC_MAC_MAX_PKT_SIZE_REG);
+ regs[23] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG);
+ regs[24] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_TIME_REG);
+ regs[25] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_GAP_REG);
+ regs[26] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_H_REG);
+ regs[27] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_L_REG);
+ regs[28] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_PEER_MAC_H_REG);
+ regs[29] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_PEER_MAC_L_REG);
+ regs[30] = dsaf_read_dev(drv, XGMAC_MAC_PFC_PRI_EN_REG);
+ regs[31] = dsaf_read_dev(drv, XGMAC_MAC_1588_CTRL_REG);
+ regs[32] = dsaf_read_dev(drv, XGMAC_MAC_1588_TX_PORT_DLY_REG);
+ regs[33] = dsaf_read_dev(drv, XGMAC_MAC_1588_RX_PORT_DLY_REG);
+ regs[34] = dsaf_read_dev(drv, XGMAC_MAC_1588_ASYM_DLY_REG);
+ regs[35] = dsaf_read_dev(drv, XGMAC_MAC_1588_ADJUST_CFG_REG);
+
+ regs[36] = dsaf_read_dev(drv, XGMAC_MAC_Y1731_ETH_TYPE_REG);
+ regs[37] = dsaf_read_dev(drv, XGMAC_MAC_MIB_CONTROL_REG);
+ regs[38] = dsaf_read_dev(drv, XGMAC_MAC_WAN_RATE_ADJUST_REG);
+ regs[39] = dsaf_read_dev(drv, XGMAC_MAC_TX_ERR_MARK_REG);
+ regs[40] = dsaf_read_dev(drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG);
+ regs[41] = dsaf_read_dev(drv, XGMAC_MAC_RX_LF_RF_STATUS_REG);
+ regs[42] = dsaf_read_dev(drv, XGMAC_MAC_TX_RUNT_PKT_CNT_REG);
+ regs[43] = dsaf_read_dev(drv, XGMAC_MAC_RX_RUNT_PKT_CNT_REG);
+ regs[44] = dsaf_read_dev(drv, XGMAC_MAC_RX_PREAM_ERR_PKT_CNT_REG);
+ regs[45] = dsaf_read_dev(drv, XGMAC_MAC_TX_LF_RF_TERM_PKT_CNT_REG);
+ regs[46] = dsaf_read_dev(drv, XGMAC_MAC_TX_SN_MISMATCH_PKT_CNT_REG);
+ regs[47] = dsaf_read_dev(drv, XGMAC_MAC_RX_ERR_MSG_CNT_REG);
+ regs[48] = dsaf_read_dev(drv, XGMAC_MAC_RX_ERR_EFD_CNT_REG);
+ regs[49] = dsaf_read_dev(drv, XGMAC_MAC_ERR_INFO_REG);
+ regs[50] = dsaf_read_dev(drv, XGMAC_MAC_DBG_INFO_REG);
+
+ regs[51] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SYNC_THD_REG);
+ regs[52] = dsaf_read_dev(drv, XGMAC_PCS_STATUS1_REG);
+ regs[53] = dsaf_read_dev(drv, XGMAC_PCS_BASER_STATUS1_REG);
+ regs[54] = dsaf_read_dev(drv, XGMAC_PCS_BASER_STATUS2_REG);
+ regs[55] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDA_0_REG);
+ regs[56] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDA_1_REG);
+ regs[57] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDB_0_REG);
+ regs[58] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDB_1_REG);
+ regs[59] = dsaf_read_dev(drv, XGMAC_PCS_BASER_TEST_CONTROL_REG);
+ regs[60] = dsaf_read_dev(drv, XGMAC_PCS_BASER_TEST_ERR_CNT_REG);
+ regs[61] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO_REG);
+ regs[62] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO1_REG);
+ regs[63] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO2_REG);
+ regs[64] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO3_REG);
+
+ regs[65] = dsaf_read_dev(drv, XGMAC_PMA_ENABLE_REG);
+ regs[66] = dsaf_read_dev(drv, XGMAC_PMA_CONTROL_REG);
+ regs[67] = dsaf_read_dev(drv, XGMAC_PMA_SIGNAL_STATUS_REG);
+ regs[68] = dsaf_read_dev(drv, XGMAC_PMA_DBG_INFO_REG);
+ regs[69] = dsaf_read_dev(drv, XGMAC_PMA_FEC_ABILITY_REG);
+ regs[70] = dsaf_read_dev(drv, XGMAC_PMA_FEC_CONTROL_REG);
+ regs[71] = dsaf_read_dev(drv, XGMAC_PMA_FEC_CORR_BLOCK_CNT__REG);
+ regs[72] = dsaf_read_dev(drv, XGMAC_PMA_FEC_UNCORR_BLOCK_CNT__REG);
+
+ /* status registers */
+#define hns_xgmac_cpy_q(p, q) \
+ do {\
+ *(p) = (u32)(q);\
+ *((p) + 1) = (u32)((q) >> 32);\
+ } while (0)
+
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_FRAGMENT);
+ hns_xgmac_cpy_q(&regs[73], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERSIZE);
+ hns_xgmac_cpy_q(&regs[75], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERMIN);
+ hns_xgmac_cpy_q(&regs[77], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_64OCTETS);
+ hns_xgmac_cpy_q(&regs[79], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_65TO127OCTETS);
+ hns_xgmac_cpy_q(&regs[81], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_128TO255OCTETS);
+ hns_xgmac_cpy_q(&regs[83], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_256TO511OCTETS);
+ hns_xgmac_cpy_q(&regs[85], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_512TO1023OCTETS);
+ hns_xgmac_cpy_q(&regs[87], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1024TO1518OCTETS);
+ hns_xgmac_cpy_q(&regs[89], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETS);
+ hns_xgmac_cpy_q(&regs[91], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETSOK);
+ hns_xgmac_cpy_q(&regs[93], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_OVERSIZE);
+ hns_xgmac_cpy_q(&regs[95], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_JABBER);
+ hns_xgmac_cpy_q(&regs[97], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_GOODPKTS);
+ hns_xgmac_cpy_q(&regs[99], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_GOODOCTETS);
+ hns_xgmac_cpy_q(&regs[101], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_TOTAL_PKTS);
+ hns_xgmac_cpy_q(&regs[103], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_TOTALOCTETS);
+ hns_xgmac_cpy_q(&regs[105], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_UNICASTPKTS);
+ hns_xgmac_cpy_q(&regs[107], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_MULTICASTPKTS);
+ hns_xgmac_cpy_q(&regs[109], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_BROADCASTPKTS);
+ hns_xgmac_cpy_q(&regs[111], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI0PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[113], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI1PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[115], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI2PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[117], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI3PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[119], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI4PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[121], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI5PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[123], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI6PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[125], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI7PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[127], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_MACCTRLPKTS);
+ hns_xgmac_cpy_q(&regs[129], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_1731PKTS);
+ hns_xgmac_cpy_q(&regs[131], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_1588PKTS);
+ hns_xgmac_cpy_q(&regs[133], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPGOODPKTS);
+ hns_xgmac_cpy_q(&regs[135], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPBADPKTS);
+ hns_xgmac_cpy_q(&regs[137], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_ERRALLPKTS);
+ hns_xgmac_cpy_q(&regs[139], qtmp);
+
+ /* RX */
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_FRAGMENT);
+ hns_xgmac_cpy_q(&regs[141], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTSUNDERSIZE);
+ hns_xgmac_cpy_q(&regs[143], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_UNDERMIN);
+ hns_xgmac_cpy_q(&regs[145], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_64OCTETS);
+ hns_xgmac_cpy_q(&regs[147], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_65TO127OCTETS);
+ hns_xgmac_cpy_q(&regs[149], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_128TO255OCTETS);
+ hns_xgmac_cpy_q(&regs[151], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_256TO511OCTETS);
+ hns_xgmac_cpy_q(&regs[153], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_512TO1023OCTETS);
+ hns_xgmac_cpy_q(&regs[155], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1024TO1518OCTETS);
+ hns_xgmac_cpy_q(&regs[157], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETS);
+ hns_xgmac_cpy_q(&regs[159], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETSOK);
+ hns_xgmac_cpy_q(&regs[161], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_OVERSIZE);
+ hns_xgmac_cpy_q(&regs[163], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_JABBER);
+ hns_xgmac_cpy_q(&regs[165], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_GOODPKTS);
+ hns_xgmac_cpy_q(&regs[167], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_GOODOCTETS);
+ hns_xgmac_cpy_q(&regs[169], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_TOTAL_PKTS);
+ hns_xgmac_cpy_q(&regs[171], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_TOTALOCTETS);
+ hns_xgmac_cpy_q(&regs[173], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_UNICASTPKTS);
+ hns_xgmac_cpy_q(&regs[175], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_MULTICASTPKTS);
+ hns_xgmac_cpy_q(&regs[177], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_BROADCASTPKTS);
+ hns_xgmac_cpy_q(&regs[179], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI0PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[181], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI1PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[183], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI2PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[185], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI3PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[187], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI4PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[189], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI5PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[191], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI6PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[193], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI7PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[195], qtmp);
+
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_MACCTRLPKTS);
+ hns_xgmac_cpy_q(&regs[197], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPGOODPKTS);
+ hns_xgmac_cpy_q(&regs[199], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPBADPKTS);
+ hns_xgmac_cpy_q(&regs[201], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_1731PKTS);
+ hns_xgmac_cpy_q(&regs[203], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_SYMBOLERRPKTS);
+ hns_xgmac_cpy_q(&regs[205], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_FCSERRPKTS);
+ hns_xgmac_cpy_q(&regs[207], qtmp);
+
+ /* mark end of mac regs */
+ for (i = 208; i < 214; i++)
+ regs[i] = 0xaaaaaaaa;
+}
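hns_xgmac_cpy_q() stores every 64-bit MIB counter as two consecutive u32 words, low word first, starting at index 73 of the dump. A consumer of such a dump could reassemble a counter as sketched below (the layout is read off the code above, the sample values are made up):

#include <stdint.h>
#include <stdio.h>

/* rebuild a 64-bit counter from two consecutive u32 dump words, low word first */
static uint64_t xgmac_dump_read_q(const uint32_t *regs, unsigned int idx)
{
	return (uint64_t)regs[idx] | ((uint64_t)regs[idx + 1] << 32);
}

int main(void)
{
	uint32_t regs[214] = { 0 };	/* ETH_XGMAC_DUMP_NUM words */

	/* pretend the XGMAC_TX_PKTS_FRAGMENT pair (regs[73]/regs[74]) was filled */
	regs[73] = 0x89abcdef;
	regs[74] = 0x00000001;

	printf("tx fragment counter = 0x%llx\n",
	       (unsigned long long)xgmac_dump_read_q(regs, 73));
	return 0;
}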
+
+/**
+ *hns_xgmac_get_stats - get xgmac statistic
+ *@mac_drv: mac driver
+ *@data: buffer that receives the statistic values
+ */
+static void hns_xgmac_get_stats(void *mac_drv, u64 *data)
+{
+ u32 i;
+ u64 *buf = data;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct mac_hw_stats *hw_stats = NULL;
+
+ hw_stats = &drv->mac_cb->hw_stats;
+
+ for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++) {
+ buf[i] = DSAF_STATS_READ(hw_stats,
+ g_xgmac_stats_string[i].offset);
+ }
+}
+
+/**
+ *hns_xgmac_get_strings - get xgmac strings name
+ *@stringset: type of values in data
+ *@data: buffer that receives the statistic names
+ */
+static void hns_xgmac_get_strings(u32 stringset, u8 *data)
+{
+ char *buff = (char *)data;
+ u32 i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++) {
+ snprintf(buff, ETH_GSTRING_LEN, "%s", g_xgmac_stats_string[i].desc);
+ buff = buff + ETH_GSTRING_LEN;
+ }
+}
+
+/**
+ *hns_xgmac_get_sset_count - get xgmac string set count
+ *@stringset: type of values in data
+ *return xgmac string set count
+ */
+static int hns_xgmac_get_sset_count(int stringset)
+{
+ if (stringset == ETH_SS_STATS)
+ return ARRAY_SIZE(g_xgmac_stats_string);
+
+ return 0;
+}
+
+/**
+ *hns_xgmac_get_regs_count - get xgmac regs count
+ *return xgmac regs count
+ */
+static int hns_xgmac_get_regs_count(void)
+{
+ return ETH_XGMAC_DUMP_NUM;
+}
+
+void *hns_xgmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
+{
+ struct mac_driver *mac_drv;
+
+ mac_drv = devm_kzalloc(mac_cb->dev, sizeof(*mac_drv), GFP_KERNEL);
+ if (!mac_drv)
+ return NULL;
+
+ mac_drv->mac_init = hns_xgmac_init;
+ mac_drv->mac_enable = hns_xgmac_enable;
+ mac_drv->mac_disable = hns_xgmac_disable;
+
+ mac_drv->mac_id = mac_param->mac_id;
+ mac_drv->mac_mode = mac_param->mac_mode;
+ mac_drv->io_base = mac_param->vaddr;
+ mac_drv->dev = mac_param->dev;
+ mac_drv->mac_cb = mac_cb;
+
+ mac_drv->set_mac_addr = hns_xgmac_set_pausefrm_mac_addr;
+ mac_drv->set_an_mode = NULL;
+ mac_drv->config_loopback = NULL;
+ mac_drv->config_pad_and_crc = hns_xgmac_config_pad_and_crc;
+ mac_drv->config_half_duplex = NULL;
+ mac_drv->set_rx_ignore_pause_frames =
+ hns_xgmac_set_rx_ignore_pause_frames;
+ mac_drv->mac_get_id = hns_xgmac_get_id;
+ mac_drv->mac_free = hns_xgmac_free;
+ mac_drv->adjust_link = NULL;
+ mac_drv->set_tx_auto_pause_frames = hns_xgmac_set_tx_auto_pause_frames;
+ mac_drv->config_max_frame_length = hns_xgmac_config_max_frame_length;
+ mac_drv->mac_pausefrm_cfg = hns_xgmac_pausefrm_cfg;
+ mac_drv->autoneg_stat = NULL;
+ mac_drv->get_info = hns_xgmac_get_info;
+ mac_drv->get_pause_enable = hns_xgmac_get_pausefrm_cfg;
+ mac_drv->get_link_status = hns_xgmac_get_link_status;
+ mac_drv->get_regs = hns_xgmac_get_regs;
+ mac_drv->get_ethtool_stats = hns_xgmac_get_stats;
+ mac_drv->get_sset_count = hns_xgmac_get_sset_count;
+ mac_drv->get_regs_count = hns_xgmac_get_regs_count;
+ mac_drv->get_strings = hns_xgmac_get_strings;
+ mac_drv->update_stats = hns_xgmac_update_stats;
+
+ return (void *)mac_drv;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h
new file mode 100644
index 000000000000..139f7297c7b4
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _HNS_XGMAC_H
+#define _HNS_XGMAC_H
+
+#define ETH_XGMAC_DUMP_NUM (214)
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
new file mode 100644
index 000000000000..08cef0dfb5db
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -0,0 +1,1642 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/cpumask.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+
+#include "hnae.h"
+#include "hns_enet.h"
+
+#define NIC_MAX_Q_PER_VF 16
+#define HNS_NIC_TX_TIMEOUT (5 * HZ)
+
+#define SERVICE_TIMER_HZ (1 * HZ)
+
+#define NIC_TX_CLEAN_MAX_NUM 256
+#define NIC_RX_CLEAN_MAX_NUM 64
+
+#define RCB_IRQ_NOT_INITED 0
+#define RCB_IRQ_INITED 1
+
+static void fill_desc(struct hnae_ring *ring, void *priv,
+ int size, dma_addr_t dma, int frag_end,
+ int buf_num, enum hns_desc_type type)
+{
+ struct hnae_desc *desc = &ring->desc[ring->next_to_use];
+ struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
+ struct sk_buff *skb;
+ __be16 protocol;
+ u32 ip_offset;
+ u32 asid_bufnum_pid = 0;
+ u32 flag_ipoffset = 0;
+
+ desc_cb->priv = priv;
+ desc_cb->length = size;
+ desc_cb->dma = dma;
+ desc_cb->type = type;
+
+ desc->addr = cpu_to_le64(dma);
+ desc->tx.send_size = cpu_to_le16((u16)size);
+
+ /* mark the buffer descriptor as valid */
+ flag_ipoffset |= 1 << HNS_TXD_VLD_B;
+
+ asid_bufnum_pid |= buf_num << HNS_TXD_BUFNUM_S;
+
+ if (type == DESC_TYPE_SKB) {
+ skb = (struct sk_buff *)priv;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ protocol = skb->protocol;
+ ip_offset = ETH_HLEN;
+
+ /* if it is a SW VLAN, check the next protocol */
+ if (protocol == htons(ETH_P_8021Q)) {
+ ip_offset += VLAN_HLEN;
+ protocol = vlan_get_protocol(skb);
+ skb->protocol = protocol;
+ }
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ flag_ipoffset |= 1 << HNS_TXD_L3CS_B;
+ /* check for tcp/udp header */
+ flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
+
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ /* IPv6 has no L3 checksum; only request the L4 checksum */
+ flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
+ }
+
+ flag_ipoffset |= ip_offset << HNS_TXD_IPOFFSET_S;
+ }
+ }
+
+ flag_ipoffset |= frag_end << HNS_TXD_FE_B;
+
+ desc->tx.asid_bufnum_pid = cpu_to_le16(asid_bufnum_pid);
+ desc->tx.flag_ipoffset = cpu_to_le32(flag_ipoffset);
+
+ ring_ptr_move_fw(ring, next_to_use);
+}
+
+static void unfill_desc(struct hnae_ring *ring)
+{
+ ring_ptr_move_bw(ring, next_to_use);
+}
+
+int hns_nic_net_xmit_hw(struct net_device *ndev,
+ struct sk_buff *skb,
+ struct hns_nic_ring_data *ring_data)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct device *dev = priv->dev;
+ struct hnae_ring *ring = ring_data->ring;
+ struct netdev_queue *dev_queue;
+ struct skb_frag_struct *frag;
+ int buf_num;
+ dma_addr_t dma;
+ int size, next_to_use;
+ int i, j;
+ struct sk_buff *new_skb;
+
+ assert(ring->max_desc_num_per_pkt <= ring->desc_num);
+
+ /* number of fragments plus the linear (header) part */
+ buf_num = skb_shinfo(skb)->nr_frags + 1;
+
+ if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
+ if (ring_space(ring) < 1) {
+ ring->stats.tx_busy++;
+ goto out_net_tx_busy;
+ }
+
+ new_skb = skb_copy(skb, GFP_ATOMIC);
+ if (!new_skb) {
+ ring->stats.sw_err_cnt++;
+ netdev_err(ndev, "no memory to xmit!\n");
+ goto out_err_tx_ok;
+ }
+
+ dev_kfree_skb_any(skb);
+ skb = new_skb;
+ buf_num = 1;
+ assert(skb_shinfo(skb)->nr_frags == 1);
+ } else if (buf_num > ring_space(ring)) {
+ ring->stats.tx_busy++;
+ goto out_net_tx_busy;
+ }
+ next_to_use = ring->next_to_use;
+
+ /* fill the first part */
+ size = skb_headlen(skb);
+ dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma)) {
+ netdev_err(ndev, "TX head DMA map failed\n");
+ ring->stats.sw_err_cnt++;
+ goto out_err_tx_ok;
+ }
+ fill_desc(ring, skb, size, dma, buf_num == 1 ? 1 : 0, buf_num,
+ DESC_TYPE_SKB);
+
+ /* fill the fragments */
+ for (i = 1; i < buf_num; i++) {
+ frag = &skb_shinfo(skb)->frags[i - 1];
+ size = skb_frag_size(frag);
+ dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma)) {
+ netdev_err(ndev, "TX frag(%d) DMA map failed\n", i);
+ ring->stats.sw_err_cnt++;
+ goto out_map_frag_fail;
+ }
+ fill_desc(ring, skb_frag_page(frag), size, dma,
+ buf_num - 1 == i ? 1 : 0, buf_num, DESC_TYPE_PAGE);
+ }
+
+ /* all descriptors filled; account the bytes and kick the hardware */
+ dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
+ netdev_tx_sent_queue(dev_queue, skb->len);
+
+ wmb(); /* commit all data before submit */
+ assert(skb->queue_mapping < priv->ae_handle->q_num);
+ hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
+ ring->stats.tx_pkts++;
+ ring->stats.tx_bytes += skb->len;
+
+ return NETDEV_TX_OK;
+
+out_map_frag_fail:
+
+ for (j = i - 1; j > 0; j--) {
+ unfill_desc(ring);
+ next_to_use = ring->next_to_use;
+ dma_unmap_page(dev, ring->desc_cb[next_to_use].dma,
+ ring->desc_cb[next_to_use].length,
+ DMA_TO_DEVICE);
+ }
+
+ unfill_desc(ring);
+ next_to_use = ring->next_to_use;
+ dma_unmap_single(dev, ring->desc_cb[next_to_use].dma,
+ ring->desc_cb[next_to_use].length, DMA_TO_DEVICE);
+
+out_err_tx_ok:
+
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+
+out_net_tx_busy:
+
+ netif_stop_subqueue(ndev, skb->queue_mapping);
+
+ /* Herbert's original patch had:
+ * smp_mb__after_netif_stop_queue();
+ * but since that doesn't exist yet, just open code it.
+ */
+ smp_mb();
+ return NETDEV_TX_BUSY;
+}
+
+/**
+ * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
+ * @data: pointer to the start of the headers
+ * @flag: RX descriptor flag word describing the parsed protocols
+ * @max_size: total length of section to find headers in
+ *
+ * This function is meant to determine the length of headers that will
+ * be recognized by hardware for LRO, GRO, and RSC offloads. The main
+ * motivation of doing this is to only perform one pull for IPv4 TCP
+ * packets so that we can do basic things like calculating the gso_size
+ * based on the average data per packet.
+ **/
+static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag,
+ unsigned int max_size)
+{
+ unsigned char *network;
+ u8 hlen;
+
+ /* this should never happen, but better safe than sorry */
+ if (max_size < ETH_HLEN)
+ return max_size;
+
+ /* initialize network frame pointer */
+ network = data;
+
+ /* set first protocol and move network header forward */
+ network += ETH_HLEN;
+
+ /* handle any vlan tag if present */
+ if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S)
+ == HNS_RX_FLAG_VLAN_PRESENT) {
+ if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
+ return max_size;
+
+ network += VLAN_HLEN;
+ }
+
+ /* handle L3 protocols */
+ if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
+ == HNS_RX_FLAG_L3ID_IPV4) {
+ if ((typeof(max_size))(network - data) >
+ (max_size - sizeof(struct iphdr)))
+ return max_size;
+
+ /* access ihl as a u8 to avoid unaligned access on ia64 */
+ hlen = (network[0] & 0x0F) << 2;
+
+ /* verify hlen meets minimum size requirements */
+ if (hlen < sizeof(struct iphdr))
+ return network - data;
+
+ /* record next protocol if header is present */
+ } else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
+ == HNS_RX_FLAG_L3ID_IPV6) {
+ if ((typeof(max_size))(network - data) >
+ (max_size - sizeof(struct ipv6hdr)))
+ return max_size;
+
+ /* record next protocol */
+ hlen = sizeof(struct ipv6hdr);
+ } else {
+ return network - data;
+ }
+
+ /* relocate pointer to start of L4 header */
+ network += hlen;
+
+ /* finally sort out TCP/UDP */
+ if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
+ == HNS_RX_FLAG_L4ID_TCP) {
+ if ((typeof(max_size))(network - data) >
+ (max_size - sizeof(struct tcphdr)))
+ return max_size;
+
+ /* access doff as a u8 to avoid unaligned access on ia64 */
+ hlen = (network[12] & 0xF0) >> 2;
+
+ /* verify hlen meets minimum size requirements */
+ if (hlen < sizeof(struct tcphdr))
+ return network - data;
+
+ network += hlen;
+ } else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
+ == HNS_RX_FLAG_L4ID_UDP) {
+ if ((typeof(max_size))(network - data) >
+ (max_size - sizeof(struct udphdr)))
+ return max_size;
+
+ network += sizeof(struct udphdr);
+ }
+
+ /* If everything has gone correctly network should be the
+ * data section of the packet and will be the end of the header.
+ * If not then it probably represents the end of the last recognized
+ * header.
+ */
+ if ((typeof(max_size))(network - data) < max_size)
+ return network - data;
+ else
+ return max_size;
+}
+
+static void
+hns_nic_reuse_page(struct hnae_desc_cb *desc_cb, int tsize, int last_offset)
+{
+ /* avoid reusing pages from a remote NUMA node; the reuse flag defaults to off */
+ if (likely(page_to_nid(desc_cb->priv) == numa_node_id())) {
+ /* move offset up to the next cache line */
+ desc_cb->page_offset += tsize;
+
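+ /* reuse the page only while another buffer still fits within it */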
+ if (desc_cb->page_offset <= last_offset) {
+ desc_cb->reuse_flag = 1;
+ /* bump ref count on page before it is given*/
+ get_page(desc_cb->priv);
+ }
+ }
+}
+
+static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
+ struct sk_buff **out_skb, int *out_bnum)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ struct net_device *ndev = ring_data->napi.dev;
+ struct sk_buff *skb;
+ struct hnae_desc *desc;
+ struct hnae_desc_cb *desc_cb;
+ unsigned char *va;
+ int bnum, length, size, i, truesize, last_offset;
+ int pull_len;
+ u32 bnum_flag;
+
+ last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
+ desc = &ring->desc[ring->next_to_clean];
+ desc_cb = &ring->desc_cb[ring->next_to_clean];
+ length = le16_to_cpu(desc->rx.pkt_len);
+ bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
+ bnum = hnae_get_field(bnum_flag, HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
+ *out_bnum = bnum;
+ va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
+
+ skb = *out_skb = napi_alloc_skb(&ring_data->napi, HNS_RX_HEAD_SIZE);
+ if (unlikely(!skb)) {
+ netdev_err(ndev, "alloc rx skb fail\n");
+ ring->stats.sw_err_cnt++;
+ return -ENOMEM;
+ }
+
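+ /* short packets are copied into the skb head; longer ones keep only the
+ * parsed headers in the head and attach the rest as page fragments
+ */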
+ if (length <= HNS_RX_HEAD_SIZE) {
+ memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
+
+ /* we can reuse buffer as-is, just make sure it is local */
+ if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
+ desc_cb->reuse_flag = 1;
+ else /* this page cannot be reused so discard it */
+ put_page(desc_cb->priv);
+
+ ring_ptr_move_fw(ring, next_to_clean);
+
+ if (unlikely(bnum != 1)) { /* check err*/
+ *out_bnum = 1;
+ goto out_bnum_err;
+ }
+ } else {
+ ring->stats.seg_pkt_cnt++;
+
+ pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE);
+ memcpy(__skb_put(skb, pull_len), va,
+ ALIGN(pull_len, sizeof(long)));
+
+ size = le16_to_cpu(desc->rx.size);
+ truesize = ALIGN(size, L1_CACHE_BYTES);
+ skb_add_rx_frag(skb, 0, desc_cb->priv,
+ desc_cb->page_offset + pull_len,
+ size - pull_len, truesize - pull_len);
+
+ hns_nic_reuse_page(desc_cb, truesize, last_offset);
+ ring_ptr_move_fw(ring, next_to_clean);
+
+ if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check err*/
+ *out_bnum = 1;
+ goto out_bnum_err;
+ }
+ for (i = 1; i < bnum; i++) {
+ desc = &ring->desc[ring->next_to_clean];
+ desc_cb = &ring->desc_cb[ring->next_to_clean];
+ size = le16_to_cpu(desc->rx.size);
+ truesize = ALIGN(size, L1_CACHE_BYTES);
+ skb_add_rx_frag(skb, i, desc_cb->priv,
+ desc_cb->page_offset,
+ size, truesize);
+
+ hns_nic_reuse_page(desc_cb, truesize, last_offset);
+ ring_ptr_move_fw(ring, next_to_clean);
+ }
+ }
+
+ /* exception handling: free the skb and skip over the descriptors */
+ if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) {
+out_bnum_err:
+ *out_bnum = *out_bnum ? *out_bnum : 1; /* next_to_clean moved, so this cannot be 0 */
+ netdev_err(ndev, "invalid bnum(%d,%d,%d,%d),%016llx,%016llx\n",
+ bnum, ring->max_desc_num_per_pkt,
+ length, (int)MAX_SKB_FRAGS,
+ ((u64 *)desc)[0], ((u64 *)desc)[1]);
+ ring->stats.err_bd_num++;
+ dev_kfree_skb_any(skb);
+ return -EDOM;
+ }
+
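+ /* re-read the status flags from the last processed descriptor and
+ * validate the packet before accepting it
+ */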
+ bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
+
+ if (unlikely(!hnae_get_bit(bnum_flag, HNS_RXD_VLD_B))) {
+ netdev_err(ndev, "no valid bd,%016llx,%016llx\n",
+ ((u64 *)desc)[0], ((u64 *)desc)[1]);
+ ring->stats.non_vld_descs++;
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+
+ if (unlikely((!desc->rx.pkt_len) ||
+ hnae_get_bit(bnum_flag, HNS_RXD_DROP_B))) {
+ ring->stats.err_pkt_len++;
+ dev_kfree_skb_any(skb);
+ return -EFAULT;
+ }
+
+ if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L2E_B))) {
+ ring->stats.l2_err++;
+ dev_kfree_skb_any(skb);
+ return -EFAULT;
+ }
+
+ ring->stats.rx_pkts++;
+ ring->stats.rx_bytes += skb->len;
+
+ if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L3E_B) ||
+ hnae_get_bit(bnum_flag, HNS_RXD_L4E_B))) {
+ ring->stats.l3l4_csum_err++;
+ return 0;
+ }
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ return 0;
+}
+
+static void
+hns_nic_alloc_rx_buffers(struct hns_nic_ring_data *ring_data, int cleand_count)
+{
+ int i, ret;
+ struct hnae_desc_cb res_cbs;
+ struct hnae_desc_cb *desc_cb;
+ struct hnae_ring *ring = ring_data->ring;
+ struct net_device *ndev = ring_data->napi.dev;
+
+ for (i = 0; i < cleand_count; i++) {
+ desc_cb = &ring->desc_cb[ring->next_to_use];
+ if (desc_cb->reuse_flag) {
+ ring->stats.reuse_pg_cnt++;
+ hnae_reuse_buffer(ring, ring->next_to_use);
+ } else {
+ ret = hnae_reserve_buffer_map(ring, &res_cbs);
+ if (ret) {
+ ring->stats.sw_err_cnt++;
+ netdev_err(ndev, "hnae reserve buffer map failed.\n");
+ break;
+ }
+ hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
+ }
+
+ ring_ptr_move_fw(ring, next_to_use);
+ }
+
+ wmb(); /* ensure all buffer writes complete before notifying hardware */
+ writel_relaxed(i, ring->io_base + RCB_REG_HEAD);
+}
+
+/* pass one received skb up to the network stack */
+static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
+ struct sk_buff *skb)
+{
+ struct net_device *ndev = ring_data->napi.dev;
+
+ skb->protocol = eth_type_trans(skb, ndev);
+ (void)napi_gro_receive(&ring_data->napi, skb);
+ ndev->last_rx = jiffies;
+}
+
+static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
+ int budget, void *v)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ struct sk_buff *skb;
+ int num, bnum, ex_num;
+#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
+ int recv_pkts, recv_bds, clean_count, err;
+
+ num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
+ rmb(); /* make sure num taken effect before the other data is touched */
+
+ recv_pkts = 0, recv_bds = 0, clean_count = 0;
+recv:
+ while (recv_pkts < budget && recv_bds < num) {
+ /* reuse or realloc buffers*/
+ if (clean_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
+ hns_nic_alloc_rx_buffers(ring_data, clean_count);
+ clean_count = 0;
+ }
+
+ /* poll one packet */
+ err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
+ if (unlikely(!skb)) /* this fault cannot be repaired */
+ break;
+
+ recv_bds += bnum;
+ clean_count += bnum;
+ if (unlikely(err)) { /* skip the errored packet */
+ recv_pkts++;
+ continue;
+ }
+
+ /* hand the packet to the IP stack */
+ ((void (*)(struct hns_nic_ring_data *, struct sk_buff *))v)(
+ ring_data, skb);
+ recv_pkts++;
+ }
+
+ /* refill any buffers still owed to the ring */
+ if (clean_count > 0) {
+ hns_nic_alloc_rx_buffers(ring_data, clean_count);
+ clean_count = 0;
+ }
+
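+ /* check for packets that arrived while the ring was being processed */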
+ if (recv_pkts < budget) {
+ ex_num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
+ rmb(); /*complete read rx ring bd number*/
+ if (ex_num > 0) {
+ num += ex_num;
+ goto recv;
+ }
+ }
+
+ return recv_pkts;
+}
+
+static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ int num = 0;
+
+ /* re-read the BD count to work around a hardware issue */
+ num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
+
+ if (num > 0) {
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+ ring_data->ring, 1);
+
+ napi_schedule(&ring_data->napi);
+ }
+}
+
+static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
+ int *bytes, int *pkts)
+{
+ struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
+
+ (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
+ (*bytes) += desc_cb->length;
+ /* desc_cb is no longer valid after hnae_free_buffer_detach() */
+ hnae_free_buffer_detach(ring, ring->next_to_clean);
+
+ ring_ptr_move_fw(ring, next_to_clean);
+}
+
+static int is_valid_clean_head(struct hnae_ring *ring, int h)
+{
+ int u = ring->next_to_use;
+ int c = ring->next_to_clean;
+
+ if (unlikely(h > ring->desc_num))
+ return 0;
+
+ assert(u > 0 && u < ring->desc_num);
+ assert(c > 0 && c < ring->desc_num);
+ assert(u != c && h != c); /* must be checked before calling this function */
+
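+ /* head is valid if it lies between next_to_clean and next_to_use,
+ * taking ring wrap-around into account
+ */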
+ return u > c ? (h > c && h <= u) : (h > c || h <= u);
+}
+
+/* netif_tx_lock degrades performance; take it only when necessary */
+#ifdef CONFIG_NET_POLL_CONTROLLER
+#define NETIF_TX_LOCK(ndev) netif_tx_lock(ndev)
+#define NETIF_TX_UNLOCK(ndev) netif_tx_unlock(ndev)
+#else
+#define NETIF_TX_LOCK(ndev)
+#define NETIF_TX_UNLOCK(ndev)
+#endif
+/* reclaim all desc in one budget
+ * return error or number of desc left
+ */
+static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
+ int budget, void *v)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ struct net_device *ndev = ring_data->napi.dev;
+ struct netdev_queue *dev_queue;
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ int head;
+ int bytes, pkts;
+
+ NETIF_TX_LOCK(ndev);
+
+ head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+ rmb(); /* make sure head is ready before touch any data */
+
+ if (is_ring_empty(ring) || head == ring->next_to_clean) {
+ NETIF_TX_UNLOCK(ndev);
+ return 0; /* no data to poll */
+ }
+
+ if (!is_valid_clean_head(ring, head)) {
+ netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
+ ring->next_to_use, ring->next_to_clean);
+ ring->stats.io_err_cnt++;
+ NETIF_TX_UNLOCK(ndev);
+ return -EIO;
+ }
+
+ bytes = 0;
+ pkts = 0;
+ while (head != ring->next_to_clean)
+ hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
+
+ NETIF_TX_UNLOCK(ndev);
+
+ dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
+ netdev_tx_completed_queue(dev_queue, pkts, bytes);
+
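+ /* wake the queue only when there is room for at least two maximally
+ * fragmented packets
+ */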
+ if (unlikely(pkts && netif_carrier_ok(ndev) &&
+ (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+ if (netif_tx_queue_stopped(dev_queue) &&
+ !test_bit(NIC_STATE_DOWN, &priv->state)) {
+ netif_tx_wake_queue(dev_queue);
+ ring->stats.restart_queue++;
+ }
+ }
+ return 0;
+}
+
+static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ int head = ring->next_to_clean;
+
+ /* re-read the head pointer to work around a hardware issue */
+ head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+
+ if (head != ring->next_to_clean) {
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+ ring_data->ring, 1);
+
+ napi_schedule(&ring_data->napi);
+ }
+}
+
+static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ struct net_device *ndev = ring_data->napi.dev;
+ struct netdev_queue *dev_queue;
+ int head;
+ int bytes, pkts;
+
+ NETIF_TX_LOCK(ndev);
+
+ head = ring->next_to_use; /* ntu: ring position set by software */
+ bytes = 0;
+ pkts = 0;
+ while (head != ring->next_to_clean)
+ hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
+
+ NETIF_TX_UNLOCK(ndev);
+
+ dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
+ netdev_tx_reset_queue(dev_queue);
+}
+
+static int hns_nic_common_poll(struct napi_struct *napi, int budget)
+{
+ struct hns_nic_ring_data *ring_data =
+ container_of(napi, struct hns_nic_ring_data, napi);
+ int clean_complete = ring_data->poll_one(
+ ring_data, budget, ring_data->ex_process);
+
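+ /* if the budget was not exhausted the ring is drained: finish NAPI,
+ * re-enable the ring interrupt and run the fini check for late arrivals
+ */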
+ if (clean_complete >= 0 && clean_complete < budget) {
+ napi_complete(napi);
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+ ring_data->ring, 0);
+
+ ring_data->fini_process(ring_data);
+ }
+
+ return clean_complete;
+}
+
+static irqreturn_t hns_irq_handle(int irq, void *dev)
+{
+ struct hns_nic_ring_data *ring_data = (struct hns_nic_ring_data *)dev;
+
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+ ring_data->ring, 1);
+ napi_schedule(&ring_data->napi);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ *hns_nic_adjust_link - adjust the link mode according to the PHY state or new parameters
+ *@ndev: net device
+ */
+static void hns_nic_adjust_link(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+
+ h->dev->ops->adjust_link(h, ndev->phydev->speed, ndev->phydev->duplex);
+}
+
+/**
+ *hns_nic_init_phy - init phy
+ *@ndev: net device
+ *@h: ae handle
+ * Return 0 on success, negative on failure
+ */
+int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct phy_device *phy_dev = NULL;
+
+ if (!h->phy_node)
+ return 0;
+
+ if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
+ phy_dev = of_phy_connect(ndev, h->phy_node,
+ hns_nic_adjust_link, 0, h->phy_if);
+ else
+ phy_dev = of_phy_attach(ndev, h->phy_node, 0, h->phy_if);
+
+ if (unlikely(!phy_dev) || IS_ERR(phy_dev))
+ return !phy_dev ? -ENODEV : PTR_ERR(phy_dev);
+
+ phy_dev->supported &= h->if_support;
+ phy_dev->advertising = phy_dev->supported;
+
+ if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
+ phy_dev->autoneg = false;
+
+ priv->phy = phy_dev;
+
+ return 0;
+}
+
+static int hns_nic_ring_open(struct net_device *netdev, int idx)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+
+ napi_enable(&priv->ring_data[idx].napi);
+
+ enable_irq(priv->ring_data[idx].ring->irq);
+ h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);
+
+ return 0;
+}
+
+static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ struct sockaddr *mac_addr = p;
+ int ret;
+
+ if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data);
+ if (ret) {
+ netdev_err(ndev, "set_mac_address fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ memcpy(ndev->dev_addr, mac_addr->sa_data, ndev->addr_len);
+
+ return 0;
+}
+
+void hns_nic_update_stats(struct net_device *netdev)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+
+ h->dev->ops->update_stats(h, &netdev->stats);
+}
+
+/* set the MAC address if it is configured, otherwise leave it to the AE driver */
+static void hns_init_mac_addr(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct device_node *node = priv->dev->of_node;
+ const void *mac_addr_temp;
+
+ mac_addr_temp = of_get_mac_address(node);
+ if (mac_addr_temp && is_valid_ether_addr(mac_addr_temp)) {
+ memcpy(ndev->dev_addr, mac_addr_temp, ndev->addr_len);
+ } else {
+ eth_hw_addr_random(ndev);
+ dev_warn(priv->dev, "No valid mac, use random mac %pM",
+ ndev->dev_addr);
+ }
+}
+
+static void hns_nic_ring_close(struct net_device *netdev, int idx)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+
+ h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1);
+ disable_irq(priv->ring_data[idx].ring->irq);
+
+ napi_disable(&priv->ring_data[idx].napi);
+}
+
+static int hns_nic_init_irq(struct hns_nic_priv *priv)
+{
+ struct hnae_handle *h = priv->ae_handle;
+ struct hns_nic_ring_data *rd;
+ int i;
+ int ret;
+ int cpu;
+ cpumask_t mask;
+
+ for (i = 0; i < h->q_num * 2; i++) {
+ rd = &priv->ring_data[i];
+
+ if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
+ break;
+
+ snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
+ "%s-%s%d", priv->netdev->name,
+ (i < h->q_num ? "tx" : "rx"), rd->queue_index);
+
+ rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';
+
+ ret = request_irq(rd->ring->irq,
+ hns_irq_handle, 0, rd->ring->ring_name, rd);
+ if (ret) {
+ netdev_err(priv->netdev, "request irq(%d) fail\n",
+ rd->ring->irq);
+ return ret;
+ }
+ disable_irq(rd->ring->irq);
+ rd->ring->irq_init_flag = RCB_IRQ_INITED;
+
+ /* set CPU affinity: bind each ring's interrupt to its own CPU */
+ if (cpu_online(rd->queue_index)) {
+ cpumask_clear(&mask);
+ cpu = rd->queue_index;
+ cpumask_set_cpu(cpu, &mask);
+ irq_set_affinity_hint(rd->ring->irq, &mask);
+ }
+ }
+
+ return 0;
+}
+
+static int hns_nic_net_up(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ int i, j, k;
+ int ret;
+
+ ret = hns_nic_init_irq(priv);
+ if (ret != 0) {
+ netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < h->q_num * 2; i++) {
+ ret = hns_nic_ring_open(ndev, i);
+ if (ret)
+ goto out_has_some_queues;
+ }
+
+ for (k = 0; k < h->q_num; k++)
+ h->dev->ops->toggle_queue_status(h->qs[k], 1);
+
+ ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
+ if (ret)
+ goto out_set_mac_addr_err;
+
+ ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
+ if (ret)
+ goto out_start_err;
+
+ if (priv->phy)
+ phy_start(priv->phy);
+
+ clear_bit(NIC_STATE_DOWN, &priv->state);
+ (void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
+
+ return 0;
+
+out_start_err:
+ netif_stop_queue(ndev);
+out_set_mac_addr_err:
+ for (k = 0; k < h->q_num; k++)
+ h->dev->ops->toggle_queue_status(h->qs[k], 0);
+out_has_some_queues:
+ for (j = i - 1; j >= 0; j--)
+ hns_nic_ring_close(ndev, j);
+
+ set_bit(NIC_STATE_DOWN, &priv->state);
+
+ return ret;
+}
+
+static void hns_nic_net_down(struct net_device *ndev)
+{
+ int i;
+ struct hnae_ae_ops *ops;
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+
+ if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
+ return;
+
+ (void)del_timer_sync(&priv->service_timer);
+ netif_tx_stop_all_queues(ndev);
+ netif_carrier_off(ndev);
+ netif_tx_disable(ndev);
+ priv->link = 0;
+
+ if (priv->phy)
+ phy_stop(priv->phy);
+
+ ops = priv->ae_handle->dev->ops;
+
+ if (ops->stop)
+ ops->stop(priv->ae_handle);
+
+ netif_tx_stop_all_queues(ndev);
+
+ for (i = priv->ae_handle->q_num - 1; i >= 0; i--) {
+ hns_nic_ring_close(ndev, i);
+ hns_nic_ring_close(ndev, i + priv->ae_handle->q_num);
+
+ /* clean tx buffers*/
+ hns_nic_tx_clr_all_bufs(priv->ring_data + i);
+ }
+}
+
+void hns_nic_net_reset(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *handle = priv->ae_handle;
+
+ while (test_and_set_bit(NIC_STATE_RESETTING, &priv->state))
+ usleep_range(1000, 2000);
+
+ (void)hnae_reinit_handle(handle);
+
+ clear_bit(NIC_STATE_RESETTING, &priv->state);
+}
+
+void hns_nic_net_reinit(struct net_device *netdev)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+
+ priv->netdev->trans_start = jiffies;
+ while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
+ usleep_range(1000, 2000);
+
+ hns_nic_net_down(netdev);
+ hns_nic_net_reset(netdev);
+ (void)hns_nic_net_up(netdev);
+ clear_bit(NIC_STATE_REINITING, &priv->state);
+}
+
+static int hns_nic_net_open(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ int ret;
+
+ if (test_bit(NIC_STATE_TESTING, &priv->state))
+ return -EBUSY;
+
+ priv->link = 0;
+ netif_carrier_off(ndev);
+
+ ret = netif_set_real_num_tx_queues(ndev, h->q_num);
+ if (ret < 0) {
+ netdev_err(ndev, "netif_set_real_num_tx_queues fail, ret=%d!\n",
+ ret);
+ return ret;
+ }
+
+ ret = netif_set_real_num_rx_queues(ndev, h->q_num);
+ if (ret < 0) {
+ netdev_err(ndev,
+ "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ ret = hns_nic_net_up(ndev);
+ if (ret) {
+ netdev_err(ndev,
+ "hns net up fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hns_nic_net_stop(struct net_device *ndev)
+{
+ hns_nic_net_down(ndev);
+
+ return 0;
+}
+
+static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
+static void hns_nic_net_timeout(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+
+ hns_tx_timeout_reset(priv);
+}
+
+static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
+ int cmd)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct phy_device *phy_dev = priv->phy;
+
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ if (!phy_dev)
+ return -ENOTSUPP;
+
+ return phy_mii_ioctl(phy_dev, ifr, cmd);
+}
+
+/* used only by netconsole to poll the device without interrupts */
+#ifdef CONFIG_NET_POLL_CONTROLLER
+void hns_nic_poll_controller(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ unsigned long flags;
+ int i;
+
+ local_irq_save(flags);
+ for (i = 0; i < priv->ae_handle->q_num * 2; i++)
+ napi_schedule(&priv->ring_data[i].napi);
+ local_irq_restore(flags);
+}
+#endif
+
+static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ assert(skb->queue_mapping < priv->ae_handle->q_num);
+ ret = hns_nic_net_xmit_hw(ndev, skb,
+ &tx_ring_data(priv, skb->queue_mapping));
+ if (ret == NETDEV_TX_OK) {
+ ndev->trans_start = jiffies;
+ ndev->stats.tx_bytes += skb->len;
+ ndev->stats.tx_packets++;
+ }
+ return (netdev_tx_t)ret;
+}
+
+static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ int ret;
+
+ /* MTU < 68 is an error and causes problems on some kernels */
+ if (new_mtu < 68)
+ return -EINVAL;
+
+ if (!h->dev->ops->set_mtu)
+ return -ENOTSUPP;
+
+ if (netif_running(ndev)) {
+ (void)hns_nic_net_stop(ndev);
+ msleep(100);
+
+ ret = h->dev->ops->set_mtu(h, new_mtu);
+ if (ret)
+ netdev_err(ndev, "set mtu fail, return value %d\n",
+ ret);
+
+ if (hns_nic_net_open(ndev))
+ netdev_err(ndev, "hns net open fail\n");
+ } else {
+ ret = h->dev->ops->set_mtu(h, new_mtu);
+ }
+
+ if (!ret)
+ ndev->mtu = new_mtu;
+
+ return ret;
+}
+
+/**
+ * hns_set_multicast_list - set multicast MAC addresses
+ * @ndev: net device
+ *
+ * return void
+ */
+void hns_set_multicast_list(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ struct netdev_hw_addr *ha = NULL;
+
+ if (!h) {
+ netdev_err(ndev, "hnae handle is null\n");
+ return;
+ }
+
+ if (h->dev->ops->set_mc_addr) {
+ netdev_for_each_mc_addr(ha, ndev)
+ if (h->dev->ops->set_mc_addr(h, ha->addr))
+ netdev_err(ndev, "set multicast fail\n");
+ }
+}
+
+void hns_nic_set_rx_mode(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+
+ if (h->dev->ops->set_promisc_mode) {
+ if (ndev->flags & IFF_PROMISC)
+ h->dev->ops->set_promisc_mode(h, 1);
+ else
+ h->dev->ops->set_promisc_mode(h, 0);
+ }
+
+ hns_set_multicast_list(ndev);
+}
+
+struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ int idx = 0;
+ u64 tx_bytes = 0;
+ u64 rx_bytes = 0;
+ u64 tx_pkts = 0;
+ u64 rx_pkts = 0;
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+
+ for (idx = 0; idx < h->q_num; idx++) {
+ tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;
+ tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;
+ rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;
+ rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;
+ }
+
+ stats->tx_bytes = tx_bytes;
+ stats->tx_packets = tx_pkts;
+ stats->rx_bytes = rx_bytes;
+ stats->rx_packets = rx_pkts;
+
+ stats->rx_errors = ndev->stats.rx_errors;
+ stats->multicast = ndev->stats.multicast;
+ stats->rx_length_errors = ndev->stats.rx_length_errors;
+ stats->rx_crc_errors = ndev->stats.rx_crc_errors;
+ stats->rx_missed_errors = ndev->stats.rx_missed_errors;
+
+ stats->tx_errors = ndev->stats.tx_errors;
+ stats->rx_dropped = ndev->stats.rx_dropped;
+ stats->tx_dropped = ndev->stats.tx_dropped;
+ stats->collisions = ndev->stats.collisions;
+ stats->rx_over_errors = ndev->stats.rx_over_errors;
+ stats->rx_frame_errors = ndev->stats.rx_frame_errors;
+ stats->rx_fifo_errors = ndev->stats.rx_fifo_errors;
+ stats->tx_aborted_errors = ndev->stats.tx_aborted_errors;
+ stats->tx_carrier_errors = ndev->stats.tx_carrier_errors;
+ stats->tx_fifo_errors = ndev->stats.tx_fifo_errors;
+ stats->tx_heartbeat_errors = ndev->stats.tx_heartbeat_errors;
+ stats->tx_window_errors = ndev->stats.tx_window_errors;
+ stats->rx_compressed = ndev->stats.rx_compressed;
+ stats->tx_compressed = ndev->stats.tx_compressed;
+
+ return stats;
+}
+
+static const struct net_device_ops hns_nic_netdev_ops = {
+ .ndo_open = hns_nic_net_open,
+ .ndo_stop = hns_nic_net_stop,
+ .ndo_start_xmit = hns_nic_net_xmit,
+ .ndo_tx_timeout = hns_nic_net_timeout,
+ .ndo_set_mac_address = hns_nic_net_set_mac_address,
+ .ndo_change_mtu = hns_nic_change_mtu,
+ .ndo_do_ioctl = hns_nic_do_ioctl,
+ .ndo_get_stats64 = hns_nic_get_stats64,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = hns_nic_poll_controller,
+#endif
+ .ndo_set_rx_mode = hns_nic_set_rx_mode,
+};
+
+static void hns_nic_update_link_status(struct net_device *netdev)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+
+ struct hnae_handle *h = priv->ae_handle;
+ int state = 1;
+
+ if (priv->phy) {
+ if (!genphy_update_link(priv->phy))
+ state = priv->phy->link;
+ else
+ state = 0;
+ }
+ state = state && h->dev->ops->get_status(h);
+
+ if (state != priv->link) {
+ if (state) {
+ netif_carrier_on(netdev);
+ netif_tx_wake_all_queues(netdev);
+ netdev_info(netdev, "link up\n");
+ } else {
+ netif_carrier_off(netdev);
+ netdev_info(netdev, "link down\n");
+ }
+ priv->link = state;
+ }
+}
+
+/* dump key registers */
+static void hns_nic_dump(struct hns_nic_priv *priv)
+{
+ struct hnae_handle *h = priv->ae_handle;
+ struct hnae_ae_ops *ops = h->dev->ops;
+ u32 *data, reg_num, i;
+
+ if (ops->get_regs_len && ops->get_regs) {
+ reg_num = ops->get_regs_len(priv->ae_handle);
+ reg_num = (reg_num + 3ul) & ~3ul;
+ data = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
+ if (data) {
+ ops->get_regs(priv->ae_handle, data);
+ for (i = 0; i < reg_num; i += 4)
+ pr_info("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i, data[i], data[i + 1],
+ data[i + 2], data[i + 3]);
+ kfree(data);
+ }
+ }
+
+ for (i = 0; i < h->q_num; i++) {
+ pr_info("tx_queue%d_next_to_clean:%d\n",
+ i, h->qs[i]->tx_ring.next_to_clean);
+ pr_info("tx_queue%d_next_to_use:%d\n",
+ i, h->qs[i]->tx_ring.next_to_use);
+ pr_info("rx_queue%d_next_to_clean:%d\n",
+ i, h->qs[i]->rx_ring.next_to_clean);
+ pr_info("rx_queue%d_next_to_use:%d\n",
+ i, h->qs[i]->rx_ring.next_to_use);
+ }
+}
+
+/* reset subtask */
+static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
+{
+ enum hnae_port_type type = priv->ae_handle->port_type;
+
+ if (!test_bit(NIC_STATE2_RESET_REQUESTED, &priv->state))
+ return;
+ clear_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
+
+ /* If we're already down, removing or resetting, just bail */
+ if (test_bit(NIC_STATE_DOWN, &priv->state) ||
+ test_bit(NIC_STATE_REMOVING, &priv->state) ||
+ test_bit(NIC_STATE_RESETTING, &priv->state))
+ return;
+
+ hns_nic_dump(priv);
+ netdev_info(priv->netdev, "Reset %s port\n",
+ (type == HNAE_PORT_DEBUG ? "debug" : "business"));
+
+ rtnl_lock();
+ /* put off any impending NetWatchDogTimeout */
+ priv->netdev->trans_start = jiffies;
+
+ if (type == HNAE_PORT_DEBUG)
+ hns_nic_net_reinit(priv->netdev);
+ rtnl_unlock();
+}
+
+/* mark the service task as complete */
+static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
+{
+ assert(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));
+
+ smp_mb__before_atomic();
+ clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
+}
+
+static void hns_nic_service_task(struct work_struct *work)
+{
+ struct hns_nic_priv *priv
+ = container_of(work, struct hns_nic_priv, service_task);
+ struct hnae_handle *h = priv->ae_handle;
+
+ hns_nic_update_link_status(priv->netdev);
+ h->dev->ops->update_led_status(h);
+ hns_nic_update_stats(priv->netdev);
+
+ hns_nic_reset_subtask(priv);
+ hns_nic_service_event_complete(priv);
+}
+
+static void hns_nic_task_schedule(struct hns_nic_priv *priv)
+{
+ if (!test_bit(NIC_STATE_DOWN, &priv->state) &&
+ !test_bit(NIC_STATE_REMOVING, &priv->state) &&
+ !test_and_set_bit(NIC_STATE_SERVICE_SCHED, &priv->state))
+ (void)schedule_work(&priv->service_task);
+}
+
+static void hns_nic_service_timer(unsigned long data)
+{
+ struct hns_nic_priv *priv = (struct hns_nic_priv *)data;
+
+ (void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
+
+ hns_nic_task_schedule(priv);
+}
+
+/**
+ * hns_tx_timeout_reset - initiate reset due to Tx timeout
+ * @priv: driver private struct
+ **/
+static void hns_tx_timeout_reset(struct hns_nic_priv *priv)
+{
+ /* Do the reset outside of interrupt context */
+ if (!test_bit(NIC_STATE_DOWN, &priv->state)) {
+ set_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
+ netdev_warn(priv->netdev,
+ "initiating reset due to tx timeout(%llu,0x%lx)\n",
+ priv->tx_timeout_count, priv->state);
+ priv->tx_timeout_count++;
+ hns_nic_task_schedule(priv);
+ }
+}
+
+static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
+{
+ struct hnae_handle *h = priv->ae_handle;
+ struct hns_nic_ring_data *rd;
+ int i;
+
+ if (h->q_num > NIC_MAX_Q_PER_VF) {
+ netdev_err(priv->netdev, "too many queues (%d)\n", h->q_num);
+ return -EINVAL;
+ }
+
+ priv->ring_data = kzalloc(h->q_num * sizeof(*priv->ring_data) * 2,
+ GFP_KERNEL);
+ if (!priv->ring_data)
+ return -ENOMEM;
+
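+ /* the first q_num entries serve the TX rings, the next q_num the RX rings */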
+ for (i = 0; i < h->q_num; i++) {
+ rd = &priv->ring_data[i];
+ rd->queue_index = i;
+ rd->ring = &h->qs[i]->tx_ring;
+ rd->poll_one = hns_nic_tx_poll_one;
+ rd->fini_process = hns_nic_tx_fini_pro;
+
+ netif_napi_add(priv->netdev, &rd->napi,
+ hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
+ rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
+ }
+ for (i = h->q_num; i < h->q_num * 2; i++) {
+ rd = &priv->ring_data[i];
+ rd->queue_index = i - h->q_num;
+ rd->ring = &h->qs[i - h->q_num]->rx_ring;
+ rd->poll_one = hns_nic_rx_poll_one;
+ rd->ex_process = hns_nic_rx_up_pro;
+ rd->fini_process = hns_nic_rx_fini_pro;
+
+ netif_napi_add(priv->netdev, &rd->napi,
+ hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
+ rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
+ }
+
+ return 0;
+}
+
+static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv)
+{
+ struct hnae_handle *h = priv->ae_handle;
+ int i;
+
+ for (i = 0; i < h->q_num * 2; i++) {
+ netif_napi_del(&priv->ring_data[i].napi);
+ if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
+ irq_set_affinity_hint(priv->ring_data[i].ring->irq,
+ NULL);
+ free_irq(priv->ring_data[i].ring->irq,
+ &priv->ring_data[i]);
+ }
+
+ priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED;
+ }
+ kfree(priv->ring_data);
+}
+
+static int hns_nic_try_get_ae(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h;
+ int ret;
+
+ h = hnae_get_handle(&priv->netdev->dev,
+ priv->ae_name, priv->port_id, NULL);
+ if (IS_ERR_OR_NULL(h)) {
+ ret = PTR_ERR(h);
+ dev_dbg(priv->dev, "no handle yet, will register notifier!\n");
+ goto out;
+ }
+ priv->ae_handle = h;
+
+ ret = hns_nic_init_phy(ndev, h);
+ if (ret) {
+ dev_err(priv->dev, "probe phy device fail!\n");
+ goto out_init_phy;
+ }
+
+ ret = hns_nic_init_ring_data(priv);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_init_ring_data;
+ }
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(priv->dev, "probe register netdev fail!\n");
+ goto out_reg_ndev_fail;
+ }
+ return 0;
+
+out_reg_ndev_fail:
+ hns_nic_uninit_ring_data(priv);
+ priv->ring_data = NULL;
+out_init_phy:
+out_init_ring_data:
+ hnae_put_handle(priv->ae_handle);
+ priv->ae_handle = NULL;
+out:
+ return ret;
+}
+
+static int hns_nic_notifier_action(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct hns_nic_priv *priv =
+ container_of(nb, struct hns_nic_priv, notifier_block);
+
+ assert(action == HNAE_AE_REGISTER);
+
+ if (!hns_nic_try_get_ae(priv->netdev)) {
+ hnae_unregister_notifier(&priv->notifier_block);
+ priv->notifier_block.notifier_call = NULL;
+ }
+ return 0;
+}
+
+static int hns_nic_dev_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct net_device *ndev;
+ struct hns_nic_priv *priv;
+ struct device_node *node = dev->of_node;
+ int ret;
+
+ ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF);
+ if (!ndev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ndev);
+
+ priv = netdev_priv(ndev);
+ priv->dev = dev;
+ priv->netdev = ndev;
+
+ if (of_device_is_compatible(node, "hisilicon,hns-nic-v2"))
+ priv->enet_ver = AE_VERSION_2;
+ else
+ priv->enet_ver = AE_VERSION_1;
+
+ ret = of_property_read_string(node, "ae-name", &priv->ae_name);
+ if (ret)
+ goto out_read_string_fail;
+
+ ret = of_property_read_u32(node, "port-id", &priv->port_id);
+ if (ret)
+ goto out_read_string_fail;
+
+ hns_init_mac_addr(ndev);
+
+ ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+ ndev->netdev_ops = &hns_nic_netdev_ops;
+ hns_ethtool_set_ops(ndev);
+ ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
+ NETIF_F_GRO;
+ ndev->vlan_features |=
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
+ ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
+
+ SET_NETDEV_DEV(ndev, dev);
+
+ if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
+ dev_dbg(dev, "set mask to 64bit\n");
+ else
+ dev_err(dev, "set mask to 32bit fail!\n");
+
+ /* carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(ndev);
+
+ setup_timer(&priv->service_timer, hns_nic_service_timer,
+ (unsigned long)priv);
+ INIT_WORK(&priv->service_task, hns_nic_service_task);
+
+ set_bit(NIC_STATE_SERVICE_INITED, &priv->state);
+ clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
+ set_bit(NIC_STATE_DOWN, &priv->state);
+
+ if (hns_nic_try_get_ae(priv->netdev)) {
+ priv->notifier_block.notifier_call = hns_nic_notifier_action;
+ ret = hnae_register_notifier(&priv->notifier_block);
+ if (ret) {
+ dev_err(dev, "register notifier fail!\n");
+ goto out_notify_fail;
+ }
+ dev_dbg(dev, "no handle yet, notifier registered!\n");
+ }
+
+ return 0;
+
+out_notify_fail:
+ (void)cancel_work_sync(&priv->service_task);
+out_read_string_fail:
+ free_netdev(ndev);
+ return ret;
+}
+
+static int hns_nic_dev_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+
+ if (ndev->reg_state != NETREG_UNINITIALIZED)
+ unregister_netdev(ndev);
+
+ if (priv->ring_data)
+ hns_nic_uninit_ring_data(priv);
+ priv->ring_data = NULL;
+
+ if (priv->phy)
+ phy_disconnect(priv->phy);
+ priv->phy = NULL;
+
+ if (!IS_ERR_OR_NULL(priv->ae_handle))
+ hnae_put_handle(priv->ae_handle);
+ priv->ae_handle = NULL;
+ if (priv->notifier_block.notifier_call)
+ hnae_unregister_notifier(&priv->notifier_block);
+ priv->notifier_block.notifier_call = NULL;
+
+ set_bit(NIC_STATE_REMOVING, &priv->state);
+ (void)cancel_work_sync(&priv->service_task);
+
+ free_netdev(ndev);
+ return 0;
+}
+
+static const struct of_device_id hns_enet_of_match[] = {
+ {.compatible = "hisilicon,hns-nic-v1",},
+ {.compatible = "hisilicon,hns-nic-v2",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, hns_enet_of_match);
+
+static struct platform_driver hns_nic_dev_driver = {
+ .driver = {
+ .name = "hns-nic",
+ .of_match_table = hns_enet_of_match,
+ },
+ .probe = hns_nic_dev_probe,
+ .remove = hns_nic_dev_remove,
+};
+
+module_platform_driver(hns_nic_dev_driver);
+
+MODULE_DESCRIPTION("HISILICON HNS Ethernet driver");
+MODULE_AUTHOR("Hisilicon, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:hns-nic");
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
new file mode 100644
index 000000000000..dae0ed19ac6d
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __HNS_ENET_H
+#define __HNS_ENET_H
+
+#include <linux/netdevice.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+
+#include "hnae.h"
+
+enum hns_nic_state {
+ NIC_STATE_TESTING = 0,
+ NIC_STATE_RESETTING,
+ NIC_STATE_REINITING,
+ NIC_STATE_DOWN,
+ NIC_STATE_DISABLED,
+ NIC_STATE_REMOVING,
+ NIC_STATE_SERVICE_INITED,
+ NIC_STATE_SERVICE_SCHED,
+ NIC_STATE2_RESET_REQUESTED,
+ NIC_STATE_MAX
+};
+
+struct hns_nic_ring_data {
+ struct hnae_ring *ring;
+ struct napi_struct napi;
+ int queue_index;
+ int (*poll_one)(struct hns_nic_ring_data *, int, void *);
+ void (*ex_process)(struct hns_nic_ring_data *, struct sk_buff *);
+ void (*fini_process)(struct hns_nic_ring_data *);
+};
+
+struct hns_nic_priv {
+ const char *ae_name;
+ u32 enet_ver;
+ u32 port_id;
+ int phy_mode;
+ int phy_led_val;
+ struct phy_device *phy;
+ struct net_device *netdev;
+ struct device *dev;
+ struct hnae_handle *ae_handle;
+
+ /* the cb for nic to manage the ring buffer; the first half of the
+ * array is for tx_ring and the second half is for rx_ring
+ */
+ struct hns_nic_ring_data *ring_data;
+
+ /* The most recently read link state */
+ int link;
+ u64 tx_timeout_count;
+
+ unsigned long state;
+
+ struct timer_list service_timer;
+
+ struct work_struct service_task;
+
+ struct notifier_block notifier_block;
+};
+
+#define tx_ring_data(priv, idx) ((priv)->ring_data[idx])
+#define rx_ring_data(priv, idx) \
+ ((priv)->ring_data[(priv)->ae_handle->q_num + (idx)])
+
+void hns_ethtool_set_ops(struct net_device *ndev);
+void hns_nic_net_reset(struct net_device *ndev);
+void hns_nic_net_reinit(struct net_device *netdev);
+int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h);
+int hns_nic_net_xmit_hw(struct net_device *ndev,
+ struct sk_buff *skb,
+ struct hns_nic_ring_data *ring_data);
+
+#endif /* __HNS_ENET_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
new file mode 100644
index 000000000000..a0332129970b
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -0,0 +1,1214 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "hns_enet.h"
+
+#define HNS_PHY_PAGE_MDIX 0
+#define HNS_PHY_PAGE_LED 3
+#define HNS_PHY_PAGE_COPPER 0
+
+#define HNS_PHY_PAGE_REG 22 /* Page Selection Reg. */
+#define HNS_PHY_CSC_REG 16 /* Copper Specific Control Register */
+#define HNS_PHY_CSS_REG 17 /* Copper Specific Status Register */
+#define HNS_LED_FC_REG 16 /* LED Function Control Reg. */
+#define HNS_LED_PC_REG 17 /* LED Polarity Control Reg. */
+
+#define HNS_LED_FORCE_ON 9
+#define HNS_LED_FORCE_OFF 8
+
+#define HNS_CHIP_VERSION 660
+#define HNS_NET_STATS_CNT 26
+
+#define PHY_MDIX_CTRL_S (5)
+#define PHY_MDIX_CTRL_M (3 << PHY_MDIX_CTRL_S)
+
+#define PHY_MDIX_STATUS_B (6)
+#define PHY_SPEED_DUP_RESOLVE_B (11)
+
+/**
+ *hns_nic_get_link - get current link status
+ *@net_dev: net_device
+ *return: the current link status
+ */
+static u32 hns_nic_get_link(struct net_device *net_dev)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ u32 link_stat = priv->link;
+ struct hnae_handle *h;
+
+ assert(priv && priv->ae_handle);
+ h = priv->ae_handle;
+
+ if (priv->phy) {
+ if (!genphy_update_link(priv->phy))
+ link_stat = priv->phy->link;
+ else
+ link_stat = 0;
+ }
+
+ if (h->dev && h->dev->ops && h->dev->ops->get_status)
+ link_stat = link_stat && h->dev->ops->get_status(h);
+ else
+ link_stat = 0;
+
+ return link_stat;
+}
+
+static void hns_get_mdix_mode(struct net_device *net_dev,
+ struct ethtool_cmd *cmd)
+{
+ int mdix_ctrl, mdix, retval, is_resolved;
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct phy_device *phy_dev = priv->phy;
+
+ if (!phy_dev || !phy_dev->bus) {
+ cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+ cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+ return;
+ }
+
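+ /* select the MDI/MDI-X page, read the control and status registers,
+ * then restore the copper page
+ */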
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr, HNS_PHY_PAGE_REG,
+ HNS_PHY_PAGE_MDIX);
+
+ retval = mdiobus_read(phy_dev->bus, phy_dev->addr, HNS_PHY_CSC_REG);
+ mdix_ctrl = hnae_get_field(retval, PHY_MDIX_CTRL_M, PHY_MDIX_CTRL_S);
+
+ retval = mdiobus_read(phy_dev->bus, phy_dev->addr, HNS_PHY_CSS_REG);
+ mdix = hnae_get_bit(retval, PHY_MDIX_STATUS_B);
+ is_resolved = hnae_get_bit(retval, PHY_SPEED_DUP_RESOLVE_B);
+
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr, HNS_PHY_PAGE_REG,
+ HNS_PHY_PAGE_COPPER);
+
+ switch (mdix_ctrl) {
+ case 0x0:
+ cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
+ break;
+ case 0x1:
+ cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
+ break;
+ case 0x3:
+ cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ break;
+ default:
+ cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+ break;
+ }
+
+ if (!is_resolved)
+ cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+ else if (mdix)
+ cmd->eth_tp_mdix = ETH_TP_MDI_X;
+ else
+ cmd->eth_tp_mdix = ETH_TP_MDI;
+}
+
+/**
+ *hns_nic_get_settings - implement ethtool get settings
+ *@net_dev: net_device
+ *@cmd: ethtool_cmd
+ *return 0 on success, negative on failure
+ */
+static int hns_nic_get_settings(struct net_device *net_dev,
+ struct ethtool_cmd *cmd)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_handle *h;
+ u32 link_stat;
+ int ret;
+ u8 duplex;
+ u16 speed;
+
+ if (!priv || !priv->ae_handle)
+ return -ESRCH;
+
+ h = priv->ae_handle;
+ if (!h->dev || !h->dev->ops || !h->dev->ops->get_info)
+ return -ESRCH;
+
+ ret = h->dev->ops->get_info(h, NULL, &speed, &duplex);
+ if (ret < 0) {
+ netdev_err(net_dev, "%s get_info error!\n", __func__);
+ return -EINVAL;
+ }
+
+ /* When there is no phy, autoneg is off. */
+ cmd->autoneg = false;
+ ethtool_cmd_speed_set(cmd, speed);
+ cmd->duplex = duplex;
+
+ if (priv->phy)
+ (void)phy_ethtool_gset(priv->phy, cmd);
+
+ link_stat = hns_nic_get_link(net_dev);
+ if (!link_stat) {
+ ethtool_cmd_speed_set(cmd, (u32)SPEED_UNKNOWN);
+ cmd->duplex = DUPLEX_UNKNOWN;
+ }
+
+ if (cmd->autoneg)
+ cmd->advertising |= ADVERTISED_Autoneg;
+
+ cmd->supported |= h->if_support;
+ if (h->phy_if == PHY_INTERFACE_MODE_SGMII) {
+ cmd->supported |= SUPPORTED_TP;
+ cmd->advertising |= ADVERTISED_1000baseT_Full;
+ } else if (h->phy_if == PHY_INTERFACE_MODE_XGMII) {
+ cmd->supported |= SUPPORTED_FIBRE;
+ cmd->advertising |= ADVERTISED_10000baseKR_Full;
+ }
+
+ if (h->port_type == HNAE_PORT_SERVICE) {
+ cmd->port = PORT_FIBRE;
+ cmd->supported |= SUPPORTED_Pause;
+ } else {
+ cmd->port = PORT_TP;
+ }
+
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->mdio_support = (ETH_MDIO_SUPPORTS_C45 | ETH_MDIO_SUPPORTS_C22);
+ hns_get_mdix_mode(net_dev, cmd);
+
+ return 0;
+}
+
+/**
+ *hns_nic_set_settings - implement ethtool set settings
+ *@net_dev: net_device
+ *@cmd: ethtool_cmd
+ *return 0 on success, negative on failure
+ */
+static int hns_nic_set_settings(struct net_device *net_dev,
+ struct ethtool_cmd *cmd)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_handle *h;
+ u32 speed;
+
+ if (!netif_running(net_dev))
+ return -ESRCH;
+
+ if (!priv || !priv->ae_handle || !priv->ae_handle->dev ||
+ !priv->ae_handle->dev->ops)
+ return -ENODEV;
+
+ h = priv->ae_handle;
+ speed = ethtool_cmd_speed(cmd);
+
+ if (h->phy_if == PHY_INTERFACE_MODE_XGMII) {
+ if (cmd->autoneg == AUTONEG_ENABLE || speed != SPEED_10000 ||
+ cmd->duplex != DUPLEX_FULL)
+ return -EINVAL;
+ } else if (h->phy_if == PHY_INTERFACE_MODE_SGMII) {
+ if (!priv->phy && cmd->autoneg == AUTONEG_ENABLE)
+ return -EINVAL;
+
+ if (speed == SPEED_1000 && cmd->duplex == DUPLEX_HALF)
+ return -EINVAL;
+ if (priv->phy)
+ return phy_ethtool_sset(priv->phy, cmd);
+
+ if ((speed != SPEED_10 && speed != SPEED_100 &&
+ speed != SPEED_1000) || (cmd->duplex != DUPLEX_HALF &&
+ cmd->duplex != DUPLEX_FULL))
+ return -EINVAL;
+ } else {
+ netdev_err(net_dev, "Not supported!");
+ return -ENOTSUPP;
+ }
+
+ if (h->dev->ops->adjust_link) {
+ h->dev->ops->adjust_link(h, (int)speed, cmd->duplex);
+ return 0;
+ }
+
+ netdev_err(net_dev, "Not supported!");
+ return -ENOTSUPP;
+}
+
+static const char hns_nic_test_strs[][ETH_GSTRING_LEN] = {
+ "Mac Loopback test",
+ "Serdes Loopback test",
+ "Phy Loopback test"
+};
+
+static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
+{
+#define COPPER_CONTROL_REG 0
+#define PHY_LOOP_BACK BIT(14)
+ u16 val = 0;
+
+ if (phy_dev->is_c45) /* Clause 45 (XGE) PHYs are not supported here */
+ return -ENOTSUPP;
+
+ if (en) {
+ /* speed : 1000M */
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+ HNS_PHY_PAGE_REG, 2);
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+ 21, 0x1046);
+ /* Force Master */
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+ 9, 0x1F00);
+ /* Soft-reset */
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+ 0, 0x9140);
+ /* if autoneg is disabled, two soft-reset operations are required */
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+ 0, 0x9140);
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+ 22, 0xFA);
+
+ /* Default is 0x0400 */
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+ 1, 0x418);
+
+ /* Force 1000M Link, Default is 0x0200 */
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+ 7, 0x20C);
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+ 22, 0);
+
+ /* Enable MAC loop-back */
+ val = (u16)mdiobus_read(phy_dev->bus, phy_dev->addr,
+ COPPER_CONTROL_REG);
+ val |= PHY_LOOP_BACK;
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+ COPPER_CONTROL_REG, val);
+ } else {
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+ 22, 0xFA);
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+ 1, 0x400);
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+ 7, 0x200);
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+ 22, 0);
+
+ val = (u16)mdiobus_read(phy_dev->bus, phy_dev->addr,
+ COPPER_CONTROL_REG);
+ val &= ~PHY_LOOP_BACK;
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+ COPPER_CONTROL_REG, val);
+ }
+ return 0;
+}
+
+static int __lb_setup(struct net_device *ndev,
+ enum hnae_loop loop)
+{
+ int ret = 0;
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct phy_device *phy_dev = priv->phy;
+ struct hnae_handle *h = priv->ae_handle;
+
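+ /* enable the requested loopback point, or tear them all down for MAC_LOOP_NONE */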
+ switch (loop) {
+ case MAC_INTERNALLOOP_PHY:
+ if ((phy_dev) && (!phy_dev->is_c45))
+ ret = hns_nic_config_phy_loopback(phy_dev, 0x1);
+ break;
+ case MAC_INTERNALLOOP_MAC:
+ if ((h->dev->ops->set_loopback) &&
+ (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII))
+ ret = h->dev->ops->set_loopback(h, loop, 0x1);
+ break;
+ case MAC_INTERNALLOOP_SERDES:
+ if (h->dev->ops->set_loopback)
+ ret = h->dev->ops->set_loopback(h, loop, 0x1);
+ break;
+ case MAC_LOOP_NONE:
+ if ((phy_dev) && (!phy_dev->is_c45))
+ ret |= hns_nic_config_phy_loopback(phy_dev, 0x0);
+
+ if (h->dev->ops->set_loopback) {
+ if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)
+ ret |= h->dev->ops->set_loopback(h,
+ MAC_INTERNALLOOP_MAC, 0x0);
+
+ ret |= h->dev->ops->set_loopback(h,
+ MAC_INTERNALLOOP_SERDES, 0x0);
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int __lb_up(struct net_device *ndev,
+ enum hnae_loop loop_mode)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ int speed, duplex;
+ int ret;
+
+ hns_nic_net_reset(ndev);
+
+ if (priv->phy) {
+ phy_disconnect(priv->phy);
+ msleep(100);
+
+ ret = hns_nic_init_phy(ndev, h);
+ if (ret)
+ return ret;
+ }
+
+ ret = __lb_setup(ndev, loop_mode);
+ if (ret)
+ return ret;
+
+ msleep(100);
+
+ ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
+ if (ret)
+ return ret;
+
+ if (priv->phy)
+ phy_start(priv->phy);
+
+ /* adjust link speed and duplex */
+ if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)
+ speed = 1000;
+ else
+ speed = 10000;
+ duplex = 1;
+
+ h->dev->ops->adjust_link(h, speed, duplex);
+
+ return 0;
+}
+
+static void __lb_other_process(struct hns_nic_ring_data *ring_data,
+ struct sk_buff *skb)
+{
+ struct net_device *ndev;
+ struct hnae_ring *ring;
+ struct netdev_queue *dev_queue;
+ struct sk_buff *new_skb;
+ unsigned int frame_size;
+ int check_ok;
+ u32 i;
+ char buff[33]; /* 32B data and the last character '\0' */
+
+ if (!ring_data) { /* called with NULL ring_data just to build the test frame */
+ frame_size = skb->len;
+ memset(skb->data, 0xFF, frame_size);
+ frame_size &= ~1ul;
+ memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
+ memset(&skb->data[frame_size / 2 + 10], 0xBE,
+ frame_size / 2 - 11);
+ memset(&skb->data[frame_size / 2 + 12], 0xAF,
+ frame_size / 2 - 13);
+ return;
+ }
+
+ ring = ring_data->ring;
+ ndev = ring_data->napi.dev;
+ if (is_tx_ring(ring)) { /* for tx queue reset*/
+ dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
+ netdev_tx_reset_queue(dev_queue);
+ return;
+ }
+
+ frame_size = skb->len;
+ frame_size &= ~1ul;
+ /* for multi-buffer frames */
+ new_skb = skb_copy(skb, GFP_ATOMIC);
+ dev_kfree_skb_any(skb);
+ skb = new_skb;
+
+ check_ok = 0;
+ if (*(skb->data + 10) == 0xFF) { /* verify the received test frame */
+ if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+ (*(skb->data + frame_size / 2 + 12) == 0xAF))
+ check_ok = 1;
+ }
+
+ if (check_ok) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += skb->len;
+ } else {
+ ndev->stats.rx_frame_errors++;
+ for (i = 0; i < skb->len; i++) {
+ snprintf(buff + i % 16 * 2, 3, /* trailing '\0' */
+ "%02x", *(skb->data + i));
+ if ((i % 16 == 15) || (i == skb->len - 1))
+ pr_info("%s\n", buff);
+ }
+ }
+ dev_kfree_skb_any(skb);
+}
+
+static int __lb_clean_rings(struct hns_nic_priv *priv,
+ int ringid0, int ringid1, int budget)
+{
+ int i, ret;
+ struct hns_nic_ring_data *ring_data;
+ struct net_device *ndev = priv->netdev;
+ unsigned long rx_packets = ndev->stats.rx_packets;
+ unsigned long rx_bytes = ndev->stats.rx_bytes;
+ unsigned long rx_frame_errors = ndev->stats.rx_frame_errors;
+
+ for (i = ringid0; i <= ringid1; i++) {
+ ring_data = &priv->ring_data[i];
+ (void)ring_data->poll_one(ring_data,
+ budget, __lb_other_process);
+ }
+ ret = (int)(ndev->stats.rx_packets - rx_packets);
+ ndev->stats.rx_packets = rx_packets;
+ ndev->stats.rx_bytes = rx_bytes;
+ ndev->stats.rx_frame_errors = rx_frame_errors;
+ return ret;
+}
+
+/**
+ * __lb_run_test - run a loopback test
+ * @ndev: net device
+ * @loop_mode: loopback mode
+ */
+static int __lb_run_test(struct net_device *ndev,
+ enum hnae_loop loop_mode)
+{
+#define NIC_LB_TEST_PKT_NUM_PER_CYCLE 1
+#define NIC_LB_TEST_RING_ID 0
+#define NIC_LB_TEST_FRAME_SIZE 128
+/* nic loopback test err */
+#define NIC_LB_TEST_NO_MEM_ERR 1
+#define NIC_LB_TEST_TX_CNT_ERR 2
+#define NIC_LB_TEST_RX_CNT_ERR 3
+#define NIC_LB_TEST_RX_PKG_ERR 4
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ int i, j, lc, good_cnt, ret_val = 0;
+ unsigned int size;
+ netdev_tx_t tx_ret_val;
+ struct sk_buff *skb;
+
+ size = NIC_LB_TEST_FRAME_SIZE;
+ /* allocate test skb */
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (!skb)
+ return NIC_LB_TEST_NO_MEM_ERR;
+
+ /* place data into test skb */
+ (void)skb_put(skb, size);
+ __lb_other_process(NULL, skb);
+ skb->queue_mapping = NIC_LB_TEST_RING_ID;
+
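+ /* run a single cycle: transmit the test packets, wait, then reap them
+ * from the RX rings and check the counts
+ */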
+ lc = 1;
+ for (j = 0; j < lc; j++) {
+ /* reset count of good packets */
+ good_cnt = 0;
+ /* place the test packets on the transmit queue */
+ for (i = 0; i < NIC_LB_TEST_PKT_NUM_PER_CYCLE; i++) {
+ (void)skb_get(skb);
+
+ tx_ret_val = (netdev_tx_t)hns_nic_net_xmit_hw(
+ ndev, skb,
+ &tx_ring_data(priv, skb->queue_mapping));
+ if (tx_ret_val == NETDEV_TX_OK)
+ good_cnt++;
+ else
+ break;
+ }
+ if (good_cnt != NIC_LB_TEST_PKT_NUM_PER_CYCLE) {
+ ret_val = NIC_LB_TEST_TX_CNT_ERR;
+ dev_err(priv->dev, "%s sent fail, cnt=0x%x, budget=0x%x\n",
+ hns_nic_test_strs[loop_mode], good_cnt,
+ NIC_LB_TEST_PKT_NUM_PER_CYCLE);
+ break;
+ }
+
+ /* allow 100 milliseconds for packets to go from Tx to Rx */
+ msleep(100);
+
+ good_cnt = __lb_clean_rings(priv,
+ h->q_num, h->q_num * 2 - 1,
+ NIC_LB_TEST_PKT_NUM_PER_CYCLE);
+ if (good_cnt != NIC_LB_TEST_PKT_NUM_PER_CYCLE) {
+ ret_val = NIC_LB_TEST_RX_CNT_ERR;
+ dev_err(priv->dev, "%s recv fail, cnt=0x%x, budget=0x%x\n",
+ hns_nic_test_strs[loop_mode], good_cnt,
+ NIC_LB_TEST_PKT_NUM_PER_CYCLE);
+ break;
+ }
+ (void)__lb_clean_rings(priv,
+ NIC_LB_TEST_RING_ID, NIC_LB_TEST_RING_ID,
+ NIC_LB_TEST_PKT_NUM_PER_CYCLE);
+ }
+
+ /* free the original skb */
+ kfree_skb(skb);
+
+ return ret_val;
+}
+
+static int __lb_down(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ int ret;
+
+ ret = __lb_setup(ndev, MAC_LOOP_NONE);
+ if (ret)
+ netdev_err(ndev, "%s: __lb_setup return error(%d)!\n",
+ __func__,
+ ret);
+
+ if (priv->phy)
+ phy_stop(priv->phy);
+
+ if (h->dev->ops->stop)
+ h->dev->ops->stop(h);
+
+ usleep_range(10000, 20000);
+ (void)__lb_clean_rings(priv, 0, h->q_num - 1, 256);
+
+ hns_nic_net_reset(ndev);
+
+ return 0;
+}
+
+/**
+ * hns_nic_self_test - self test
+ * @dev: net device
+ * @eth_test: test cmd
+ * @data: test result
+ */
+static void hns_nic_self_test(struct net_device *ndev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ bool if_running = netif_running(ndev);
+#define SELF_TEST_TPYE_NUM 3
+ int st_param[SELF_TEST_TPYE_NUM][2];
+ int i;
+ int test_index = 0;
+
+ st_param[0][0] = MAC_INTERNALLOOP_MAC; /* XGE does not support MAC loopback */
+ st_param[0][1] = (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII);
+ st_param[1][0] = MAC_INTERNALLOOP_SERDES;
+ st_param[1][1] = 1; /* SerDes always exists */
+ st_param[2][0] = MAC_INTERNALLOOP_PHY; /* only supported with a PHY node */
+ st_param[2][1] = ((!!(priv->ae_handle->phy_node)) &&
+ (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII));
+
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ set_bit(NIC_STATE_TESTING, &priv->state);
+
+ if (if_running)
+ (void)dev_close(ndev);
+
+ for (i = 0; i < SELF_TEST_TPYE_NUM; i++) {
+ if (!st_param[i][1])
+ continue; /* NEXT testing */
+
+ data[test_index] = __lb_up(ndev,
+ (enum hnae_loop)st_param[i][0]);
+ if (!data[test_index]) {
+ data[test_index] = __lb_run_test(
+ ndev, (enum hnae_loop)st_param[i][0]);
+ (void)__lb_down(ndev);
+ }
+
+ if (data[test_index])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ test_index++;
+ }
+
+ hns_nic_net_reset(priv->netdev);
+
+ clear_bit(NIC_STATE_TESTING, &priv->state);
+
+ if (if_running)
+ (void)dev_open(ndev);
+ }
+ /* Online tests aren't run; pass by default */
+
+ (void)msleep_interruptible(4 * 1000);
+}
+
+/**
+ * hns_nic_get_drvinfo - get net driver info
+ * @dev: net device
+ * @drvinfo: driver info
+ */
+static void hns_nic_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+
+ assert(priv);
+
+ strncpy(drvinfo->version, HNAE_DRIVER_VERSION,
+ sizeof(drvinfo->version));
+ drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
+
+ strncpy(drvinfo->driver, HNAE_DRIVER_NAME, sizeof(drvinfo->driver));
+ drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';
+
+ strncpy(drvinfo->bus_info, priv->dev->bus->name,
+ sizeof(drvinfo->bus_info));
+ drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0';
+
+ strncpy(drvinfo->fw_version, "N/A", ETHTOOL_FWVERS_LEN);
+}
+
+/**
+ * hns_get_ringparam - get ring parameter
+ * @dev: net device
+ * @param: ethtool parameter
+ */
+void hns_get_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *param)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_ae_ops *ops;
+ struct hnae_queue *queue;
+ u32 uplimit = 0;
+
+ queue = priv->ae_handle->qs[0];
+ ops = priv->ae_handle->dev->ops;
+
+ if (ops->get_ring_bdnum_limit)
+ ops->get_ring_bdnum_limit(queue, &uplimit);
+
+ param->rx_max_pending = uplimit;
+ param->tx_max_pending = uplimit;
+ param->rx_pending = queue->rx_ring.desc_num;
+ param->tx_pending = queue->tx_ring.desc_num;
+}
+
+/**
+ * hns_get_pauseparam - get pause parameter
+ * @dev: net device
+ * @param: pause parameter
+ */
+static void hns_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *param)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_ae_ops *ops;
+
+ ops = priv->ae_handle->dev->ops;
+
+ if (ops->get_pauseparam)
+ ops->get_pauseparam(priv->ae_handle, &param->autoneg,
+ &param->rx_pause, &param->tx_pause);
+}
+
+/**
+ * hns_set_pauseparam - set pause parameter
+ * @dev: net device
+ * @param: pause parameter
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_set_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *param)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_handle *h;
+ struct hnae_ae_ops *ops;
+
+ assert(priv && priv->ae_handle);
+
+ h = priv->ae_handle;
+ ops = h->dev->ops;
+
+ if (!ops->set_pauseparam)
+ return -ESRCH;
+
+ return ops->set_pauseparam(priv->ae_handle, param->autoneg,
+ param->rx_pause, param->tx_pause);
+}
+
+/**
+ * hns_get_coalesce - get coalesce info.
+ * @dev: net device
+ * @ec: coalesce info.
+ *
+ * Return 0 on success, negative on failure.
+ */
+static int hns_get_coalesce(struct net_device *net_dev,
+ struct ethtool_coalesce *ec)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_ae_ops *ops;
+
+ ops = priv->ae_handle->dev->ops;
+
+ ec->use_adaptive_rx_coalesce = 1;
+ ec->use_adaptive_tx_coalesce = 1;
+
+ if ((!ops->get_coalesce_usecs) ||
+ (!ops->get_rx_max_coalesced_frames))
+ return -ESRCH;
+
+ ops->get_coalesce_usecs(priv->ae_handle,
+ &ec->tx_coalesce_usecs,
+ &ec->rx_coalesce_usecs);
+
+ ops->get_rx_max_coalesced_frames(
+ priv->ae_handle,
+ &ec->tx_max_coalesced_frames,
+ &ec->rx_max_coalesced_frames);
+
+ return 0;
+}
+
+/**
+ * hns_set_coalesce - set coalesce info.
+ * @dev: net device
+ * @ec: coalesce info.
+ *
+ * Return 0 on success, negative on failure.
+ */
+static int hns_set_coalesce(struct net_device *net_dev,
+ struct ethtool_coalesce *ec)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_ae_ops *ops;
+ int ret;
+
+ assert(priv && priv->ae_handle);
+
+ ops = priv->ae_handle->dev->ops;
+
+ if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs)
+ return -EINVAL;
+
+ if (ec->rx_max_coalesced_frames != ec->tx_max_coalesced_frames)
+ return -EINVAL;
+
+ if ((!ops->set_coalesce_usecs) ||
+ (!ops->set_coalesce_frames))
+ return -ESRCH;
+
+ ops->set_coalesce_usecs(priv->ae_handle,
+ ec->rx_coalesce_usecs);
+
+ ret = ops->set_coalesce_frames(
+ priv->ae_handle,
+ ec->rx_max_coalesced_frames);
+
+ return ret;
+}
+
+/**
+ * hns_get_channels - get channel info.
+ * @dev: net device
+ * @ch: channel info.
+ */
+void hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+
+ ch->max_rx = priv->ae_handle->q_num;
+ ch->max_tx = priv->ae_handle->q_num;
+
+ ch->rx_count = priv->ae_handle->q_num;
+ ch->tx_count = priv->ae_handle->q_num;
+}
+
+/**
+ * hns_get_ethtool_stats - get detailed statistics.
+ * @netdev: net device
+ * @stats: statistics info.
+ * @data: statistics data.
+ */
+void hns_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ u64 *p = data;
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+ const struct rtnl_link_stats64 *net_stats;
+ struct rtnl_link_stats64 temp;
+
+ if (!h->dev->ops->get_stats || !h->dev->ops->update_stats) {
+ netdev_err(netdev, "get_stats or update_stats is null!\n");
+ return;
+ }
+
+ h->dev->ops->update_stats(h, &netdev->stats);
+
+ net_stats = dev_get_stats(netdev, &temp);
+
+ /* get netdev statistics */
+ p[0] = net_stats->rx_packets;
+ p[1] = net_stats->tx_packets;
+ p[2] = net_stats->rx_bytes;
+ p[3] = net_stats->tx_bytes;
+ p[4] = net_stats->rx_errors;
+ p[5] = net_stats->tx_errors;
+ p[6] = net_stats->rx_dropped;
+ p[7] = net_stats->tx_dropped;
+ p[8] = net_stats->multicast;
+ p[9] = net_stats->collisions;
+ p[10] = net_stats->rx_over_errors;
+ p[11] = net_stats->rx_crc_errors;
+ p[12] = net_stats->rx_frame_errors;
+ p[13] = net_stats->rx_fifo_errors;
+ p[14] = net_stats->rx_missed_errors;
+ p[15] = net_stats->tx_aborted_errors;
+ p[16] = net_stats->tx_carrier_errors;
+ p[17] = net_stats->tx_fifo_errors;
+ p[18] = net_stats->tx_heartbeat_errors;
+ p[19] = net_stats->rx_length_errors;
+ p[20] = net_stats->tx_window_errors;
+ p[21] = net_stats->rx_compressed;
+ p[22] = net_stats->tx_compressed;
+
+ p[23] = netdev->rx_dropped.counter;
+ p[24] = netdev->tx_dropped.counter;
+
+ p[25] = priv->tx_timeout_count;
+
+ /* get driver statistics */
+ h->dev->ops->get_stats(h, &p[26]);
+}
+
+/**
+ * hns_get_strings - return a set of strings that describe the requested objects
+ * @netdev: net device
+ * @stringset: string set ID.
+ * @data: objects data.
+ */
+void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+ char *buff = (char *)data;
+
+ if (!h->dev->ops->get_strings) {
+ netdev_err(netdev, "h->dev->ops->get_strings is null!\n");
+ return;
+ }
+
+ if (stringset == ETH_SS_TEST) {
+ if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII) {
+ memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_MAC],
+ ETH_GSTRING_LEN);
+ buff += ETH_GSTRING_LEN;
+ }
+ memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_SERDES],
+ ETH_GSTRING_LEN);
+ buff += ETH_GSTRING_LEN;
+ if ((priv->phy) && (!priv->phy->is_c45))
+ memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_PHY],
+ ETH_GSTRING_LEN);
+
+ } else {
+ snprintf(buff, ETH_GSTRING_LEN, "rx_packets");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_packets");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_bytes");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_bytes");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_dropped");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_dropped");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "multicast");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "collisions");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_over_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_crc_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_frame_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_fifo_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_missed_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_aborted_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_carrier_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_fifo_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_heartbeat_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_length_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_window_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_compressed");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_compressed");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "netdev_rx_dropped");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "netdev_tx_dropped");
+ buff = buff + ETH_GSTRING_LEN;
+
+ snprintf(buff, ETH_GSTRING_LEN, "netdev_tx_timeout");
+ buff = buff + ETH_GSTRING_LEN;
+
+ h->dev->ops->get_strings(h, stringset, (u8 *)buff);
+ }
+}
+
+/**
+ * hns_get_sset_count - get the count of strings returned by hns_get_strings.
+ * @netdev: net device
+ * @stringset: string set index: 0 for self-test strings, 1 for statistics strings.
+ *
+ * Return string set count.
+ */
+int hns_get_sset_count(struct net_device *netdev, int stringset)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+ struct hnae_ae_ops *ops = h->dev->ops;
+
+ if (!ops->get_sset_count) {
+ netdev_err(netdev, "get_sset_count is null!\n");
+ return -EOPNOTSUPP;
+ }
+ if (stringset == ETH_SS_TEST) {
+ u32 cnt = (sizeof(hns_nic_test_strs) / ETH_GSTRING_LEN);
+
+ if (priv->ae_handle->phy_if == PHY_INTERFACE_MODE_XGMII)
+ cnt--;
+
+ if ((!priv->phy) || (priv->phy->is_c45))
+ cnt--;
+
+ return cnt;
+ } else {
+ return (HNS_NET_STATS_CNT + ops->get_sset_count(h, stringset));
+ }
+}
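+
+/*
+ * For example, on an XGMII (10G) port without an attached PHY both the
+ * MAC-internal and PHY loopback entries are dropped above, so ETH_SS_TEST
+ * reports a single self-test string (serdes loopback).
+ */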
+
+/**
+ * hns_phy_led_set - set phy LED status.
+ * @dev: net device
+ * @value: LED state.
+ *
+ * Return 0 on success, negative on failure.
+ */
+int hns_phy_led_set(struct net_device *netdev, int value)
+{
+ int retval;
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct phy_device *phy_dev = priv->phy;
+
+ if (!phy_dev->bus) {
+ netdev_err(netdev, "phy_dev->bus is null!\n");
+ return -EINVAL;
+ }
+ retval = mdiobus_write(phy_dev->bus, phy_dev->addr,
+ HNS_PHY_PAGE_REG, HNS_PHY_PAGE_LED);
+ if (retval) {
+ netdev_err(netdev, "mdiobus_write fail!\n");
+ return retval;
+ }
+ retval = mdiobus_write(phy_dev->bus, phy_dev->addr, HNS_LED_FC_REG,
+ value);
+ if (retval) {
+ netdev_err(netdev, "mdiobus_write fail!\n");
+ return retval;
+ }
+ retval = mdiobus_write(phy_dev->bus, phy_dev->addr,
+ HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER);
+ if (retval) {
+ netdev_err(netdev, "mdiobus_write fail!\n");
+ return retval;
+ }
+ return 0;
+}
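+
+/*
+ * The sequence above assumes a paged PHY register layout: HNS_PHY_PAGE_REG
+ * selects the LED page before HNS_LED_FC_REG is written, and the copper page
+ * is restored afterwards so normal PHY accesses keep working.
+ */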
+
+/**
+ * hns_set_phys_id - set the PHY identify LED.
+ * @netdev: net device
+ * @state: LED state.
+ *
+ * Return 0 on success, negative on failure.
+ */
+int hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+ struct phy_device *phy_dev = priv->phy;
+ int ret;
+
+ if (phy_dev)
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ ret = mdiobus_write(phy_dev->bus, phy_dev->addr,
+ HNS_PHY_PAGE_REG,
+ HNS_PHY_PAGE_LED);
+ if (ret)
+ return ret;
+
+ priv->phy_led_val = (u16)mdiobus_read(phy_dev->bus,
+ phy_dev->addr,
+ HNS_LED_FC_REG);
+
+ ret = mdiobus_write(phy_dev->bus, phy_dev->addr,
+ HNS_PHY_PAGE_REG,
+ HNS_PHY_PAGE_COPPER);
+ if (ret)
+ return ret;
+ return 2;
+ case ETHTOOL_ID_ON:
+ ret = hns_phy_led_set(netdev, HNS_LED_FORCE_ON);
+ if (ret)
+ return ret;
+ break;
+ case ETHTOOL_ID_OFF:
+ ret = hns_phy_led_set(netdev, HNS_LED_FORCE_OFF);
+ if (ret)
+ return ret;
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ ret = mdiobus_write(phy_dev->bus, phy_dev->addr,
+ HNS_PHY_PAGE_REG,
+ HNS_PHY_PAGE_LED);
+ if (ret)
+ return ret;
+
+ ret = mdiobus_write(phy_dev->bus, phy_dev->addr,
+ HNS_LED_FC_REG, priv->phy_led_val);
+ if (ret)
+ return ret;
+
+ ret = mdiobus_write(phy_dev->bus, phy_dev->addr,
+ HNS_PHY_PAGE_REG,
+ HNS_PHY_PAGE_COPPER);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+ else
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return h->dev->ops->set_led_id(h, HNAE_LED_ACTIVE);
+ case ETHTOOL_ID_ON:
+ return h->dev->ops->set_led_id(h, HNAE_LED_ON);
+ case ETHTOOL_ID_OFF:
+ return h->dev->ops->set_led_id(h, HNAE_LED_OFF);
+ case ETHTOOL_ID_INACTIVE:
+ return h->dev->ops->set_led_id(h, HNAE_LED_INACTIVE);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * hns_get_regs - get net device registers
+ * @net_dev: net device
+ * @cmd: ethtool cmd
+ * @data: register data
+ */
+void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd,
+ void *data)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_ae_ops *ops;
+
+ assert(priv && priv->ae_handle);
+
+ ops = priv->ae_handle->dev->ops;
+
+ cmd->version = HNS_CHIP_VERSION;
+ if (!ops->get_regs) {
+ netdev_err(net_dev, "ops->get_regs is null!\n");
+ return;
+ }
+ ops->get_regs(priv->ae_handle, data);
+}
+
+/**
+ * hns_get_regs_len - get total register length.
+ * @net_dev: net device
+ *
+ * Return total register len.
+ */
+static int hns_get_regs_len(struct net_device *net_dev)
+{
+ u32 reg_num;
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_ae_ops *ops;
+
+ assert(priv && priv->ae_handle);
+
+ ops = priv->ae_handle->dev->ops;
+ if (!ops->get_regs_len) {
+ netdev_err(net_dev, "ops->get_regs_len is null!\n");
+ return -EOPNOTSUPP;
+ }
+
+ reg_num = ops->get_regs_len(priv->ae_handle);
+ if (reg_num > 0)
+ return reg_num * sizeof(u32);
+ else
+ return reg_num; /* error code */
+}
+
+/**
+ * hns_nic_nway_reset - nway reset
+ * @dev: net device
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_nic_nway_reset(struct net_device *netdev)
+{
+ int ret = 0;
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct phy_device *phy = priv->phy;
+
+ if (netif_running(netdev)) {
+ if (phy)
+ ret = genphy_restart_aneg(phy);
+ }
+
+ return ret;
+}
+
+static const struct ethtool_ops hns_ethtool_ops = {
+ .get_drvinfo = hns_nic_get_drvinfo,
+ .get_link = hns_nic_get_link,
+ .get_settings = hns_nic_get_settings,
+ .set_settings = hns_nic_set_settings,
+ .get_ringparam = hns_get_ringparam,
+ .get_pauseparam = hns_get_pauseparam,
+ .set_pauseparam = hns_set_pauseparam,
+ .get_coalesce = hns_get_coalesce,
+ .set_coalesce = hns_set_coalesce,
+ .get_channels = hns_get_channels,
+ .self_test = hns_nic_self_test,
+ .get_strings = hns_get_strings,
+ .get_sset_count = hns_get_sset_count,
+ .get_ethtool_stats = hns_get_ethtool_stats,
+ .set_phys_id = hns_set_phys_id,
+ .get_regs_len = hns_get_regs_len,
+ .get_regs = hns_get_regs,
+ .nway_reset = hns_nic_nway_reset,
+};
+
+void hns_ethtool_set_ops(struct net_device *ndev)
+{
+ ndev->ethtool_ops = &hns_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
new file mode 100644
index 000000000000..37491c85bc42
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -0,0 +1,521 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/of_address.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/spinlock_types.h>
+
+#define MDIO_DRV_NAME "Hi-HNS_MDIO"
+#define MDIO_BUS_NAME "Hisilicon MII Bus"
+#define MDIO_DRV_VERSION "1.3.0"
+#define MDIO_COPYRIGHT "Copyright(c) 2015 Huawei Corporation."
+#define MDIO_DRV_STRING MDIO_BUS_NAME
+#define MDIO_DEFAULT_DEVICE_DESCR MDIO_BUS_NAME
+
+#define MDIO_CTL_DEV_ADDR(x) ((x) & 0x1f)
+#define MDIO_CTL_PORT_ADDR(x) (((x) & 0x1f) << 5)
+
+#define MDIO_TIMEOUT 1000000
+
+struct hns_mdio_device {
+ void *vbase; /* mdio reg base address */
+ struct regmap *subctrl_vbase;
+};
+
+/* mdio reg */
+#define MDIO_COMMAND_REG 0x0
+#define MDIO_ADDR_REG 0x4
+#define MDIO_WDATA_REG 0x8
+#define MDIO_RDATA_REG 0xc
+#define MDIO_STA_REG 0x10
+
+/* cfg phy bit map */
+#define MDIO_CMD_DEVAD_M 0x1f
+#define MDIO_CMD_DEVAD_S 0
+#define MDIO_CMD_PRTAD_M 0x1f
+#define MDIO_CMD_PRTAD_S 5
+#define MDIO_CMD_OP_M 0x3
+#define MDIO_CMD_OP_S 10
+#define MDIO_CMD_ST_M 0x3
+#define MDIO_CMD_ST_S 12
+#define MDIO_CMD_START_B 14
+
+#define MDIO_ADDR_DATA_M 0xffff
+#define MDIO_ADDR_DATA_S 0
+
+#define MDIO_WDATA_DATA_M 0xffff
+#define MDIO_WDATA_DATA_S 0
+
+#define MDIO_RDATA_DATA_M 0xffff
+#define MDIO_RDATA_DATA_S 0
+
+#define MDIO_STATE_STA_B 0
+
+enum mdio_st_clause {
+ MDIO_ST_CLAUSE_45 = 0,
+ MDIO_ST_CLAUSE_22
+};
+
+enum mdio_c22_op_seq {
+ MDIO_C22_WRITE = 1,
+ MDIO_C22_READ = 2
+};
+
+enum mdio_c45_op_seq {
+ MDIO_C45_WRITE_ADDR = 0,
+ MDIO_C45_WRITE_DATA,
+ MDIO_C45_READ_INCREMENT,
+ MDIO_C45_READ
+};
+
+/* peri subctrl reg */
+#define MDIO_SC_CLK_EN 0x338
+#define MDIO_SC_CLK_DIS 0x33C
+#define MDIO_SC_RESET_REQ 0xA38
+#define MDIO_SC_RESET_DREQ 0xA3C
+#define MDIO_SC_CTRL 0x2010
+#define MDIO_SC_CLK_ST 0x531C
+#define MDIO_SC_RESET_ST 0x5A1C
+
+static void mdio_write_reg(void *base, u32 reg, u32 value)
+{
+ u8 __iomem *reg_addr = (u8 __iomem *)base;
+
+ writel_relaxed(value, reg_addr + reg);
+}
+
+#define MDIO_WRITE_REG(a, reg, value) \
+ mdio_write_reg((a)->vbase, (reg), (value))
+
+static u32 mdio_read_reg(void *base, u32 reg)
+{
+ u8 __iomem *reg_addr = (u8 __iomem *)base;
+
+ return readl_relaxed(reg_addr + reg);
+}
+
+#define mdio_set_field(origin, mask, shift, val) \
+ do { \
+ (origin) &= (~((mask) << (shift))); \
+ (origin) |= (((val) & (mask)) << (shift)); \
+ } while (0)
+
+#define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask))
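+
+/*
+ * For illustration: with origin = 0x0000, mdio_set_field(origin,
+ * MDIO_CMD_PRTAD_M, MDIO_CMD_PRTAD_S, 3) first clears bits 9:5 and then
+ * ORs in (3 & 0x1f) << 5, leaving origin == 0x60; mdio_get_field(0x60,
+ * MDIO_CMD_PRTAD_M, MDIO_CMD_PRTAD_S) recovers the value 3.
+ */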
+
+static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
+ u32 val)
+{
+ u32 origin = mdio_read_reg(base, reg);
+
+ mdio_set_field(origin, mask, shift, val);
+ mdio_write_reg(base, reg, origin);
+}
+
+#define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \
+ mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val))
+
+static u32 mdio_get_reg_field(void *base, u32 reg, u32 mask, u32 shift)
+{
+ u32 origin;
+
+ origin = mdio_read_reg(base, reg);
+ return mdio_get_field(origin, mask, shift);
+}
+
+#define MDIO_GET_REG_FIELD(dev, reg, mask, shift) \
+ mdio_get_reg_field((dev)->vbase, (reg), (mask), (shift))
+
+#define MDIO_GET_REG_BIT(dev, reg, bit) \
+ mdio_get_reg_field((dev)->vbase, (reg), 0x1ull, (bit))
+
+#define MDIO_CHECK_SET_ST 1
+#define MDIO_CHECK_CLR_ST 0
+
+static int mdio_sc_cfg_reg_write(struct hns_mdio_device *mdio_dev,
+ u32 cfg_reg, u32 set_val,
+ u32 st_reg, u32 st_msk, u8 check_st)
+{
+ u32 time_cnt;
+ u32 reg_value;
+
+ regmap_write(mdio_dev->subctrl_vbase, cfg_reg, set_val);
+
+ for (time_cnt = MDIO_TIMEOUT; time_cnt; time_cnt--) {
+ regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
+ reg_value &= st_msk;
+ if ((!!check_st) == (!!reg_value))
+ break;
+ }
+
+ if ((!!check_st) != (!!reg_value))
+ return -EBUSY;
+
+ return 0;
+}
+
+static int hns_mdio_wait_ready(struct mii_bus *bus)
+{
+ struct hns_mdio_device *mdio_dev = bus->priv;
+ int i;
+ u32 cmd_reg_value = 1;
+
+ /* wait for the mdio_start bit of MDIO_COMMAND_REG to clear;
+ * only after that can a read or write be issued
+ */
+ for (i = 0; cmd_reg_value; i++) {
+ cmd_reg_value = MDIO_GET_REG_BIT(mdio_dev,
+ MDIO_COMMAND_REG,
+ MDIO_CMD_START_B);
+ if (i == MDIO_TIMEOUT)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void hns_mdio_cmd_write(struct hns_mdio_device *mdio_dev,
+ u8 is_c45, u8 op, u8 phy_id, u16 cmd)
+{
+ u32 cmd_reg_value;
+ u8 st = is_c45 ? MDIO_ST_CLAUSE_45 : MDIO_ST_CLAUSE_22;
+
+ cmd_reg_value = st << MDIO_CMD_ST_S;
+ cmd_reg_value |= op << MDIO_CMD_OP_S;
+ cmd_reg_value |=
+ (phy_id & MDIO_CMD_PRTAD_M) << MDIO_CMD_PRTAD_S;
+ cmd_reg_value |= (cmd & MDIO_CMD_DEVAD_M) << MDIO_CMD_DEVAD_S;
+ cmd_reg_value |= 1 << MDIO_CMD_START_B;
+
+ MDIO_WRITE_REG(mdio_dev, MDIO_COMMAND_REG, cmd_reg_value);
+}
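+
+/*
+ * As a worked example (values assumed for illustration): a Clause 22 read of
+ * register 1 on PHY address 3 is encoded by hns_mdio_cmd_write() as
+ *
+ *   cmd = (MDIO_ST_CLAUSE_22 << MDIO_CMD_ST_S) |
+ *         (MDIO_C22_READ << MDIO_CMD_OP_S) |
+ *         ((3 & MDIO_CMD_PRTAD_M) << MDIO_CMD_PRTAD_S) |
+ *         ((1 & MDIO_CMD_DEVAD_M) << MDIO_CMD_DEVAD_S) |
+ *         (1 << MDIO_CMD_START_B);
+ *
+ * which evaluates to 0x5861: start bit set, ST = 1 (Clause 22), OP = 2 (read),
+ * PRTAD = 3 and the register number 1 in the DEVAD field.
+ */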
+
+/**
+ * hns_mdio_write - access phy register
+ * @bus: mdio bus
+ * @phy_id: phy id
+ * @regnum: register num
+ * @data: value to write
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_mdio_write(struct mii_bus *bus,
+ int phy_id, int regnum, u16 data)
+{
+ int ret;
+ struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+ u8 devad = ((regnum >> 16) & 0x1f);
+ u8 is_c45 = !!(regnum & MII_ADDR_C45);
+ u16 reg = (u16)(regnum & 0xffff);
+ u8 op;
+ u16 cmd_reg_cfg;
+
+ dev_dbg(&bus->dev, "mdio write %s,base is %p\n",
+ bus->id, mdio_dev->vbase);
+ dev_dbg(&bus->dev, "phy id=%d, is_c45=%d, devad=%d, reg=%#x, write data=%d\n",
+ phy_id, is_c45, devad, reg, data);
+
+ /* wait for ready */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
+
+ if (!is_c45) {
+ cmd_reg_cfg = reg;
+ op = MDIO_C22_WRITE;
+ } else {
+ /* config the cmd-reg to write the address */
+ MDIO_SET_REG_FIELD(mdio_dev, MDIO_ADDR_REG, MDIO_ADDR_DATA_M,
+ MDIO_ADDR_DATA_S, reg);
+
+ hns_mdio_cmd_write(mdio_dev, is_c45,
+ MDIO_C45_WRITE_ADDR, phy_id, devad);
+
+ /* check that the address write has finished */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
+
+ /* config the data to be written */
+ cmd_reg_cfg = devad;
+ op = MDIO_C45_WRITE_DATA;
+ }
+
+ MDIO_SET_REG_FIELD(mdio_dev, MDIO_WDATA_REG, MDIO_WDATA_DATA_M,
+ MDIO_WDATA_DATA_S, data);
+
+ hns_mdio_cmd_write(mdio_dev, is_c45, op, phy_id, cmd_reg_cfg);
+
+ return 0;
+}
+
+/**
+ * hns_mdio_read - access phy register
+ * @bus: mdio bus
+ * @phy_id: phy id
+ * @regnum: register num
+ *
+ * Return phy register value
+ */
+static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+{
+ int ret;
+ u16 reg_val = 0;
+ u8 devad = ((regnum >> 16) & 0x1f);
+ u8 is_c45 = !!(regnum & MII_ADDR_C45);
+ u16 reg = (u16)(regnum & 0xffff);
+ struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+
+ dev_dbg(&bus->dev, "mdio read %s,base is %p\n",
+ bus->id, mdio_dev->vbase);
+ dev_dbg(&bus->dev, "phy id=%d, is_c45=%d, devad=%d, reg=%#x!\n",
+ phy_id, is_c45, devad, reg);
+
+ /* Step 1: wait for ready */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
+
+ if (!is_c45) {
+ hns_mdio_cmd_write(mdio_dev, is_c45,
+ MDIO_C22_READ, phy_id, reg);
+ } else {
+ MDIO_SET_REG_FIELD(mdio_dev, MDIO_ADDR_REG, MDIO_ADDR_DATA_M,
+ MDIO_ADDR_DATA_S, reg);
+
+ /* Step 2: config the cmd-reg to write the address */
+ hns_mdio_cmd_write(mdio_dev, is_c45,
+ MDIO_C45_WRITE_ADDR, phy_id, devad);
+
+ /* Step 3: check that the address write has finished */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
+
+ /* Step 4: issue the read command */
+ hns_mdio_cmd_write(mdio_dev, is_c45,
+ MDIO_C45_READ, phy_id, devad);
+ }
+
+ /* Step 5: wait for the mdio_start bit of MDIO_COMMAND_REG to clear,
+ * i.e. the read or write operation has finished
+ */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
+
+ reg_val = MDIO_GET_REG_BIT(mdio_dev, MDIO_STA_REG, MDIO_STATE_STA_B);
+ if (reg_val) {
+ dev_err(&bus->dev, " ERROR! MDIO Read failed!\n");
+ return -EBUSY;
+ }
+
+ /* Step 6: read out the data */
+ reg_val = (u16)MDIO_GET_REG_FIELD(mdio_dev, MDIO_RDATA_REG,
+ MDIO_RDATA_DATA_M, MDIO_RDATA_DATA_S);
+
+ return reg_val;
+}
+
+/**
+ * hns_mdio_reset - reset mdio bus
+ * @bus: mdio bus
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_mdio_reset(struct mii_bus *bus)
+{
+ struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+ int ret;
+
+ if (!mdio_dev->subctrl_vbase) {
+ dev_err(&bus->dev, "mdio sys ctl reg has not maped\n");
+ return -ENODEV;
+ }
+
+ /* 1. assert reset request and check the reset status */
+ ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_RESET_REQ, 0x1,
+ MDIO_SC_RESET_ST, 0x1,
+ MDIO_CHECK_SET_ST);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO reset fail\n");
+ return ret;
+ }
+
+ /* 2. disable the clock and check the clock status */
+ ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_CLK_DIS,
+ 0x1, MDIO_SC_CLK_ST, 0x1,
+ MDIO_CHECK_CLR_ST);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO dis clk fail\n");
+ return ret;
+ }
+
+ /* 3. deassert reset request and check the reset status */
+ ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_RESET_DREQ, 0x1,
+ MDIO_SC_RESET_ST, 0x1,
+ MDIO_CHECK_CLR_ST);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO dis clk fail\n");
+ return ret;
+ }
+
+ /* 4. enable the clock and check the clock status */
+ ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_CLK_EN,
+ 0x1, MDIO_SC_CLK_ST, 0x1,
+ MDIO_CHECK_SET_ST);
+ if (ret)
+ dev_err(&bus->dev, "MDIO en clk fail\n");
+
+ return ret;
+}
+
+/**
+ * hns_mdio_bus_name - get mdio bus name
+ * @name: mdio bus name
+ * @np: mdio device node pointer
+ */
+static void hns_mdio_bus_name(char *name, struct device_node *np)
+{
+ const u32 *addr;
+ u64 taddr = OF_BAD_ADDR;
+
+ addr = of_get_address(np, 0, NULL, NULL);
+ if (addr)
+ taddr = of_translate_address(np, addr);
+
+ snprintf(name, MII_BUS_ID_SIZE, "%s@%llx", np->name,
+ (unsigned long long)taddr);
+}
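+
+/*
+ * For a hypothetical MDIO node named "mdio" at translated address 0x803c0000,
+ * the format string above yields the bus id "mdio@803c0000"; if the address
+ * cannot be translated, the OF_BAD_ADDR value is printed instead.
+ */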
+
+/**
+ * hns_mdio_probe - probe mdio device
+ * @pdev: mdio platform device
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_mdio_probe(struct platform_device *pdev)
+{
+ struct device_node *np;
+ struct hns_mdio_device *mdio_dev;
+ struct mii_bus *new_bus;
+ struct resource *res;
+ int ret;
+
+ if (!pdev) {
+ dev_err(NULL, "pdev is NULL!\r\n");
+ return -ENODEV;
+ }
+ np = pdev->dev.of_node;
+ mdio_dev = devm_kzalloc(&pdev->dev, sizeof(*mdio_dev), GFP_KERNEL);
+ if (!mdio_dev)
+ return -ENOMEM;
+
+ new_bus = devm_mdiobus_alloc(&pdev->dev);
+ if (!new_bus) {
+ dev_err(&pdev->dev, "mdiobus_alloc fail!\n");
+ return -ENOMEM;
+ }
+
+ new_bus->name = MDIO_BUS_NAME;
+ new_bus->read = hns_mdio_read;
+ new_bus->write = hns_mdio_write;
+ new_bus->reset = hns_mdio_reset;
+ new_bus->priv = mdio_dev;
+ hns_mdio_bus_name(new_bus->id, np);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mdio_dev->vbase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mdio_dev->vbase)) {
+ ret = PTR_ERR(mdio_dev->vbase);
+ return ret;
+ }
+
+ mdio_dev->subctrl_vbase =
+ syscon_node_to_regmap(of_parse_phandle(np, "subctrl_vbase", 0));
+ if (IS_ERR(mdio_dev->subctrl_vbase)) {
+ dev_warn(&pdev->dev, "no syscon hisilicon,peri-c-subctrl\n");
+ mdio_dev->subctrl_vbase = NULL;
+ }
+ new_bus->irq = devm_kcalloc(&pdev->dev, PHY_MAX_ADDR,
+ sizeof(int), GFP_KERNEL);
+ if (!new_bus->irq)
+ return -ENOMEM;
+
+ new_bus->parent = &pdev->dev;
+ platform_set_drvdata(pdev, new_bus);
+
+ ret = of_mdiobus_register(new_bus, np);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot register as MDIO bus!\n");
+ platform_set_drvdata(pdev, NULL);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * hns_mdio_remove - remove mdio device
+ * @pdev: mdio platform device
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_mdio_remove(struct platform_device *pdev)
+{
+ struct mii_bus *bus;
+
+ bus = platform_get_drvdata(pdev);
+
+ mdiobus_unregister(bus);
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static const struct of_device_id hns_mdio_match[] = {
+ {.compatible = "hisilicon,mdio"},
+ {.compatible = "hisilicon,hns-mdio"},
+ {}
+};
+
+static struct platform_driver hns_mdio_driver = {
+ .probe = hns_mdio_probe,
+ .remove = hns_mdio_remove,
+ .driver = {
+ .name = MDIO_DRV_NAME,
+ .of_match_table = hns_mdio_match,
+ },
+};
+
+module_platform_driver(hns_mdio_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
+MODULE_DESCRIPTION("Hisilicon HNS MDIO driver");
+MODULE_ALIAS("platform:" MDIO_DRV_NAME);
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index b60a34d982a9..5d7db6c01c46 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2204,7 +2204,6 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev,
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %s",
dev->cell_index, dev->ofdev->dev.of_node->full_name);
- info->regdump_len = emac_ethtool_get_regs_len(ndev);
}
static const struct ethtool_ops emac_ethtool_ops = {
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index ac02c675c59c..93ae11494810 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -181,7 +181,7 @@ struct emac_instance {
struct mal_commac commac;
/* PHY infos */
- u32 phy_mode;
+ int phy_mode;
u32 phy_map;
u32 phy_address;
u32 phy_feat_exc;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 4270ad2d4ddf..83e557c7f279 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -559,8 +559,6 @@ static void e1000_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->regdump_len = e1000_get_regs_len(netdev);
- drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
}
static void e1000_get_ringparam(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 45c8c864104e..b1af0d613caa 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -3900,10 +3900,6 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
return E1000_SUCCESS;
}
- /* If eeprom is not yet detected, do so now */
- if (eeprom->word_size == 0)
- e1000_init_eeprom_params(hw);
-
/* A check for invalid values: offset too large, too many words, and
* not enough words.
*/
@@ -4074,10 +4070,6 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
return E1000_SUCCESS;
}
- /* If eeprom is not yet detected, do so now */
- if (eeprom->word_size == 0)
- e1000_init_eeprom_params(hw);
-
/* A check for invalid values: offset too large, too many words, and
* not enough words.
*/
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 74dc15055971..fd7be860c201 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3820,7 +3820,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
if (work_done < budget) {
if (likely(adapter->itr_setting & 3))
e1000_set_itr(adapter);
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
if (!test_bit(__E1000_DOWN, &adapter->flags))
e1000_irq_enable(adapter);
}
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index ad6daa656d3e..6cab1f30d41e 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -648,8 +648,6 @@ static void e1000_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->regdump_len = e1000_get_regs_len(netdev);
- drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
}
static void e1000_get_ringparam(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index faf4b3f3d0b5..0a854a47d31a 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2693,7 +2693,7 @@ static int e1000e_poll(struct napi_struct *napi, int weight)
if (work_done < weight) {
if (adapter->itr_setting & 3)
e1000_set_itr(adapter);
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
if (!test_bit(__E1000_DOWN, &adapter->state)) {
if (adapter->msix_entries)
ew32(IMS, adapter->rx_ring->ims_val);
@@ -6952,6 +6952,7 @@ static const struct net_device_ops e1000e_netdev_ops = {
#endif
.ndo_set_features = e1000_set_features,
.ndo_fix_features = e1000_fix_features,
+ .ndo_features_check = passthru_features_check,
};
/**
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index c8c8c5baefda..14440200499b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -101,12 +101,19 @@ struct fm10k_tx_queue_stats {
u64 csum_err;
u64 tx_busy;
u64 tx_done_old;
+ u64 csum_good;
};
struct fm10k_rx_queue_stats {
u64 alloc_failed;
u64 csum_err;
u64 errors;
+ u64 csum_good;
+ u64 switch_errors;
+ u64 drops;
+ u64 pp_errors;
+ u64 link_errors;
+ u64 length_errors;
};
struct fm10k_ring {
@@ -251,6 +258,7 @@ struct fm10k_intfc {
#define FM10K_FLAG_RSS_FIELD_IPV6_UDP (u32)(1 << 2)
#define FM10K_FLAG_RX_TS_ENABLED (u32)(1 << 3)
#define FM10K_FLAG_SWPRI_CONFIG (u32)(1 << 4)
+#define FM10K_FLAG_DEBUG_STATS (u32)(1 << 5)
int xcast_mode;
/* Tx fast path data */
@@ -277,6 +285,17 @@ struct fm10k_intfc {
u64 rx_drops_nic;
u64 rx_overrun_pf;
u64 rx_overrun_vf;
+
+ /* Debug Statistics */
+ u64 hw_sm_mbx_full;
+ u64 hw_csum_tx_good;
+ u64 hw_csum_rx_good;
+ u64 rx_switch_errors;
+ u64 rx_drops;
+ u64 rx_pp_errors;
+ u64 rx_link_errors;
+ u64 rx_length_errors;
+
u32 tx_timeout_count;
/* RX */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
index f45b4d71adb8..5304bc1fbecd 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
@@ -37,7 +37,8 @@ static void *fm10k_dbg_desc_seq_start(struct seq_file *s, loff_t *pos)
}
static void *fm10k_dbg_desc_seq_next(struct seq_file *s,
- void __always_unused *v, loff_t *pos)
+ void __always_unused *v,
+ loff_t *pos)
{
struct fm10k_ring *ring = s->private;
@@ -45,7 +46,7 @@ static void *fm10k_dbg_desc_seq_next(struct seq_file *s,
}
static void fm10k_dbg_desc_seq_stop(struct seq_file __always_unused *s,
- __always_unused void *v)
+ void __always_unused *v)
{
/* Do nothing. */
}
@@ -175,7 +176,7 @@ void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector)
return;
/* Generate a folder for each q_vector */
- sprintf(name, "q_vector.%03d", q_vector->v_idx);
+ snprintf(name, sizeof(name), "q_vector.%03d", q_vector->v_idx);
q_vector->dbg_q_vector = debugfs_create_dir(name, interface->dbg_intfc);
if (!q_vector->dbg_q_vector)
@@ -185,7 +186,7 @@ void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector)
for (i = 0; i < q_vector->tx.count; i++) {
struct fm10k_ring *ring = &q_vector->tx.ring[i];
- sprintf(name, "tx_ring.%03d", ring->queue_index);
+ snprintf(name, sizeof(name), "tx_ring.%03d", ring->queue_index);
debugfs_create_file(name, 0600,
q_vector->dbg_q_vector, ring,
@@ -196,7 +197,7 @@ void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector)
for (i = 0; i < q_vector->rx.count; i++) {
struct fm10k_ring *ring = &q_vector->rx.ring[i];
- sprintf(name, "rx_ring.%03d", ring->queue_index);
+ snprintf(name, sizeof(name), "rx_ring.%03d", ring->queue_index);
debugfs_create_file(name, 0600,
q_vector->dbg_q_vector, ring,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index c6dc9683429e..2ce0eba5e040 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -76,19 +76,22 @@ static const struct fm10k_stats fm10k_gstrings_global_stats[] = {
FM10K_STAT("mac_rules_used", hw.swapi.mac.used),
FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail),
- FM10K_STAT("mbx_tx_busy", hw.mbx.tx_busy),
- FM10K_STAT("mbx_tx_oversized", hw.mbx.tx_dropped),
- FM10K_STAT("mbx_tx_messages", hw.mbx.tx_messages),
- FM10K_STAT("mbx_tx_dwords", hw.mbx.tx_dwords),
- FM10K_STAT("mbx_rx_messages", hw.mbx.rx_messages),
- FM10K_STAT("mbx_rx_dwords", hw.mbx.rx_dwords),
- FM10K_STAT("mbx_rx_parse_err", hw.mbx.rx_parse_err),
-
FM10K_STAT("tx_hang_count", tx_timeout_count),
FM10K_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
};
+static const struct fm10k_stats fm10k_gstrings_debug_stats[] = {
+ FM10K_STAT("hw_sm_mbx_full", hw_sm_mbx_full),
+ FM10K_STAT("hw_csum_tx_good", hw_csum_tx_good),
+ FM10K_STAT("hw_csum_rx_good", hw_csum_rx_good),
+ FM10K_STAT("rx_switch_errors", rx_switch_errors),
+ FM10K_STAT("rx_drops", rx_drops),
+ FM10K_STAT("rx_pp_errors", rx_pp_errors),
+ FM10K_STAT("rx_link_errors", rx_link_errors),
+ FM10K_STAT("rx_length_errors", rx_length_errors),
+};
+
static const struct fm10k_stats fm10k_gstrings_pf_stats[] = {
FM10K_STAT("timeout", stats.timeout.count),
FM10K_STAT("ur", stats.ur.count),
@@ -100,14 +103,33 @@ static const struct fm10k_stats fm10k_gstrings_pf_stats[] = {
FM10K_STAT("nodesc_drop", stats.nodesc_drop.count),
};
+#define FM10K_MBX_STAT(_name, _stat) { \
+ .stat_string = _name, \
+ .sizeof_stat = FIELD_SIZEOF(struct fm10k_mbx_info, _stat), \
+ .stat_offset = offsetof(struct fm10k_mbx_info, _stat) \
+}
+
+static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = {
+ FM10K_MBX_STAT("mbx_tx_busy", tx_busy),
+ FM10K_MBX_STAT("mbx_tx_oversized", tx_dropped),
+ FM10K_MBX_STAT("mbx_tx_messages", tx_messages),
+ FM10K_MBX_STAT("mbx_tx_dwords", tx_dwords),
+ FM10K_MBX_STAT("mbx_rx_messages", rx_messages),
+ FM10K_MBX_STAT("mbx_rx_dwords", rx_dwords),
+ FM10K_MBX_STAT("mbx_rx_parse_err", rx_parse_err),
+};
+
#define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats)
+#define FM10K_DEBUG_STATS_LEN ARRAY_SIZE(fm10k_gstrings_debug_stats)
#define FM10K_PF_STATS_LEN ARRAY_SIZE(fm10k_gstrings_pf_stats)
+#define FM10K_MBX_STATS_LEN ARRAY_SIZE(fm10k_gstrings_mbx_stats)
#define FM10K_QUEUE_STATS_LEN(_n) \
( (_n) * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64)))
#define FM10K_STATIC_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \
- FM10K_NETDEV_STATS_LEN)
+ FM10K_NETDEV_STATS_LEN + \
+ FM10K_MBX_STATS_LEN)
static const char fm10k_gstrings_test[][ETH_GSTRING_LEN] = {
"Mailbox test (on/offline)"
@@ -120,47 +142,97 @@ enum fm10k_self_test_types {
FM10K_TEST_MAX = FM10K_TEST_LEN
};
-static void fm10k_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+enum {
+ FM10K_PRV_FLAG_DEBUG_STATS,
+ FM10K_PRV_FLAG_LEN,
+};
+
+static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = {
+ "debug-statistics",
+};
+
+static void fm10k_get_stat_strings(struct net_device *dev, u8 *data)
{
struct fm10k_intfc *interface = netdev_priv(dev);
+ struct fm10k_iov_data *iov_data = interface->iov_data;
char *p = (char *)data;
unsigned int i;
+ unsigned int j;
- switch (stringset) {
- case ETH_SS_TEST:
- memcpy(data, *fm10k_gstrings_test,
- FM10K_TEST_LEN * ETH_GSTRING_LEN);
- break;
- case ETH_SS_STATS:
- for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) {
- memcpy(p, fm10k_gstrings_net_stats[i].stat_string,
+ for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) {
+ memcpy(p, fm10k_gstrings_net_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, fm10k_gstrings_global_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ if (interface->flags & FM10K_FLAG_DEBUG_STATS) {
+ for (i = 0; i < FM10K_DEBUG_STATS_LEN; i++) {
+ memcpy(p, fm10k_gstrings_debug_stats[i].stat_string,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
- for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) {
- memcpy(p, fm10k_gstrings_global_stats[i].stat_string,
+ }
+
+ for (i = 0; i < FM10K_MBX_STATS_LEN; i++) {
+ memcpy(p, fm10k_gstrings_mbx_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ if (interface->hw.mac.type != fm10k_mac_vf) {
+ for (i = 0; i < FM10K_PF_STATS_LEN; i++) {
+ memcpy(p, fm10k_gstrings_pf_stats[i].stat_string,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+ }
- if (interface->hw.mac.type != fm10k_mac_vf) {
- for (i = 0; i < FM10K_PF_STATS_LEN; i++) {
- memcpy(p, fm10k_gstrings_pf_stats[i].stat_string,
- ETH_GSTRING_LEN);
+ if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) {
+ for (i = 0; i < iov_data->num_vfs; i++) {
+ for (j = 0; j < FM10K_MBX_STATS_LEN; j++) {
+ snprintf(p,
+ ETH_GSTRING_LEN,
+ "vf_%u_%s", i,
+ fm10k_gstrings_mbx_stats[j].stat_string);
p += ETH_GSTRING_LEN;
}
}
+ }
- for (i = 0; i < interface->hw.mac.max_queues; i++) {
- sprintf(p, "tx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < interface->hw.mac.max_queues; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+}
+
+static void fm10k_get_strings(struct net_device *dev,
+ u32 stringset, u8 *data)
+{
+ char *p = (char *)data;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *fm10k_gstrings_test,
+ FM10K_TEST_LEN * ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ fm10k_get_stat_strings(dev, data);
+ break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(p, fm10k_prv_flags,
+ FM10K_PRV_FLAG_LEN * ETH_GSTRING_LEN);
break;
}
}
@@ -168,6 +240,7 @@ static void fm10k_get_strings(struct net_device *dev, u32 stringset, u8 *data)
static int fm10k_get_sset_count(struct net_device *dev, int sset)
{
struct fm10k_intfc *interface = netdev_priv(dev);
+ struct fm10k_iov_data *iov_data = interface->iov_data;
struct fm10k_hw *hw = &interface->hw;
int stats_len = FM10K_STATIC_STATS_LEN;
@@ -180,7 +253,16 @@ static int fm10k_get_sset_count(struct net_device *dev, int sset)
if (hw->mac.type != fm10k_mac_vf)
stats_len += FM10K_PF_STATS_LEN;
+ if (interface->flags & FM10K_FLAG_DEBUG_STATS) {
+ stats_len += FM10K_DEBUG_STATS_LEN;
+
+ if (iov_data)
+ stats_len += FM10K_MBX_STATS_LEN * iov_data->num_vfs;
+ }
+
return stats_len;
+ case ETH_SS_PRIV_FLAGS:
+ return FM10K_PRV_FLAG_LEN;
default:
return -EOPNOTSUPP;
}
@@ -192,6 +274,7 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev,
{
const int stat_count = sizeof(struct fm10k_queue_stats) / sizeof(u64);
struct fm10k_intfc *interface = netdev_priv(netdev);
+ struct fm10k_iov_data *iov_data = interface->iov_data;
struct net_device_stats *net_stats = &netdev->stats;
char *p;
int i, j;
@@ -211,13 +294,47 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev,
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- if (interface->hw.mac.type != fm10k_mac_vf)
+ if (interface->flags & FM10K_FLAG_DEBUG_STATS) {
+ for (i = 0; i < FM10K_DEBUG_STATS_LEN; i++) {
+ p = (char *)interface + fm10k_gstrings_debug_stats[i].stat_offset;
+ *(data++) = (fm10k_gstrings_debug_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ }
+
+ for (i = 0; i < FM10K_MBX_STATS_LEN; i++) {
+ p = (char *)&interface->hw.mbx + fm10k_gstrings_mbx_stats[i].stat_offset;
+ *(data++) = (fm10k_gstrings_mbx_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+
+ if (interface->hw.mac.type != fm10k_mac_vf) {
for (i = 0; i < FM10K_PF_STATS_LEN; i++) {
p = (char *)interface +
fm10k_gstrings_pf_stats[i].stat_offset;
*(data++) = (fm10k_gstrings_pf_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
+ }
+
+ if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) {
+ for (i = 0; i < iov_data->num_vfs; i++) {
+ struct fm10k_vf_info *vf_info;
+ vf_info = &iov_data->vf_info[i];
+
+ /* skip stats if we don't have a vf info */
+ if (!vf_info) {
+ data += FM10K_MBX_STATS_LEN;
+ continue;
+ }
+
+ for (j = 0; j < FM10K_MBX_STATS_LEN; j++) {
+ p = (char *)&vf_info->mbx + fm10k_gstrings_mbx_stats[j].stat_offset;
+ *(data++) = (fm10k_gstrings_mbx_stats[j].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ }
+ }
for (i = 0; i < interface->hw.mac.max_queues; i++) {
struct fm10k_ring *ring;
@@ -398,10 +515,6 @@ static void fm10k_get_drvinfo(struct net_device *dev,
sizeof(info->version) - 1);
strncpy(info->bus_info, pci_name(interface->pdev),
sizeof(info->bus_info) - 1);
-
- info->n_stats = fm10k_get_sset_count(dev, ETH_SS_STATS);
-
- info->regdump_len = fm10k_get_regs_len(dev);
}
static void fm10k_get_pauseparam(struct net_device *dev,
@@ -881,6 +994,33 @@ static void fm10k_self_test(struct net_device *dev,
eth_test->flags |= ETH_TEST_FL_FAILED;
}
+static u32 fm10k_get_priv_flags(struct net_device *netdev)
+{
+ struct fm10k_intfc *interface = netdev_priv(netdev);
+ u32 priv_flags = 0;
+
+ if (interface->flags & FM10K_FLAG_DEBUG_STATS)
+ priv_flags |= 1 << FM10K_PRV_FLAG_DEBUG_STATS;
+
+ return priv_flags;
+}
+
+static int fm10k_set_priv_flags(struct net_device *netdev, u32 priv_flags)
+{
+ struct fm10k_intfc *interface = netdev_priv(netdev);
+
+ if (priv_flags >= (1 << FM10K_PRV_FLAG_LEN))
+ return -EINVAL;
+
+ if (priv_flags & (1 << FM10K_PRV_FLAG_DEBUG_STATS))
+ interface->flags |= FM10K_FLAG_DEBUG_STATS;
+ else
+ interface->flags &= ~FM10K_FLAG_DEBUG_STATS;
+
+ return 0;
+}
+
static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev)
{
return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG;
@@ -1094,6 +1234,8 @@ static const struct ethtool_ops fm10k_ethtool_ops = {
.get_regs = fm10k_get_regs,
.get_regs_len = fm10k_get_regs_len,
.self_test = fm10k_self_test,
+ .get_priv_flags = fm10k_get_priv_flags,
+ .set_priv_flags = fm10k_set_priv_flags,
.get_rxfh_indir_size = fm10k_get_reta_size,
.get_rxfh_key_size = fm10k_get_rssrk_size,
.get_rxfh = fm10k_get_rssh,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index 94571e6e790c..acfb8b1f88a7 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -137,8 +137,11 @@ process_mbx:
}
/* guarantee we have free space in the SM mailbox */
- if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU))
+ if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) {
+ /* keep track of how many times this occurs */
+ interface->hw_sm_mbx_full++;
break;
+ }
/* cleanup mailbox and process received messages */
mbx->ops.process(hw, mbx);
@@ -228,9 +231,6 @@ int fm10k_iov_resume(struct pci_dev *pdev)
hw->iov.ops.set_lport(hw, vf_info, i,
FM10K_VF_FLAG_MULTI_CAPABLE);
- /* assign our default vid to the VF following reset */
- vf_info->sw_vid = hw->mac.default_vid;
-
/* mailbox is disconnected so we don't send a message */
hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index b5b2925103ec..e76a44cf330c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -398,6 +398,8 @@ static inline void fm10k_rx_checksum(struct fm10k_ring *ring,
return;
skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ ring->rx_stats.csum_good++;
}
#define FM10K_RSS_L4_TYPES_MASK \
@@ -497,8 +499,11 @@ static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
if (rx_desc->w.vlan) {
u16 vid = le16_to_cpu(rx_desc->w.vlan);
- if (vid != rx_ring->vid)
+ if ((vid & VLAN_VID_MASK) != rx_ring->vid)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+ else if (vid & VLAN_PRIO_MASK)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ vid & VLAN_PRIO_MASK);
}
fm10k_type_trans(rx_ring, rx_desc, skb);
@@ -553,6 +558,18 @@ static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring,
{
if (unlikely((fm10k_test_staterr(rx_desc,
FM10K_RXD_STATUS_RXE)))) {
+#define FM10K_TEST_RXD_BIT(rxd, bit) \
+ ((rxd)->w.csum_err & cpu_to_le16(bit))
+ if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_ERROR))
+ rx_ring->rx_stats.switch_errors++;
+ if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_NO_DESCRIPTOR))
+ rx_ring->rx_stats.drops++;
+ if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_PP_ERROR))
+ rx_ring->rx_stats.pp_errors++;
+ if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_READY))
+ rx_ring->rx_stats.link_errors++;
+ if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_TOO_BIG))
+ rx_ring->rx_stats.length_errors++;
dev_kfree_skb_any(skb);
rx_ring->rx_stats.errors++;
return true;
@@ -576,9 +593,9 @@ static void fm10k_receive_skb(struct fm10k_q_vector *q_vector,
napi_gro_receive(&q_vector->napi, skb);
}
-static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
- struct fm10k_ring *rx_ring,
- int budget)
+static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
+ struct fm10k_ring *rx_ring,
+ int budget)
{
struct sk_buff *skb = rx_ring->skb;
unsigned int total_bytes = 0, total_packets = 0;
@@ -645,7 +662,7 @@ static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
q_vector->rx.total_packets += total_packets;
q_vector->rx.total_bytes += total_bytes;
- return total_packets < budget;
+ return total_packets;
}
#define VXLAN_HLEN (sizeof(struct udphdr) + 8)
@@ -878,6 +895,7 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
/* update TX checksum flag */
first->tx_flags |= FM10K_TX_FLAGS_CSUM;
+ tx_ring->tx_stats.csum_good++;
no_csum:
/* populate Tx descriptor header size and mss */
@@ -1079,9 +1097,7 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
struct fm10k_tx_buffer *first;
int tso;
u32 tx_flags = 0;
-#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD
unsigned short f;
-#endif
u16 count = TXD_USE_COUNT(skb_headlen(skb));
/* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD,
@@ -1089,12 +1105,9 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
* + 2 desc gap to keep tail from touching head
* otherwise try next time
*/
-#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-#else
- count += skb_shinfo(skb)->nr_frags;
-#endif
+
if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
tx_ring->tx_stats.tx_busy++;
return NETDEV_TX_BUSY;
@@ -1409,7 +1422,7 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
struct fm10k_q_vector *q_vector =
container_of(napi, struct fm10k_q_vector, napi);
struct fm10k_ring *ring;
- int per_ring_budget;
+ int per_ring_budget, work_done = 0;
bool clean_complete = true;
fm10k_for_each_ring(ring, q_vector->tx)
@@ -1423,16 +1436,19 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
else
per_ring_budget = budget;
- fm10k_for_each_ring(ring, q_vector->rx)
- clean_complete &= fm10k_clean_rx_irq(q_vector, ring,
- per_ring_budget);
+ fm10k_for_each_ring(ring, q_vector->rx) {
+ int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget);
+
+ work_done += work;
+ clean_complete &= !!(work < per_ring_budget);
+ }
/* If all work not completed, return budget and keep polling */
if (!clean_complete)
return budget;
/* all work done, exit the polling mode */
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
/* re-enable the q_vector */
fm10k_qv_enable(q_vector);
@@ -1892,7 +1908,7 @@ static void fm10k_init_reta(struct fm10k_intfc *interface)
u32 reta, base;
/* If the netdev is initialized we have to maintain table if possible */
- if (interface->netdev->reg_state) {
+ if (interface->netdev->reg_state != NETREG_UNINITIALIZED) {
for (i = FM10K_RETA_SIZE; i--;) {
reta = interface->reta[i];
if ((((reta << 24) >> 24) < rss_i) &&
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index 1a4b52637de9..af09a1b272e6 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -129,8 +129,8 @@ static u16 fm10k_fifo_head_drop(struct fm10k_mbx_fifo *fifo)
* fm10k_fifo_drop_all - Drop all messages in FIFO
* @fifo: pointer to FIFO
*
- * This function resets the head pointer to drop all messages in the FIFO,
- * and ensure the FIFO is empty.
+ * This function resets the head pointer to drop all messages in the FIFO and
+ * ensure the FIFO is empty.
**/
static void fm10k_fifo_drop_all(struct fm10k_mbx_fifo *fifo)
{
@@ -899,6 +899,27 @@ static void fm10k_mbx_create_disconnect_hdr(struct fm10k_mbx_info *mbx)
}
/**
+ * fm10k_mbx_create_fake_disconnect_hdr - Generate a false disconnect mailbox header
+ * @mbx: pointer to mailbox
+ *
+ * This function creates a fake disconnect header for loading into remote
+ * mailbox header. The primary purpose is to prevent errors on immediate
+ * start up after mbx->connect.
+ **/
+static void fm10k_mbx_create_fake_disconnect_hdr(struct fm10k_mbx_info *mbx)
+{
+ u32 hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_DISCONNECT, TYPE) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->head, TAIL) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->tail, HEAD);
+ u16 crc = fm10k_crc_16b(&hdr, mbx->local, 1);
+
+ mbx->mbx_lock |= FM10K_MBX_ACK;
+
+ /* load header to memory to be written */
+ mbx->mbx_hdr = hdr | FM10K_MSG_HDR_FIELD_SET(crc, CRC);
+}
+
+/**
* fm10k_mbx_create_error_msg - Generate a error message
* @mbx: pointer to mailbox
* @err: local error encountered
@@ -1046,9 +1067,26 @@ static s32 fm10k_mbx_create_reply(struct fm10k_hw *hw,
**/
static void fm10k_mbx_reset_work(struct fm10k_mbx_info *mbx)
{
+ u16 len, head, ack;
+
/* reset our outgoing max size back to Rx limits */
mbx->max_size = mbx->rx.size - 1;
+ /* update mbx->pulled to account for tail_len and ack */
+ head = FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, HEAD);
+ ack = fm10k_mbx_index_len(mbx, head, mbx->tail);
+ mbx->pulled += mbx->tail_len - ack;
+
+ /* now drop any messages which have started or finished transmitting */
+ while (fm10k_fifo_head_len(&mbx->tx) && mbx->pulled) {
+ len = fm10k_fifo_head_drop(&mbx->tx);
+ mbx->tx_dropped++;
+ if (mbx->pulled >= len)
+ mbx->pulled -= len;
+ else
+ mbx->pulled = 0;
+ }
+
/* just do a quick resysnc to start of message */
mbx->pushed = 0;
mbx->pulled = 0;
@@ -1418,8 +1456,10 @@ static s32 fm10k_mbx_connect(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
/* Place mbx in ready to connect state */
mbx->state = FM10K_STATE_CONNECT;
+ fm10k_mbx_reset_work(mbx);
+
/* initialize header of remote mailbox */
- fm10k_mbx_create_disconnect_hdr(mbx);
+ fm10k_mbx_create_fake_disconnect_hdr(mbx);
fm10k_write_reg(hw, mbx->mbmem_reg ^ mbx->mbmem_len, mbx->mbx_hdr);
/* enable interrupt and notify other party of new message */
@@ -1725,7 +1765,7 @@ static void fm10k_sm_mbx_disconnect(struct fm10k_hw *hw,
mbx->state = FM10K_STATE_CLOSED;
mbx->remote = 0;
fm10k_mbx_reset_work(mbx);
- fm10k_mbx_update_max_size(mbx, 0);
+ fm10k_fifo_drop_all(&mbx->tx);
fm10k_write_reg(hw, mbx->mbmem_reg, 0);
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 99228bf46c12..639263d5e833 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -758,6 +758,7 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
struct fm10k_intfc *interface = netdev_priv(netdev);
struct fm10k_hw *hw = &interface->hw;
s32 err;
+ int i;
/* updates do not apply to VLAN 0 */
if (!vid)
@@ -775,8 +776,25 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
if (!set)
clear_bit(vid, interface->active_vlans);
- /* if default VLAN is already present do nothing */
- if (vid == hw->mac.default_vid)
+ /* disable the default VID on ring if we have an active VLAN */
+ for (i = 0; i < interface->num_rx_queues; i++) {
+ struct fm10k_ring *rx_ring = interface->rx_ring[i];
+ u16 rx_vid = rx_ring->vid & (VLAN_N_VID - 1);
+
+ if (test_bit(rx_vid, interface->active_vlans))
+ rx_ring->vid |= FM10K_VLAN_CLEAR;
+ else
+ rx_ring->vid &= ~FM10K_VLAN_CLEAR;
+ }
+
+ /* Do not remove default VID related entries from VLAN and MAC tables */
+ if (!set && vid == hw->mac.default_vid)
+ return 0;
+
+ /* Do not throw an error if the interface is down. We will sync once
+ * we come up
+ */
+ if (test_bit(__FM10K_DOWN, &interface->state))
return 0;
fm10k_mbx_lock(interface);
@@ -996,21 +1014,6 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
int xcast_mode;
u16 vid, glort;
- /* restore our address if perm_addr is set */
- if (hw->mac.type == fm10k_mac_vf) {
- if (is_valid_ether_addr(hw->mac.perm_addr)) {
- ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
- ether_addr_copy(netdev->perm_addr, hw->mac.perm_addr);
- ether_addr_copy(netdev->dev_addr, hw->mac.perm_addr);
- netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
- }
-
- if (hw->mac.vlan_override)
- netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
- else
- netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
- }
-
/* record glort for this interface */
glort = interface->glort;
@@ -1045,7 +1048,7 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
vid, true, 0);
}
- /* update xcast mode before syncronizing addresses */
+ /* update xcast mode before synchronizing addresses */
hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode);
/* synchronize all of the addresses */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index ce53ff25f88d..74be792f3f1b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -170,6 +170,21 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
/* reassociate interrupts */
fm10k_mbx_request_irq(interface);
+ /* update hardware address for VFs if perm_addr has changed */
+ if (hw->mac.type == fm10k_mac_vf) {
+ if (is_valid_ether_addr(hw->mac.perm_addr)) {
+ ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
+ ether_addr_copy(netdev->perm_addr, hw->mac.perm_addr);
+ ether_addr_copy(netdev->dev_addr, hw->mac.perm_addr);
+ netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
+ }
+
+ if (hw->mac.vlan_override)
+ netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+ else
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+ }
+
/* reset clock */
fm10k_ts_reset(interface);
@@ -259,8 +274,6 @@ static void fm10k_watchdog_update_host_state(struct fm10k_intfc *interface)
* @interface: board private structure
*
* This function will process both the upstream and downstream mailboxes.
- * It is necessary for us to hold the rtnl_lock while doing this as the
- * mailbox accesses are protected by this lock.
**/
static void fm10k_mbx_subtask(struct fm10k_intfc *interface)
{
@@ -315,6 +328,9 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
{
struct net_device_stats *net_stats = &interface->netdev->stats;
struct fm10k_hw *hw = &interface->hw;
+ u64 hw_csum_tx_good = 0, hw_csum_rx_good = 0, rx_length_errors = 0;
+ u64 rx_switch_errors = 0, rx_drops = 0, rx_pp_errors = 0;
+ u64 rx_link_errors = 0;
u64 rx_errors = 0, rx_csum_errors = 0, tx_csum_errors = 0;
u64 restart_queue = 0, tx_busy = 0, alloc_failed = 0;
u64 rx_bytes_nic = 0, rx_pkts_nic = 0, rx_drops_nic = 0;
@@ -334,6 +350,7 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
tx_csum_errors += tx_ring->tx_stats.csum_err;
bytes += tx_ring->stats.bytes;
pkts += tx_ring->stats.packets;
+ hw_csum_tx_good += tx_ring->tx_stats.csum_good;
}
interface->restart_queue = restart_queue;
@@ -341,6 +358,8 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
net_stats->tx_bytes = bytes;
net_stats->tx_packets = pkts;
interface->tx_csum_errors = tx_csum_errors;
+ interface->hw_csum_tx_good = hw_csum_tx_good;
+
/* gather some stats to the interface struct that are per queue */
for (bytes = 0, pkts = 0, i = 0; i < interface->num_rx_queues; i++) {
struct fm10k_ring *rx_ring = interface->rx_ring[i];
@@ -350,12 +369,24 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
alloc_failed += rx_ring->rx_stats.alloc_failed;
rx_csum_errors += rx_ring->rx_stats.csum_err;
rx_errors += rx_ring->rx_stats.errors;
+ hw_csum_rx_good += rx_ring->rx_stats.csum_good;
+ rx_switch_errors += rx_ring->rx_stats.switch_errors;
+ rx_drops += rx_ring->rx_stats.drops;
+ rx_pp_errors += rx_ring->rx_stats.pp_errors;
+ rx_link_errors += rx_ring->rx_stats.link_errors;
+ rx_length_errors += rx_ring->rx_stats.length_errors;
}
net_stats->rx_bytes = bytes;
net_stats->rx_packets = pkts;
interface->alloc_failed = alloc_failed;
interface->rx_csum_errors = rx_csum_errors;
+ interface->hw_csum_rx_good = hw_csum_rx_good;
+ interface->rx_switch_errors = rx_switch_errors;
+ interface->rx_drops = rx_drops;
+ interface->rx_pp_errors = rx_pp_errors;
+ interface->rx_link_errors = rx_link_errors;
+ interface->rx_length_errors = rx_length_errors;
hw->mac.ops.update_hw_stats(hw, &interface->stats);
@@ -483,7 +514,7 @@ static void fm10k_service_task(struct work_struct *work)
interface = container_of(work, struct fm10k_intfc, service_task);
- /* tasks always capable of running, but must be rtnl protected */
+ /* tasks run even when interface is down */
fm10k_mbx_subtask(interface);
fm10k_detach_subtask(interface);
fm10k_reset_subtask(interface);
@@ -663,6 +694,10 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
/* assign default VLAN to queue */
ring->vid = hw->mac.default_vid;
+ /* if we have an active VLAN, disable default VID */
+ if (test_bit(hw->mac.default_vid, interface->active_vlans))
+ ring->vid |= FM10K_VLAN_CLEAR;
+
/* Map interrupt */
if (ring->q_vector) {
rxint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
@@ -861,10 +896,12 @@ void fm10k_netpoll(struct net_device *netdev)
#endif
#define FM10K_ERR_MSG(type) case (type): error = #type; break
-static void fm10k_print_fault(struct fm10k_intfc *interface, int type,
+static void fm10k_handle_fault(struct fm10k_intfc *interface, int type,
struct fm10k_fault *fault)
{
struct pci_dev *pdev = interface->pdev;
+ struct fm10k_hw *hw = &interface->hw;
+ struct fm10k_iov_data *iov_data = interface->iov_data;
char *error;
switch (type) {
@@ -918,6 +955,30 @@ static void fm10k_print_fault(struct fm10k_intfc *interface, int type,
"%s Address: 0x%llx SpecInfo: 0x%x Func: %02x.%0x\n",
error, fault->address, fault->specinfo,
PCI_SLOT(fault->func), PCI_FUNC(fault->func));
+
+ /* For VF faults, clear out the respective LPORT, reset the queue
+ * resources, and then reconnect to the mailbox. This allows the
+ * VF in question to resume operation. For transient faults caused by
+ * non-malicious behavior, this logs the fault and lets the VF continue
+ * working. A malicious VF, however, will simply be able to attempt the
+ * malicious behavior again. In that case the system administrator will
+ * need to step in and manually remove or disable the VF in question.
+ */
+ if (fault->func && iov_data) {
+ int vf = fault->func - 1;
+ struct fm10k_vf_info *vf_info = &iov_data->vf_info[vf];
+
+ hw->iov.ops.reset_lport(hw, vf_info);
+ hw->iov.ops.reset_resources(hw, vf_info);
+
+ /* reset_lport disables the VF, so re-enable it */
+ hw->iov.ops.set_lport(hw, vf_info, vf,
+ FM10K_VF_FLAG_MULTI_CAPABLE);
+
+ /* reset_resources will disconnect from the mbx */
+ vf_info->mbx.ops.connect(hw, &vf_info->mbx);
+ }
}
static void fm10k_report_fault(struct fm10k_intfc *interface, u32 eicr)
@@ -941,7 +1002,7 @@ static void fm10k_report_fault(struct fm10k_intfc *interface, u32 eicr)
continue;
}
- fm10k_print_fault(interface, type, &fault);
+ fm10k_handle_fault(interface, type, &fault);
}
}
@@ -1705,22 +1766,86 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
static void fm10k_slot_warn(struct fm10k_intfc *interface)
{
- struct device *dev = &interface->pdev->dev;
+ enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
+ enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
struct fm10k_hw *hw = &interface->hw;
+ int max_gts = 0, expected_gts = 0;
+
+ if (pcie_get_minimum_link(interface->pdev, &speed, &width) ||
+ speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
+ dev_warn(&interface->pdev->dev,
+ "Unable to determine PCI Express bandwidth.\n");
+ return;
+ }
+
+ switch (speed) {
+ case PCIE_SPEED_2_5GT:
+ /* 8b/10b encoding reduces max throughput by 20% */
+ max_gts = 2 * width;
+ break;
+ case PCIE_SPEED_5_0GT:
+ /* 8b/10b encoding reduces max throughput by 20% */
+ max_gts = 4 * width;
+ break;
+ case PCIE_SPEED_8_0GT:
+ /* 128b/130b encoding has less than 2% impact on throughput */
+ max_gts = 8 * width;
+ break;
+ default:
+ dev_warn(&interface->pdev->dev,
+ "Unable to determine PCI Express bandwidth.\n");
+ return;
+ }
+
+ dev_info(&interface->pdev->dev,
+ "PCI Express bandwidth of %dGT/s available\n",
+ max_gts);
+ dev_info(&interface->pdev->dev,
+ "(Speed:%s, Width: x%d, Encoding Loss:%s, Payload:%s)\n",
+ (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
+ speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
+ speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
+ "Unknown"),
+ hw->bus.width,
+ (speed == PCIE_SPEED_2_5GT ? "20%" :
+ speed == PCIE_SPEED_5_0GT ? "20%" :
+ speed == PCIE_SPEED_8_0GT ? "<2%" :
+ "Unknown"),
+ (hw->bus.payload == fm10k_bus_payload_128 ? "128B" :
+ hw->bus.payload == fm10k_bus_payload_256 ? "256B" :
+ hw->bus.payload == fm10k_bus_payload_512 ? "512B" :
+ "Unknown"));
- if (hw->mac.ops.is_slot_appropriate(hw))
+ switch (hw->bus_caps.speed) {
+ case fm10k_bus_speed_2500:
+ /* 8b/10b encoding reduces max throughput by 20% */
+ expected_gts = 2 * hw->bus_caps.width;
+ break;
+ case fm10k_bus_speed_5000:
+ /* 8b/10b encoding reduces max throughput by 20% */
+ expected_gts = 4 * hw->bus_caps.width;
+ break;
+ case fm10k_bus_speed_8000:
+ /* 128b/130b encoding has less than 2% impact on throughput */
+ expected_gts = 8 * hw->bus_caps.width;
+ break;
+ default:
+ dev_warn(&interface->pdev->dev,
+ "Unable to determine expected PCI Express bandwidth.\n");
return;
+ }
- dev_warn(dev,
- "For optimal performance, a %s %s slot is recommended.\n",
- (hw->bus_caps.width == fm10k_bus_width_pcie_x1 ? "x1" :
- hw->bus_caps.width == fm10k_bus_width_pcie_x4 ? "x4" :
- "x8"),
- (hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s" :
- hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s" :
- "8.0GT/s"));
- dev_warn(dev,
- "A slot with more lanes and/or higher speed is suggested.\n");
+ if (max_gts < expected_gts) {
+ dev_warn(&interface->pdev->dev,
+ "This device requires %dGT/s of bandwidth for optimal performance.\n",
+ expected_gts);
+ dev_warn(&interface->pdev->dev,
+ "A %sslot with x%d lanes is suggested.\n",
+ (hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s " :
+ hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s " :
+ hw->bus_caps.speed == fm10k_bus_speed_8000 ? "8.0GT/s " : ""),
+ hw->bus_caps.width);
+ }
}
/**
@@ -1739,7 +1864,6 @@ static int fm10k_probe(struct pci_dev *pdev,
{
struct net_device *netdev;
struct fm10k_intfc *interface;
- struct fm10k_hw *hw;
int err;
err = pci_enable_device_mem(pdev);
@@ -1783,7 +1907,6 @@ static int fm10k_probe(struct pci_dev *pdev,
interface->netdev = netdev;
interface->pdev = pdev;
- hw = &interface->hw;
interface->uc_addr = ioremap(pci_resource_start(pdev, 0),
FM10K_UC_ADDR_SIZE);
@@ -1825,24 +1948,12 @@ static int fm10k_probe(struct pci_dev *pdev,
/* Register PTP interface */
fm10k_ptp_register(interface);
- /* print bus type/speed/width info */
- dev_info(&pdev->dev, "(PCI Express:%s Width: %s Payload: %s)\n",
- (hw->bus.speed == fm10k_bus_speed_8000 ? "8.0GT/s" :
- hw->bus.speed == fm10k_bus_speed_5000 ? "5.0GT/s" :
- hw->bus.speed == fm10k_bus_speed_2500 ? "2.5GT/s" :
- "Unknown"),
- (hw->bus.width == fm10k_bus_width_pcie_x8 ? "x8" :
- hw->bus.width == fm10k_bus_width_pcie_x4 ? "x4" :
- hw->bus.width == fm10k_bus_width_pcie_x1 ? "x1" :
- "Unknown"),
- (hw->bus.payload == fm10k_bus_payload_128 ? "128B" :
- hw->bus.payload == fm10k_bus_payload_256 ? "256B" :
- hw->bus.payload == fm10k_bus_payload_512 ? "512B" :
- "Unknown"));
-
/* print warning for non-optimal configurations */
fm10k_slot_warn(interface);
+ /* report MAC address for logging */
+ dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
+
/* enable SR-IOV after registering netdev to enforce PF/VF ordering */
fm10k_iov_configure(pdev, 0);
@@ -1983,6 +2094,16 @@ static int fm10k_resume(struct pci_dev *pdev)
if (err)
return err;
+ /* assume host is not ready, to prevent race with watchdog in case we
+ * actually don't have connection to the switch
+ */
+ interface->host_ready = false;
+ fm10k_watchdog_host_not_ready(interface);
+
+ /* clear the service task disable bit to allow service task to start */
+ clear_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+ fm10k_service_event_schedule(interface);
+
/* restore SR-IOV interface */
fm10k_iov_resume(pdev);
@@ -2010,6 +2131,15 @@ static int fm10k_suspend(struct pci_dev *pdev,
fm10k_iov_suspend(pdev);
+ /* the watchdog tasks may read registers, which will appear like a
+ * surprise-remove event once the PCI device is disabled. This will
+ * cause us to close the netdevice, so we don't retain the open/closed
+ * state post-resume. Prevent this by disabling the service task while
+ * suspended, until we actually resume.
+ */
+ set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+ cancel_work_sync(&interface->service_task);
+
rtnl_lock();
if (netif_running(netdev))
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 3ca0233b3ea2..8c0bdc4e4edd 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -59,6 +59,11 @@ static s32 fm10k_reset_hw_pf(struct fm10k_hw *hw)
if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE))
return FM10K_ERR_DMA_PENDING;
+ /* verify the switch is ready for reset */
+ reg = fm10k_read_reg(hw, FM10K_DMA_CTRL2);
+ if (!(reg & FM10K_DMA_CTRL2_SWITCH_READY))
+ goto out;
+
 /* Initiate data path reset */
reg |= FM10K_DMA_CTRL_DATAPATH_RESET;
fm10k_write_reg(hw, FM10K_DMA_CTRL, reg);
@@ -72,6 +77,7 @@ static s32 fm10k_reset_hw_pf(struct fm10k_hw *hw)
if (!(reg & FM10K_IP_NOTINRESET))
err = FM10K_ERR_RESET_FAILED;
+out:
return err;
}
@@ -185,19 +191,6 @@ static s32 fm10k_init_hw_pf(struct fm10k_hw *hw)
}
/**
- * fm10k_is_slot_appropriate_pf - Indicate appropriate slot for this SKU
- * @hw: pointer to hardware structure
- *
- * Looks at the PCIe bus info to confirm whether or not this slot can support
- * the necessary bandwidth for this device.
- **/
-static bool fm10k_is_slot_appropriate_pf(struct fm10k_hw *hw)
-{
- return (hw->bus.speed == hw->bus_caps.speed) &&
- (hw->bus.width == hw->bus_caps.width);
-}
-
-/**
* fm10k_update_vlan_pf - Update status of VLAN ID in VLAN filter table
* @hw: pointer to hardware structure
* @vid: VLAN ID to add to table
@@ -1162,6 +1155,24 @@ s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results,
}
/**
+ * fm10k_iov_select_vid - Select correct default VID
+ * @vf_info: Pointer to VF information structure
+ * @vid: VID to correct
+ *
+ * Will report an error if the VID does not match an administratively
+ * assigned pf_vid. For VID = 0, it will return either the pf_vid or
+ * sw_vid, depending on which one is set.
+ */
+static inline s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid)
+{
+ if (!vid)
+ return vf_info->pf_vid ? vf_info->pf_vid : vf_info->sw_vid;
+ else if (vf_info->pf_vid && vid != vf_info->pf_vid)
+ return FM10K_ERR_PARAM;
+ else
+ return vid;
+}
+
+/**
* fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF
* @hw: Pointer to hardware structure
* @results: Pointer array to message, results[0] is pointer to message
@@ -1175,9 +1186,10 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
struct fm10k_mbx_info *mbx)
{
struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
- int err = 0;
u8 mac[ETH_ALEN];
u32 *result;
+ int err = 0;
+ bool set;
u16 vlan;
u32 vid;
@@ -1193,19 +1205,21 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
if (err)
return err;
- /* if VLAN ID is 0, set the default VLAN ID instead of 0 */
- if (!vid || (vid == FM10K_VLAN_CLEAR)) {
- if (vf_info->pf_vid)
- vid |= vf_info->pf_vid;
- else
- vid |= vf_info->sw_vid;
- } else if (vid != vf_info->pf_vid) {
+ /* verify upper 16 bits are zero */
+ if (vid >> 16)
return FM10K_ERR_PARAM;
- }
+
+ set = !(vid & FM10K_VLAN_CLEAR);
+ vid &= ~FM10K_VLAN_CLEAR;
+
+ err = fm10k_iov_select_vid(vf_info, vid);
+ if (err < 0)
+ return err;
+ else
+ vid = err;
/* update VSI info for VF in regards to VLAN table */
- err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi,
- !(vid & FM10K_VLAN_CLEAR));
+ err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
}
if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
@@ -1221,19 +1235,18 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
memcmp(mac, vf_info->mac, ETH_ALEN))
return FM10K_ERR_PARAM;
- /* if VLAN ID is 0, set the default VLAN ID instead of 0 */
- if (!vlan || (vlan == FM10K_VLAN_CLEAR)) {
- if (vf_info->pf_vid)
- vlan |= vf_info->pf_vid;
- else
- vlan |= vf_info->sw_vid;
- } else if (vf_info->pf_vid) {
- return FM10K_ERR_PARAM;
- }
+ set = !(vlan & FM10K_VLAN_CLEAR);
+ vlan &= ~FM10K_VLAN_CLEAR;
+
+ err = fm10k_iov_select_vid(vf_info, vlan);
+ if (err < 0)
+ return err;
+ else
+ vlan = err;
/* notify switch of request for new unicast address */
- err = hw->mac.ops.update_uc_addr(hw, vf_info->glort, mac, vlan,
- !(vlan & FM10K_VLAN_CLEAR), 0);
+ err = hw->mac.ops.update_uc_addr(hw, vf_info->glort,
+ mac, vlan, set, 0);
}
if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
@@ -1248,19 +1261,18 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
return FM10K_ERR_PARAM;
- /* if VLAN ID is 0, set the default VLAN ID instead of 0 */
- if (!vlan || (vlan == FM10K_VLAN_CLEAR)) {
- if (vf_info->pf_vid)
- vlan |= vf_info->pf_vid;
- else
- vlan |= vf_info->sw_vid;
- } else if (vf_info->pf_vid) {
- return FM10K_ERR_PARAM;
- }
+ set = !(vlan & FM10K_VLAN_CLEAR);
+ vlan &= ~FM10K_VLAN_CLEAR;
+
+ err = fm10k_iov_select_vid(vf_info, vlan);
+ if (err < 0)
+ return err;
+ else
+ vlan = err;
/* notify switch of request for new multicast address */
- err = hw->mac.ops.update_mc_addr(hw, vf_info->glort, mac, vlan,
- !(vlan & FM10K_VLAN_CLEAR));
+ err = hw->mac.ops.update_mc_addr(hw, vf_info->glort,
+ mac, vlan, set);
}
return err;
@@ -1849,7 +1861,6 @@ static struct fm10k_mac_ops mac_ops_pf = {
.init_hw = &fm10k_init_hw_pf,
.start_hw = &fm10k_start_hw_generic,
.stop_hw = &fm10k_stop_hw_generic,
- .is_slot_appropriate = &fm10k_is_slot_appropriate_pf,
.update_vlan = &fm10k_update_vlan_pf,
.read_mac_addr = &fm10k_read_mac_addr_pf,
.update_uc_addr = &fm10k_update_uc_addr_pf,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 2a17d82fa37d..318a212f0a78 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -521,7 +521,6 @@ struct fm10k_mac_ops {
s32 (*stop_hw)(struct fm10k_hw *);
s32 (*get_bus_info)(struct fm10k_hw *);
s32 (*get_host_state)(struct fm10k_hw *, bool *);
- bool (*is_slot_appropriate)(struct fm10k_hw *);
s32 (*update_vlan)(struct fm10k_hw *, u32, u8, bool);
s32 (*read_mac_addr)(struct fm10k_hw *);
s32 (*update_uc_addr)(struct fm10k_hw *, u16, const u8 *,
@@ -763,6 +762,12 @@ enum fm10k_rxdesc_xc {
#define FM10K_RXD_STATUS_L4E 0x4000 /* L4 csum error */
#define FM10K_RXD_STATUS_IPE 0x8000 /* IPv4 csum error */
+#define FM10K_RXD_ERR_SWITCH_ERROR 0x0001 /* Switch found bad packet */
+#define FM10K_RXD_ERR_NO_DESCRIPTOR 0x0002 /* No descriptor available */
+#define FM10K_RXD_ERR_PP_ERROR 0x0004 /* RAM error during processing */
+#define FM10K_RXD_ERR_SWITCH_READY 0x0008 /* Link transition mid-packet */
+#define FM10K_RXD_ERR_TOO_BIG 0x0010 /* Pkt too big for single buf */
+
struct fm10k_ftag {
__be16 swpri_type_user;
__be16 vlan;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
index 94f0f6a146d9..36c8b0aa08fd 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
@@ -131,19 +131,6 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw)
return 0;
}
-/**
- * fm10k_is_slot_appropriate_vf - Indicate appropriate slot for this SKU
- * @hw: pointer to hardware structure
- *
- * Looks at the PCIe bus info to confirm whether or not this slot can support
- * the necessary bandwidth for this device. Since the VF has no control over
- * the "slot" it is in, always indicate that the slot is appropriate.
- **/
-static bool fm10k_is_slot_appropriate_vf(struct fm10k_hw *hw)
-{
- return true;
-}
-
 /* This structure defines the attributes to be parsed below */
const struct fm10k_tlv_attr fm10k_mac_vlan_msg_attr[] = {
FM10K_TLV_ATTR_U32(FM10K_MAC_VLAN_MSG_VLAN),
@@ -552,7 +539,6 @@ static struct fm10k_mac_ops mac_ops_vf = {
.init_hw = &fm10k_init_hw_vf,
.start_hw = &fm10k_start_hw_generic,
.stop_hw = &fm10k_stop_hw_vf,
- .is_slot_appropriate = &fm10k_is_slot_appropriate_vf,
.update_vlan = &fm10k_update_vlan_vf,
.read_mac_addr = &fm10k_read_mac_addr_vf,
.update_uc_addr = &fm10k_update_uc_addr_vf,
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index e7462793d48d..4dd3e26129b4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -71,7 +71,6 @@
#define I40E_MAX_VEB 16
#define I40E_MAX_NUM_DESCRIPTORS 4096
-#define I40E_MAX_REGISTER 0x800000
#define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024)
#define I40E_DEFAULT_NUM_DESCRIPTORS 512
#define I40E_REQ_DESCRIPTOR_MULTIPLE 32
@@ -94,19 +93,26 @@
#endif /* I40E_FCOE */
#define I40E_MAX_AQ_BUF_SIZE 4096
#define I40E_AQ_LEN 256
-#define I40E_AQ_WORK_LIMIT 32
+#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */
#define I40E_MAX_USER_PRIORITY 8
#define I40E_DEFAULT_MSG_ENABLE 4
#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
-#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 9)
+#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16)
/* Ethtool Private Flags */
#define I40E_PRIV_FLAGS_NPAR_FLAG BIT(0)
+#define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1)
+#define I40E_PRIV_FLAGS_FD_ATR BIT(2)
+#define I40E_PRIV_FLAGS_VEB_STATS BIT(3)
#define I40E_NVM_VERSION_LO_SHIFT 0
#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
#define I40E_NVM_VERSION_HI_SHIFT 12
#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT)
+#define I40E_OEM_VER_BUILD_MASK 0xffff
+#define I40E_OEM_VER_PATCH_MASK 0xff
+#define I40E_OEM_VER_BUILD_SHIFT 8
+#define I40E_OEM_VER_SHIFT 24
/* The values in here are decimal coded as hex as is the case in the NVM map*/
#define I40E_CURRENT_NVM_VERSION_HI 0x2
@@ -243,7 +249,6 @@ struct i40e_pf {
struct pci_dev *pdev;
struct i40e_hw hw;
unsigned long state;
- unsigned long link_check_timeout;
struct msix_entry *msix_entries;
bool fc_autoneg_status;
@@ -305,7 +310,6 @@ struct i40e_pf {
#ifdef I40E_FCOE
#define I40E_FLAG_FCOE_ENABLED BIT_ULL(11)
#endif /* I40E_FCOE */
-#define I40E_FLAG_IN_NETPOLL BIT_ULL(12)
#define I40E_FLAG_16BYTE_RX_DESC_ENABLED BIT_ULL(13)
#define I40E_FLAG_CLEAN_ADMINQ BIT_ULL(14)
#define I40E_FLAG_FILTER_SYNC BIT_ULL(15)
@@ -327,8 +331,11 @@ struct i40e_pf {
#define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE BIT_ULL(33)
#define I40E_FLAG_128_QP_RSS_CAPABLE BIT_ULL(34)
#define I40E_FLAG_WB_ON_ITR_CAPABLE BIT_ULL(35)
+#define I40E_FLAG_VEB_STATS_ENABLED BIT_ULL(37)
#define I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT_ULL(38)
+#define I40E_FLAG_LINK_POLLING_ENABLED BIT_ULL(39)
#define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40)
+#define I40E_FLAG_NO_PCI_LINK_CHECK BIT_ULL(42)
/* tracks features that get auto disabled by errors */
u64 auto_disable_flags;
@@ -409,6 +416,9 @@ struct i40e_pf {
/* These are only valid in NPAR modes */
u32 npar_max_bw;
u32 npar_min_bw;
+
+ u32 ioremap_len;
+ u32 fd_inv;
};
struct i40e_mac_filter {
@@ -460,6 +470,8 @@ struct i40e_vsi {
#define I40E_VSI_FLAG_VEB_OWNER BIT(1)
unsigned long flags;
+ /* Per VSI lock to protect elements/list (MAC filter) */
+ spinlock_t mac_filter_list_lock;
struct list_head mac_filter_list;
/* VSI stats */
@@ -474,6 +486,7 @@ struct i40e_vsi {
#endif
u32 tx_restart;
u32 tx_busy;
+ u64 tx_linearize;
u32 rx_buf_failed;
u32 rx_page_failed;
@@ -489,6 +502,7 @@ struct i40e_vsi {
*/
u16 rx_itr_setting;
u16 tx_itr_setting;
+ u16 int_rate_limit; /* value in usecs */
u16 rss_table_size;
u16 rss_size;
@@ -534,6 +548,7 @@ struct i40e_vsi {
u16 idx; /* index in pf->vsi[] */
u16 veb_idx; /* index of VEB parent */
struct kobject *kobj; /* sysfs object */
+ bool current_isup; /* Sync 'link up' logging */
/* VSI specific handlers */
irqreturn_t (*irq_handler)(int irq, void *data);
@@ -564,6 +579,8 @@ struct i40e_q_vector {
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[I40E_INT_NAME_STR_LEN];
bool arm_wb_state;
+#define ITR_COUNTDOWN_START 100
+ u8 itr_countdown; /* when 0 should adjust ITR */
} ____cacheline_internodealigned_in_smp;
/* lan device */
@@ -573,22 +590,29 @@ struct i40e_device {
};
/**
- * i40e_fw_version_str - format the FW and NVM version strings
+ * i40e_nvm_version_str - format the NVM version string
* @hw: ptr to the hardware info
**/
-static inline char *i40e_fw_version_str(struct i40e_hw *hw)
+static inline char *i40e_nvm_version_str(struct i40e_hw *hw)
{
static char buf[32];
+ u32 full_ver;
+ u8 ver, patch;
+ u16 build;
+
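+ /* oem_ver layout: bits 31:24 version, 23:8 build, 7:0 patch */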
+ full_ver = hw->nvm.oem_ver;
+ ver = (u8)(full_ver >> I40E_OEM_VER_SHIFT);
+ build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT)
+ & I40E_OEM_VER_BUILD_MASK);
+ patch = (u8)(full_ver & I40E_OEM_VER_PATCH_MASK);
snprintf(buf, sizeof(buf),
- "f%d.%d.%05d a%d.%d n%x.%02x e%x",
- hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
- hw->aq.api_maj_ver, hw->aq.api_min_ver,
+ "%x.%02x 0x%x %d.%d.%d",
(hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >>
I40E_NVM_VERSION_HI_SHIFT,
(hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >>
I40E_NVM_VERSION_LO_SHIFT,
- (hw->nvm.eetrack & 0xffffff));
+ hw->nvm.eetrack, ver, build, patch);
return buf;
}
@@ -667,7 +691,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
bool is_vf, bool is_netdev);
void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan,
bool is_vf, bool is_netdev);
-int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
+int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
u16 uplink, u32 param1);
int i40e_vsi_release(struct i40e_vsi *vsi);
@@ -700,7 +724,24 @@ static inline void i40e_dbg_pf_exit(struct i40e_pf *pf) {}
static inline void i40e_dbg_init(void) {}
static inline void i40e_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS*/
-void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
+/**
+ * i40e_irq_dynamic_enable - Enable default interrupt generation settings
+ * @vsi: pointer to a vsi
+ * @vector: HW interrupt vector to enable, relative to the VSI's base_vector
+ **/
+static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u32 val;
+
+ val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
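+ /* DYN_CTLN registers start at MSI-X vector 1 (vector 0 uses DYN_CTL0),
+ * hence the -1 when indexing by the vector number
+ */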
+ wr32(hw, I40E_PFINT_DYN_CTLN(vector + vsi->base_vector - 1), val);
+ /* skip the flush */
+}
+
void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector);
void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
@@ -739,7 +780,7 @@ int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt);
u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf);
void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi);
void i40e_fcoe_vsi_setup(struct i40e_pf *pf);
-int i40e_init_pf_fcoe(struct i40e_pf *pf);
+void i40e_init_pf_fcoe(struct i40e_pf *pf);
int i40e_fcoe_setup_ddp_resources(struct i40e_vsi *vsi);
void i40e_fcoe_free_ddp_resources(struct i40e_vsi *vsi);
int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring,
@@ -771,4 +812,5 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf);
i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf);
i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf);
+void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
#endif /* _I40E_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index c0e943aecd13..0ff8f01e57ee 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -482,8 +482,12 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
- if (hw->aq.asq.count == 0)
- return I40E_ERR_NOT_READY;
+ mutex_lock(&hw->aq.asq_mutex);
+
+ if (hw->aq.asq.count == 0) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto shutdown_asq_out;
+ }
/* Stop firmware AdminQ processing */
wr32(hw, hw->aq.asq.head, 0);
@@ -492,16 +496,13 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
wr32(hw, hw->aq.asq.bal, 0);
wr32(hw, hw->aq.asq.bah, 0);
- /* make sure lock is available */
- mutex_lock(&hw->aq.asq_mutex);
-
hw->aq.asq.count = 0; /* to indicate uninitialized queue */
/* free ring buffers */
i40e_free_asq_bufs(hw);
+shutdown_asq_out:
mutex_unlock(&hw->aq.asq_mutex);
-
return ret_code;
}
@@ -515,8 +516,12 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
- if (hw->aq.arq.count == 0)
- return I40E_ERR_NOT_READY;
+ mutex_lock(&hw->aq.arq_mutex);
+
+ if (hw->aq.arq.count == 0) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto shutdown_arq_out;
+ }
/* Stop firmware AdminQ processing */
wr32(hw, hw->aq.arq.head, 0);
@@ -525,16 +530,13 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
wr32(hw, hw->aq.arq.bal, 0);
wr32(hw, hw->aq.arq.bah, 0);
- /* make sure lock is available */
- mutex_lock(&hw->aq.arq_mutex);
-
hw->aq.arq.count = 0; /* to indicate uninitialized queue */
/* free ring buffers */
i40e_free_arq_bufs(hw);
+shutdown_arq_out:
mutex_unlock(&hw->aq.arq_mutex);
-
return ret_code;
}
@@ -551,8 +553,9 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
**/
i40e_status i40e_init_adminq(struct i40e_hw *hw)
{
- i40e_status ret_code;
+ u16 cfg_ptr, oem_hi, oem_lo;
u16 eetrack_lo, eetrack_hi;
+ i40e_status ret_code;
int retry = 0;
/* verify input for valid configuration */
@@ -611,6 +614,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
+ i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
+ i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
+ &oem_hi);
+ i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
+ &oem_lo);
+ hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
@@ -657,6 +666,9 @@ i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
/* destroy the locks */
+ if (hw->nvm_buff.va)
+ i40e_free_virt_mem(hw, &hw->nvm_buff);
+
return ret_code;
}
@@ -678,8 +690,7 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
details = I40E_ADMINQ_DETAILS(*asq, ntc);
while (rd32(hw, hw->aq.asq.head) != ntc) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
- "%s: ntc %d head %d.\n", __func__, ntc,
- rd32(hw, hw->aq.asq.head));
+ "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
if (details->callback) {
I40E_ADMINQ_CALLBACK cb_func =
@@ -742,19 +753,23 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
u16 retval = 0;
u32 val = 0;
- val = rd32(hw, hw->aq.asq.head);
- if (val >= hw->aq.num_asq_entries) {
+ mutex_lock(&hw->aq.asq_mutex);
+
+ if (hw->aq.asq.count == 0) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
- "AQTX: head overrun at %d\n", val);
+ "AQTX: Admin queue not initialized.\n");
status = I40E_ERR_QUEUE_EMPTY;
- goto asq_send_command_exit;
+ goto asq_send_command_error;
}
- if (hw->aq.asq.count == 0) {
+ hw->aq.asq_last_status = I40E_AQ_RC_OK;
+
+ val = rd32(hw, hw->aq.asq.head);
+ if (val >= hw->aq.num_asq_entries) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
- "AQTX: Admin queue not initialized.\n");
+ "AQTX: head overrun at %d\n", val);
status = I40E_ERR_QUEUE_EMPTY;
- goto asq_send_command_exit;
+ goto asq_send_command_error;
}
details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
@@ -779,8 +794,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
desc->flags &= ~cpu_to_le16(details->flags_dis);
desc->flags |= cpu_to_le16(details->flags_ena);
- mutex_lock(&hw->aq.asq_mutex);
-
if (buff_size > hw->aq.asq_buf_size) {
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
@@ -889,6 +902,10 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
"AQTX: desc and buffer writeback:\n");
i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
+ /* save writeback aq if requested */
+ if (details->wb_desc)
+ *details->wb_desc = *desc_on_ring;
+
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
@@ -900,7 +917,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
asq_send_command_error:
mutex_unlock(&hw->aq.asq_mutex);
-asq_send_command_exit:
return status;
}
@@ -1023,6 +1039,19 @@ clean_arq_element_err:
i40e_release_nvm(hw);
hw->aq.nvm_release_on_done = false;
}
+
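+ /* an AdminQ receive event completes the pending NVM update step,
+ * so move any wait state forward
+ */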
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ break;
+
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ break;
+
+ default:
+ break;
+ }
}
return ret_code;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 28e519a50de4..12fbbddea299 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -69,6 +69,7 @@ struct i40e_asq_cmd_details {
u16 flags_dis;
bool async;
bool postpone;
+ struct i40e_aq_desc *wb_desc;
};
#define I40E_ADMINQ_DETAILS(R, i) \
@@ -108,9 +109,10 @@ struct i40e_adminq_info {
/**
* i40e_aq_rc_to_posix - convert errors to user-land codes
- * aq_rc: AdminQ error code to convert
+ * aq_ret: AdminQ handler error code; may override aq_rc
+ * aq_rc: AdminQ firmware error code to convert
**/
-static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc)
+static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
{
int aq_to_posix[] = {
0, /* I40E_AQ_RC_OK */
@@ -142,8 +144,9 @@ static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc)
if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT)
return -EAGAIN;
- if (aq_rc >= ARRAY_SIZE(aq_to_posix))
+ if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
return -ERANGE;
+
return aq_to_posix[aq_rc];
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 95d23bfbcbf1..6584b6cd73fd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -1722,11 +1722,13 @@ struct i40e_aqc_get_link_status {
u8 phy_type; /* i40e_aq_phy_type */
u8 link_speed; /* i40e_aq_link_speed */
u8 link_info;
-#define I40E_AQ_LINK_UP 0x01
+#define I40E_AQ_LINK_UP 0x01 /* obsolete */
+#define I40E_AQ_LINK_UP_FUNCTION 0x01
#define I40E_AQ_LINK_FAULT 0x02
#define I40E_AQ_LINK_FAULT_TX 0x04
#define I40E_AQ_LINK_FAULT_RX 0x08
#define I40E_AQ_LINK_FAULT_REMOTE 0x10
+#define I40E_AQ_LINK_UP_PORT 0x20
#define I40E_AQ_MEDIA_AVAILABLE 0x40
#define I40E_AQ_SIGNAL_DETECT 0x80
u8 an_info;
@@ -2062,6 +2064,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
#define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT)
#define I40E_AQC_CEE_APP_FIP_SHIFT 0x8
#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
+
#define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0
#define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT)
#define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3
@@ -2070,10 +2073,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
#define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
#define I40E_AQC_CEE_FCOE_STATUS_SHIFT 0x8
#define I40E_AQC_CEE_FCOE_STATUS_MASK (0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT)
-#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT 0xA
+#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT 0xB
#define I40E_AQC_CEE_ISCSI_STATUS_MASK (0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT)
#define I40E_AQC_CEE_FIP_STATUS_SHIFT 0x10
#define I40E_AQC_CEE_FIP_STATUS_MASK (0x7 << I40E_AQC_CEE_FIP_STATUS_SHIFT)
+
+/* struct i40e_aqc_get_cee_dcb_cfg_v1_resp was originally defined with
+ * word boundary layout issues, which the Linux compilers silently deal
+ * with by adding padding, making the actual struct larger than designed.
+ * However, the FW compiler for the NIC is less lenient and complains
+ * about the struct. Hence, the struct defined here has an extra byte in
+ * fields reserved3 and reserved4 to directly acknowledge that padding,
+ * and the new length is used in the length check macro.
+ */
struct i40e_aqc_get_cee_dcb_cfg_v1_resp {
u8 reserved1;
u8 oper_num_tc;
@@ -2081,9 +2093,9 @@ struct i40e_aqc_get_cee_dcb_cfg_v1_resp {
u8 reserved2;
u8 oper_tc_bw[8];
u8 oper_pfc_en;
- u8 reserved3;
+ u8 reserved3[2];
__le16 oper_app_prio;
- u8 reserved4;
+ u8 reserved4[2];
__le16 tlv_status;
};
@@ -2120,6 +2132,13 @@ I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
struct i40e_aqc_lldp_set_local_mib {
#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0
#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << \
+ SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK (1 << \
+ SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1
u8 type;
u8 reserved0;
__le16 length;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 114dc6450183..2d74c6e4d7b6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -51,7 +51,9 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_B:
case I40E_DEV_ID_QSFP_C:
case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
case I40E_DEV_ID_20G_KR2:
+ case I40E_DEV_ID_20G_KR2_A:
hw->mac.type = I40E_MAC_XL710;
break;
case I40E_DEV_ID_SFP_X722:
@@ -85,7 +87,7 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
* @hw: pointer to the HW structure
* @aq_err: the AQ error code to convert
**/
-char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
{
switch (aq_err) {
case I40E_AQ_RC_OK:
@@ -145,7 +147,7 @@ char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
* @hw: pointer to the HW structure
* @stat_err: the status error code to convert
**/
-char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
+const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
{
switch (stat_err) {
case 0:
@@ -329,25 +331,11 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
len = buf_len;
/* write the full 16-byte chunks */
for (i = 0; i < (len - 16); i += 16)
- i40e_debug(hw, mask,
- "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
- i, buf[i], buf[i + 1], buf[i + 2],
- buf[i + 3], buf[i + 4], buf[i + 5],
- buf[i + 6], buf[i + 7], buf[i + 8],
- buf[i + 9], buf[i + 10], buf[i + 11],
- buf[i + 12], buf[i + 13], buf[i + 14],
- buf[i + 15]);
+ i40e_debug(hw, mask, "\t0x%04X %16ph\n", i, buf + i);
/* write whatever's left over without overrunning the buffer */
- if (i < len) {
- char d_buf[80];
- int j = 0;
-
- memset(d_buf, 0, sizeof(d_buf));
- j += sprintf(d_buf, "\t0x%04X ", i);
- while (i < len)
- j += sprintf(&d_buf[j], " %02X", buf[i++]);
- i40e_debug(hw, mask, "%s\n", d_buf);
- }
+ if (i < len)
+ i40e_debug(hw, mask, "\t0x%04X %*ph\n",
+ i, len - i, buf + i);
}
}
@@ -441,9 +429,6 @@ static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
- cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)lut));
- cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)lut));
-
status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
return status;
@@ -518,8 +503,6 @@ static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
- cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)key));
- cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)key));
status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
@@ -961,6 +944,9 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
else
hw->pf_id = (u8)(func_rid & 0x7);
+ if (hw->mac.type == I40E_MAC_X722)
+ hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
+
status = i40e_init_nvm(hw);
return status;
}
@@ -1038,7 +1024,7 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
if (flags & I40E_AQC_LAN_ADDR_VALID)
- memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac));
+ ether_addr_copy(mac_addr, addrs.pf_lan_mac);
return status;
}
@@ -1061,7 +1047,7 @@ i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
return status;
if (flags & I40E_AQC_PORT_ADDR_VALID)
- memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac));
+ ether_addr_copy(mac_addr, addrs.port_mac);
else
status = I40E_ERR_INVALID_MAC_ADDR;
@@ -1119,7 +1105,7 @@ i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
return status;
if (flags & I40E_AQC_SAN_ADDR_VALID)
- memcpy(mac_addr, &addrs.pf_san_mac, sizeof(addrs.pf_san_mac));
+ ether_addr_copy(mac_addr, addrs.pf_san_mac);
else
status = I40E_ERR_INVALID_MAC_ADDR;
@@ -1260,7 +1246,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
- for (cnt = 0; cnt < grst_del + 2; cnt++) {
+ for (cnt = 0; cnt < grst_del + 10; cnt++) {
reg = rd32(hw, I40E_GLGEN_RSTAT);
if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
break;
@@ -1620,6 +1606,9 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
status = I40E_ERR_UNKNOWN_PHY;
+ if (report_init)
+ hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
+
return status;
}
@@ -1720,14 +1709,14 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
*aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
}
/* Update the link info */
- status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+ status = i40e_update_link_info(hw);
if (status) {
/* Wait a little bit (on 40G cards it sometimes takes a really
* long time for link to come back from the atomic reset)
* and try once more
*/
msleep(1000);
- status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+ status = i40e_update_link_info(hw);
}
if (status)
*aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
@@ -2238,27 +2227,54 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
/**
* i40e_get_link_status - get status of the HW network link
* @hw: pointer to the hw struct
+ * @link_up: pointer to bool (true/false = linkup/linkdown)
*
- * Returns true if link is up, false if link is down.
+ * Sets *link_up to true if link is up, false if link is down.
+ * The value of *link_up is only valid when the return status is 0.
*
* Side effect: LinkStatusEvent reporting becomes enabled
**/
-bool i40e_get_link_status(struct i40e_hw *hw)
+i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
{
i40e_status status = 0;
- bool link_status = false;
if (hw->phy.get_link_info) {
- status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+ status = i40e_update_link_info(hw);
if (status)
- goto i40e_get_link_status_exit;
+ i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
+ status);
}
- link_status = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
+ *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
+
+ return status;
+}
+
+/**
+ * i40e_update_link_info - update status of the HW network link
+ * @hw: pointer to the hw struct
+ **/
+i40e_status i40e_update_link_info(struct i40e_hw *hw)
+{
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ i40e_status status = 0;
+
+ status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+ if (status)
+ return status;
+
+ if (hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) {
+ status = i40e_aq_get_phy_capabilities(hw, false, false,
+ &abilities, NULL);
+ if (status)
+ return status;
-i40e_get_link_status_exit:
- return link_status;
+ memcpy(hw->phy.link_info.module_type, &abilities.module_type,
+ sizeof(hw->phy.link_info.module_type));
+ }
+
+ return status;
}
/**
@@ -2365,6 +2381,7 @@ i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
*vebs_free = le16_to_cpu(cmd_resp->vebs_free);
if (floating) {
u16 flags = le16_to_cpu(cmd_resp->veb_flags);
+
if (flags & I40E_AQC_ADD_VEB_FLOATING)
*floating = true;
else
@@ -3779,7 +3796,7 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
}
if (mac_addr)
- memcpy(cmd->mac, mac_addr, ETH_ALEN);
+ ether_addr_copy(cmd->mac, mac_addr);
cmd->etype = cpu_to_le16(ethtype);
cmd->flags = cpu_to_le16(flags);
@@ -3798,6 +3815,28 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
}
/**
+ * i40e_add_filter_to_drop_tx_flow_control_frames - add a filter to drop Tx flow control frames
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid to which the ethertype filter is added
+ **/
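+/* 0x8808 is the IEEE 802.3 MAC Control (PAUSE frame) Ethertype */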
+#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
+void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+ u16 seid)
+{
+ u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
+ I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
+ I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
+ u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
+ i40e_status status;
+
+ status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
+ seid, 0, true, NULL,
+ NULL);
+ if (status)
+ hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n");
+}
+
+/**
* i40e_aq_alternate_read
* @hw: pointer to the hardware structure
* @reg_addr0: address of first dword to be read
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 90de46aef557..2691277c0055 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -292,6 +292,190 @@ static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv,
}
/**
+ * i40e_parse_cee_pgcfg_tlv
+ * @tlv: CEE DCBX PG CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses CEE DCBX PG CFG TLV
+ **/
+static void i40e_parse_cee_pgcfg_tlv(struct i40e_cee_feat_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ struct i40e_dcb_ets_config *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ u8 priority;
+ int i;
+
+ etscfg = &dcbcfg->etscfg;
+
+ if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK)
+ etscfg->willing = 1;
+
+ etscfg->cbs = 0;
+ /* Priority Group Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_1_MASK) >>
+ I40E_CEE_PGID_PRIO_1_SHIFT);
+ etscfg->prioritytable[i * 2] = priority;
+ priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_0_MASK) >>
+ I40E_CEE_PGID_PRIO_0_SHIFT);
+ etscfg->prioritytable[i * 2 + 1] = priority;
+ offset++;
+ }
+
+ /* PG Percentage Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ etscfg->tcbwtable[i] = buf[offset++];
+
+ /* Number of TCs supported (1 octet) */
+ etscfg->maxtcs = buf[offset];
+}
+
+/**
+ * i40e_parse_cee_pfccfg_tlv
+ * @tlv: CEE DCBX PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses CEE DCBX PFC CFG TLV
+ **/
+static void i40e_parse_cee_pfccfg_tlv(struct i40e_cee_feat_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK)
+ dcbcfg->pfc.willing = 1;
+
+ /* ------------------------
+ * | PFC Enable | PFC TCs |
+ * ------------------------
+ * | 1 octet | 1 octet |
+ */
+ dcbcfg->pfc.pfcenable = buf[0];
+ dcbcfg->pfc.pfccap = buf[1];
+}
+
+/**
+ * i40e_parse_cee_app_tlv
+ * @tlv: CEE DCBX APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses CEE DCBX APP PRIO TLV
+ **/
+static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 length, typelength, offset = 0;
+ struct i40e_cee_app_prio *app;
+ u8 i, up, selector;
+
+ typelength = ntohs(tlv->hdr.typelen);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+
+ dcbcfg->numapps = length / sizeof(*app);
+ if (!dcbcfg->numapps)
+ return;
+
+ for (i = 0; i < dcbcfg->numapps; i++) {
+ app = (struct i40e_cee_app_prio *)(tlv->tlvinfo + offset);
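+ /* use the lowest user priority set in the priority map */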
+ for (up = 0; up < I40E_MAX_USER_PRIORITY; up++) {
+ if (app->prio_map & BIT(up))
+ break;
+ }
+ dcbcfg->app[i].priority = up;
+
+ /* Get Selector from lower 2 bits, and convert to IEEE */
+ selector = (app->upper_oui_sel & I40E_CEE_APP_SELECTOR_MASK);
+ if (selector == I40E_CEE_APP_SEL_ETHTYPE)
+ dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+ else if (selector == I40E_CEE_APP_SEL_TCPIP)
+ dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
+ else
+ /* Keep selector as it is for unknown types */
+ dcbcfg->app[i].selector = selector;
+
+ dcbcfg->app[i].protocolid = ntohs(app->protocol);
+ /* Move to next app */
+ offset += sizeof(*app);
+ }
+}
+
+/**
+ * i40e_parse_cee_tlv
+ * @tlv: CEE DCBX TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Get the TLV subtype and send it to parsing function
+ * based on the subtype value
+ **/
+static void i40e_parse_cee_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 len, tlvlen, sublen, typelength;
+ struct i40e_cee_feat_tlv *sub_tlv;
+ u8 subtype, feat_tlv_count = 0;
+ u32 ouisubtype;
+
+ ouisubtype = ntohl(tlv->ouisubtype);
+ subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
+ I40E_LLDP_TLV_SUBTYPE_SHIFT);
+ /* Return if not CEE DCBX */
+ if (subtype != I40E_CEE_DCBX_TYPE)
+ return;
+
+ typelength = ntohs(tlv->typelength);
+ tlvlen = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ len = sizeof(tlv->typelength) + sizeof(ouisubtype) +
+ sizeof(struct i40e_cee_ctrl_tlv);
+ /* Return if no CEE DCBX Feature TLVs */
+ if (tlvlen <= len)
+ return;
+
+ sub_tlv = (struct i40e_cee_feat_tlv *)((char *)tlv + len);
+ while (feat_tlv_count < I40E_CEE_MAX_FEAT_TYPE) {
+ typelength = ntohs(sub_tlv->hdr.typelen);
+ sublen = (u16)((typelength &
+ I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ subtype = (u8)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
+ I40E_LLDP_TLV_TYPE_SHIFT);
+ switch (subtype) {
+ case I40E_CEE_SUBTYPE_PG_CFG:
+ i40e_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg);
+ break;
+ case I40E_CEE_SUBTYPE_PFC_CFG:
+ i40e_parse_cee_pfccfg_tlv(sub_tlv, dcbcfg);
+ break;
+ case I40E_CEE_SUBTYPE_APP_PRI:
+ i40e_parse_cee_app_tlv(sub_tlv, dcbcfg);
+ break;
+ default:
+ return; /* Invalid Sub-type return */
+ }
+ feat_tlv_count++;
+ /* Move to next sub TLV */
+ sub_tlv = (struct i40e_cee_feat_tlv *)((char *)sub_tlv +
+ sizeof(sub_tlv->hdr.typelen) +
+ sublen);
+ }
+}
+
+/**
* i40e_parse_org_tlv
* @tlv: Organization specific TLV
* @dcbcfg: Local store to update ETS REC data
@@ -312,6 +496,9 @@ static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,
case I40E_IEEE_8021QAZ_OUI:
i40e_parse_ieee_tlv(tlv, dcbcfg);
break;
+ case I40E_CEE_DCBX_OUI:
+ i40e_parse_cee_tlv(tlv, dcbcfg);
+ break;
default:
break;
}
@@ -502,15 +689,18 @@ static void i40e_cee_to_dcb_config(
/* CEE PG data to ETS config */
dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
+ /* Note that the FW creates the oper_prio_tc nibbles reversed
+ * from those in the CEE Priority Group sub-TLV.
+ */
for (i = 0; i < 4; i++) {
tc = (u8)((cee_cfg->oper_prio_tc[i] &
- I40E_CEE_PGID_PRIO_1_MASK) >>
- I40E_CEE_PGID_PRIO_1_SHIFT);
- dcbcfg->etscfg.prioritytable[i*2] = tc;
- tc = (u8)((cee_cfg->oper_prio_tc[i] &
I40E_CEE_PGID_PRIO_0_MASK) >>
I40E_CEE_PGID_PRIO_0_SHIFT);
- dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
+ dcbcfg->etscfg.prioritytable[i * 2] = tc;
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_1_MASK) >>
+ I40E_CEE_PGID_PRIO_1_SHIFT);
+ dcbcfg->etscfg.prioritytable[i * 2 + 1] = tc;
}
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
@@ -531,37 +721,85 @@ static void i40e_cee_to_dcb_config(
dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en;
dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
- status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >>
- I40E_AQC_CEE_APP_STATUS_SHIFT;
+ i = 0;
+ status = (tlv_status & I40E_AQC_CEE_FCOE_STATUS_MASK) >>
+ I40E_AQC_CEE_FCOE_STATUS_SHIFT;
err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
- /* Add APPs if Error is False and Oper/Sync is True */
+ /* Add FCoE APP if Error is False and Oper/Sync is True */
if (!err && sync && oper) {
- /* CEE operating configuration supports FCoE/iSCSI/FIP only */
- dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS;
-
/* FCoE APP */
- dcbcfg->app[0].priority =
+ dcbcfg->app[i].priority =
(app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
I40E_AQC_CEE_APP_FCOE_SHIFT;
- dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
- dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
+ dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FCOE;
+ i++;
+ }
+ status = (tlv_status & I40E_AQC_CEE_ISCSI_STATUS_MASK) >>
+ I40E_AQC_CEE_ISCSI_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add iSCSI APP if Error is False and Oper/Sync is True */
+ if (!err && sync && oper) {
/* iSCSI APP */
- dcbcfg->app[1].priority =
+ dcbcfg->app[i].priority =
(app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
I40E_AQC_CEE_APP_ISCSI_SHIFT;
- dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP;
- dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI;
+ dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
+ dcbcfg->app[i].protocolid = I40E_APP_PROTOID_ISCSI;
+ i++;
+ }
+ status = (tlv_status & I40E_AQC_CEE_FIP_STATUS_MASK) >>
+ I40E_AQC_CEE_FIP_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add FIP APP if Error is False and Oper/Sync is True */
+ if (!err && sync && oper) {
/* FIP APP */
- dcbcfg->app[2].priority =
+ dcbcfg->app[i].priority =
(app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
I40E_AQC_CEE_APP_FIP_SHIFT;
- dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE;
- dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP;
+ dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FIP;
+ i++;
}
+ dcbcfg->numapps = i;
+}
+
+/**
+ * i40e_get_ieee_dcb_config
+ * @hw: pointer to the hw struct
+ *
+ * Get IEEE mode DCB configuration from the Firmware
+ **/
+static i40e_status i40e_get_ieee_dcb_config(struct i40e_hw *hw)
+{
+ i40e_status ret = 0;
+
+ /* IEEE mode */
+ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
+ /* Get Local DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
+ &hw->local_dcbx_config);
+ if (ret)
+ goto out;
+
+ /* Get Remote DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ &hw->remote_dcbx_config);
+ /* Don't treat ENOENT as an error for Remote MIBs */
+ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ ret = 0;
+
+out:
+ return ret;
}
/**
@@ -579,7 +817,7 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
/* If Firmware version < v4.33 IEEE only */
if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
(hw->aq.fw_maj_ver < 4))
- goto ieee;
+ return i40e_get_ieee_dcb_config(hw);
/* If Firmware version == v4.33 use old CEE struct */
if ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33)) {
@@ -608,16 +846,14 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
/* CEE mode not enabled try querying IEEE data */
if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
- goto ieee;
- else
+ return i40e_get_ieee_dcb_config(hw);
+
+ if (ret)
goto out;
-ieee:
- /* IEEE mode */
- hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
- /* Get Local DCB Config */
+ /* Get CEE DCB Desired Config */
ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
- &hw->local_dcbx_config);
+ &hw->desired_dcbx_config);
if (ret)
goto out;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index 50fc894a4cde..92d01042c1f6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -44,6 +44,15 @@
#define I40E_IEEE_SUBTYPE_PFC_CFG 11
#define I40E_IEEE_SUBTYPE_APP_PRI 12
+#define I40E_CEE_DCBX_OUI 0x001b21
+#define I40E_CEE_DCBX_TYPE 2
+
+#define I40E_CEE_SUBTYPE_CTRL 1
+#define I40E_CEE_SUBTYPE_PG_CFG 2
+#define I40E_CEE_SUBTYPE_PFC_CFG 3
+#define I40E_CEE_SUBTYPE_APP_PRI 4
+
+#define I40E_CEE_MAX_FEAT_TYPE 3
/* Defines for LLDP TLV header */
#define I40E_LLDP_TLV_LEN_SHIFT 0
#define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
@@ -98,6 +107,36 @@ struct i40e_lldp_org_tlv {
__be32 ouisubtype;
u8 tlvinfo[1];
};
+
+struct i40e_cee_tlv_hdr {
+ __be16 typelen;
+ u8 operver;
+ u8 maxver;
+};
+
+struct i40e_cee_ctrl_tlv {
+ struct i40e_cee_tlv_hdr hdr;
+ __be32 seqno;
+ __be32 ackno;
+};
+
+struct i40e_cee_feat_tlv {
+ struct i40e_cee_tlv_hdr hdr;
+ u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */
+#define I40E_CEE_FEAT_TLV_ENABLE_MASK 0x80
+#define I40E_CEE_FEAT_TLV_WILLING_MASK 0x40
+#define I40E_CEE_FEAT_TLV_ERR_MASK 0x20
+ u8 subtype;
+ u8 tlvinfo[1];
+};
+
+struct i40e_cee_app_prio {
+ __be16 protocol;
+ u8 upper_oui_sel; /* Bits: |Upper OUI(6)|Selector(2)| */
+#define I40E_CEE_APP_SELECTOR_MASK 0x03
+ __be16 lower_oui;
+ u8 prio_map;
+};
#pragma pack()
i40e_status i40e_get_dcbx_status(struct i40e_hw *hw,
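
As a quick illustration of the packed en_will_err byte declared above (|En|Will|Err|Reserved(5)|), here is a standalone sketch that tests the three flag bits with the same 0x80/0x40/0x20 mask values as the I40E_CEE_FEAT_TLV_* defines; the sample byte and the rest of the program are made up.

/* Decode the en_will_err byte of a CEE feature TLV.  The mask values
 * mirror the defines added above; everything else is illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define CEE_FEAT_TLV_ENABLE_MASK  0x80	/* En   */
#define CEE_FEAT_TLV_WILLING_MASK 0x40	/* Will */
#define CEE_FEAT_TLV_ERR_MASK     0x20	/* Err  */

int main(void)
{
	uint8_t en_will_err = 0xC0;	/* example: enabled + willing, no error */

	printf("enabled=%d willing=%d error=%d\n",
	       !!(en_will_err & CEE_FEAT_TLV_ENABLE_MASK),
	       !!(en_will_err & CEE_FEAT_TLV_WILLING_MASK),
	       !!(en_will_err & CEE_FEAT_TLV_ERR_MASK));
	return 0;
}
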
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 1c51f736a8d0..886e667f2f1c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -236,14 +236,13 @@ static void i40e_dcbnl_del_app(struct i40e_pf *pf,
struct i40e_dcb_app_priority_table *app)
{
int v, err;
+
for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v] && pf->vsi[v]->netdev) {
err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app);
- if (err)
- dev_info(&pf->pdev->dev, "%s: Failed deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n",
- __func__, pf->vsi[v]->seid,
- err, app->selector,
- app->protocolid, app->priority);
+ dev_dbg(&pf->pdev->dev, "Deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n",
+ pf->vsi[v]->seid, err, app->selector,
+ app->protocolid, app->priority);
}
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index d7c15d17faa6..d4b7af9a2fc8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -404,82 +404,82 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
nstat = i40e_get_vsi_stats_struct(vsi);
dev_info(&pf->pdev->dev,
" net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
- (long unsigned int)nstat->rx_packets,
- (long unsigned int)nstat->rx_bytes,
- (long unsigned int)nstat->rx_errors,
- (long unsigned int)nstat->rx_dropped);
+ (unsigned long int)nstat->rx_packets,
+ (unsigned long int)nstat->rx_bytes,
+ (unsigned long int)nstat->rx_errors,
+ (unsigned long int)nstat->rx_dropped);
dev_info(&pf->pdev->dev,
" net_stats: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
- (long unsigned int)nstat->tx_packets,
- (long unsigned int)nstat->tx_bytes,
- (long unsigned int)nstat->tx_errors,
- (long unsigned int)nstat->tx_dropped);
+ (unsigned long int)nstat->tx_packets,
+ (unsigned long int)nstat->tx_bytes,
+ (unsigned long int)nstat->tx_errors,
+ (unsigned long int)nstat->tx_dropped);
dev_info(&pf->pdev->dev,
" net_stats: multicast = %lu, collisions = %lu\n",
- (long unsigned int)nstat->multicast,
- (long unsigned int)nstat->collisions);
+ (unsigned long int)nstat->multicast,
+ (unsigned long int)nstat->collisions);
dev_info(&pf->pdev->dev,
" net_stats: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
- (long unsigned int)nstat->rx_length_errors,
- (long unsigned int)nstat->rx_over_errors,
- (long unsigned int)nstat->rx_crc_errors);
+ (unsigned long int)nstat->rx_length_errors,
+ (unsigned long int)nstat->rx_over_errors,
+ (unsigned long int)nstat->rx_crc_errors);
dev_info(&pf->pdev->dev,
" net_stats: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
- (long unsigned int)nstat->rx_frame_errors,
- (long unsigned int)nstat->rx_fifo_errors,
- (long unsigned int)nstat->rx_missed_errors);
+ (unsigned long int)nstat->rx_frame_errors,
+ (unsigned long int)nstat->rx_fifo_errors,
+ (unsigned long int)nstat->rx_missed_errors);
dev_info(&pf->pdev->dev,
" net_stats: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
- (long unsigned int)nstat->tx_aborted_errors,
- (long unsigned int)nstat->tx_carrier_errors,
- (long unsigned int)nstat->tx_fifo_errors);
+ (unsigned long int)nstat->tx_aborted_errors,
+ (unsigned long int)nstat->tx_carrier_errors,
+ (unsigned long int)nstat->tx_fifo_errors);
dev_info(&pf->pdev->dev,
" net_stats: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
- (long unsigned int)nstat->tx_heartbeat_errors,
- (long unsigned int)nstat->tx_window_errors);
+ (unsigned long int)nstat->tx_heartbeat_errors,
+ (unsigned long int)nstat->tx_window_errors);
dev_info(&pf->pdev->dev,
" net_stats: rx_compressed = %lu, tx_compressed = %lu\n",
- (long unsigned int)nstat->rx_compressed,
- (long unsigned int)nstat->tx_compressed);
+ (unsigned long int)nstat->rx_compressed,
+ (unsigned long int)nstat->tx_compressed);
dev_info(&pf->pdev->dev,
" net_stats_offsets: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
- (long unsigned int)vsi->net_stats_offsets.rx_packets,
- (long unsigned int)vsi->net_stats_offsets.rx_bytes,
- (long unsigned int)vsi->net_stats_offsets.rx_errors,
- (long unsigned int)vsi->net_stats_offsets.rx_dropped);
+ (unsigned long int)vsi->net_stats_offsets.rx_packets,
+ (unsigned long int)vsi->net_stats_offsets.rx_bytes,
+ (unsigned long int)vsi->net_stats_offsets.rx_errors,
+ (unsigned long int)vsi->net_stats_offsets.rx_dropped);
dev_info(&pf->pdev->dev,
" net_stats_offsets: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
- (long unsigned int)vsi->net_stats_offsets.tx_packets,
- (long unsigned int)vsi->net_stats_offsets.tx_bytes,
- (long unsigned int)vsi->net_stats_offsets.tx_errors,
- (long unsigned int)vsi->net_stats_offsets.tx_dropped);
+ (unsigned long int)vsi->net_stats_offsets.tx_packets,
+ (unsigned long int)vsi->net_stats_offsets.tx_bytes,
+ (unsigned long int)vsi->net_stats_offsets.tx_errors,
+ (unsigned long int)vsi->net_stats_offsets.tx_dropped);
dev_info(&pf->pdev->dev,
" net_stats_offsets: multicast = %lu, collisions = %lu\n",
- (long unsigned int)vsi->net_stats_offsets.multicast,
- (long unsigned int)vsi->net_stats_offsets.collisions);
+ (unsigned long int)vsi->net_stats_offsets.multicast,
+ (unsigned long int)vsi->net_stats_offsets.collisions);
dev_info(&pf->pdev->dev,
" net_stats_offsets: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
- (long unsigned int)vsi->net_stats_offsets.rx_length_errors,
- (long unsigned int)vsi->net_stats_offsets.rx_over_errors,
- (long unsigned int)vsi->net_stats_offsets.rx_crc_errors);
+ (unsigned long int)vsi->net_stats_offsets.rx_length_errors,
+ (unsigned long int)vsi->net_stats_offsets.rx_over_errors,
+ (unsigned long int)vsi->net_stats_offsets.rx_crc_errors);
dev_info(&pf->pdev->dev,
" net_stats_offsets: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
- (long unsigned int)vsi->net_stats_offsets.rx_frame_errors,
- (long unsigned int)vsi->net_stats_offsets.rx_fifo_errors,
- (long unsigned int)vsi->net_stats_offsets.rx_missed_errors);
+ (unsigned long int)vsi->net_stats_offsets.rx_frame_errors,
+ (unsigned long int)vsi->net_stats_offsets.rx_fifo_errors,
+ (unsigned long int)vsi->net_stats_offsets.rx_missed_errors);
dev_info(&pf->pdev->dev,
" net_stats_offsets: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
- (long unsigned int)vsi->net_stats_offsets.tx_aborted_errors,
- (long unsigned int)vsi->net_stats_offsets.tx_carrier_errors,
- (long unsigned int)vsi->net_stats_offsets.tx_fifo_errors);
+ (unsigned long int)vsi->net_stats_offsets.tx_aborted_errors,
+ (unsigned long int)vsi->net_stats_offsets.tx_carrier_errors,
+ (unsigned long int)vsi->net_stats_offsets.tx_fifo_errors);
dev_info(&pf->pdev->dev,
" net_stats_offsets: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
- (long unsigned int)vsi->net_stats_offsets.tx_heartbeat_errors,
- (long unsigned int)vsi->net_stats_offsets.tx_window_errors);
+ (unsigned long int)vsi->net_stats_offsets.tx_heartbeat_errors,
+ (unsigned long int)vsi->net_stats_offsets.tx_window_errors);
dev_info(&pf->pdev->dev,
" net_stats_offsets: rx_compressed = %lu, tx_compressed = %lu\n",
- (long unsigned int)vsi->net_stats_offsets.rx_compressed,
- (long unsigned int)vsi->net_stats_offsets.tx_compressed);
+ (unsigned long int)vsi->net_stats_offsets.rx_compressed,
+ (unsigned long int)vsi->net_stats_offsets.tx_compressed);
dev_info(&pf->pdev->dev,
" tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
vsi->tx_restart, vsi->tx_busy,
@@ -487,6 +487,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
rcu_read_lock();
for (i = 0; i < vsi->num_queue_pairs; i++) {
struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);
+
if (!rx_ring)
continue;
@@ -527,7 +528,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
dev_info(&pf->pdev->dev,
" rx_rings[%i]: size = %i, dma = 0x%08lx\n",
i, rx_ring->size,
- (long unsigned int)rx_ring->dma);
+ (unsigned long int)rx_ring->dma);
dev_info(&pf->pdev->dev,
" rx_rings[%i]: vsi = %p, q_vector = %p\n",
i, rx_ring->vsi,
@@ -535,6 +536,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
}
for (i = 0; i < vsi->num_queue_pairs; i++) {
struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+
if (!tx_ring)
continue;
@@ -573,7 +575,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
dev_info(&pf->pdev->dev,
" tx_rings[%i]: size = %i, dma = 0x%08lx\n",
i, tx_ring->size,
- (long unsigned int)tx_ring->dma);
+ (unsigned long int)tx_ring->dma);
dev_info(&pf->pdev->dev,
" tx_rings[%i]: vsi = %p, q_vector = %p\n",
i, tx_ring->vsi,
@@ -743,6 +745,7 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
ring = &(hw->aq.asq);
for (i = 0; i < ring->count; i++) {
struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
+
dev_info(&pf->pdev->dev,
" at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
i, d->flags, d->opcode, d->datalen, d->retval,
@@ -755,6 +758,7 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
ring = &(hw->aq.arq);
for (i = 0; i < ring->count; i++) {
struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
+
dev_info(&pf->pdev->dev,
" ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
i, d->flags, d->opcode, d->datalen, d->retval,
@@ -949,24 +953,6 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
}
}
-/**
- * i40e_dbg_cmd_fd_ctrl - Enable/disable FD sideband/ATR
- * @pf: the PF that would be altered
- * @flag: flag that needs enabling or disabling
- * @enable: Enable/disable FD SD/ATR
- **/
-static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable)
-{
- if (enable) {
- pf->flags |= flag;
- } else {
- pf->flags &= ~flag;
- pf->auto_disable_flags |= flag;
- }
- dev_info(&pf->pdev->dev, "requesting a PF reset\n");
- i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
-}
-
#define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
/**
* i40e_dbg_command_write - write into command datum
@@ -1038,7 +1024,13 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, "'%s' failed\n", cmd_buf);
} else if (strncmp(cmd_buf, "del vsi", 7) == 0) {
- sscanf(&cmd_buf[7], "%i", &vsi_seid);
+ cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
+ if (cnt != 1) {
+ dev_info(&pf->pdev->dev,
+ "del vsi: bad command string, cnt=%d\n",
+ cnt);
+ goto command_write_done;
+ }
vsi = i40e_dbg_find_vsi(pf, vsi_seid);
if (!vsi) {
dev_info(&pf->pdev->dev, "del VSI %d: seid not found\n",
@@ -1145,8 +1137,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
+ spin_lock_bh(&vsi->mac_filter_list_lock);
f = i40e_add_filter(vsi, ma, vlan, false, false);
- ret = i40e_sync_vsi_filters(vsi);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+ ret = i40e_sync_vsi_filters(vsi, true);
if (f && !ret)
dev_info(&pf->pdev->dev,
"add macaddr: %pM vlan=%d added to VSI %d\n",
@@ -1182,8 +1176,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
+ spin_lock_bh(&vsi->mac_filter_list_lock);
i40e_del_filter(vsi, ma, vlan, false, false);
- ret = i40e_sync_vsi_filters(vsi);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+ ret = i40e_sync_vsi_filters(vsi, true);
if (!ret)
dev_info(&pf->pdev->dev,
"del macaddr: %pM vlan=%d removed from VSI %d\n",
@@ -1488,6 +1484,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} else if (strncmp(cmd_buf, "read", 4) == 0) {
u32 address;
u32 value;
+
cnt = sscanf(&cmd_buf[4], "%i", &address);
if (cnt != 1) {
dev_info(&pf->pdev->dev, "read <reg>\n");
@@ -1495,9 +1492,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
/* check the range on address */
- if (address >= I40E_MAX_REGISTER) {
- dev_info(&pf->pdev->dev, "read reg address 0x%08x too large\n",
- address);
+ if (address > (pf->ioremap_len - sizeof(u32))) {
+ dev_info(&pf->pdev->dev, "read reg address 0x%08x too large, max=0x%08lx\n",
+ address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
goto command_write_done;
}
@@ -1507,6 +1504,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} else if (strncmp(cmd_buf, "write", 5) == 0) {
u32 address, value;
+
cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value);
if (cnt != 2) {
dev_info(&pf->pdev->dev, "write <reg> <value>\n");
@@ -1514,9 +1512,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
/* check the range on address */
- if (address >= I40E_MAX_REGISTER) {
- dev_info(&pf->pdev->dev, "write reg address 0x%08x too large\n",
- address);
+ if (address > (pf->ioremap_len - sizeof(u32))) {
+ dev_info(&pf->pdev->dev, "write reg address 0x%08x too large, max=0x%08lx\n",
+ address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
goto command_write_done;
}
wr32(&pf->hw, address, value);
@@ -1528,6 +1526,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
if (cnt == 0) {
int i;
+
for (i = 0; i < pf->num_alloc_vsi; i++)
i40e_vsi_reset_stats(pf->vsi[i]);
dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
@@ -1726,8 +1725,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
packet_len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
for (i = 0; i < packet_len; i++) {
- sscanf(&asc_packet[j], "%2hhx ",
- &raw_packet[i]);
+ cnt = sscanf(&asc_packet[j], "%2hhx ", &raw_packet[i]);
+ if (!cnt)
+ break;
j += 3;
}
dev_info(&pf->pdev->dev, "FD raw packet dump\n");
@@ -1745,16 +1745,13 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
raw_packet = NULL;
kfree(asc_packet);
asc_packet = NULL;
- } else if (strncmp(cmd_buf, "fd-atr off", 10) == 0) {
- i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false);
- } else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) {
- i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true);
} else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) {
dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
i40e_get_current_fd_count(pf));
} else if (strncmp(cmd_buf, "lldp", 4) == 0) {
if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
int ret;
+
ret = i40e_aq_stop_lldp(&pf->hw, false, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
@@ -1779,6 +1776,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
#endif /* CONFIG_I40E_DCB */
} else if (strncmp(&cmd_buf[5], "start", 5) == 0) {
int ret;
+
ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
pf->hw.mac.addr,
I40E_ETH_P_LLDP, 0,
@@ -1807,6 +1805,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
u16 llen, rlen;
int ret;
u8 *buff;
+
buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
if (!buff)
goto command_write_done;
@@ -1833,6 +1832,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
u16 llen, rlen;
int ret;
u8 *buff;
+
buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
if (!buff)
goto command_write_done;
@@ -1858,6 +1858,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
buff = NULL;
} else if (strncmp(&cmd_buf[5], "event on", 8) == 0) {
int ret;
+
ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
true, NULL);
if (ret) {
@@ -1868,6 +1869,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
} else if (strncmp(&cmd_buf[5], "event off", 9) == 0) {
int ret;
+
ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
false, NULL);
if (ret) {
@@ -1969,8 +1971,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
dev_info(&pf->pdev->dev, " add fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
dev_info(&pf->pdev->dev, " rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
- dev_info(&pf->pdev->dev, " fd-atr off\n");
- dev_info(&pf->pdev->dev, " fd-atr on\n");
dev_info(&pf->pdev->dev, " fd current cnt");
dev_info(&pf->pdev->dev, " lldp start\n");
dev_info(&pf->pdev->dev, " lldp stop\n");
@@ -2105,6 +2105,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
}
} else if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
int mtu;
+
cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
&vsi_seid, &mtu);
if (cnt != 2) {
@@ -2220,7 +2221,6 @@ void i40e_dbg_pf_init(struct i40e_pf *pf)
create_failed:
dev_info(dev, "debugfs dir/file for %s failed\n", name);
debugfs_remove_recursive(pf->i40e_dbg_pf);
- return;
}
/**
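
The debugfs "read" and "write" commands above now bound-check the register offset against the actual ioremap length instead of a fixed maximum. A standalone sketch of that check follows; the 4 KB mapping length is an arbitrary example, not the device's BAR size.

/* Reject register offsets that fall outside the mapped region,
 * mirroring the "address > (pf->ioremap_len - sizeof(u32))" test above.
 */
#include <stdint.h>
#include <stdio.h>

static int reg_offset_ok(uint32_t address, size_t ioremap_len)
{
	/* last valid offset is one full 32-bit register before the end */
	return address <= ioremap_len - sizeof(uint32_t);
}

int main(void)
{
	size_t ioremap_len = 4096;	/* assumed mapping length */

	printf("0x0ffc ok: %d\n", reg_offset_ok(0x0ffc, ioremap_len));
	printf("0x1000 ok: %d\n", reg_offset_ok(0x1000, ioremap_len));
	return 0;
}
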
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
new file mode 100644
index 000000000000..c601ca4a610c
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -0,0 +1,55 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_DEVIDS_H_
+#define _I40E_DEVIDS_H_
+
+/* Device IDs */
+#define I40E_DEV_ID_SFP_XL710 0x1572
+#define I40E_DEV_ID_QEMU 0x1574
+#define I40E_DEV_ID_KX_A 0x157F
+#define I40E_DEV_ID_KX_B 0x1580
+#define I40E_DEV_ID_KX_C 0x1581
+#define I40E_DEV_ID_QSFP_A 0x1583
+#define I40E_DEV_ID_QSFP_B 0x1584
+#define I40E_DEV_ID_QSFP_C 0x1585
+#define I40E_DEV_ID_10G_BASE_T 0x1586
+#define I40E_DEV_ID_20G_KR2 0x1587
+#define I40E_DEV_ID_20G_KR2_A 0x1588
+#define I40E_DEV_ID_10G_BASE_T4 0x1589
+#define I40E_DEV_ID_VF 0x154C
+#define I40E_DEV_ID_VF_HV 0x1571
+#define I40E_DEV_ID_SFP_X722 0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
+#define I40E_DEV_ID_X722_VF 0x37CD
+#define I40E_DEV_ID_X722_VF_HV 0x37D9
+
+#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
+ (d) == I40E_DEV_ID_QSFP_B || \
+ (d) == I40E_DEV_ID_QSFP_C)
+
+#endif /* _I40E_DEVIDS_H_ */
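
A trivial sketch of how the i40e_is_40G_device() helper above can be exercised; the device ID values are copied from the defines in this header, while the surrounding program is just an illustration.

#include <stdio.h>

#define I40E_DEV_ID_QSFP_A     0x1583
#define I40E_DEV_ID_QSFP_B     0x1584
#define I40E_DEV_ID_QSFP_C     0x1585
#define I40E_DEV_ID_10G_BASE_T 0x1586

#define i40e_is_40G_device(d)	((d) == I40E_DEV_ID_QSFP_A || \
				 (d) == I40E_DEV_ID_QSFP_B || \
				 (d) == I40E_DEV_ID_QSFP_C)

int main(void)
{
	printf("0x1584 is 40G: %d\n", i40e_is_40G_device(I40E_DEV_ID_QSFP_B));
	printf("0x1586 is 40G: %d\n", i40e_is_40G_device(I40E_DEV_ID_10G_BASE_T));
	return 0;
}
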
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 13a5d4cf494b..3f385ffe420f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -87,11 +87,9 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
I40E_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
+ I40E_VSI_STAT("tx_linearize", tx_linearize),
};
-static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
- struct ethtool_rxnfc *cmd);
-
/* These PF_STATs might look like duplicates of some NETDEV_STATs,
* but they are separate. This device supports Virtualization, and
* as such might have several netdevs supporting VMDq and FCoE going
@@ -229,10 +227,12 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
"NPAR",
+ "LinkPolling",
+ "flow-director-atr",
+ "veb-stats",
};
-#define I40E_PRIV_FLAGS_STR_LEN \
- (sizeof(i40e_priv_flags_strings) / ETH_GSTRING_LEN)
+#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings)
/**
* i40e_partition_setting_complaint - generic complaint for MFP restriction
@@ -253,7 +253,8 @@ static void i40e_partition_setting_complaint(struct i40e_pf *pf)
**/
static void i40e_get_settings_link_up(struct i40e_hw *hw,
struct ethtool_cmd *ecmd,
- struct net_device *netdev)
+ struct net_device *netdev,
+ struct i40e_pf *pf)
{
struct i40e_link_status *hw_link_info = &hw->phy.link_info;
u32 link_speed = hw_link_info->link_speed;
@@ -272,65 +273,49 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
case I40E_PHY_TYPE_40GBASE_AOC:
ecmd->supported = SUPPORTED_40000baseCR4_Full;
break;
- case I40E_PHY_TYPE_40GBASE_KR4:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_40000baseKR4_Full;
- ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_40000baseKR4_Full;
- break;
case I40E_PHY_TYPE_40GBASE_SR4:
ecmd->supported = SUPPORTED_40000baseSR4_Full;
break;
case I40E_PHY_TYPE_40GBASE_LR4:
ecmd->supported = SUPPORTED_40000baseLR4_Full;
break;
- case I40E_PHY_TYPE_20GBASE_KR2:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_20000baseKR2_Full;
- ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_20000baseKR2_Full;
- break;
- case I40E_PHY_TYPE_10GBASE_KX4:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_10000baseKX4_Full;
- ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_10000baseKX4_Full;
- break;
- case I40E_PHY_TYPE_10GBASE_KR:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_10000baseKR_Full;
- ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_10000baseKR_Full;
- break;
case I40E_PHY_TYPE_10GBASE_SR:
case I40E_PHY_TYPE_10GBASE_LR:
case I40E_PHY_TYPE_1000BASE_SX:
case I40E_PHY_TYPE_1000BASE_LX:
- ecmd->supported = SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full;
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ if (hw_link_info->module_type[2] &
+ I40E_MODULE_TYPE_1000BASE_SX ||
+ hw_link_info->module_type[2] &
+ I40E_MODULE_TYPE_1000BASE_LX) {
+ ecmd->supported |= SUPPORTED_1000baseT_Full;
+ if (hw_link_info->requested_speeds &
+ I40E_LINK_SPEED_1GB)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ }
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
ecmd->advertising |= ADVERTISED_10000baseT_Full;
- if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
- break;
- case I40E_PHY_TYPE_1000BASE_KX:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_1000baseKX_Full;
- ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_1000baseKX_Full;
break;
case I40E_PHY_TYPE_10GBASE_T:
case I40E_PHY_TYPE_1000BASE_T:
- case I40E_PHY_TYPE_100BASE_TX:
ecmd->supported = SUPPORTED_Autoneg |
SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_100baseT_Full;
+ SUPPORTED_1000baseT_Full;
ecmd->advertising = ADVERTISED_Autoneg;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
ecmd->advertising |= ADVERTISED_10000baseT_Full;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ break;
+ case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
+ ecmd->supported = SUPPORTED_Autoneg |
+ SUPPORTED_1000baseT_Full;
+ ecmd->advertising = ADVERTISED_Autoneg |
+ ADVERTISED_1000baseT_Full;
+ break;
+ case I40E_PHY_TYPE_100BASE_TX:
+ ecmd->supported = SUPPORTED_Autoneg |
+ SUPPORTED_100baseT_Full;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
ecmd->advertising |= ADVERTISED_100baseT_Full;
break;
@@ -350,12 +335,24 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
break;
case I40E_PHY_TYPE_SGMII:
ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_100baseT_Full;
+ SUPPORTED_1000baseT_Full;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
ecmd->advertising |= ADVERTISED_1000baseT_Full;
- if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
+ if (pf->hw.mac.type == I40E_MAC_X722) {
+ ecmd->supported |= SUPPORTED_100baseT_Full;
+ if (hw_link_info->requested_speeds &
+ I40E_LINK_SPEED_100MB)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
+ }
+ break;
+ /* Backplane is set based on supported phy types in get_settings
+ * so don't set anything here but don't warn either
+ */
+ case I40E_PHY_TYPE_40GBASE_KR4:
+ case I40E_PHY_TYPE_20GBASE_KR2:
+ case I40E_PHY_TYPE_10GBASE_KR:
+ case I40E_PHY_TYPE_10GBASE_KX4:
+ case I40E_PHY_TYPE_1000BASE_KX:
break;
default:
/* if we got here and link is up something bad is afoot */
@@ -394,64 +391,73 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
* Reports link settings that can be determined when link is down
**/
static void i40e_get_settings_link_down(struct i40e_hw *hw,
- struct ethtool_cmd *ecmd)
+ struct ethtool_cmd *ecmd,
+ struct i40e_pf *pf)
{
- struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ enum i40e_aq_capabilities_phy_type phy_types = hw->phy.phy_types;
/* link is down and the driver needs to fall back on
- * device ID to determine what kinds of info to display,
- * it's mostly a guess that may change when link is up
+ * supported phy types to figure out what info to display
*/
- switch (hw->device_id) {
- case I40E_DEV_ID_QSFP_A:
- case I40E_DEV_ID_QSFP_B:
- case I40E_DEV_ID_QSFP_C:
- /* pluggable QSFP */
- ecmd->supported = SUPPORTED_40000baseSR4_Full |
- SUPPORTED_40000baseCR4_Full |
- SUPPORTED_40000baseLR4_Full;
- ecmd->advertising = ADVERTISED_40000baseSR4_Full |
- ADVERTISED_40000baseCR4_Full |
- ADVERTISED_40000baseLR4_Full;
- break;
- case I40E_DEV_ID_KX_B:
- /* backplane 40G */
- ecmd->supported = SUPPORTED_40000baseKR4_Full;
- ecmd->advertising = ADVERTISED_40000baseKR4_Full;
- break;
- case I40E_DEV_ID_KX_C:
- /* backplane 10G */
- ecmd->supported = SUPPORTED_10000baseKR_Full;
- ecmd->advertising = ADVERTISED_10000baseKR_Full;
- break;
- case I40E_DEV_ID_10G_BASE_T:
- ecmd->supported = SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_100baseT_Full;
- /* Figure out what has been requested */
- if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
- ecmd->advertising |= ADVERTISED_10000baseT_Full;
- if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
- if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
+ ecmd->supported = 0x0;
+ ecmd->advertising = 0x0;
+ if (phy_types & I40E_CAP_PHY_TYPE_SGMII) {
+ ecmd->supported |= SUPPORTED_Autoneg |
+ SUPPORTED_1000baseT_Full;
+ ecmd->advertising |= ADVERTISED_Autoneg |
+ ADVERTISED_1000baseT_Full;
+ if (pf->hw.mac.type == I40E_MAC_X722) {
+ ecmd->supported |= SUPPORTED_100baseT_Full;
ecmd->advertising |= ADVERTISED_100baseT_Full;
- break;
- case I40E_DEV_ID_20G_KR2:
- /* backplane 20G */
- ecmd->supported = SUPPORTED_20000baseKR2_Full;
- ecmd->advertising = ADVERTISED_20000baseKR2_Full;
- break;
- default:
- /* all the rest are 10G/1G */
- ecmd->supported = SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full;
- /* Figure out what has been requested */
- if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
- ecmd->advertising |= ADVERTISED_10000baseT_Full;
- if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
- break;
+ }
}
+ if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
+ phy_types & I40E_CAP_PHY_TYPE_XFI ||
+ phy_types & I40E_CAP_PHY_TYPE_SFI ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC)
+ ecmd->supported |= SUPPORTED_10000baseT_Full;
+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
+ ecmd->supported |= SUPPORTED_Autoneg |
+ SUPPORTED_10000baseT_Full;
+ ecmd->advertising |= ADVERTISED_Autoneg |
+ ADVERTISED_10000baseT_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
+ phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
+ phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC)
+ ecmd->supported |= SUPPORTED_40000baseCR4_Full;
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
+ phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) {
+ ecmd->supported |= SUPPORTED_Autoneg |
+ SUPPORTED_40000baseCR4_Full;
+ ecmd->advertising |= ADVERTISED_Autoneg |
+ ADVERTISED_40000baseCR4_Full;
+ }
+ if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) &&
+ !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) {
+ ecmd->supported |= SUPPORTED_Autoneg |
+ SUPPORTED_100baseT_Full;
+ ecmd->advertising |= ADVERTISED_Autoneg |
+ ADVERTISED_100baseT_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
+ ecmd->supported |= SUPPORTED_Autoneg |
+ SUPPORTED_1000baseT_Full;
+ ecmd->advertising |= ADVERTISED_Autoneg |
+ ADVERTISED_1000baseT_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)
+ ecmd->supported |= SUPPORTED_40000baseSR4_Full;
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4)
+ ecmd->supported |= SUPPORTED_40000baseLR4_Full;
/* With no link speed and duplex are unknown */
ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
@@ -475,12 +481,43 @@ static int i40e_get_settings(struct net_device *netdev,
bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
if (link_up)
- i40e_get_settings_link_up(hw, ecmd, netdev);
+ i40e_get_settings_link_up(hw, ecmd, netdev, pf);
else
- i40e_get_settings_link_down(hw, ecmd);
+ i40e_get_settings_link_down(hw, ecmd, pf);
/* Now set the settings that don't rely on link being up/down */
+ /* For backplane, supported and advertised are only reliant on the
+ * phy types the NVM specifies are supported.
+ */
+ if (hw->device_id == I40E_DEV_ID_KX_B ||
+ hw->device_id == I40E_DEV_ID_KX_C ||
+ hw->device_id == I40E_DEV_ID_20G_KR2 ||
+ hw->device_id == I40E_DEV_ID_20G_KR2_A) {
+ ecmd->supported = SUPPORTED_Autoneg;
+ ecmd->advertising = ADVERTISED_Autoneg;
+ if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) {
+ ecmd->supported |= SUPPORTED_40000baseKR4_Full;
+ ecmd->advertising |= ADVERTISED_40000baseKR4_Full;
+ }
+ if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
+ ecmd->supported |= SUPPORTED_20000baseKR2_Full;
+ ecmd->advertising |= ADVERTISED_20000baseKR2_Full;
+ }
+ if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) {
+ ecmd->supported |= SUPPORTED_10000baseKR_Full;
+ ecmd->advertising |= ADVERTISED_10000baseKR_Full;
+ }
+ if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
+ ecmd->supported |= SUPPORTED_10000baseKX4_Full;
+ ecmd->advertising |= ADVERTISED_10000baseKX4_Full;
+ }
+ if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) {
+ ecmd->supported |= SUPPORTED_1000baseKX_Full;
+ ecmd->advertising |= ADVERTISED_1000baseKX_Full;
+ }
+ }
+
/* Set autoneg settings */
ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
AUTONEG_ENABLE : AUTONEG_DISABLE);
@@ -580,6 +617,14 @@ static int i40e_set_settings(struct net_device *netdev,
hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
return -EOPNOTSUPP;
+ if (hw->device_id == I40E_DEV_ID_KX_B ||
+ hw->device_id == I40E_DEV_ID_KX_C ||
+ hw->device_id == I40E_DEV_ID_20G_KR2 ||
+ hw->device_id == I40E_DEV_ID_20G_KR2_A) {
+ netdev_info(netdev, "Changing settings is not supported on backplane.\n");
+ return -EOPNOTSUPP;
+ }
+
/* get our own copy of the bits to check against */
memset(&safe_ecmd, 0, sizeof(struct ethtool_cmd));
i40e_get_settings(netdev, &safe_ecmd);
@@ -616,28 +661,31 @@ static int i40e_set_settings(struct net_device *netdev,
/* Check autoneg */
if (autoneg == AUTONEG_ENABLE) {
- /* If autoneg is not supported, return error */
- if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) {
- netdev_info(netdev, "Autoneg not supported on this phy\n");
- return -EINVAL;
- }
/* If autoneg was not already enabled */
if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) {
+ /* If autoneg is not supported, return error */
+ if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) {
+ netdev_info(netdev, "Autoneg not supported on this phy\n");
+ return -EINVAL;
+ }
+ /* Autoneg is allowed to change */
config.abilities = abilities.abilities |
I40E_AQ_PHY_ENABLE_AN;
change = true;
}
} else {
- /* If autoneg is supported 10GBASE_T is the only phy that
- * can disable it, so otherwise return error
- */
- if (safe_ecmd.supported & SUPPORTED_Autoneg &&
- hw->phy.link_info.phy_type != I40E_PHY_TYPE_10GBASE_T) {
- netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
- return -EINVAL;
- }
/* If autoneg is currently enabled */
if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) {
+ /* If autoneg is supported 10GBASE_T is the only PHY
+ * that can disable it, so otherwise return error
+ */
+ if (safe_ecmd.supported & SUPPORTED_Autoneg &&
+ hw->phy.link_info.phy_type !=
+ I40E_PHY_TYPE_10GBASE_T) {
+ netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
+ return -EINVAL;
+ }
+ /* Autoneg is allowed to change */
config.abilities = abilities.abilities &
~I40E_AQ_PHY_ENABLE_AN;
change = true;
@@ -664,6 +712,13 @@ static int i40e_set_settings(struct net_device *netdev,
advertise & ADVERTISED_40000baseLR4_Full)
config.link_speed |= I40E_LINK_SPEED_40GB;
+ /* If speed didn't get set, set it to what it currently is.
+ * This is needed because if advertise is 0 (as it is when autoneg
+ * is disabled) then speed won't get set.
+ */
+ if (!config.link_speed)
+ config.link_speed = abilities.link_speed;
+
if (change || (abilities.link_speed != config.link_speed)) {
/* copy over the rest of the abilities */
config.phy_type = abilities.phy_type;
@@ -680,7 +735,7 @@ static int i40e_set_settings(struct net_device *netdev,
/* Tell the OS link is going down, the link will go
* back up when fw says it is ready asynchronously
*/
- netdev_info(netdev, "PHY settings change requested, NIC Link is going down.\n");
+ i40e_print_link_message(vsi, false);
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
}
@@ -694,11 +749,11 @@ static int i40e_set_settings(struct net_device *netdev,
return -EAGAIN;
}
- status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+ status = i40e_update_link_info(hw);
if (status)
- netdev_info(netdev, "Updating link info failed with err %s aq_err %s\n",
- i40e_stat_str(hw, status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
+ netdev_dbg(netdev, "Updating link info failed with err %s aq_err %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
} else {
netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
@@ -824,7 +879,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
/* Tell the OS link is going down, the link will go back up when fw
* says it is ready asynchronously
*/
- netdev_info(netdev, "Flow control settings change requested, NIC Link is going down.\n");
+ i40e_print_link_message(vsi, false);
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
@@ -948,9 +1003,7 @@ static int i40e_get_eeprom(struct net_device *netdev,
cmd = (struct i40e_nvm_access *)eeprom;
ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
- if (ret_val &&
- ((hw->aq.asq_last_status != I40E_AQ_RC_EACCES) ||
- (hw->debug_mask & I40E_DEBUG_NVM)))
+ if (ret_val && (hw->debug_mask & I40E_DEBUG_NVM))
dev_info(&pf->pdev->dev,
"NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
ret_val, hw->aq.asq_last_status, errno,
@@ -1054,10 +1107,7 @@ static int i40e_set_eeprom(struct net_device *netdev,
cmd = (struct i40e_nvm_access *)eeprom;
ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
- if (ret_val &&
- ((hw->aq.asq_last_status != I40E_AQ_RC_EPERM &&
- hw->aq.asq_last_status != I40E_AQ_RC_EBUSY) ||
- (hw->debug_mask & I40E_DEBUG_NVM)))
+ if (ret_val && (hw->debug_mask & I40E_DEBUG_NVM))
dev_info(&pf->pdev->dev,
"NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
ret_val, hw->aq.asq_last_status, errno,
@@ -1077,11 +1127,10 @@ static void i40e_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, i40e_driver_version_str,
sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, i40e_fw_version_str(&pf->hw),
+ strlcpy(drvinfo->fw_version, i40e_nvm_version_str(&pf->hw),
sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
}
static void i40e_get_ringparam(struct net_device *netdev,
@@ -1166,6 +1215,11 @@ static int i40e_set_ringparam(struct net_device *netdev,
/* clone ring and setup updated count */
tx_rings[i] = *vsi->tx_rings[i];
tx_rings[i].count = new_tx_count;
+ /* the desc and bi pointers will be reallocated in the
+ * setup call
+ */
+ tx_rings[i].desc = NULL;
+ tx_rings[i].rx_bi = NULL;
err = i40e_setup_tx_descriptors(&tx_rings[i]);
if (err) {
while (i) {
@@ -1196,6 +1250,11 @@ static int i40e_set_ringparam(struct net_device *netdev,
/* clone ring and setup updated count */
rx_rings[i] = *vsi->rx_rings[i];
rx_rings[i].count = new_rx_count;
+ /* the desc and bi pointers will be reallocated in the
+ * setup call
+ */
+ rx_rings[i].desc = NULL;
+ rx_rings[i].rx_bi = NULL;
err = i40e_setup_rx_descriptors(&rx_rings[i]);
if (err) {
while (i) {
@@ -1263,7 +1322,8 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) {
int len = I40E_PF_STATS_LEN(netdev);
- if (pf->lan_veb != I40E_NO_VEB)
+ if ((pf->lan_veb != I40E_NO_VEB) &&
+ (pf->flags & I40E_FLAG_VEB_STATS_ENABLED))
len += I40E_VEB_STATS_TOTAL;
return len;
} else {
@@ -1336,8 +1396,10 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
return;
- if (pf->lan_veb != I40E_NO_VEB) {
+ if ((pf->lan_veb != I40E_NO_VEB) &&
+ (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
struct i40e_veb *veb = pf->veb[pf->lan_veb];
+
for (j = 0; j < I40E_VEB_STATS_LEN; j++) {
p = (char *)veb;
p += i40e_gstrings_veb_stats[j].stat_offset;
@@ -1415,7 +1477,8 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
return;
- if (pf->lan_veb != I40E_NO_VEB) {
+ if ((pf->lan_veb != I40E_NO_VEB) &&
+ (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
snprintf(p, ETH_GSTRING_LEN, "veb.%s",
i40e_gstrings_veb_stats[i].stat_string);
@@ -1510,9 +1573,18 @@ static int i40e_link_test(struct net_device *netdev, u64 *data)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
+ i40e_status status;
+ bool link_up = false;
netif_info(pf, hw, netdev, "link test\n");
- if (i40e_get_link_status(&pf->hw))
+ status = i40e_get_link_status(&pf->hw, &link_up);
+ if (status) {
+ netif_err(pf, drv, netdev, "link query timed out, please retry test\n");
+ *data = 1;
+ return *data;
+ }
+
+ if (link_up)
*data = 0;
else
*data = 1;
@@ -1581,7 +1653,7 @@ static inline bool i40e_active_vfs(struct i40e_pf *pf)
int i;
for (i = 0; i < pf->num_alloc_vfs; i++)
- if (vfs[i].vf_states & I40E_VF_STAT_ACTIVE)
+ if (test_bit(I40E_VF_STAT_ACTIVE, &vfs[i].vf_states))
return true;
return false;
}
@@ -1788,6 +1860,14 @@ static int i40e_get_coalesce(struct net_device *netdev,
ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
+ /* we use the _usecs_high to store/set the interrupt rate limit
+ * that the hardware supports, that almost but not quite
+ * fits the original intent of the ethtool variable,
+ * the rx_coalesce_usecs_high limits total interrupts
+ * per second from both tx/rx sources.
+ */
+ ec->rx_coalesce_usecs_high = vsi->int_rate_limit;
+ ec->tx_coalesce_usecs_high = vsi->int_rate_limit;
return 0;
}
@@ -1806,6 +1886,17 @@ static int i40e_set_coalesce(struct net_device *netdev,
if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
vsi->work_limit = ec->tx_max_coalesced_frames_irq;
+ /* tx_coalesce_usecs_high is ignored, use rx-usecs-high instead */
+ if (ec->tx_coalesce_usecs_high != vsi->int_rate_limit) {
+ netif_info(pf, drv, netdev, "tx-usecs-high is not used, please program rx-usecs-high\n");
+ return -EINVAL;
+ }
+
+ if (ec->rx_coalesce_usecs_high >= INTRL_REG_TO_USEC(I40E_MAX_INTRL)) {
+ netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-235\n");
+ return -EINVAL;
+ }
+
vector = vsi->base_vector;
if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
(ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1))) {
@@ -1819,6 +1910,8 @@ static int i40e_set_coalesce(struct net_device *netdev,
return -EINVAL;
}
+ vsi->int_rate_limit = ec->rx_coalesce_usecs_high;
+
if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
(ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1))) {
vsi->tx_itr_setting = ec->tx_coalesce_usecs;
@@ -1843,11 +1936,14 @@ static int i40e_set_coalesce(struct net_device *netdev,
vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+ u16 intrl = INTRL_USEC_TO_REG(vsi->int_rate_limit);
+
q_vector = vsi->q_vectors[i];
q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr);
+ wr32(hw, I40E_PFINT_RATEN(vector - 1), intrl);
i40e_flush(hw);
}
@@ -2610,10 +2706,51 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
ret_flags |= pf->hw.func_caps.npar_enable ?
I40E_PRIV_FLAGS_NPAR_FLAG : 0;
+ ret_flags |= pf->flags & I40E_FLAG_LINK_POLLING_ENABLED ?
+ I40E_PRIV_FLAGS_LINKPOLL_FLAG : 0;
+ ret_flags |= pf->flags & I40E_FLAG_FD_ATR_ENABLED ?
+ I40E_PRIV_FLAGS_FD_ATR : 0;
+ ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ?
+ I40E_PRIV_FLAGS_VEB_STATS : 0;
return ret_flags;
}
+/**
+ * i40e_set_priv_flags - set private flags
+ * @dev: network interface device structure
+ * @flags: bit flags to be set
+ **/
+static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+
+ if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG)
+ pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED;
+ else
+ pf->flags &= ~I40E_FLAG_LINK_POLLING_ENABLED;
+
+ /* allow the user to control the state of the Flow
+ * Director ATR (Application Targeted Routing) feature
+ * of the driver
+ */
+ if (flags & I40E_PRIV_FLAGS_FD_ATR) {
+ pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+ } else {
+ pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
+ }
+
+ if (flags & I40E_PRIV_FLAGS_VEB_STATS)
+ pf->flags |= I40E_FLAG_VEB_STATS_ENABLED;
+ else
+ pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
+
+ return 0;
+}
+
static const struct ethtool_ops i40e_ethtool_ops = {
.get_settings = i40e_get_settings,
.set_settings = i40e_set_settings,
@@ -2650,6 +2787,7 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.set_channels = i40e_set_channels,
.get_ts_info = i40e_get_ts_info,
.get_priv_flags = i40e_get_priv_flags,
+ .set_priv_flags = i40e_set_priv_flags,
};
void i40e_set_ethtool_ops(struct net_device *netdev)
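
The private-flags handlers added above translate between driver feature bits and the ethtool bitmap whose bit order must match i40e_priv_flags_strings (NPAR, LinkPolling, flow-director-atr, veb-stats). Below is a standalone sketch of that translation; the bit positions and flag values are invented, and the driver's extra auto_disable bookkeeping for ATR is omitted.

/* Userspace sketch of the get/set private-flags translation.  Bit
 * positions follow the string table order; numeric values are
 * illustrative, not the driver's.
 */
#include <stdint.h>
#include <stdio.h>

#define PRIV_FLAGS_NPAR      (1u << 0)
#define PRIV_FLAGS_LINKPOLL  (1u << 1)
#define PRIV_FLAGS_FD_ATR    (1u << 2)
#define PRIV_FLAGS_VEB_STATS (1u << 3)

#define FLAG_LINK_POLLING_ENABLED (1u << 5)	/* assumed driver flag bits */
#define FLAG_FD_ATR_ENABLED       (1u << 6)
#define FLAG_VEB_STATS_ENABLED    (1u << 7)

static uint32_t get_priv_flags(uint32_t drv_flags, int npar_enable)
{
	uint32_t ret = npar_enable ? PRIV_FLAGS_NPAR : 0;

	ret |= (drv_flags & FLAG_LINK_POLLING_ENABLED) ? PRIV_FLAGS_LINKPOLL : 0;
	ret |= (drv_flags & FLAG_FD_ATR_ENABLED) ? PRIV_FLAGS_FD_ATR : 0;
	ret |= (drv_flags & FLAG_VEB_STATS_ENABLED) ? PRIV_FLAGS_VEB_STATS : 0;
	return ret;
}

static void set_priv_flags(uint32_t *drv_flags, uint32_t flags)
{
	if (flags & PRIV_FLAGS_LINKPOLL)
		*drv_flags |= FLAG_LINK_POLLING_ENABLED;
	else
		*drv_flags &= ~FLAG_LINK_POLLING_ENABLED;

	if (flags & PRIV_FLAGS_FD_ATR)
		*drv_flags |= FLAG_FD_ATR_ENABLED;
	else
		*drv_flags &= ~FLAG_FD_ATR_ENABLED;

	if (flags & PRIV_FLAGS_VEB_STATS)
		*drv_flags |= FLAG_VEB_STATS_ENABLED;
	else
		*drv_flags &= ~FLAG_VEB_STATS_ENABLED;
}

int main(void)
{
	uint32_t drv = FLAG_FD_ATR_ENABLED;

	set_priv_flags(&drv, PRIV_FLAGS_LINKPOLL | PRIV_FLAGS_VEB_STATS);
	printf("priv flags now 0x%x\n", get_priv_flags(drv, 0));
	return 0;
}
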
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index 5ea75dd537d6..fe5d9bf3ed6d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -272,10 +272,8 @@ out:
/**
* i40e_fcoe_sw_init - sets up the HW for FCoE
* @pf: pointer to PF
- *
- * Returns 0 if FCoE is supported otherwise the error code
**/
-int i40e_init_pf_fcoe(struct i40e_pf *pf)
+void i40e_init_pf_fcoe(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
u32 val;
@@ -286,14 +284,14 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf)
pf->fcoe_hmc_filt_num = 0;
if (!pf->hw.func_caps.fcoe) {
- dev_info(&pf->pdev->dev, "FCoE capability is disabled\n");
- return 0;
+ dev_dbg(&pf->pdev->dev, "FCoE capability is disabled\n");
+ return;
}
if (!pf->hw.func_caps.dcb) {
dev_warn(&pf->pdev->dev,
"Hardware is not DCB capable not enabling FCoE.\n");
- return 0;
+ return;
}
/* enable FCoE hash filter */
@@ -326,7 +324,6 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf)
wr32(hw, I40E_GLFCOE_RCTL, val);
dev_info(&pf->pdev->dev, "FCoE is supported.\n");
- return 0;
}
/**
@@ -1519,10 +1516,12 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi)
* same PCI function.
*/
netdev->dev_port = 1;
+ spin_lock_bh(&vsi->mac_filter_list_lock);
i40e_add_filter(vsi, hw->mac.san_addr, 0, false, false);
i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false);
i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false);
i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
/* use san mac */
ether_addr_copy(netdev->dev_addr, hw->mac.san_addr);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index fa371a2a40c6..79ae7beeafe5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -431,9 +431,8 @@ exit_sd_error:
pd_idx1 = max(pd_idx,
((j - 1) * I40E_HMC_MAX_BP_COUNT));
pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
- for (i = pd_idx1; i < pd_lmt1; i++) {
+ for (i = pd_idx1; i < pd_lmt1; i++)
i40e_remove_pd_bp(hw, info->hmc_info, i);
- }
i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
break;
case I40E_SD_TYPE_DIRECT:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 3dd26cdd0bf2..b825f978d441 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 9
+#define DRV_VERSION_BUILD 46
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -75,10 +75,13 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
/* required last entry */
{0, }
};
@@ -213,10 +216,10 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
ret = i;
pile->search_hint = i + j;
break;
- } else {
- /* not enough, so skip over it and continue looking */
- i += j;
}
+
+ /* not enough, so skip over it and continue looking */
+ i += j;
}
return ret;
@@ -299,25 +302,69 @@ static void i40e_tx_timeout(struct net_device *netdev)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ struct i40e_ring *tx_ring = NULL;
+ unsigned int i, hung_queue = 0;
+ u32 head, val;
pf->tx_timeout_count++;
+ /* find the stopped queue the same way the stack does */
+ for (i = 0; i < netdev->num_tx_queues; i++) {
+ struct netdev_queue *q;
+ unsigned long trans_start;
+
+ q = netdev_get_tx_queue(netdev, i);
+ trans_start = q->trans_start ? : netdev->trans_start;
+ if (netif_xmit_stopped(q) &&
+ time_after(jiffies,
+ (trans_start + netdev->watchdog_timeo))) {
+ hung_queue = i;
+ break;
+ }
+ }
+
+ if (i == netdev->num_tx_queues) {
+ netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
+ } else {
+ /* now that we have an index, find the tx_ring struct */
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
+ if (hung_queue ==
+ vsi->tx_rings[i]->queue_index) {
+ tx_ring = vsi->tx_rings[i];
+ break;
+ }
+ }
+ }
+ }
+
if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
- pf->tx_timeout_recovery_level = 1;
+ pf->tx_timeout_recovery_level = 1; /* reset after some time */
+ else if (time_before(jiffies,
+ (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
+ return; /* don't do any new action before the next timeout */
+
+ if (tx_ring) {
+ head = i40e_get_head(tx_ring);
+ /* Read interrupt register */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ val = rd32(&pf->hw,
+ I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
+ tx_ring->vsi->base_vector - 1));
+ else
+ val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
+
+ netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
+ vsi->seid, hung_queue, tx_ring->next_to_clean,
+ head, tx_ring->next_to_use,
+ readl(tx_ring->tail), val);
+ }
+
pf->tx_timeout_last_recovery = jiffies;
- netdev_info(netdev, "tx_timeout recovery level %d\n",
- pf->tx_timeout_recovery_level);
+ netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
+ pf->tx_timeout_recovery_level, hung_queue);
switch (pf->tx_timeout_recovery_level) {
- case 0:
- /* disable and re-enable queues for the VSI */
- if (in_interrupt()) {
- set_bit(__I40E_REINIT_REQUESTED, &pf->state);
- set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
- } else {
- i40e_vsi_reinit_locked(vsi);
- }
- break;
case 1:
set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
break;
@@ -329,10 +376,9 @@ static void i40e_tx_timeout(struct net_device *netdev)
break;
default:
netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
- set_bit(__I40E_DOWN_REQUESTED, &pf->state);
- set_bit(__I40E_DOWN_REQUESTED, &vsi->state);
break;
}
+
i40e_service_event_schedule(pf);
pf->tx_timeout_recovery_level++;
}
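
The reworked tx_timeout handler above locates the hung queue with the same wraparound-safe time comparison the stack uses. A standalone sketch of that check follows; time_after() is written out the way the kernel defines it, while the queue array and timestamp values are invented for the example.

/* Wraparound-safe "stopped longer than the watchdog timeout" check used
 * when scanning for the hung queue.  Values are made up.
 */
#include <stdbool.h>
#include <stdio.h>

#define time_after(a, b)  ((long)((b) - (a)) < 0)

struct txq {
	bool stopped;
	unsigned long trans_start;	/* jiffies of last transmit */
};

static int find_hung_queue(const struct txq *q, int nqueues,
			   unsigned long jiffies, unsigned long watchdog_timeo)
{
	int i;

	for (i = 0; i < nqueues; i++)
		if (q[i].stopped &&
		    time_after(jiffies, q[i].trans_start + watchdog_timeo))
			return i;
	return -1;	/* no hung queue found */
}

int main(void)
{
	struct txq queues[3] = {
		{ false, 1000 },
		{ true,  400 },		/* stopped and stale -> hung */
		{ true,  1190 },
	};

	printf("hung queue: %d\n", find_hung_queue(queues, 3, 1200, 500));
	return 0;
}
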
@@ -431,6 +477,7 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
stats->tx_errors = vsi_stats->tx_errors;
stats->tx_dropped = vsi_stats->tx_dropped;
stats->rx_errors = vsi_stats->rx_errors;
+ stats->rx_dropped = vsi_stats->rx_dropped;
stats->rx_crc_errors = vsi_stats->rx_crc_errors;
stats->rx_length_errors = vsi_stats->rx_length_errors;
@@ -456,11 +503,11 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
if (vsi->rx_rings && vsi->rx_rings[0]) {
for (i = 0; i < vsi->num_queue_pairs; i++) {
- memset(&vsi->rx_rings[i]->stats, 0 ,
+ memset(&vsi->rx_rings[i]->stats, 0,
sizeof(vsi->rx_rings[i]->stats));
- memset(&vsi->rx_rings[i]->rx_stats, 0 ,
+ memset(&vsi->rx_rings[i]->rx_stats, 0,
sizeof(vsi->rx_rings[i]->rx_stats));
- memset(&vsi->tx_rings[i]->stats, 0 ,
+ memset(&vsi->tx_rings[i]->stats, 0,
sizeof(vsi->tx_rings[i]->stats));
memset(&vsi->tx_rings[i]->tx_stats, 0,
sizeof(vsi->tx_rings[i]->tx_stats));
@@ -754,7 +801,6 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
struct i40e_hw_port_stats *nsd = &pf->stats;
struct i40e_hw *hw = &pf->hw;
u64 xoff = 0;
- u16 i, v;
if ((hw->fc.current_mode != I40E_FC_FULL) &&
(hw->fc.current_mode != I40E_FC_RX_PAUSE))
@@ -769,18 +815,6 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
if (!(nsd->link_xoff_rx - xoff))
return;
- /* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- struct i40e_vsi *vsi = pf->vsi[v];
-
- if (!vsi || !vsi->tx_rings[0])
- continue;
-
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- struct i40e_ring *ring = vsi->tx_rings[i];
- clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
- }
- }
}
/**
@@ -796,7 +830,7 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
struct i40e_dcbx_config *dcb_cfg;
struct i40e_hw *hw = &pf->hw;
- u16 i, v;
+ u16 i;
u8 tc;
dcb_cfg = &hw->local_dcbx_config;
@@ -809,6 +843,7 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
u64 prio_xoff = nsd->priority_xoff_rx[i];
+
i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
pf->stat_offsets_loaded,
&osd->priority_xoff_rx[i],
@@ -821,23 +856,6 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
tc = dcb_cfg->etscfg.prioritytable[i];
xoff[tc] = true;
}
-
- /* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- struct i40e_vsi *vsi = pf->vsi[v];
-
- if (!vsi || !vsi->tx_rings[0])
- continue;
-
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- struct i40e_ring *ring = vsi->tx_rings[i];
-
- tc = ring->dcb_tc;
- if (xoff[tc])
- clear_bit(__I40E_HANG_CHECK_ARMED,
- &ring->state);
- }
- }
}
/**
@@ -862,6 +880,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
u32 rx_page, rx_buf;
u64 bytes, packets;
unsigned int start;
+ u64 tx_linearize;
u64 rx_p, rx_b;
u64 tx_p, tx_b;
u16 q;
@@ -880,7 +899,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
*/
rx_b = rx_p = 0;
tx_b = tx_p = 0;
- tx_restart = tx_busy = 0;
+ tx_restart = tx_busy = tx_linearize = 0;
rx_page = 0;
rx_buf = 0;
rcu_read_lock();
@@ -897,6 +916,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
tx_p += packets;
tx_restart += p->tx_stats.restart_queue;
tx_busy += p->tx_stats.tx_busy;
+ tx_linearize += p->tx_stats.tx_linearize;
/* Rx queue is part of the same block as Tx queue */
p = &p[1];
@@ -913,6 +933,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
rcu_read_unlock();
vsi->tx_restart = tx_restart;
vsi->tx_busy = tx_busy;
+ vsi->tx_linearize = tx_linearize;
vsi->rx_page_failed = rx_page;
vsi->rx_buf_failed = rx_buf;
@@ -1256,7 +1277,7 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
* so we have to go through all the list in order to make sure
*/
list_for_each_entry(f, &vsi->mac_filter_list, list) {
- if (f->vlan >= 0)
+ if (f->vlan >= 0 || vsi->info.pvid)
return true;
}
@@ -1334,6 +1355,9 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
* @is_netdev: make sure its a netdev filter, else doesn't matter
*
* Returns ptr to the filter object or NULL when no memory available.
+ *
+ * NOTE: This function is expected to be called with mac_filter_list_lock
+ * being held.
**/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
u8 *macaddr, s16 vlan,
@@ -1392,6 +1416,9 @@ add_filter_out:
* @vlan: the vlan
* @is_vf: make sure it's a VF filter, else doesn't matter
* @is_netdev: make sure it's a netdev filter, else doesn't matter
+ *
+ * NOTE: This function is expected to be called with mac_filter_list_lock
+ * being held.
**/
void i40e_del_filter(struct i40e_vsi *vsi,
u8 *macaddr, s16 vlan,
@@ -1419,6 +1446,7 @@ void i40e_del_filter(struct i40e_vsi *vsi,
} else {
/* make sure we don't remove a filter in use by VF or netdev */
int min_f = 0;
+
min_f += (f->is_vf ? 1 : 0);
min_f += (f->is_netdev ? 1 : 0);
@@ -1477,6 +1505,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
if (vsi->type == I40E_VSI_MAIN) {
i40e_status ret;
+
ret = i40e_aq_mac_address_write(&vsi->back->hw,
I40E_AQC_WRITE_TYPE_LAA_WOL,
addr->sa_data, NULL);
@@ -1496,8 +1525,10 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
} else {
+ spin_lock_bh(&vsi->mac_filter_list_lock);
i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
false, false);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
}
if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
@@ -1508,13 +1539,15 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
} else {
+ spin_lock_bh(&vsi->mac_filter_list_lock);
f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
false, false);
if (f)
f->is_laa = true;
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
}
- i40e_sync_vsi_filters(vsi);
+ i40e_sync_vsi_filters(vsi, false);
ether_addr_copy(netdev->dev_addr, addr->sa_data);
return 0;
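A minimal sketch of the caller pattern these hunks establish: i40e_add_filter() and i40e_del_filter() now expect mac_filter_list_lock to be held, and i40e_sync_vsi_filters() is called only after the lock is dropped. The vsi pointer and mac_addr below are placeholders, not values from this patch.

	spin_lock_bh(&vsi->mac_filter_list_lock);
	f = i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true);
	spin_unlock_bh(&vsi->mac_filter_list_lock);
	if (!f)
		return -ENOMEM;

	/* push the accumulated changes to the hardware outside the spinlock */
	return i40e_sync_vsi_filters(vsi, false);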
@@ -1684,6 +1717,8 @@ static void i40e_set_rx_mode(struct net_device *netdev)
struct netdev_hw_addr *mca;
struct netdev_hw_addr *ha;
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+
/* add addr if not already in the filter list */
netdev_for_each_uc_addr(uca, netdev) {
if (!i40e_find_mac(vsi, uca->addr, false, true)) {
@@ -1709,37 +1744,29 @@ static void i40e_set_rx_mode(struct net_device *netdev)
/* remove filter if not in netdev list */
list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
- bool found = false;
if (!f->is_netdev)
continue;
- if (is_multicast_ether_addr(f->macaddr)) {
- netdev_for_each_mc_addr(mca, netdev) {
- if (ether_addr_equal(mca->addr, f->macaddr)) {
- found = true;
- break;
- }
- }
- } else {
- netdev_for_each_uc_addr(uca, netdev) {
- if (ether_addr_equal(uca->addr, f->macaddr)) {
- found = true;
- break;
- }
- }
+ netdev_for_each_mc_addr(mca, netdev)
+ if (ether_addr_equal(mca->addr, f->macaddr))
+ goto bottom_of_search_loop;
- for_each_dev_addr(netdev, ha) {
- if (ether_addr_equal(ha->addr, f->macaddr)) {
- found = true;
- break;
- }
- }
- }
- if (!found)
- i40e_del_filter(
- vsi, f->macaddr, I40E_VLAN_ANY, false, true);
+ netdev_for_each_uc_addr(uca, netdev)
+ if (ether_addr_equal(uca->addr, f->macaddr))
+ goto bottom_of_search_loop;
+
+ for_each_dev_addr(netdev, ha)
+ if (ether_addr_equal(ha->addr, f->macaddr))
+ goto bottom_of_search_loop;
+
+ /* f->macaddr wasn't found in uc, mc, or ha list so delete it */
+ i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, false, true);
+
+bottom_of_search_loop:
+ continue;
}
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
/* check for other flag changes */
if (vsi->current_netdev_flags != vsi->netdev->flags) {
@@ -1749,20 +1776,96 @@ static void i40e_set_rx_mode(struct net_device *netdev)
}
/**
+ * i40e_mac_filter_entry_clone - Clones a MAC filter entry
+ * @src: source MAC filter entry to be cloned
+ *
+ * Returns the pointer to newly cloned MAC filter entry or NULL
+ * in case of error
+ **/
+static struct i40e_mac_filter *i40e_mac_filter_entry_clone(
+ struct i40e_mac_filter *src)
+{
+ struct i40e_mac_filter *f;
+
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ if (!f)
+ return NULL;
+ *f = *src;
+
+ INIT_LIST_HEAD(&f->list);
+
+ return f;
+}
+
+/**
+ * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
+ * @vsi: pointer to vsi struct
+ * @from: Pointer to list which contains MAC filter entries - changes to
+ * those entries need to be undone.
+ *
+ * MAC filter entries from this list were slated to be removed from the device.
+ **/
+static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
+ struct list_head *from)
+{
+ struct i40e_mac_filter *f, *ftmp;
+
+ list_for_each_entry_safe(f, ftmp, from, list) {
+ f->changed = true;
+ /* Move the element back into MAC filter list*/
+ list_move_tail(&f->list, &vsi->mac_filter_list);
+ }
+}
+
+/**
+ * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
+ * @vsi: pointer to vsi struct
+ *
+ * MAC filter entries from this list were slated to be added to the device.
+ **/
+static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f, *ftmp;
+
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ if (!f->changed && f->counter)
+ f->changed = true;
+ }
+}
+
+/**
+ * i40e_cleanup_add_list - Delete the elements from the add list and release
+ * their memory
+ * @add_list: Pointer to list which contains MAC filter entries
+ **/
+static void i40e_cleanup_add_list(struct list_head *add_list)
+{
+ struct i40e_mac_filter *f, *ftmp;
+
+ list_for_each_entry_safe(f, ftmp, add_list, list) {
+ list_del(&f->list);
+ kfree(f);
+ }
+}
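Taken together, these four helpers support a snapshot-and-undo flow in i40e_sync_vsi_filters() below: entries are moved or cloned onto temporary lists while mac_filter_list_lock is held, the AdminQ calls then run outside the lock, and an allocation failure rolls everything back. A rough sketch of that flow, with the loops elided and a placeholder 'failed' flag standing in for the real error checks:

	spin_lock_bh(&vsi->mac_filter_list_lock);
	list_move_tail(&f->list, &tmp_del_list);	/* filters to delete */
	fclone = i40e_mac_filter_entry_clone(f);	/* filters to add */
	if (fclone)
		list_add_tail(&fclone->list, &tmp_add_list);
	spin_unlock_bh(&vsi->mac_filter_list_lock);

	/* AdminQ calls run without the lock; a failure undoes the snapshot */
	if (failed) {
		spin_lock_bh(&vsi->mac_filter_list_lock);
		i40e_undo_del_filter_entries(vsi, &tmp_del_list);
		i40e_undo_add_filter_entries(vsi);
		spin_unlock_bh(&vsi->mac_filter_list_lock);
		i40e_cleanup_add_list(&tmp_add_list);
	}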
+
+/**
* i40e_sync_vsi_filters - Update the VSI filter list to the HW
* @vsi: ptr to the VSI
+ * @grab_rtnl: whether RTNL needs to be grabbed
*
* Push any outstanding VSI filter changes through the AdminQ.
*
* Returns 0 or error value
**/
-int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
+int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
{
- struct i40e_mac_filter *f, *ftmp;
+ struct list_head tmp_del_list, tmp_add_list;
+ struct i40e_mac_filter *f, *ftmp, *fclone;
bool promisc_forced_on = false;
bool add_happened = false;
int filter_list_len = 0;
u32 changed_flags = 0;
+ bool err_cond = false;
i40e_status ret = 0;
struct i40e_pf *pf;
int num_add = 0;
@@ -1783,17 +1886,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
vsi->current_netdev_flags = vsi->netdev->flags;
}
+ INIT_LIST_HEAD(&tmp_del_list);
+ INIT_LIST_HEAD(&tmp_add_list);
+
if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
- filter_list_len = pf->hw.aq.asq_buf_size /
- sizeof(struct i40e_aqc_remove_macvlan_element_data);
- del_list = kcalloc(filter_list_len,
- sizeof(struct i40e_aqc_remove_macvlan_element_data),
- GFP_KERNEL);
- if (!del_list)
- return -ENOMEM;
-
+ spin_lock_bh(&vsi->mac_filter_list_lock);
list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
if (!f->changed)
continue;
@@ -1801,6 +1900,58 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
if (f->counter != 0)
continue;
f->changed = false;
+
+ /* Move the element into temporary del_list */
+ list_move_tail(&f->list, &tmp_del_list);
+ }
+
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ if (!f->changed)
+ continue;
+
+ if (f->counter == 0)
+ continue;
+ f->changed = false;
+
+ /* Clone MAC filter entry and add into temporary list */
+ fclone = i40e_mac_filter_entry_clone(f);
+ if (!fclone) {
+ err_cond = true;
+ break;
+ }
+ list_add_tail(&fclone->list, &tmp_add_list);
+ }
+
+ /* if cloning a MAC filter entry failed, undo the changes */
+ if (err_cond) {
+ i40e_undo_del_filter_entries(vsi, &tmp_del_list);
+ i40e_undo_add_filter_entries(vsi);
+ }
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+ if (err_cond)
+ i40e_cleanup_add_list(&tmp_add_list);
+ }
+
+ /* Now process 'del_list' outside the lock */
+ if (!list_empty(&tmp_del_list)) {
+ filter_list_len = pf->hw.aq.asq_buf_size /
+ sizeof(struct i40e_aqc_remove_macvlan_element_data);
+ del_list = kcalloc(filter_list_len,
+ sizeof(struct i40e_aqc_remove_macvlan_element_data),
+ GFP_KERNEL);
+ if (!del_list) {
+ i40e_cleanup_add_list(&tmp_add_list);
+
+ /* Undo VSI's MAC filter entry element updates */
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+ i40e_undo_del_filter_entries(vsi, &tmp_del_list);
+ i40e_undo_add_filter_entries(vsi);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+ return -ENOMEM;
+ }
+
+ list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) {
cmd_flags = 0;
/* add to delete list */
@@ -1813,10 +1964,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
del_list[num_del].flags = cmd_flags;
num_del++;
- /* unlink from filter list */
- list_del(&f->list);
- kfree(f);
-
/* flush a full buffer */
if (num_del == filter_list_len) {
ret = i40e_aq_remove_macvlan(&pf->hw,
@@ -1827,12 +1974,18 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
memset(del_list, 0, sizeof(*del_list));
if (ret && aq_err != I40E_AQ_RC_ENOENT)
- dev_info(&pf->pdev->dev,
- "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
- i40e_stat_str(&pf->hw, ret),
- i40e_aq_str(&pf->hw, aq_err));
+ dev_err(&pf->pdev->dev,
+ "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, aq_err));
}
+ /* Release memory for MAC filter entries which were
+ * synced up with HW.
+ */
+ list_del(&f->list);
+ kfree(f);
}
+
if (num_del) {
ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
del_list, num_del, NULL);
@@ -1848,6 +2001,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
kfree(del_list);
del_list = NULL;
+ }
+
+ if (!list_empty(&tmp_add_list)) {
/* do all the adds now */
filter_list_len = pf->hw.aq.asq_buf_size /
@@ -1855,16 +2011,19 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
add_list = kcalloc(filter_list_len,
sizeof(struct i40e_aqc_add_macvlan_element_data),
GFP_KERNEL);
- if (!add_list)
+ if (!add_list) {
+ /* Purge elements from the temporary add list */
+ i40e_cleanup_add_list(&tmp_add_list);
+
+ /* Undo add filter entries from VSI MAC filter list */
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+ i40e_undo_add_filter_entries(vsi);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
return -ENOMEM;
+ }
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
- if (!f->changed)
- continue;
+ list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {
- if (f->counter == 0)
- continue;
- f->changed = false;
add_happened = true;
cmd_flags = 0;
@@ -1891,7 +2050,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
break;
memset(add_list, 0, sizeof(*add_list));
}
+ /* Entries from tmp_add_list were cloned from MAC
+ * filter list, hence clean those cloned entries
+ */
+ list_del(&f->list);
+ kfree(f);
}
+
if (num_add) {
ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
add_list, num_add, NULL);
@@ -1920,6 +2085,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
/* check for changes in promiscuous modes */
if (changed_flags & IFF_ALLMULTI) {
bool cur_multipromisc;
+
cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
vsi->seid,
@@ -1934,6 +2100,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
}
if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
bool cur_promisc;
+
cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
&vsi->state));
@@ -1945,7 +2112,11 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
*/
if (pf->cur_promisc != cur_promisc) {
pf->cur_promisc = cur_promisc;
- i40e_do_reset_safe(pf,
+ if (grab_rtnl)
+ i40e_do_reset_safe(pf,
+ BIT(__I40E_PF_RESET_REQUESTED));
+ else
+ i40e_do_reset(pf,
BIT(__I40E_PF_RESET_REQUESTED));
}
} else {
@@ -1996,7 +2167,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v] &&
(pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
- i40e_sync_vsi_filters(pf->vsi[v]);
+ i40e_sync_vsi_filters(pf->vsi[v], true);
}
}
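The new grab_rtnl argument only matters when a promiscuous-mode change forces a PF reset: paths that already hold the RTNL lock (the ndo callbacks such as i40e_set_mac() above) pass false so the plain i40e_do_reset() is used, while paths that do not hold RTNL (such as this service-task subtask) pass true so the reset goes through i40e_do_reset_safe(). A minimal sketch of the two kinds of call site, assuming a valid vsi pointer:

	/* from an ndo callback, already running under RTNL */
	i40e_sync_vsi_filters(vsi, false);

	/* from the service task, which does not hold RTNL */
	i40e_sync_vsi_filters(vsi, true);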
@@ -2137,6 +2308,9 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
is_vf = (vsi->type == I40E_VSI_SRIOV);
is_netdev = !!(vsi->netdev);
+ /* Locked once because all functions invoked below iterate the list */
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+
if (is_netdev) {
add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
is_vf, is_netdev);
@@ -2144,6 +2318,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
dev_info(&vsi->back->pdev->dev,
"Could not add vlan filter %d for %pM\n",
vid, vsi->netdev->dev_addr);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
return -ENOMEM;
}
}
@@ -2154,6 +2329,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
dev_info(&vsi->back->pdev->dev,
"Could not add vlan filter %d for %pM\n",
vid, f->macaddr);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
return -ENOMEM;
}
}
@@ -2175,6 +2351,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
dev_info(&vsi->back->pdev->dev,
"Could not add filter 0 for %pM\n",
vsi->netdev->dev_addr);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
return -ENOMEM;
}
}
@@ -2183,27 +2360,33 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
/* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
if (vid > 0 && !vsi->info.pvid) {
list_for_each_entry(f, &vsi->mac_filter_list, list) {
- if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
- is_vf, is_netdev)) {
- i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
- is_vf, is_netdev);
- add_f = i40e_add_filter(vsi, f->macaddr,
- 0, is_vf, is_netdev);
- if (!add_f) {
- dev_info(&vsi->back->pdev->dev,
- "Could not add filter 0 for %pM\n",
- f->macaddr);
- return -ENOMEM;
- }
+ if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
+ is_vf, is_netdev))
+ continue;
+ i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
+ is_vf, is_netdev);
+ add_f = i40e_add_filter(vsi, f->macaddr,
+ 0, is_vf, is_netdev);
+ if (!add_f) {
+ dev_info(&vsi->back->pdev->dev,
+ "Could not add filter 0 for %pM\n",
+ f->macaddr);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+ return -ENOMEM;
}
}
}
+ /* Make sure to release before sync_vsi_filter because that
+ * function will lock/unlock as necessary
+ */
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+
if (test_bit(__I40E_DOWN, &vsi->back->state) ||
test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
return 0;
- return i40e_sync_vsi_filters(vsi);
+ return i40e_sync_vsi_filters(vsi, false);
}
/**
@@ -2223,6 +2406,9 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
is_vf = (vsi->type == I40E_VSI_SRIOV);
is_netdev = !!(netdev);
+ /* Locked once because all functions invoked below iterate the list */
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+
if (is_netdev)
i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
@@ -2253,6 +2439,7 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
dev_info(&vsi->back->pdev->dev,
"Could not add filter %d for %pM\n",
I40E_VLAN_ANY, netdev->dev_addr);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
return -ENOMEM;
}
}
@@ -2261,21 +2448,27 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
list_for_each_entry(f, &vsi->mac_filter_list, list) {
i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
- is_vf, is_netdev);
+ is_vf, is_netdev);
if (!add_f) {
dev_info(&vsi->back->pdev->dev,
"Could not add filter %d for %pM\n",
I40E_VLAN_ANY, f->macaddr);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
return -ENOMEM;
}
}
}
+ /* Make sure to release before sync_vsi_filter because that
+ * function will lock/unlock as necessary
+ */
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+
if (test_bit(__I40E_DOWN, &vsi->back->state) ||
test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
return 0;
- return i40e_sync_vsi_filters(vsi);
+ return i40e_sync_vsi_filters(vsi, false);
}
/**
@@ -2609,8 +2802,6 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
i40e_flush(hw);
- clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
-
/* cache tail off for easier writes later */
ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
@@ -2882,11 +3073,9 @@ static int i40e_vsi_configure(struct i40e_vsi *vsi)
static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- struct i40e_q_vector *q_vector;
struct i40e_hw *hw = &pf->hw;
u16 vector;
int i, q;
- u32 val;
u32 qp;
/* The interrupt indexing is offset by 1 in the PFINT_ITRn
@@ -2896,7 +3085,9 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
qp = vsi->base_queue;
vector = vsi->base_vector;
for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
- q_vector = vsi->q_vectors[i];
+ struct i40e_q_vector *q_vector = vsi->q_vectors[i];
+
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
q_vector->rx.latency_range = I40E_LOW_LATENCY;
wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
@@ -2905,10 +3096,14 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
q_vector->tx.latency_range = I40E_LOW_LATENCY;
wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
q_vector->tx.itr);
+ wr32(hw, I40E_PFINT_RATEN(vector - 1),
+ INTRL_USEC_TO_REG(vsi->int_rate_limit));
/* Linked list for the queuepairs assigned to this vector */
wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
for (q = 0; q < q_vector->num_ringpairs; q++) {
+ u32 val;
+
val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
(I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
@@ -2988,6 +3183,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
u32 val;
/* set the ITR configuration */
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
q_vector->rx.latency_range = I40E_LOW_LATENCY;
wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
@@ -3046,24 +3242,6 @@ void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
}
/**
- * i40e_irq_dynamic_enable - Enable default interrupt generation settings
- * @vsi: pointer to a vsi
- * @vector: enable a particular Hw Interrupt vector
- **/
-void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
-{
- struct i40e_pf *pf = vsi->back;
- struct i40e_hw *hw = &pf->hw;
- u32 val;
-
- val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
- (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
- wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
- /* skip the flush */
-}
-
-/**
* i40e_irq_dynamic_disable - Disable default interrupt generation settings
* @vsi: pointer to a vsi
* @vector: disable a particular Hw Interrupt vector
@@ -3091,7 +3269,7 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
if (!q_vector->tx.ring && !q_vector->rx.ring)
return IRQ_HANDLED;
- napi_schedule(&q_vector->napi);
+ napi_schedule_irqoff(&q_vector->napi);
return IRQ_HANDLED;
}
@@ -3136,8 +3314,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
q_vector);
if (err) {
dev_info(&pf->pdev->dev,
- "%s: request_irq failed, error: %d\n",
- __func__, err);
+ "MSIX request_irq failed, error: %d\n", err);
goto free_queue_irqs;
}
/* assign the mask for this irq */
@@ -3202,8 +3379,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
int i;
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
- for (i = vsi->base_vector;
- i < (vsi->num_q_vectors + vsi->base_vector); i++)
+ for (i = 0; i < vsi->num_q_vectors; i++)
i40e_irq_dynamic_enable(vsi, i);
} else {
i40e_irq_dynamic_enable_icr0(pf);
@@ -3262,9 +3438,12 @@ static irqreturn_t i40e_intr(int irq, void *data)
/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_q_vector *q_vector = vsi->q_vectors[0];
/* temporarily disable queue cause for NAPI processing */
u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
+
qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
wr32(hw, I40E_QINT_RQCTL(0), qval);
@@ -3273,7 +3452,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
wr32(hw, I40E_QINT_TQCTL(0), qval);
if (!test_bit(__I40E_DOWN, &pf->state))
- napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
+ napi_schedule_irqoff(&q_vector->napi);
}
if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
@@ -3434,10 +3613,9 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
i += tx_ring->count;
tx_ring->next_to_clean = i;
- if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
- i40e_irq_dynamic_enable(vsi,
- tx_ring->q_vector->v_idx + vsi->base_vector);
- }
+ if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
+ i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
+
return budget > 0;
}
@@ -3575,14 +3753,12 @@ static void i40e_netpoll(struct net_device *netdev)
if (test_bit(__I40E_DOWN, &vsi->state))
return;
- pf->flags |= I40E_FLAG_IN_NETPOLL;
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
for (i = 0; i < vsi->num_q_vectors; i++)
i40e_msix_clean_rings(0, vsi->q_vectors[i]);
} else {
i40e_intr(pf->pdev->irq, netdev);
}
- pf->flags &= ~I40E_FLAG_IN_NETPOLL;
}
#endif
@@ -3663,9 +3839,8 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
ret = i40e_pf_txq_wait(pf, pf_q, enable);
if (ret) {
dev_info(&pf->pdev->dev,
- "%s: VSI seid %d Tx ring %d %sable timeout\n",
- __func__, vsi->seid, pf_q,
- (enable ? "en" : "dis"));
+ "VSI seid %d Tx ring %d %sable timeout\n",
+ vsi->seid, pf_q, (enable ? "en" : "dis"));
break;
}
}
@@ -3741,9 +3916,8 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
ret = i40e_pf_rxq_wait(pf, pf_q, enable);
if (ret) {
dev_info(&pf->pdev->dev,
- "%s: VSI seid %d Rx ring %d %sable timeout\n",
- __func__, vsi->seid, pf_q,
- (enable ? "en" : "dis"));
+ "VSI seid %d Rx ring %d %sable timeout\n",
+ vsi->seid, pf_q, (enable ? "en" : "dis"));
break;
}
}
@@ -4038,17 +4212,15 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
vsi->type == I40E_VSI_FCOE) {
dev_dbg(&vsi->back->pdev->dev,
- "%s: VSI seid %d skipping FCoE VSI disable\n",
- __func__, vsi->seid);
+ "VSI seid %d skipping FCoE VSI disable\n", vsi->seid);
return;
}
set_bit(__I40E_NEEDS_RESTART, &vsi->state);
- if (vsi->netdev && netif_running(vsi->netdev)) {
+ if (vsi->netdev && netif_running(vsi->netdev))
vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
- } else {
+ else
i40e_vsi_close(vsi);
- }
}
/**
@@ -4113,8 +4285,8 @@ static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi)
ret = i40e_pf_txq_wait(pf, pf_q, false);
if (ret) {
dev_info(&pf->pdev->dev,
- "%s: VSI seid %d Tx ring %d disable timeout\n",
- __func__, vsi->seid, pf_q);
+ "VSI seid %d Tx ring %d disable timeout\n",
+ vsi->seid, pf_q);
return ret;
}
}
@@ -4146,6 +4318,108 @@ static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf)
}
#endif
+
+/**
+ * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
+ * @q_idx: TX queue number
+ * @vsi: Pointer to VSI struct
+ *
+ * This function checks the specified queue for the given VSI. It detects a
+ * hung condition and sets the hung bit, since detection is a two-step
+ * process. If napi_poll runs before the next run of the service task, it
+ * resets the 'hung' bit for the respective q_vector. If not, the hung
+ * condition remains unchanged and during the subsequent run this function
+ * issues a SW interrupt to recover from the hung condition.
+ **/
+static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
+{
+ struct i40e_ring *tx_ring = NULL;
+ struct i40e_pf *pf;
+ u32 head, val, tx_pending;
+ int i;
+
+ pf = vsi->back;
+
+ /* now that we have an index, find the tx_ring struct */
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
+ if (q_idx == vsi->tx_rings[i]->queue_index) {
+ tx_ring = vsi->tx_rings[i];
+ break;
+ }
+ }
+ }
+
+ if (!tx_ring)
+ return;
+
+ /* Read interrupt register */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ val = rd32(&pf->hw,
+ I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
+ tx_ring->vsi->base_vector - 1));
+ else
+ val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
+
+ head = i40e_get_head(tx_ring);
+
+ tx_pending = i40e_get_tx_pending(tx_ring);
+
+ /* Interrupts are disabled and TX pending is non-zero,
+ * trigger the SW interrupt (don't wait). Worst case
+ * there will be one extra interrupt, which may not clean
+ * any queues because they have already been cleaned.
+ */
+ if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
+ i40e_force_wb(vsi, tx_ring->q_vector);
+}
+
+/**
+ * i40e_detect_recover_hung - Function to detect and recover hung_queues
+ * @pf: pointer to PF struct
+ *
+ * The LAN VSI has a netdev, and that netdev has TX queues. This function
+ * checks each of those TX queues for a hung condition and, if one is found,
+ * triggers recovery by issuing a SW interrupt.
+ **/
+static void i40e_detect_recover_hung(struct i40e_pf *pf)
+{
+ struct net_device *netdev;
+ struct i40e_vsi *vsi;
+ int i;
+
+ /* Only for LAN VSI */
+ vsi = pf->vsi[pf->lan_vsi];
+
+ if (!vsi)
+ return;
+
+ /* Make sure VSI state is not DOWN/RECOVERY_PENDING */
+ if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+ test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+ return;
+
+ /* Make sure type is MAIN VSI */
+ if (vsi->type != I40E_VSI_MAIN)
+ return;
+
+ netdev = vsi->netdev;
+ if (!netdev)
+ return;
+
+ /* Bail out if netif_carrier is not OK */
+ if (!netif_carrier_ok(netdev))
+ return;
+
+ /* Go through the TX queues for the netdev */
+ for (i = 0; i < netdev->num_tx_queues; i++) {
+ struct netdev_queue *q;
+
+ q = netdev_get_tx_queue(netdev, i);
+ if (q)
+ i40e_detect_recover_hung_queue(i, vsi);
+ }
+}
+
/**
* i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
* @pf: pointer to PF
@@ -4745,11 +5019,14 @@ out:
* i40e_print_link_message - print link up or down
* @vsi: the VSI for which link needs a message
*/
-static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
+void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
{
- char speed[SPEED_SIZE] = "Unknown";
- char fc[FC_SIZE] = "RX/TX";
+ char *speed = "Unknown";
+ char *fc = "Unknown";
+ if (vsi->current_isup == isup)
+ return;
+ vsi->current_isup = isup;
if (!isup) {
netdev_info(vsi->netdev, "NIC Link is Down\n");
return;
@@ -4766,19 +5043,19 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
switch (vsi->back->hw.phy.link_info.link_speed) {
case I40E_LINK_SPEED_40GB:
- strlcpy(speed, "40 Gbps", SPEED_SIZE);
+ speed = "40 G";
break;
case I40E_LINK_SPEED_20GB:
- strncpy(speed, "20 Gbps", SPEED_SIZE);
+ speed = "20 G";
break;
case I40E_LINK_SPEED_10GB:
- strlcpy(speed, "10 Gbps", SPEED_SIZE);
+ speed = "10 G";
break;
case I40E_LINK_SPEED_1GB:
- strlcpy(speed, "1000 Mbps", SPEED_SIZE);
+ speed = "1000 M";
break;
case I40E_LINK_SPEED_100MB:
- strncpy(speed, "100 Mbps", SPEED_SIZE);
+ speed = "100 M";
break;
default:
break;
@@ -4786,20 +5063,20 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
switch (vsi->back->hw.fc.current_mode) {
case I40E_FC_FULL:
- strlcpy(fc, "RX/TX", FC_SIZE);
+ fc = "RX/TX";
break;
case I40E_FC_TX_PAUSE:
- strlcpy(fc, "TX", FC_SIZE);
+ fc = "TX";
break;
case I40E_FC_RX_PAUSE:
- strlcpy(fc, "RX", FC_SIZE);
+ fc = "RX";
break;
default:
- strlcpy(fc, "None", FC_SIZE);
+ fc = "None";
break;
}
- netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n",
+ netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n",
speed, fc);
}
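With the shortened speed strings above and the "%sbps" format, the link-up message expands to lines such as the following (illustrative output for a 40G link with full flow control and a 100M link with none):

	/* "NIC Link is Up 40 Gbps Full Duplex, Flow Control: RX/TX" */
	/* "NIC Link is Up 100 Mbps Full Duplex, Flow Control: None" */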
@@ -5218,15 +5495,13 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
"VSI reinit requested\n");
for (v = 0; v < pf->num_alloc_vsi; v++) {
struct i40e_vsi *vsi = pf->vsi[v];
+
if (vsi != NULL &&
test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
i40e_vsi_reinit_locked(pf->vsi[v]);
clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
}
}
-
- /* no further action needed, so return now */
- return;
} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
int v;
@@ -5234,6 +5509,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
dev_info(&pf->pdev->dev, "VSI down requested\n");
for (v = 0; v < pf->num_alloc_vsi; v++) {
struct i40e_vsi *vsi = pf->vsi[v];
+
if (vsi != NULL &&
test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
set_bit(__I40E_DOWN, &vsi->state);
@@ -5241,13 +5517,9 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
}
}
-
- /* no further action needed, so return now */
- return;
} else {
dev_info(&pf->pdev->dev,
"bad reset request 0x%08x\n", reset_flags);
- return;
}
}
@@ -5303,8 +5575,7 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
}
- dev_dbg(&pf->pdev->dev, "%s: need_reconfig=%d\n", __func__,
- need_reconfig);
+ dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
return need_reconfig;
}
@@ -5331,16 +5602,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
/* Ignore if event is not for Nearest Bridge */
type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
- dev_dbg(&pf->pdev->dev,
- "%s: LLDP event mib bridge type 0x%x\n", __func__, type);
+ dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
return ret;
/* Check MIB Type and return if event for Remote MIB update */
type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
dev_dbg(&pf->pdev->dev,
- "%s: LLDP event mib type %s\n", __func__,
- type ? "remote" : "local");
+ "LLDP event mib type %s\n", type ? "remote" : "local");
if (type == I40E_AQ_LLDP_MIB_REMOTE) {
/* Update the remote cached instance and return */
ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
@@ -5525,7 +5794,9 @@ u32 i40e_get_global_fd_count(struct i40e_pf *pf)
**/
void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
{
+ struct i40e_fdir_filter *filter;
u32 fcnt_prog, fcnt_avail;
+ struct hlist_node *node;
if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
return;
@@ -5554,6 +5825,18 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
}
}
+
+ /* if hw had a problem adding a filter, delete it */
+ if (pf->fd_inv > 0) {
+ hlist_for_each_entry_safe(filter, node,
+ &pf->fdir_filter_list, fdir_node) {
+ if (filter->fd_id == pf->fd_inv) {
+ hlist_del(&filter->fdir_node);
+ kfree(filter);
+ pf->fdir_pf_active_filters--;
+ }
+ }
+ }
}
#define I40E_MIN_FD_FLUSH_INTERVAL 10
@@ -5573,49 +5856,51 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
return;
- if (time_after(jiffies, pf->fd_flush_timestamp +
- (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
- /* If the flush is happening too quick and we have mostly
- * SB rules we should not re-enable ATR for some time.
- */
- min_flush_time = pf->fd_flush_timestamp
- + (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
- fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
+ if (!time_after(jiffies, pf->fd_flush_timestamp +
+ (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
+ return;
- if (!(time_after(jiffies, min_flush_time)) &&
- (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
- dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
- disable_atr = true;
- }
+ /* If the flush is happening too quick and we have mostly SB rules we
+ * should not re-enable ATR for some time.
+ */
+ min_flush_time = pf->fd_flush_timestamp +
+ (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
+ fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
- pf->fd_flush_timestamp = jiffies;
- pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- /* flush all filters */
- wr32(&pf->hw, I40E_PFQF_CTL_1,
- I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
- i40e_flush(&pf->hw);
- pf->fd_flush_cnt++;
- pf->fd_add_err = 0;
- do {
- /* Check FD flush status every 5-6msec */
- usleep_range(5000, 6000);
- reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
- if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
- break;
- } while (flush_wait_retry--);
- if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
- dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
- } else {
- /* replay sideband filters */
- i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
- if (!disable_atr)
- pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
- clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
- dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
- }
+ if (!(time_after(jiffies, min_flush_time)) &&
+ (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
+ disable_atr = true;
+ }
+
+ pf->fd_flush_timestamp = jiffies;
+ pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ /* flush all filters */
+ wr32(&pf->hw, I40E_PFQF_CTL_1,
+ I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
+ i40e_flush(&pf->hw);
+ pf->fd_flush_cnt++;
+ pf->fd_add_err = 0;
+ do {
+ /* Check FD flush status every 5-6msec */
+ usleep_range(5000, 6000);
+ reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
+ if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
+ break;
+ } while (flush_wait_retry--);
+ if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
+ dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
+ } else {
+ /* replay sideband filters */
+ i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
+ if (!disable_atr)
+ pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+ clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
}
+
}
/**
@@ -5723,15 +6008,23 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
**/
static void i40e_link_event(struct i40e_pf *pf)
{
- bool new_link, old_link;
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
u8 new_link_speed, old_link_speed;
+ i40e_status status;
+ bool new_link, old_link;
/* set this to force the get_link_status call to refresh state */
pf->hw.phy.get_link_info = true;
old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
- new_link = i40e_get_link_status(&pf->hw);
+
+ status = i40e_get_link_status(&pf->hw, &new_link);
+ if (status) {
+ dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
+ status);
+ return;
+ }
+
old_link_speed = pf->hw.phy.link_info_old.link_speed;
new_link_speed = pf->hw.phy.link_info.link_speed;
@@ -5760,68 +6053,6 @@ static void i40e_link_event(struct i40e_pf *pf)
}
/**
- * i40e_check_hang_subtask - Check for hung queues and dropped interrupts
- * @pf: board private structure
- *
- * Set the per-queue flags to request a check for stuck queues in the irq
- * clean functions, then force interrupts to be sure the irq clean is called.
- **/
-static void i40e_check_hang_subtask(struct i40e_pf *pf)
-{
- int i, v;
-
- /* If we're down or resetting, just bail */
- if (test_bit(__I40E_DOWN, &pf->state) ||
- test_bit(__I40E_CONFIG_BUSY, &pf->state))
- return;
-
- /* for each VSI/netdev
- * for each Tx queue
- * set the check flag
- * for each q_vector
- * force an interrupt
- */
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- struct i40e_vsi *vsi = pf->vsi[v];
- int armed = 0;
-
- if (!pf->vsi[v] ||
- test_bit(__I40E_DOWN, &vsi->state) ||
- (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
- continue;
-
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- set_check_for_tx_hang(vsi->tx_rings[i]);
- if (test_bit(__I40E_HANG_CHECK_ARMED,
- &vsi->tx_rings[i]->state))
- armed++;
- }
-
- if (armed) {
- if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
- wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
- (I40E_PFINT_DYN_CTL0_INTENA_MASK |
- I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
- I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
- I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
- I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK));
- } else {
- u16 vec = vsi->base_vector - 1;
- u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
- I40E_PFINT_DYN_CTLN_ITR_INDX_MASK |
- I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK |
- I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK);
- for (i = 0; i < vsi->num_q_vectors; i++, vec++)
- wr32(&vsi->back->hw,
- I40E_PFINT_DYN_CTLN(vec), val);
- }
- i40e_flush(&vsi->back->hw);
- }
- }
-}
-
-/**
* i40e_watchdog_subtask - periodic checks not using event driven response
* @pf: board private structure
**/
@@ -5840,8 +6071,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
return;
pf->service_timer_previous = jiffies;
- i40e_check_hang_subtask(pf);
- i40e_link_event(pf);
+ if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED)
+ i40e_link_event(pf);
/* Update the stats for active netdevs so the network stack
* can look at updated numbers whenever it cares to
@@ -5850,10 +6081,12 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
if (pf->vsi[i] && pf->vsi[i]->netdev)
i40e_update_stats(pf->vsi[i]);
- /* Update the stats for the active switching components */
- for (i = 0; i < I40E_MAX_VEB; i++)
- if (pf->veb[i])
- i40e_update_veb_stats(pf->veb[i]);
+ if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
+ /* Update the stats for the active switching components */
+ for (i = 0; i < I40E_MAX_VEB; i++)
+ if (pf->veb[i])
+ i40e_update_veb_stats(pf->veb[i]);
+ }
i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
}
@@ -6164,8 +6397,9 @@ static void i40e_config_bridge_mode(struct i40e_veb *veb)
{
struct i40e_pf *pf = veb->pf;
- dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
- veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+ if (pf->hw.debug_mask & I40E_DEBUG_LAN)
+ dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
+ veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
if (veb->bridge_mode & BRIDGE_MODE_VEPA)
i40e_disable_pf_switch_lb(pf);
else
@@ -6232,6 +6466,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
if (pf->vsi[v]->veb_idx == veb->idx) {
struct i40e_vsi *vsi = pf->vsi[v];
+
vsi->uplink_seid = veb->seid;
ret = i40e_add_vsi(vsi);
if (ret) {
@@ -6296,12 +6531,6 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
}
} while (err);
- if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
- (pf->hw.aq.fw_maj_ver < 2)) {
- pf->hw.func_caps.num_msix_vectors++;
- pf->hw.func_caps.num_msix_vectors_vf++;
- }
-
if (pf->hw.debug_mask & I40E_DEBUG_USER)
dev_info(&pf->pdev->dev,
"pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
@@ -6514,9 +6743,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
}
#endif /* CONFIG_I40E_DCB */
#ifdef I40E_FCOE
- ret = i40e_init_pf_fcoe(pf);
- if (ret)
- dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", ret);
+ i40e_init_pf_fcoe(pf);
#endif
/* do basic switch setup */
@@ -6538,9 +6765,9 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
/* make sure our flow control settings are restored */
ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
if (ret)
- dev_info(&pf->pdev->dev, "set fc fail, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Rebuild the VSIs and VEBs that existed before reset.
* They are still in our local switch element arrays, so only
@@ -6610,6 +6837,15 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
ret = i40e_setup_misc_vector(pf);
+ /* Add a filter to drop all Flow control frames from any VSI so that they
+ * are never transmitted. By doing so we stop a malicious VF from sending out
+ * PAUSE or PFC frames and potentially controlling traffic for other
+ * PF/VF VSIs.
+ * The FW can still send Flow control frames if enabled.
+ */
+ i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
+ pf->main_vsi_seid);
+
/* restart the VSIs that were rebuilt and running before the reset */
i40e_pf_unquiesce_all_vsi(pf);
@@ -6808,6 +7044,7 @@ static void i40e_service_task(struct work_struct *work)
return;
}
+ i40e_detect_recover_hung(pf);
i40e_reset_subtask(pf);
i40e_handle_mdd_event(pf);
i40e_vc_process_vflr_event(pf);
@@ -6991,6 +7228,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
vsi->idx = vsi_idx;
vsi->rx_itr_setting = pf->rx_itr_default;
vsi->tx_itr_setting = pf->tx_itr_default;
+ vsi->int_rate_limit = 0;
vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
pf->rss_table_size : 64;
vsi->netdev_registered = false;
@@ -7009,6 +7247,8 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
/* Setup default MSIX irq handler for VSI */
i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
+ /* Initialize VSI lock */
+ spin_lock_init(&vsi->mac_filter_list_lock);
pf->vsi[vsi_idx] = vsi;
ret = vsi_idx;
goto unlock_pf;
@@ -7566,7 +7806,7 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed)
"Cannot set RSS key, err %s aq_err %s\n",
i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
- return ret;
+ goto config_rss_aq_out;
}
if (vsi->type == I40E_VSI_MAIN)
@@ -7580,6 +7820,8 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed)
i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+config_rss_aq_out:
+ kfree(rss_lut);
return ret;
}
@@ -7854,6 +8096,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
/* Set default capability flags */
pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
I40E_FLAG_MSI_ENABLED |
+ I40E_FLAG_LINK_POLLING_ENABLED |
I40E_FLAG_MSIX_ENABLED;
if (iommu_present(&pci_bus_type))
@@ -7896,12 +8139,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
(pf->hw.func_caps.fd_filters_best_effort > 0)) {
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
- if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
- pf->flags |= I40E_FLAG_FD_SB_ENABLED;
- } else {
+ if (pf->flags & I40E_FLAG_MFP_ENABLED &&
+ pf->hw.num_partitions > 1)
dev_info(&pf->pdev->dev,
"Flow Director Sideband mode Disabled in MFP mode\n");
- }
+ else
+ pf->flags |= I40E_FLAG_FD_SB_ENABLED;
pf->fdir_pf_filter_count =
pf->hw.func_caps.fd_filters_guaranteed;
pf->hw.fdir_shared_filter_count =
@@ -7915,9 +8158,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
}
#ifdef I40E_FCOE
- err = i40e_init_pf_fcoe(pf);
- if (err)
- dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", err);
+ i40e_init_pf_fcoe(pf);
#endif /* I40E_FCOE */
#ifdef CONFIG_PCI_IOV
@@ -7941,6 +8182,9 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->lan_veb = I40E_NO_VEB;
pf->lan_vsi = I40E_NO_VSI;
+ /* By default FW has this off for performance reasons */
+ pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
+
/* set up queue assignment tracking */
size = sizeof(struct i40e_lump_tracking)
+ (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
@@ -8120,9 +8364,6 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
pf->vxlan_ports[idx] = 0;
pf->pending_vxlan_bitmap |= BIT_ULL(idx);
pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
-
- dev_info(&pf->pdev->dev, "deleting vxlan port %d\n",
- ntohs(port));
} else {
netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
ntohs(port));
@@ -8274,13 +8515,15 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
* @seq: RTNL message seq #
* @dev: the netdev being configured
* @filter_mask: unused
+ * @nlflags: netlink flags passed in
*
* Return the mode in which the hardware bridge is operating in
* i.e VEB or VEPA.
**/
static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev,
- u32 filter_mask, int nlflags)
+ u32 __always_unused filter_mask,
+ int nlflags)
{
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
@@ -8309,7 +8552,7 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
/**
* i40e_features_check - Validate encapsulated packet conforms to limits
* @skb: skb buff
- * @netdev: This physical port's netdev
+ * @dev: This physical port's netdev
* @features: Offload features that the stack believes apply
**/
static netdev_features_t i40e_features_check(struct sk_buff *skb,
@@ -8424,17 +8667,26 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
* default a MAC-VLAN filter that accepts any tagged packet
* which must be replaced by a normal filter.
*/
- if (!i40e_rm_default_mac_filter(vsi, mac_addr))
+ if (!i40e_rm_default_mac_filter(vsi, mac_addr)) {
+ spin_lock_bh(&vsi->mac_filter_list_lock);
i40e_add_filter(vsi, mac_addr,
I40E_VLAN_ANY, false, true);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+ }
} else {
/* relate the VSI_VMDQ name to the VSI_MAIN name */
snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
pf->vsi[pf->lan_vsi]->netdev->name);
random_ether_addr(mac_addr);
+
+ spin_lock_bh(&vsi->mac_filter_list_lock);
i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
}
+
+ spin_lock_bh(&vsi->mac_filter_list_lock);
i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
ether_addr_copy(netdev->dev_addr, mac_addr);
ether_addr_copy(netdev->perm_addr, mac_addr);
@@ -8490,12 +8742,22 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
return 1;
veb = pf->veb[vsi->veb_idx];
+ if (!veb) {
+ dev_info(&pf->pdev->dev,
+ "There is no veb associated with the bridge\n");
+ return -ENOENT;
+ }
+
/* Uplink is a bridge in VEPA mode */
- if (veb && (veb->bridge_mode & BRIDGE_MODE_VEPA))
+ if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
return 0;
+ } else {
+ /* Uplink is a bridge in VEB mode */
+ return 1;
+ }
- /* Uplink is a bridge in VEB mode */
- return 1;
+ /* VEPA is now default bridge, so return 0 */
+ return 0;
}
/**
@@ -8508,10 +8770,13 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
int ret = -ENODEV;
- struct i40e_mac_filter *f, *ftmp;
+ u8 laa_macaddr[ETH_ALEN];
+ bool found_laa_mac_filter = false;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_vsi_context ctxt;
+ struct i40e_mac_filter *f, *ftmp;
+
u8 enabled_tc = 0x1; /* TC0 enabled */
int f_count = 0;
@@ -8683,32 +8948,41 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
vsi->id = ctxt.vsi_number;
}
+ spin_lock_bh(&vsi->mac_filter_list_lock);
/* If macvlan filters already exist, force them to get loaded */
list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
f->changed = true;
f_count++;
+ /* Expected to have only one MAC filter entry for LAA in list */
if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
- struct i40e_aqc_remove_macvlan_element_data element;
+ ether_addr_copy(laa_macaddr, f->macaddr);
+ found_laa_mac_filter = true;
+ }
+ }
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
- memset(&element, 0, sizeof(element));
- ether_addr_copy(element.mac_addr, f->macaddr);
- element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
- ret = i40e_aq_remove_macvlan(hw, vsi->seid,
- &element, 1, NULL);
- if (ret) {
- /* some older FW has a different default */
- element.flags |=
- I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
- i40e_aq_remove_macvlan(hw, vsi->seid,
- &element, 1, NULL);
- }
+ if (found_laa_mac_filter) {
+ struct i40e_aqc_remove_macvlan_element_data element;
- i40e_aq_mac_address_write(hw,
- I40E_AQC_WRITE_TYPE_LAA_WOL,
- f->macaddr, NULL);
+ memset(&element, 0, sizeof(element));
+ ether_addr_copy(element.mac_addr, laa_macaddr);
+ element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+ ret = i40e_aq_remove_macvlan(hw, vsi->seid,
+ &element, 1, NULL);
+ if (ret) {
+ /* some older FW has a different default */
+ element.flags |=
+ I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ i40e_aq_remove_macvlan(hw, vsi->seid,
+ &element, 1, NULL);
}
+
+ i40e_aq_mac_address_write(hw,
+ I40E_AQC_WRITE_TYPE_LAA_WOL,
+ laa_macaddr, NULL);
}
+
if (f_count) {
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
pf->flags |= I40E_FLAG_FILTER_SYNC;
@@ -8771,10 +9045,13 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
i40e_vsi_disable_irq(vsi);
}
+ spin_lock_bh(&vsi->mac_filter_list_lock);
list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
i40e_del_filter(vsi, f->macaddr, f->vlan,
f->is_vf, f->is_netdev);
- i40e_sync_vsi_filters(vsi);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+ i40e_sync_vsi_filters(vsi, false);
i40e_vsi_delete(vsi);
i40e_vsi_free_q_vectors(vsi);
@@ -8999,8 +9276,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
if (veb) {
if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
dev_info(&vsi->back->pdev->dev,
- "%s: New VSI creation error, uplink seid of LAN VSI expected.\n",
- __func__);
+ "New VSI creation error, uplink seid of LAN VSI expected.\n");
return NULL;
}
/* We come up by default in VEPA mode if SRIOV is not
@@ -9650,6 +9926,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
} else {
/* force a reset of TC and queue layout configurations */
u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
+
pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
@@ -9673,7 +9950,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
i40e_config_rss(pf);
/* fill in link information and enable LSE reporting */
- i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
+ i40e_update_link_info(&pf->hw);
i40e_link_event(pf);
/* Initialize user-specific link properties */
@@ -9791,8 +10068,14 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
}
pf->queues_left = queues_left;
+ dev_dbg(&pf->pdev->dev,
+ "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
+ pf->hw.func_caps.num_tx_qp,
+ !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
+ pf->num_lan_qps, pf->rss_size, pf->num_req_vfs, pf->num_vf_qps,
+ pf->num_vmdq_vsis, pf->num_vmdq_qps, queues_left);
#ifdef I40E_FCOE
- dev_info(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
+ dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
#endif
}
@@ -9860,12 +10143,19 @@ static void i40e_print_features(struct i40e_pf *pf)
}
if (pf->flags & I40E_FLAG_DCB_CAPABLE)
buf += sprintf(buf, "DCB ");
+#if IS_ENABLED(CONFIG_VXLAN)
+ buf += sprintf(buf, "VxLAN ");
+#endif
if (pf->flags & I40E_FLAG_PTP)
buf += sprintf(buf, "PTP ");
#ifdef I40E_FCOE
if (pf->flags & I40E_FLAG_FCOE_ENABLED)
buf += sprintf(buf, "FCOE ");
#endif
+ if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
+ buf += sprintf(buf, "VEB ");
+ else
+ buf += sprintf(buf, "VEPA ");
BUG_ON(buf > (string + INFO_STRING_LEN));
dev_info(&pf->pdev->dev, "%s\n", string);
@@ -9886,14 +10176,15 @@ static void i40e_print_features(struct i40e_pf *pf)
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct i40e_aq_get_phy_abilities_resp abilities;
- unsigned long ioremap_len;
struct i40e_pf *pf;
struct i40e_hw *hw;
static u16 pfs_found;
+ u16 wol_nvm_bits;
u16 link_status;
- int err = 0;
+ int err;
u32 len;
u32 i;
+ u8 set_fc_aq_fail;
err = pci_enable_device_mem(pdev);
if (err)
@@ -9939,15 +10230,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw = &pf->hw;
hw->back = pf;
- ioremap_len = min_t(unsigned long, pci_resource_len(pdev, 0),
- I40E_MAX_CSR_SPACE);
+ pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
+ I40E_MAX_CSR_SPACE);
- hw->hw_addr = ioremap(pci_resource_start(pdev, 0), ioremap_len);
+ hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
if (!hw->hw_addr) {
err = -EIO;
dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
(unsigned int)pci_resource_start(pdev, 0),
- (unsigned int)pci_resource_len(pdev, 0), err);
+ pf->ioremap_len, err);
goto err_ioremap;
}
hw->vendor_id = pdev->vendor;
@@ -10005,7 +10296,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->hw.fc.requested_mode = I40E_FC_NONE;
err = i40e_init_adminq(hw);
- dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
+
+ /* provide nvm, fw, api versions */
+ dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
+ hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
+ hw->aq.api_maj_ver, hw->aq.api_min_ver,
+ i40e_nvm_version_str(hw));
+
if (err) {
dev_info(&pdev->dev,
"The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
@@ -10105,10 +10402,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&pf->service_task, i40e_service_task);
clear_bit(__I40E_SERVICE_SCHED, &pf->state);
pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
- pf->link_check_timeout = jiffies;
- /* WoL defaults to disabled */
- pf->wol_en = false;
+ /* NVM bit on means WoL disabled for the port */
+ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
+ if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1)
+ pf->wol_en = false;
+ else
+ pf->wol_en = true;
device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
/* set up the main switch operations */
@@ -10149,6 +10449,25 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
goto err_vsis;
}
+
+ /* Make sure flow control is set according to current settings */
+ err = i40e_set_fc(hw, &set_fc_aq_fail, true);
+ if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
+ dev_dbg(&pf->pdev->dev,
+ "Set fc with err %s aq_err %s on get_phy_cap\n",
+ i40e_stat_str(hw, err),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
+ dev_dbg(&pf->pdev->dev,
+ "Set fc with err %s aq_err %s on set_phy_config\n",
+ i40e_stat_str(hw, err),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
+ dev_dbg(&pf->pdev->dev,
+ "Set fc with err %s aq_err %s on get_link_info\n",
+ i40e_stat_str(hw, err),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+
/* if FDIR VSI was set up, start it now */
for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
@@ -10239,37 +10558,82 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_fcoe_vsi_setup(pf);
#endif
- /* Get the negotiated link width and speed from PCI config space */
- pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
+#define PCI_SPEED_SIZE 8
+#define PCI_WIDTH_SIZE 8
+ /* Devices on the IOSF bus do not have this information
+ * and will report PCI Gen 1 x 1 by default so don't bother
+ * checking them.
+ */
+ if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) {
+ char speed[PCI_SPEED_SIZE] = "Unknown";
+ char width[PCI_WIDTH_SIZE] = "Unknown";
- i40e_set_pci_config_data(hw, link_status);
+ /* Get the negotiated link width and speed from PCI config
+ * space
+ */
+ pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
+ &link_status);
+
+ i40e_set_pci_config_data(hw, link_status);
+
+ switch (hw->bus.speed) {
+ case i40e_bus_speed_8000:
+ strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
+ case i40e_bus_speed_5000:
+ strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
+ case i40e_bus_speed_2500:
+ strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
+ default:
+ break;
+ }
+ switch (hw->bus.width) {
+ case i40e_bus_width_pcie_x8:
+ strncpy(width, "8", PCI_WIDTH_SIZE); break;
+ case i40e_bus_width_pcie_x4:
+ strncpy(width, "4", PCI_WIDTH_SIZE); break;
+ case i40e_bus_width_pcie_x2:
+ strncpy(width, "2", PCI_WIDTH_SIZE); break;
+ case i40e_bus_width_pcie_x1:
+ strncpy(width, "1", PCI_WIDTH_SIZE); break;
+ default:
+ break;
+ }
- dev_info(&pdev->dev, "PCI-Express: %s %s\n",
- (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
- hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
- hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
- "Unknown"),
- (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" :
- hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" :
- hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" :
- hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" :
- "Unknown"));
+ dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
+ speed, width);
- if (hw->bus.width < i40e_bus_width_pcie_x8 ||
- hw->bus.speed < i40e_bus_speed_8000) {
- dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
- dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
+ if (hw->bus.width < i40e_bus_width_pcie_x8 ||
+ hw->bus.speed < i40e_bus_speed_8000) {
+ dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
+ dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
+ }
}
/* get the requested speeds from the fw */
err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
if (err)
- dev_info(&pf->pdev->dev,
- "get phy capabilities failed, err %s aq_err %s, advertised speed settings may not be correct\n",
- i40e_stat_str(&pf->hw, err),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
+ i40e_stat_str(&pf->hw, err),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
+ /* get the supported phy types from the fw */
+ err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
+ if (err)
+ dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
+ i40e_stat_str(&pf->hw, err),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ pf->hw.phy.phy_types = le32_to_cpu(abilities.phy_type);
+
+ /* Add a filter to drop all Flow control frames from any VSI so that they
+ * are never transmitted. By doing so we stop a malicious VF from sending out
+ * PAUSE or PFC frames and potentially controlling traffic for other
+ * PF/VF VSIs.
+ * The FW can still send Flow control frames if enabled.
+ */
+ i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
+ pf->main_vsi_seid);
+
/* print a string summarizing features */
i40e_print_features(pf);
@@ -10317,6 +10681,7 @@ err_dma:
static void i40e_remove(struct pci_dev *pdev)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
+ struct i40e_hw *hw = &pf->hw;
i40e_status ret_code;
int i;
@@ -10324,6 +10689,10 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_ptp_stop(pf);
+ /* Disable RSS in hw */
+ wr32(hw, I40E_PFQF_HENA(0), 0);
+ wr32(hw, I40E_PFQF_HENA(1), 0);
+
/* no more scheduling of any task */
set_bit(__I40E_DOWN, &pf->state);
del_timer_sync(&pf->service_timer);
@@ -10440,7 +10809,7 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
int err;
u32 reg;
- dev_info(&pdev->dev, "%s\n", __func__);
+ dev_dbg(&pdev->dev, "%s\n", __func__);
if (pci_enable_device_mem(pdev)) {
dev_info(&pdev->dev,
"Cannot re-enable PCI device after reset.\n");
@@ -10480,13 +10849,13 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
- dev_info(&pdev->dev, "%s\n", __func__);
+ dev_dbg(&pdev->dev, "%s\n", __func__);
if (test_bit(__I40E_SUSPENDED, &pf->state))
return;
rtnl_lock();
i40e_handle_reset_warning(pf);
- rtnl_lock();
+ rtnl_unlock();
}
/**
@@ -10572,9 +10941,7 @@ static int i40e_resume(struct pci_dev *pdev)
err = pci_enable_device_mem(pdev);
if (err) {
- dev_err(&pdev->dev,
- "%s: Cannot enable PCI device from suspend\n",
- __func__);
+ dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
return err;
}
pci_set_master(pdev);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 9b83abc0e774..6100cdd9ad13 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -290,9 +290,18 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data)
{
- if (hw->mac.type == I40E_MAC_X722)
- return i40e_read_nvm_word_aq(hw, offset, data);
- return i40e_read_nvm_word_srctl(hw, offset, data);
+ enum i40e_status_code ret_code = 0;
+
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ ret_code = i40e_read_nvm_word_aq(hw, offset, data);
+ i40e_release_nvm(hw);
+ }
+ } else {
+ ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
+ }
+ return ret_code;
}
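The AQ-based path above always brackets the read with an explicit NVM ownership acquire/release. A minimal userspace sketch of that guarded-read pattern follows; the helper names and the zero-means-success convention are illustrative stand-ins, not the driver's API.

/* Illustrative sketch of the acquire -> read -> release flow used when
 * I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE is set; all helpers are fake.
 */
#include <stdio.h>

static int fake_acquire_nvm(void) { return 0; }	/* 0 == got ownership */
static void fake_release_nvm(void) { }
static int fake_read_word_aq(unsigned int off, unsigned short *w)
{
	(void)off;
	*w = 0xBEEF;
	return 0;
}

static int read_word_guarded(unsigned int off, unsigned short *w)
{
	int ret = fake_acquire_nvm();

	if (ret)
		return ret;			/* could not take NVM ownership */
	ret = fake_read_word_aq(off, w);	/* do the AQ read while holding it */
	fake_release_nvm();			/* always drop ownership afterwards */
	return ret;
}

int main(void)
{
	unsigned short word = 0;
	int ret = read_word_guarded(0x10, &word);

	printf("ret=%d word=0x%04x\n", ret, word);
	return 0;
}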
/**
@@ -397,9 +406,19 @@ read_nvm_buffer_aq_exit:
i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data)
{
- if (hw->mac.type == I40E_MAC_X722)
- return i40e_read_nvm_buffer_aq(hw, offset, words, data);
- return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+ enum i40e_status_code ret_code = 0;
+
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
+ data);
+ i40e_release_nvm(hw);
+ }
+ } else {
+ ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+ }
+ return ret_code;
}
/**
@@ -418,6 +437,10 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
bool last_command)
{
i40e_status ret_code = I40E_ERR_NVM;
+ struct i40e_asq_cmd_details cmd_details;
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
/* Here we are checking the SR limit only for the flat memory model.
* We cannot do it for the module-based model, as we did not acquire
@@ -443,7 +466,7 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
ret_code = i40e_aq_update_nvm(hw, module_pointer,
2 * offset, /*bytes*/
2 * words, /*bytes*/
- data, last_command, NULL);
+ data, last_command, &cmd_details);
return ret_code;
}
@@ -461,7 +484,7 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
u16 *checksum)
{
- i40e_status ret_code = 0;
+ i40e_status ret_code;
struct i40e_virt_mem vmem;
u16 pcie_alt_module = 0;
u16 checksum_local = 0;
@@ -541,13 +564,16 @@ i40e_calc_nvm_checksum_exit:
**/
i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ i40e_status ret_code;
u16 checksum;
+ __le16 le_sum;
ret_code = i40e_calc_nvm_checksum(hw, &checksum);
- if (!ret_code)
+ if (!ret_code) {
+ le_sum = cpu_to_le16(checksum);
ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
- 1, &checksum, true);
+ 1, &le_sum, true);
+ }
return ret_code;
}
@@ -592,25 +618,31 @@ i40e_validate_nvm_checksum_exit:
static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *errno);
+ u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *errno);
+ u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *errno);
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- int *errno);
+ int *perrno);
static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- int *errno);
+ int *perrno);
static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *errno);
+ u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *errno);
+ u8 *bytes, int *perrno);
+static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
static inline u8 i40e_nvmupd_get_module(u32 val)
{
return (u8)(val & I40E_NVM_MOD_PNT_MASK);
@@ -620,7 +652,7 @@ static inline u8 i40e_nvmupd_get_transaction(u32 val)
return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}
-static char *i40e_nvm_update_state_str[] = {
+static const char * const i40e_nvm_update_state_str[] = {
"I40E_NVMUPD_INVALID",
"I40E_NVMUPD_READ_CON",
"I40E_NVMUPD_READ_SNT",
@@ -634,6 +666,9 @@ static char *i40e_nvm_update_state_str[] = {
"I40E_NVMUPD_CSUM_CON",
"I40E_NVMUPD_CSUM_SA",
"I40E_NVMUPD_CSUM_LCB",
+ "I40E_NVMUPD_STATUS",
+ "I40E_NVMUPD_EXEC_AQ",
+ "I40E_NVMUPD_GET_AQ_RESULT",
};
/**
@@ -641,30 +676,60 @@ static char *i40e_nvm_update_state_str[] = {
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command
* @bytes: pointer to the data buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
*
* Dispatches command depending on what update state is current
**/
i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *errno)
+ u8 *bytes, int *perrno)
{
i40e_status status;
+ enum i40e_nvmupd_cmd upd_cmd;
/* assume success */
- *errno = 0;
+ *perrno = 0;
+
+ /* early check for status command and debug msgs */
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n",
+ i40e_nvm_update_state_str[upd_cmd],
+ hw->nvmupd_state,
+ hw->aq.nvm_release_on_done);
+
+ if (upd_cmd == I40E_NVMUPD_INVALID) {
+ *perrno = -EFAULT;
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_validate_command returns %d errno %d\n",
+ upd_cmd, *perrno);
+ }
+
+ /* a status request returns immediately rather than
+ * going into the state machine
+ */
+ if (upd_cmd == I40E_NVMUPD_STATUS) {
+ bytes[0] = hw->nvmupd_state;
+ return 0;
+ }
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT:
- status = i40e_nvmupd_state_init(hw, cmd, bytes, errno);
+ status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
break;
case I40E_NVMUPD_STATE_READING:
- status = i40e_nvmupd_state_reading(hw, cmd, bytes, errno);
+ status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
break;
case I40E_NVMUPD_STATE_WRITING:
- status = i40e_nvmupd_state_writing(hw, cmd, bytes, errno);
+ status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
+ break;
+
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ status = I40E_ERR_NOT_READY;
+ *perrno = -EBUSY;
break;
default:
@@ -672,7 +737,7 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_NVM,
"NVMUPD: no such state %d\n", hw->nvmupd_state);
status = I40E_NOT_SUPPORTED;
- *errno = -ESRCH;
+ *perrno = -ESRCH;
break;
}
return status;
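A compact standalone model of the dispatch above: a STATUS request is answered immediately without touching the state machine, the two *_WAIT states reject new work with -EBUSY, and anything else falls through to the per-state handler. The enum values and handler shape here are illustrative, not the driver's types.

/* Minimal model of the nvmupd dispatch flow; not the driver's types. */
#include <stdio.h>
#include <errno.h>

enum state { ST_INIT, ST_READING, ST_WRITING, ST_INIT_WAIT, ST_WRITE_WAIT };
enum cmd   { CMD_STATUS, CMD_READ, CMD_WRITE };

static int dispatch(enum state st, enum cmd c, int *perrno, unsigned char *out)
{
	*perrno = 0;

	if (c == CMD_STATUS) {		/* answered immediately, no state change */
		out[0] = (unsigned char)st;
		return 0;
	}

	switch (st) {
	case ST_INIT:
	case ST_READING:
	case ST_WRITING:
		return 0;		/* would call the per-state handler here */
	case ST_INIT_WAIT:
	case ST_WRITE_WAIT:
		*perrno = -EBUSY;	/* previous AQ command still outstanding */
		return -1;
	default:
		*perrno = -ESRCH;
		return -1;
	}
}

int main(void)
{
	unsigned char b[1];
	int err;

	dispatch(ST_WRITE_WAIT, CMD_STATUS, &err, b);
	printf("status byte %d, errno %d\n", b[0], err);

	dispatch(ST_WRITE_WAIT, CMD_WRITE, &err, b);
	printf("write while waiting -> errno %d\n", err);
	return 0;
}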
@@ -683,28 +748,28 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
* @bytes: pointer to the data buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
*
* Process legitimate commands of the Init state and conditionally set next
* state. Reject all other commands.
**/
static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *errno)
+ u8 *bytes, int *perrno)
{
i40e_status status = 0;
enum i40e_nvmupd_cmd upd_cmd;
- upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
switch (upd_cmd) {
case I40E_NVMUPD_READ_SA:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (status) {
- *errno = i40e_aq_rc_to_posix(status,
+ *perrno = i40e_aq_rc_to_posix(status,
hw->aq.asq_last_status);
} else {
- status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
+ status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
i40e_release_nvm(hw);
}
break;
@@ -712,10 +777,10 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
case I40E_NVMUPD_READ_SNT:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (status) {
- *errno = i40e_aq_rc_to_posix(status,
+ *perrno = i40e_aq_rc_to_posix(status,
hw->aq.asq_last_status);
} else {
- status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
+ status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
if (status)
i40e_release_nvm(hw);
else
@@ -726,70 +791,83 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
case I40E_NVMUPD_WRITE_ERA:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
- *errno = i40e_aq_rc_to_posix(status,
+ *perrno = i40e_aq_rc_to_posix(status,
hw->aq.asq_last_status);
} else {
- status = i40e_nvmupd_nvm_erase(hw, cmd, errno);
- if (status)
+ status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
+ if (status) {
i40e_release_nvm(hw);
- else
+ } else {
hw->aq.nvm_release_on_done = true;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
}
break;
case I40E_NVMUPD_WRITE_SA:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
- *errno = i40e_aq_rc_to_posix(status,
+ *perrno = i40e_aq_rc_to_posix(status,
hw->aq.asq_last_status);
} else {
- status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
- if (status)
+ status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+ if (status) {
i40e_release_nvm(hw);
- else
+ } else {
hw->aq.nvm_release_on_done = true;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
}
break;
case I40E_NVMUPD_WRITE_SNT:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
- *errno = i40e_aq_rc_to_posix(status,
+ *perrno = i40e_aq_rc_to_posix(status,
hw->aq.asq_last_status);
} else {
- status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
+ status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
if (status)
i40e_release_nvm(hw);
else
- hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
}
break;
case I40E_NVMUPD_CSUM_SA:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
- *errno = i40e_aq_rc_to_posix(status,
+ *perrno = i40e_aq_rc_to_posix(status,
hw->aq.asq_last_status);
} else {
status = i40e_update_nvm_checksum(hw);
if (status) {
- *errno = hw->aq.asq_last_status ?
+ *perrno = hw->aq.asq_last_status ?
i40e_aq_rc_to_posix(status,
hw->aq.asq_last_status) :
-EIO;
i40e_release_nvm(hw);
} else {
hw->aq.nvm_release_on_done = true;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
}
break;
+ case I40E_NVMUPD_EXEC_AQ:
+ status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
+ break;
+
+ case I40E_NVMUPD_GET_AQ_RESULT:
+ status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
+ break;
+
default:
i40e_debug(hw, I40E_DEBUG_NVM,
"NVMUPD: bad cmd %s in init state\n",
i40e_nvm_update_state_str[upd_cmd]);
status = I40E_ERR_NVM;
- *errno = -ESRCH;
+ *perrno = -ESRCH;
break;
}
return status;
@@ -800,28 +878,28 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
* @bytes: pointer to the data buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
*
* NVM ownership is already held. Process legitimate commands and set any
* change in state; reject all other commands.
**/
static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *errno)
+ u8 *bytes, int *perrno)
{
- i40e_status status;
+ i40e_status status = 0;
enum i40e_nvmupd_cmd upd_cmd;
- upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
switch (upd_cmd) {
case I40E_NVMUPD_READ_SA:
case I40E_NVMUPD_READ_CON:
- status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
+ status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
break;
case I40E_NVMUPD_READ_LCB:
- status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
+ status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
i40e_release_nvm(hw);
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
break;
@@ -831,7 +909,7 @@ static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
"NVMUPD: bad cmd %s in reading state.\n",
i40e_nvm_update_state_str[upd_cmd]);
status = I40E_NOT_SUPPORTED;
- *errno = -ESRCH;
+ *perrno = -ESRCH;
break;
}
return status;
@@ -842,55 +920,68 @@ static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
* @bytes: pointer to the data buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
*
* NVM ownership is already held. Process legitimate commands and set any
* change in state; reject all other commands
**/
static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *errno)
+ u8 *bytes, int *perrno)
{
- i40e_status status;
+ i40e_status status = 0;
enum i40e_nvmupd_cmd upd_cmd;
bool retry_attempt = false;
- upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
retry:
switch (upd_cmd) {
case I40E_NVMUPD_WRITE_CON:
- status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
+ status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+ if (!status)
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
break;
case I40E_NVMUPD_WRITE_LCB:
- status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
- if (!status)
+ status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+ if (status) {
+ *perrno = hw->aq.asq_last_status ?
+ i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status) :
+ -EIO;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ } else {
hw->aq.nvm_release_on_done = true;
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
break;
case I40E_NVMUPD_CSUM_CON:
status = i40e_update_nvm_checksum(hw);
if (status) {
- *errno = hw->aq.asq_last_status ?
+ *perrno = hw->aq.asq_last_status ?
i40e_aq_rc_to_posix(status,
hw->aq.asq_last_status) :
-EIO;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ } else {
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
}
break;
case I40E_NVMUPD_CSUM_LCB:
status = i40e_update_nvm_checksum(hw);
- if (status)
- *errno = hw->aq.asq_last_status ?
+ if (status) {
+ *perrno = hw->aq.asq_last_status ?
i40e_aq_rc_to_posix(status,
hw->aq.asq_last_status) :
-EIO;
- else
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ } else {
hw->aq.nvm_release_on_done = true;
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
break;
default:
@@ -898,7 +989,7 @@ retry:
"NVMUPD: bad cmd %s in writing state.\n",
i40e_nvm_update_state_str[upd_cmd]);
status = I40E_NOT_SUPPORTED;
- *errno = -ESRCH;
+ *perrno = -ESRCH;
break;
}
@@ -941,21 +1032,22 @@ retry:
* i40e_nvmupd_validate_command - Validate given command
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
*
* Return one of the valid command types or I40E_NVMUPD_INVALID
**/
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- int *errno)
+ int *perrno)
{
enum i40e_nvmupd_cmd upd_cmd;
- u8 transaction;
+ u8 module, transaction;
/* anything that doesn't match a recognized case is an error */
upd_cmd = I40E_NVMUPD_INVALID;
transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
/* limits on data size */
if ((cmd->data_size < 1) ||
@@ -963,7 +1055,7 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_NVM,
"i40e_nvmupd_validate_command data_size %d\n",
cmd->data_size);
- *errno = -EFAULT;
+ *perrno = -EFAULT;
return I40E_NVMUPD_INVALID;
}
@@ -982,6 +1074,12 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
case I40E_NVM_SA:
upd_cmd = I40E_NVMUPD_READ_SA;
break;
+ case I40E_NVM_EXEC:
+ if (module == 0xf)
+ upd_cmd = I40E_NVMUPD_STATUS;
+ else if (module == 0)
+ upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
+ break;
}
break;
@@ -1011,21 +1109,155 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
case (I40E_NVM_CSUM|I40E_NVM_LCB):
upd_cmd = I40E_NVMUPD_CSUM_LCB;
break;
+ case I40E_NVM_EXEC:
+ if (module == 0)
+ upd_cmd = I40E_NVMUPD_EXEC_AQ;
+ break;
}
break;
}
- i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n",
- i40e_nvm_update_state_str[upd_cmd],
- hw->nvmupd_state,
- hw->aq.nvm_release_on_done);
- if (upd_cmd == I40E_NVMUPD_INVALID) {
- *errno = -EFAULT;
+ return upd_cmd;
+}
+
+/**
+ * i40e_nvmupd_exec_aq - Run an AQ command
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ struct i40e_asq_cmd_details cmd_details;
+ i40e_status status;
+ struct i40e_aq_desc *aq_desc;
+ u32 buff_size = 0;
+ u8 *buff = NULL;
+ u32 aq_desc_len;
+ u32 aq_data_len;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ aq_desc_len = sizeof(struct i40e_aq_desc);
+ memset(&hw->nvm_wb_desc, 0, aq_desc_len);
+
+ /* get the aq descriptor */
+ if (cmd->data_size < aq_desc_len) {
i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_validate_command returns %d errno %d\n",
- upd_cmd, *errno);
+ "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
+ cmd->data_size, aq_desc_len);
+ *perrno = -EINVAL;
+ return I40E_ERR_PARAM;
}
- return upd_cmd;
+ aq_desc = (struct i40e_aq_desc *)bytes;
+
+ /* if data buffer needed, make sure it's ready */
+ aq_data_len = cmd->data_size - aq_desc_len;
+ buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen));
+ if (buff_size) {
+ if (!hw->nvm_buff.va) {
+ status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
+ hw->aq.asq_buf_size);
+ if (status)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
+ status);
+ }
+
+ if (hw->nvm_buff.va) {
+ buff = hw->nvm_buff.va;
+ memcpy(buff, &bytes[aq_desc_len], aq_data_len);
+ }
+ }
+
+ /* and away we go! */
+ status = i40e_asq_send_command(hw, aq_desc, buff,
+ buff_size, &cmd_details);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_exec_aq err %s aq_err %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ u32 aq_total_len;
+ u32 aq_desc_len;
+ int remainder;
+ u8 *buff;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+
+ aq_desc_len = sizeof(struct i40e_aq_desc);
+ aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);
+
+ /* check offset range */
+ if (cmd->offset > aq_total_len) {
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
+ __func__, cmd->offset, aq_total_len);
+ *perrno = -EINVAL;
+ return I40E_ERR_PARAM;
+ }
+
+ /* check copylength range */
+ if (cmd->data_size > (aq_total_len - cmd->offset)) {
+ int new_len = aq_total_len - cmd->offset;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
+ __func__, cmd->data_size, new_len);
+ cmd->data_size = new_len;
+ }
+
+ remainder = cmd->data_size;
+ if (cmd->offset < aq_desc_len) {
+ u32 len = aq_desc_len - cmd->offset;
+
+ len = min(len, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
+ __func__, cmd->offset, cmd->offset + len);
+
+ buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
+ memcpy(bytes, buff, len);
+
+ bytes += len;
+ remainder -= len;
+ buff = hw->nvm_buff.va;
+ } else {
+ buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
+ }
+
+ if (remainder > 0) {
+ int start_byte = buff - (u8 *)hw->nvm_buff.va;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
+ __func__, start_byte, start_byte + remainder);
+ memcpy(bytes, buff, remainder);
+ }
+
+ return 0;
}
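A standalone sketch of the copy-window logic above: a request at offset/length is satisfied first from the fixed-size descriptor, then from the data buffer, mirroring the two memcpy paths. The header size and all names here are illustrative.

/* Illustrative only: copy "len" bytes starting at "off" from a layout that
 * is a fixed 32-byte header followed by a payload, the same split the
 * descriptor/buffer copy above performs.
 */
#include <stdio.h>
#include <string.h>

#define HDR_LEN 32u	/* stand-in for sizeof(struct i40e_aq_desc) */

static void copy_window(const unsigned char *hdr, const unsigned char *payload,
			unsigned int payload_len, unsigned int off,
			unsigned int len, unsigned char *out)
{
	unsigned int total = HDR_LEN + payload_len;

	if (off > total)
		return;				/* offset out of range */
	if (len > total - off)
		len = total - off;		/* trim like the driver does */

	if (off < HDR_LEN) {			/* first take bytes from the header */
		unsigned int n = HDR_LEN - off;

		if (n > len)
			n = len;
		memcpy(out, hdr + off, n);
		out += n;
		off += n;
		len -= n;
	}
	if (len)				/* remainder comes from the payload */
		memcpy(out, payload + (off - HDR_LEN), len);
}

int main(void)
{
	unsigned char hdr[HDR_LEN], payload[64], out[96];

	memset(hdr, 0xAA, sizeof(hdr));
	memset(payload, 0xBB, sizeof(payload));
	copy_window(hdr, payload, sizeof(payload), 28, 10, out);
	printf("out[0..9]: %02x ... %02x\n", out[0], out[9]);	/* 0xaa ... 0xbb */
	return 0;
}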
/**
@@ -1033,14 +1265,15 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
* @bytes: pointer to the data buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
*
* cmd structure contains identifiers and data buffer
**/
static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *errno)
+ u8 *bytes, int *perrno)
{
+ struct i40e_asq_cmd_details cmd_details;
i40e_status status;
u8 module, transaction;
bool last;
@@ -1049,8 +1282,11 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
module = i40e_nvmupd_get_module(cmd->config);
last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
- bytes, last, NULL);
+ bytes, last, &cmd_details);
if (status) {
i40e_debug(hw, I40E_DEBUG_NVM,
"i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
@@ -1058,7 +1294,7 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_NVM,
"i40e_nvmupd_nvm_read status %d aq %d\n",
status, hw->aq.asq_last_status);
- *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
}
return status;
@@ -1068,23 +1304,28 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
* i40e_nvmupd_nvm_erase - Erase an NVM module
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
*
* module, offset, data_size and data are in cmd structure
**/
static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- int *errno)
+ int *perrno)
{
i40e_status status = 0;
+ struct i40e_asq_cmd_details cmd_details;
u8 module, transaction;
bool last;
transaction = i40e_nvmupd_get_transaction(cmd->config);
module = i40e_nvmupd_get_module(cmd->config);
last = (transaction & I40E_NVM_LCB);
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
- last, NULL);
+ last, &cmd_details);
if (status) {
i40e_debug(hw, I40E_DEBUG_NVM,
"i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
@@ -1092,7 +1333,7 @@ static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_NVM,
"i40e_nvmupd_nvm_erase status %d aq %d\n",
status, hw->aq.asq_last_status);
- *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
}
return status;
@@ -1103,15 +1344,16 @@ static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
* @bytes: pointer to the data buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
*
* module, offset, data_size and data are in cmd structure
**/
static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *errno)
+ u8 *bytes, int *perrno)
{
i40e_status status = 0;
+ struct i40e_asq_cmd_details cmd_details;
u8 module, transaction;
bool last;
@@ -1119,8 +1361,12 @@ static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
module = i40e_nvmupd_get_module(cmd->config);
last = (transaction & I40E_NVM_LCB);
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
status = i40e_aq_update_nvm(hw, module, cmd->offset,
- (u16)cmd->data_size, bytes, last, NULL);
+ (u16)cmd->data_size, bytes, last,
+ &cmd_details);
if (status) {
i40e_debug(hw, I40E_DEBUG_NVM,
"i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
@@ -1128,7 +1374,7 @@ static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_NVM,
"i40e_nvmupd_nvm_write status %d aq %d\n",
status, hw->aq.asq_last_status);
- *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
}
return status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index dcb72a8ee8e5..bb9d583e5416 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -58,8 +58,8 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
void i40e_idle_aq(struct i40e_hw *hw);
bool i40e_check_asq_alive(struct i40e_hw *hw);
i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
-char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
-char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err);
+const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err);
i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
bool pf_lut, u8 *lut, u16 lut_size);
@@ -258,7 +258,8 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw);
i40e_status i40e_pf_reset(struct i40e_hw *hw);
void i40e_clear_hw(struct i40e_hw *hw);
void i40e_clear_pxe_mode(struct i40e_hw *hw);
-bool i40e_get_link_status(struct i40e_hw *hw);
+i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
+i40e_status i40e_update_link_info(struct i40e_hw *hw);
i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
u32 *max_bw, u32 *min_bw, bool *min_valid,
@@ -321,4 +322,6 @@ i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
void *buff, u16 *ret_buff_size,
u8 *ret_next_table, u32 *ret_next_index,
struct i40e_asq_cmd_details *cmd_details);
+void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+ u16 vsi_seid);
#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 8c40d6ea15fd..565ca7c835bc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -618,9 +618,8 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
/* Attempt to register the clock before enabling the hardware. */
pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev);
- if (IS_ERR(pf->ptp_clock)) {
+ if (IS_ERR(pf->ptp_clock))
return PTR_ERR(pf->ptp_clock);
- }
/* clear the hwtstamp settings here during clock create, instead of
* during regular init, so that we can maintain settings across a
@@ -675,8 +674,8 @@ void i40e_ptp_init(struct i40e_pf *pf)
struct timespec64 ts;
u32 regval;
- dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__,
- netdev->name);
+ if (pf->hw.debug_mask & I40E_DEBUG_LAN)
+ dev_info(&pf->pdev->dev, "PHC enabled\n");
pf->flags |= I40E_FLAG_PTP;
/* Ensure the clocks are running. */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 738aca68f665..635b3ac17877 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -465,10 +465,11 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
+ pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
(I40E_DEBUG_FD & pf->hw.debug_mask))
dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
- rx_desc->wb.qword0.hi_dword.fd_id);
+ pf->fd_inv);
/* Check if the programming error is for ATR.
* If so, auto disable ATR and set a state for
@@ -601,27 +602,13 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
}
/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring: tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
- void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
- return le32_to_cpu(*(volatile __le32 *)head);
-}
-
-/**
* i40e_get_tx_pending - how many tx descriptors not processed
* @tx_ring: the ring of descriptors
*
* Since there is no access to the ring head register
* in XL710, we need to use our local copies
**/
-static u32 i40e_get_tx_pending(struct i40e_ring *ring)
+u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
u32 head, tail;
@@ -635,50 +622,6 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
return 0;
}
-/**
- * i40e_check_tx_hang - Is there a hang in the Tx queue
- * @tx_ring: the ring of descriptors
- **/
-static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
-{
- u32 tx_done = tx_ring->stats.packets;
- u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
- u32 tx_pending = i40e_get_tx_pending(tx_ring);
- struct i40e_pf *pf = tx_ring->vsi->back;
- bool ret = false;
-
- clear_check_for_tx_hang(tx_ring);
-
- /* Check for a hung queue, but be thorough. This verifies
- * that a transmit has been completed since the previous
- * check AND there is at least one packet pending. The
- * ARMED bit is set to indicate a potential hang. The
- * bit is cleared if a pause frame is received to remove
- * false hang detection due to PFC or 802.3x frames. By
- * requiring this to fail twice we avoid races with
- * PFC clearing the ARMED bit and conditions where we
- * run the check_tx_hang logic with a transmit completion
- * pending but without time to complete it yet.
- */
- if ((tx_done_old == tx_done) && tx_pending) {
- /* make sure it is true for two checks in a row */
- ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
- &tx_ring->state);
- } else if (tx_done_old == tx_done &&
- (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
- if (I40E_DEBUG_FLOW & pf->hw.debug_mask)
- dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d",
- tx_pending, tx_ring->queue_index);
- pf->tx_sluggish_count++;
- } else {
- /* update completed stats and disarm the hang check */
- tx_ring->tx_stats.tx_done_old = tx_done;
- clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
- }
-
- return ret;
-}
-
#define WB_STRIDE 0x3
/**
@@ -784,42 +727,21 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_ring->q_vector->tx.total_bytes += total_bytes;
tx_ring->q_vector->tx.total_packets += total_packets;
- /* check to see if there are any non-cache aligned descriptors
- * waiting to be written back, and kick the hardware to force
- * them to be written back in case of napi polling
- */
- if (budget &&
- !((i & WB_STRIDE) == WB_STRIDE) &&
- !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
- (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
- tx_ring->arm_wb = true;
- else
- tx_ring->arm_wb = false;
-
- if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
- /* schedule immediate reset if we believe we hung */
- dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
- " VSI <%d>\n"
- " Tx Queue <%d>\n"
- " next_to_use <%x>\n"
- " next_to_clean <%x>\n",
- tx_ring->vsi->seid,
- tx_ring->queue_index,
- tx_ring->next_to_use, i);
-
- netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
-
- dev_info(tx_ring->dev,
- "tx hang detected on queue %d, reset requested\n",
- tx_ring->queue_index);
-
- /* do not fire the reset immediately, wait for the stack to
- * decide we are truly stuck, also prevents every queue from
- * simultaneously requesting a reset
+ if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
+ unsigned int j = 0;
+
+ /* check to see if there are < 4 descriptors
+ * waiting to be written back, then kick the hardware to force
+ * them to be written back in case we stay in NAPI.
+		 * In this mode on X722 we do not enable interrupts.
*/
+ j = i40e_get_tx_pending(tx_ring);
- /* the adapter is about to reset, no point in enabling polling */
- budget = 1;
+ if (budget &&
+ ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
+ !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+ (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
+ tx_ring->arm_wb = true;
}
netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
@@ -851,7 +773,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
* @q_vector: the vector on which to force writeback
*
**/
-static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
+void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
u16 flags = q_vector->tx.ring[0].flags;
@@ -893,6 +815,8 @@ static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
* i40e_set_new_dynamic_itr - Find new ITR level
* @rc: structure containing ring performance data
*
+ * Returns true if ITR changed, false if not
+ *
* Stores a new ITR value based on packets and byte counts during
* the last interrupt. The advantage of per interrupt computation
* is faster updates and more accurate ITR for the current traffic
@@ -901,21 +825,32 @@ static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
* testing data as well as attempting to minimize response time
* while increasing bulk throughput.
**/
-static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
+static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
enum i40e_latency_range new_latency_range = rc->latency_range;
+ struct i40e_q_vector *qv = rc->ring->q_vector;
u32 new_itr = rc->itr;
int bytes_per_int;
+ int usecs;
if (rc->total_packets == 0 || !rc->itr)
- return;
+ return false;
/* simple throttlerate management
- * 0-10MB/s lowest (100000 ints/s)
+ * 0-10MB/s lowest (50000 ints/s)
* 10-20MB/s low (20000 ints/s)
- * 20-1249MB/s bulk (8000 ints/s)
+ * 20-1249MB/s bulk (18000 ints/s)
+ * > 40000 Rx packets per second (8000 ints/s)
+ *
+	 * The math works out because the divisor is in 10^(-6), which
+	 * turns the bytes/us input value into MB/s values. Be sure to work
+	 * in usecs, as the ITR register values are written in 2 usec
+	 * increments, and to use the smoothed values that the countdown
+	 * timer gives us.
*/
- bytes_per_int = rc->total_bytes / rc->itr;
+ usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
+ bytes_per_int = rc->total_bytes / usecs;
+
switch (new_latency_range) {
case I40E_LOWEST_LATENCY:
if (bytes_per_int > 10)
@@ -928,35 +863,52 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
new_latency_range = I40E_LOWEST_LATENCY;
break;
case I40E_BULK_LATENCY:
- if (bytes_per_int <= 20)
- new_latency_range = I40E_LOW_LATENCY;
- break;
+ case I40E_ULTRA_LATENCY:
default:
if (bytes_per_int <= 20)
new_latency_range = I40E_LOW_LATENCY;
break;
}
+
+ /* this is to adjust RX more aggressively when streaming small
+ * packets. The value of 40000 was picked as it is just beyond
+ * what the hardware can receive per second if in low latency
+ * mode.
+ */
+#define RX_ULTRA_PACKET_RATE 40000
+
+ if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
+ (&qv->rx == rc))
+ new_latency_range = I40E_ULTRA_LATENCY;
+
rc->latency_range = new_latency_range;
switch (new_latency_range) {
case I40E_LOWEST_LATENCY:
- new_itr = I40E_ITR_100K;
+ new_itr = I40E_ITR_50K;
break;
case I40E_LOW_LATENCY:
new_itr = I40E_ITR_20K;
break;
case I40E_BULK_LATENCY:
+ new_itr = I40E_ITR_18K;
+ break;
+ case I40E_ULTRA_LATENCY:
new_itr = I40E_ITR_8K;
break;
default:
break;
}
- if (new_itr != rc->itr)
- rc->itr = new_itr;
-
rc->total_bytes = 0;
rc->total_packets = 0;
+
+ if (new_itr != rc->itr) {
+ rc->itr = new_itr;
+ return true;
+ }
+
+ return false;
}
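A small worked example of the throughput math in the comment above: the ITR register is in 2 usec units, the window is scaled by the countdown start, and bytes per usec reads directly as MB/s. ITR_COUNTDOWN_START is assumed to be 100 here purely for illustration.

/* Sketch of the bytes-per-interrupt-window calculation used above.
 * ITR_COUNTDOWN_START is an assumed value for this example.
 */
#include <stdio.h>

#define ITR_COUNTDOWN_START 100		/* assumed, defined in the driver headers */

int main(void)
{
	unsigned int itr_reg = 0x0019;		/* I40E_ITR_20K, in 2 usec units */
	unsigned int total_bytes = 3000000;	/* bytes seen since the last update */
	unsigned int usecs = (itr_reg << 1) * ITR_COUNTDOWN_START;
	unsigned int bytes_per_int = total_bytes / usecs;

	/* bytes/usec == MB/s, so the 0-10 / 10-20 / 20-1249 MB/s buckets in
	 * the comment compare directly against this value.
	 */
	printf("window = %u usec, throughput ~ %u MB/s\n", usecs, bytes_per_int);
	return 0;
}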
/**
@@ -1002,6 +954,8 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
if (!dev)
return -ENOMEM;
+ /* warn if we are about to overwrite the pointer */
+ WARN_ON(tx_ring->tx_bi);
bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
if (!tx_ring->tx_bi)
@@ -1162,6 +1116,8 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
struct device *dev = rx_ring->dev;
int bi_size;
+ /* warn if we are about to overwrite the pointer */
+ WARN_ON(rx_ring->rx_bi);
bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
if (!rx_ring->rx_bi)
@@ -1342,16 +1298,11 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring,
struct sk_buff *skb, u16 vlan_tag)
{
struct i40e_q_vector *q_vector = rx_ring->q_vector;
- struct i40e_vsi *vsi = rx_ring->vsi;
- u64 flags = vsi->back->flags;
if (vlan_tag & VLAN_VID_MASK)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
- if (flags & I40E_FLAG_IN_NETPOLL)
- netif_rx(skb);
- else
- napi_gro_receive(&q_vector->napi, skb);
+ napi_gro_receive(&q_vector->napi, skb);
}
/**
@@ -1518,7 +1469,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
- const int current_node = numa_node_id();
+ const int current_node = numa_mem_id();
struct i40e_vsi *vsi = rx_ring->vsi;
u16 i = rx_ring->next_to_clean;
union i40e_rx_desc *rx_desc;
@@ -1596,6 +1547,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
cleaned_count++;
if (rx_hbo || rx_sph) {
int len;
+
if (rx_hbo)
len = I40E_RX_HDR_SIZE;
else
@@ -1781,9 +1733,6 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
/* ERR_MASK will only have valid bits if EOP set */
if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
- /* TODO: shouldn't we increment a counter indicating the
- * drop?
- */
continue;
}
@@ -1828,6 +1777,21 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
return total_rx_packets;
}
+static u32 i40e_buildreg_itr(const int type, const u16 itr)
+{
+ u32 val;
+
+ val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+ (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
+
+ return val;
+}
+
+/* a small macro to shorten up some long lines */
+#define INTREG I40E_PFINT_DYN_CTLN
+
/**
* i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
* @vsi: the VSI we care about
@@ -1838,56 +1802,69 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
struct i40e_q_vector *q_vector)
{
struct i40e_hw *hw = &vsi->back->hw;
- u16 old_itr;
+ bool rx = false, tx = false;
+ u32 rxval, txval;
int vector;
- u32 val;
vector = (q_vector->v_idx + vsi->base_vector);
+
+ /* avoid dynamic calculation if in countdown mode OR if
+ * all dynamic is disabled
+ */
+ rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
+
+ if (q_vector->itr_countdown > 0 ||
+ (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) &&
+ !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) {
+ goto enable_int;
+ }
+
if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
- old_itr = q_vector->rx.itr;
- i40e_set_new_dynamic_itr(&q_vector->rx);
- if (old_itr != q_vector->rx.itr) {
- val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
- (I40E_RX_ITR <<
- I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
- (q_vector->rx.itr <<
- I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
- } else {
- val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
- (I40E_ITR_NONE <<
- I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
- }
- if (!test_bit(__I40E_DOWN, &vsi->state))
- wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
- } else {
- i40e_irq_dynamic_enable(vsi,
- q_vector->v_idx + vsi->base_vector);
+ rx = i40e_set_new_dynamic_itr(&q_vector->rx);
+ rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
}
+
if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
- old_itr = q_vector->tx.itr;
- i40e_set_new_dynamic_itr(&q_vector->tx);
- if (old_itr != q_vector->tx.itr) {
- val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
- (I40E_TX_ITR <<
- I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
- (q_vector->tx.itr <<
- I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
- } else {
- val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
- (I40E_ITR_NONE <<
- I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
- }
- if (!test_bit(__I40E_DOWN, &vsi->state))
- wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->v_idx +
- vsi->base_vector - 1), val);
- } else {
- i40e_irq_dynamic_enable(vsi,
- q_vector->v_idx + vsi->base_vector);
+ tx = i40e_set_new_dynamic_itr(&q_vector->tx);
+ txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
+ }
+
+ if (rx || tx) {
+ /* get the higher of the two ITR adjustments and
+ * use the same value for both ITR registers
+ * when in adaptive mode (Rx and/or Tx)
+ */
+ u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
+
+ q_vector->tx.itr = q_vector->rx.itr = itr;
+ txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
+ tx = true;
+ rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
+ rx = true;
}
+
+ /* only need to enable the interrupt once, but need
+ * to possibly update both ITR values
+ */
+ if (rx) {
+ /* set the INTENA_MSK_MASK so that this first write
+ * won't actually enable the interrupt, instead just
+ * updating the ITR (it's bit 31 PF and VF)
+ */
+ rxval |= BIT(31);
+ /* don't check _DOWN because interrupt isn't being enabled */
+ wr32(hw, INTREG(vector - 1), rxval);
+ }
+
+enable_int:
+ if (!test_bit(__I40E_DOWN, &vsi->state))
+ wr32(hw, INTREG(vector - 1), txval);
+
+ if (q_vector->itr_countdown)
+ q_vector->itr_countdown--;
+ else
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
+
}
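As a sketch of the two-write sequence above: the first write carries the Rx ITR with bit 31 (the INTENA_MSK bit) set so the interval is updated without re-enabling the interrupt, and the second write carries the Tx ITR and actually re-enables the vector. The field positions below are assumptions for illustration only.

/* Illustrative register-value construction; bit layout is assumed, not
 * taken from the register spec.
 */
#include <stdio.h>

#define DYN_CTLN_INTENA		(1u << 0)	/* assumed bit positions */
#define DYN_CTLN_CLEARPBA	(1u << 1)
#define DYN_CTLN_INTENA_MSK	(1u << 31)	/* "update ITR only, do not enable" */

static unsigned int buildreg(unsigned int itr_idx, unsigned int itr)
{
	return DYN_CTLN_INTENA | DYN_CTLN_CLEARPBA |
	       (itr_idx << 3) | (itr << 5);	/* shifts are placeholders */
}

int main(void)
{
	unsigned int rxval = buildreg(0, 0x19) | DYN_CTLN_INTENA_MSK; /* ITR update only */
	unsigned int txval = buildreg(1, 0x19);			      /* enables the vector */

	printf("first write 0x%08x (masked), second write 0x%08x (enable)\n",
	       rxval, txval);
	return 0;
}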
/**
@@ -1908,7 +1885,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
bool clean_complete = true;
bool arm_wb = false;
int budget_per_ring;
- int cleaned;
+ int work_done = 0;
if (test_bit(__I40E_DOWN, &vsi->state)) {
napi_complete(napi);
@@ -1921,24 +1898,34 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
i40e_for_each_ring(ring, q_vector->tx) {
clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
arm_wb |= ring->arm_wb;
+ ring->arm_wb = false;
}
+ /* Handle case where we are called by netpoll with a budget of 0 */
+ if (budget <= 0)
+ goto tx_only;
+
/* We attempt to distribute budget to each Rx queue fairly, but don't
* allow the budget to go below 1 because that would exit polling early.
*/
budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
i40e_for_each_ring(ring, q_vector->rx) {
+ int cleaned;
+
if (ring_is_ps_enabled(ring))
cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
else
cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
+
+ work_done += cleaned;
/* if we didn't clean as many as budgeted, we must be done */
clean_complete &= (budget_per_ring != cleaned);
}
/* If work not completed, return budget and polling will return */
if (!clean_complete) {
+tx_only:
if (arm_wb)
i40e_force_wb(vsi, q_vector);
return budget;
@@ -1948,7 +1935,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
q_vector->arm_wb_state = false;
/* Work is done so exit the polling mode and re-enable the interrupt */
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
i40e_update_enable_itr(vsi, q_vector);
} else { /* Legacy mode */
@@ -2156,6 +2143,7 @@ static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
/* else if it is a SW VLAN, check the next protocol and store the tag */
} else if (protocol == htons(ETH_P_8021Q)) {
struct vlan_hdr *vhdr, _vhdr;
+
vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
if (!vhdr)
return -EINVAL;
@@ -2199,6 +2187,7 @@ out:
* @tx_ring: ptr to the ring to send
* @skb: ptr to the skb we're sending
* @hdr_len: ptr to the size of the packet header
+ * @cd_type_cmd_tso_mss: ptr to u64 object
* @cd_tunneling: ptr to context descriptor bits
*
* Returns 0 if no TSO can happen, 1 if tso is going, or error
@@ -2258,6 +2247,7 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
* @tx_ring: ptr to the ring to send
* @skb: ptr to the skb we're sending
* @tx_flags: the collected send information
+ * @cd_type_cmd_tso_mss: ptr to u64 object
*
* Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
**/
@@ -2300,6 +2290,7 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
* @tx_flags: pointer to Tx flags currently set
* @td_cmd: Tx descriptor command bits to set
* @td_offset: Tx descriptor header offsets to set
+ * @tx_ring: Tx descriptor ring
* @cd_tunneling: ptr to context desc bits
**/
static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
@@ -2324,6 +2315,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
break;
+ case IPPROTO_GRE:
+ l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING;
+ break;
default:
return;
}
@@ -2581,6 +2575,9 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
u32 td_tag = 0;
dma_addr_t dma;
u16 gso_segs;
+ u16 desc_count = 0;
+ bool tail_bump = true;
+ bool do_rs = false;
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
@@ -2621,6 +2618,8 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_desc++;
i++;
+ desc_count++;
+
if (i == tx_ring->count) {
tx_desc = I40E_TX_DESC(tx_ring, 0);
i = 0;
@@ -2640,6 +2639,8 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_desc++;
i++;
+ desc_count++;
+
if (i == tx_ring->count) {
tx_desc = I40E_TX_DESC(tx_ring, 0);
i = 0;
@@ -2654,34 +2655,6 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_bi = &tx_ring->tx_bi[i];
}
- /* Place RS bit on last descriptor of any packet that spans across the
- * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
- */
- if (((i & WB_STRIDE) != WB_STRIDE) &&
- (first <= &tx_ring->tx_bi[i]) &&
- (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
- tx_desc->cmd_type_offset_bsz =
- build_ctob(td_cmd, td_offset, size, td_tag) |
- cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
- I40E_TXD_QW1_CMD_SHIFT);
- } else {
- tx_desc->cmd_type_offset_bsz =
- build_ctob(td_cmd, td_offset, size, td_tag) |
- cpu_to_le64((u64)I40E_TXD_CMD <<
- I40E_TXD_QW1_CMD_SHIFT);
- }
-
- netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index),
- first->bytecount);
-
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
-
/* set next_to_watch value indicating a packet is present */
first->next_to_watch = tx_desc;
@@ -2691,15 +2664,72 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i;
+ netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index),
+ first->bytecount);
i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+ /* Algorithm to optimize tail and RS bit setting:
+ * if xmit_more is supported
+ * if xmit_more is true
+ * do not update tail and do not mark RS bit.
+ * if xmit_more is false and last xmit_more was false
+ * if every packet spanned less than 4 desc
+ * then set RS bit on 4th packet and update tail
+ * on every packet
+ * else
+ * update tail and set RS bit on every packet.
+ * if xmit_more is false and last_xmit_more was true
+ * update tail and set RS bit.
+ *
+ * Optimization: wmb to be issued only in case of tail update.
+ * Also optimize the Descriptor WB path for RS bit with the same
+ * algorithm.
+ *
+	 * Note: If there are fewer than 4 packets pending and interrupts
+	 * were disabled, the service task will trigger a force WB.
+ */
+ if (skb->xmit_more &&
+ !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index))) {
+ tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
+ tail_bump = false;
+ } else if (!skb->xmit_more &&
+ !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index)) &&
+ (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
+ (tx_ring->packet_stride < WB_STRIDE) &&
+ (desc_count < WB_STRIDE)) {
+ tx_ring->packet_stride++;
+ } else {
+ tx_ring->packet_stride = 0;
+ tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
+ do_rs = true;
+ }
+ if (do_rs)
+ tx_ring->packet_stride = 0;
+
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset, size, td_tag) |
+ cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
+ I40E_TX_DESC_CMD_EOP) <<
+ I40E_TXD_QW1_CMD_SHIFT);
+
/* notify HW of packet */
- if (!skb->xmit_more ||
- netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index)))
- writel(i, tx_ring->tail);
- else
+ if (!tail_bump)
prefetchw(tx_desc + 1);
+ if (tail_bump) {
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(i, tx_ring->tail);
+ }
+
return;
dma_error:
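A standalone sketch of the tail/RS decision described in the algorithm comment above: given xmit_more, the remembered last-xmit_more flag, and the stride counters, decide whether to bump the tail and whether to set RS. The queue state and the WB_STRIDE value are assumptions for illustration.

/* Models only the decision in the "Algorithm to optimize tail and RS bit
 * setting" comment; queue state is faked, WB_STRIDE is assumed 0x3.
 */
#include <stdio.h>
#include <stdbool.h>

#define WB_STRIDE 0x3

struct txq {
	bool last_xmit_more;
	unsigned int packet_stride;
};

static void decide(struct txq *q, bool xmit_more, bool q_stopped,
		   unsigned int desc_count, bool *tail_bump, bool *do_rs)
{
	*tail_bump = true;
	*do_rs = false;

	if (xmit_more && !q_stopped) {
		q->last_xmit_more = true;	/* defer tail update and RS */
		*tail_bump = false;
	} else if (!xmit_more && !q_stopped && !q->last_xmit_more &&
		   q->packet_stride < WB_STRIDE && desc_count < WB_STRIDE) {
		q->packet_stride++;		/* bump tail, RS only every few packets */
	} else {
		q->packet_stride = 0;		/* flush: bump tail and set RS */
		q->last_xmit_more = false;
		*do_rs = true;
	}
}

int main(void)
{
	struct txq q = { false, 0 };
	bool tail, rs;
	int i;

	/* small packets, no xmit_more: RS lands on the 4th packet */
	for (i = 0; i < 5; i++) {
		decide(&q, false, false, 2, &tail, &rs);
		printf("pkt %d: tail=%d rs=%d stride=%u\n", i, tail, rs, q.packet_stride);
	}
	return 0;
}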
@@ -2776,6 +2806,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
u8 hdr_len = 0;
int tsyn;
int tso;
+
if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
return NETDEV_TX_BUSY;
@@ -2808,10 +2839,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (tsyn)
tx_flags |= I40E_TX_FLAGS_TSYN;
- if (i40e_chk_linearize(skb, tx_flags))
+ if (i40e_chk_linearize(skb, tx_flags)) {
if (skb_linearize(skb))
goto out_drop;
-
+ tx_ring->tx_stats.tx_linearize++;
+ }
skb_tx_timestamp(skb);
/* always enable CRC insertion offload */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index f1385a1989fa..6779fb771d6a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -32,11 +32,14 @@
#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
#define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */
#define I40E_ITR_100K 0x0005
+#define I40E_ITR_50K 0x000A
#define I40E_ITR_20K 0x0019
+#define I40E_ITR_18K 0x001B
#define I40E_ITR_8K 0x003E
#define I40E_ITR_4K 0x007A
-#define I40E_ITR_RX_DEF I40E_ITR_8K
-#define I40E_ITR_TX_DEF I40E_ITR_4K
+#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */
+#define I40E_ITR_RX_DEF I40E_ITR_20K
+#define I40E_ITR_TX_DEF I40E_ITR_20K
#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */
#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */
@@ -44,6 +47,15 @@
#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
+/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
+ * the value of the rate limit is non-zero
+ */
+#define INTRL_ENA BIT(6)
+#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
+#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
+#define I40E_INTRL_8K 125 /* 8000 ints/sec */
+#define I40E_INTRL_62K 16 /* 62500 ints/sec */
+#define I40E_INTRL_83K 12 /* 83333 ints/sec */
#define I40E_QUEUE_END_OF_LIST 0x7FF
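A quick standalone round-trip of the INTRL encoding defined above: the register holds the interval in 4 usec units with bit 6 as the enable flag, so the conversion loses the low two bits of the usec value.

/* Round-trip the INTRL macros; values mirror the defines in this header. */
#include <stdio.h>

#define INTRL_ENA			(1u << 6)
#define INTRL_REG_TO_USEC(intrl)	(((intrl) & ~INTRL_ENA) << 2)
#define INTRL_USEC_TO_REG(set)		((set) ? (((set) >> 2) | INTRL_ENA) : 0)

int main(void)
{
	unsigned int usec = 125;			/* ~8000 ints/sec */
	unsigned int reg  = INTRL_USEC_TO_REG(usec);	/* (125 >> 2) | 0x40 = 0x5f */

	printf("usec=%u -> reg=0x%02x -> usec=%u\n",
	       usec, reg, INTRL_REG_TO_USEC(reg));	/* decodes to 124: 4 usec granularity */
	return 0;
}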
@@ -79,12 +91,12 @@ enum i40e_dyn_idx_t {
BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
- BIT(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
- BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
- BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
- BIT(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
- BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
- BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
#define i40e_pf_get_default_rss_hena(pf) \
(((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
@@ -165,6 +177,7 @@ struct i40e_tx_buffer {
};
unsigned int bytecount;
unsigned short gso_segs;
+
DEFINE_DMA_UNMAP_ADDR(dma);
DEFINE_DMA_UNMAP_LEN(len);
u32 tx_flags;
@@ -188,6 +201,7 @@ struct i40e_tx_queue_stats {
u64 restart_queue;
u64 tx_busy;
u64 tx_done_old;
+ u64 tx_linearize;
};
struct i40e_rx_queue_stats {
@@ -199,8 +213,6 @@ struct i40e_rx_queue_stats {
enum i40e_ring_state_t {
__I40E_TX_FDIR_INIT_DONE,
__I40E_TX_XPS_INIT_DONE,
- __I40E_TX_DETECT_HANG,
- __I40E_HANG_CHECK_ARMED,
__I40E_RX_PS_ENABLED,
__I40E_RX_16BYTE_DESC_ENABLED,
};
@@ -211,12 +223,6 @@ enum i40e_ring_state_t {
set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
#define clear_ring_ps_enabled(ring) \
clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define check_for_tx_hang(ring) \
- test_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
-#define set_check_for_tx_hang(ring) \
- set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
-#define clear_check_for_tx_hang(ring) \
- clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
#define ring_is_16byte_desc_enabled(ring) \
test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
#define set_ring_16byte_desc_enabled(ring) \
@@ -264,10 +270,12 @@ struct i40e_ring {
bool ring_active; /* is ring online or not */
bool arm_wb; /* do something to arm write back */
+ u8 packet_stride;
u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
#define I40E_TXR_FLAGS_OUTER_UDP_CSUM BIT(1)
+#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2)
/* stats structs */
struct i40e_queue_stats stats;
@@ -290,6 +298,7 @@ enum i40e_latency_range {
I40E_LOWEST_LATENCY = 0,
I40E_LOW_LATENCY = 1,
I40E_BULK_LATENCY = 2,
+ I40E_ULTRA_LATENCY = 3,
};
struct i40e_ring_container {
@@ -326,4 +335,20 @@ int i40e_xmit_descriptor_count(struct sk_buff *skb, struct i40e_ring *tx_ring);
int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
struct i40e_ring *tx_ring, u32 *flags);
#endif
+void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
+u32 i40e_get_tx_pending(struct i40e_ring *ring);
+
+/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring: tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+ void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+ return le32_to_cpu(*(volatile __le32 *)head);
+}
#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 4842239ee777..dd2da356d9a1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -33,29 +33,7 @@
#include "i40e_adminq.h"
#include "i40e_hmc.h"
#include "i40e_lan_hmc.h"
-
-/* Device IDs */
-#define I40E_DEV_ID_SFP_XL710 0x1572
-#define I40E_DEV_ID_QEMU 0x1574
-#define I40E_DEV_ID_KX_A 0x157F
-#define I40E_DEV_ID_KX_B 0x1580
-#define I40E_DEV_ID_KX_C 0x1581
-#define I40E_DEV_ID_QSFP_A 0x1583
-#define I40E_DEV_ID_QSFP_B 0x1584
-#define I40E_DEV_ID_QSFP_C 0x1585
-#define I40E_DEV_ID_10G_BASE_T 0x1586
-#define I40E_DEV_ID_20G_KR2 0x1587
-#define I40E_DEV_ID_VF 0x154C
-#define I40E_DEV_ID_VF_HV 0x1571
-#define I40E_DEV_ID_SFP_X722 0x37D0
-#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
-#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
-#define I40E_DEV_ID_X722_VF 0x37CD
-#define I40E_DEV_ID_X722_VF_HV 0x37D9
-
-#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
- (d) == I40E_DEV_ID_QSFP_B || \
- (d) == I40E_DEV_ID_QSFP_C)
+#include "i40e_devids.h"
/* I40E_MASK is a macro used on 32 bit registers */
#define I40E_MASK(mask, shift) (mask << shift)
@@ -158,14 +136,14 @@ enum i40e_set_fc_aq_failures {
};
enum i40e_vsi_type {
- I40E_VSI_MAIN = 0,
- I40E_VSI_VMDQ1,
- I40E_VSI_VMDQ2,
- I40E_VSI_CTRL,
- I40E_VSI_FCOE,
- I40E_VSI_MIRROR,
- I40E_VSI_SRIOV,
- I40E_VSI_FDIR,
+ I40E_VSI_MAIN = 0,
+ I40E_VSI_VMDQ1 = 1,
+ I40E_VSI_VMDQ2 = 2,
+ I40E_VSI_CTRL = 3,
+ I40E_VSI_FCOE = 4,
+ I40E_VSI_MIRROR = 5,
+ I40E_VSI_SRIOV = 6,
+ I40E_VSI_FDIR = 7,
I40E_VSI_TYPE_UNKNOWN
};
@@ -189,16 +167,65 @@ struct i40e_link_status {
bool crc_enable;
u8 pacing;
u8 requested_speeds;
+ u8 module_type[3];
+ /* 1st byte: module identifier */
+#define I40E_MODULE_TYPE_SFP 0x03
+#define I40E_MODULE_TYPE_QSFP 0x0D
+ /* 2nd byte: ethernet compliance codes for 10/40G */
+#define I40E_MODULE_TYPE_40G_ACTIVE 0x01
+#define I40E_MODULE_TYPE_40G_LR4 0x02
+#define I40E_MODULE_TYPE_40G_SR4 0x04
+#define I40E_MODULE_TYPE_40G_CR4 0x08
+#define I40E_MODULE_TYPE_10G_BASE_SR 0x10
+#define I40E_MODULE_TYPE_10G_BASE_LR 0x20
+#define I40E_MODULE_TYPE_10G_BASE_LRM 0x40
+#define I40E_MODULE_TYPE_10G_BASE_ER 0x80
+ /* 3rd byte: ethernet compliance codes for 1G */
+#define I40E_MODULE_TYPE_1000BASE_SX 0x01
+#define I40E_MODULE_TYPE_1000BASE_LX 0x02
+#define I40E_MODULE_TYPE_1000BASE_CX 0x04
+#define I40E_MODULE_TYPE_1000BASE_T 0x08
+};
+
+enum i40e_aq_capabilities_phy_type {
+ I40E_CAP_PHY_TYPE_SGMII = BIT(I40E_PHY_TYPE_SGMII),
+ I40E_CAP_PHY_TYPE_1000BASE_KX = BIT(I40E_PHY_TYPE_1000BASE_KX),
+ I40E_CAP_PHY_TYPE_10GBASE_KX4 = BIT(I40E_PHY_TYPE_10GBASE_KX4),
+ I40E_CAP_PHY_TYPE_10GBASE_KR = BIT(I40E_PHY_TYPE_10GBASE_KR),
+ I40E_CAP_PHY_TYPE_40GBASE_KR4 = BIT(I40E_PHY_TYPE_40GBASE_KR4),
+ I40E_CAP_PHY_TYPE_XAUI = BIT(I40E_PHY_TYPE_XAUI),
+ I40E_CAP_PHY_TYPE_XFI = BIT(I40E_PHY_TYPE_XFI),
+ I40E_CAP_PHY_TYPE_SFI = BIT(I40E_PHY_TYPE_SFI),
+ I40E_CAP_PHY_TYPE_XLAUI = BIT(I40E_PHY_TYPE_XLAUI),
+ I40E_CAP_PHY_TYPE_XLPPI = BIT(I40E_PHY_TYPE_XLPPI),
+ I40E_CAP_PHY_TYPE_40GBASE_CR4_CU = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU),
+ I40E_CAP_PHY_TYPE_10GBASE_CR1_CU = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU),
+ I40E_CAP_PHY_TYPE_10GBASE_AOC = BIT(I40E_PHY_TYPE_10GBASE_AOC),
+ I40E_CAP_PHY_TYPE_40GBASE_AOC = BIT(I40E_PHY_TYPE_40GBASE_AOC),
+ I40E_CAP_PHY_TYPE_100BASE_TX = BIT(I40E_PHY_TYPE_100BASE_TX),
+ I40E_CAP_PHY_TYPE_1000BASE_T = BIT(I40E_PHY_TYPE_1000BASE_T),
+ I40E_CAP_PHY_TYPE_10GBASE_T = BIT(I40E_PHY_TYPE_10GBASE_T),
+ I40E_CAP_PHY_TYPE_10GBASE_SR = BIT(I40E_PHY_TYPE_10GBASE_SR),
+ I40E_CAP_PHY_TYPE_10GBASE_LR = BIT(I40E_PHY_TYPE_10GBASE_LR),
+ I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU),
+ I40E_CAP_PHY_TYPE_10GBASE_CR1 = BIT(I40E_PHY_TYPE_10GBASE_CR1),
+ I40E_CAP_PHY_TYPE_40GBASE_CR4 = BIT(I40E_PHY_TYPE_40GBASE_CR4),
+ I40E_CAP_PHY_TYPE_40GBASE_SR4 = BIT(I40E_PHY_TYPE_40GBASE_SR4),
+ I40E_CAP_PHY_TYPE_40GBASE_LR4 = BIT(I40E_PHY_TYPE_40GBASE_LR4),
+ I40E_CAP_PHY_TYPE_1000BASE_SX = BIT(I40E_PHY_TYPE_1000BASE_SX),
+ I40E_CAP_PHY_TYPE_1000BASE_LX = BIT(I40E_PHY_TYPE_1000BASE_LX),
+ I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL =
+ BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL),
+ I40E_CAP_PHY_TYPE_20GBASE_KR2 = BIT(I40E_PHY_TYPE_20GBASE_KR2)
};
struct i40e_phy_info {
struct i40e_link_status link_info;
struct i40e_link_status link_info_old;
- u32 autoneg_advertised;
- u32 phy_id;
- u32 module_type;
bool get_link_info;
enum i40e_media_type media_type;
+ /* all the phy types the NVM is capable of */
+ enum i40e_aq_capabilities_phy_type phy_types;
};
#define I40E_HW_CAP_MAX_GPIO 30
@@ -287,6 +314,7 @@ struct i40e_nvm_info {
bool blank_nvm_mode; /* is NVM empty (no FW present)*/
u16 version; /* NVM package version */
u32 eetrack; /* NVM data version */
+ u32 oem_ver; /* OEM version info */
};
/* definitions used in NVM update support */
@@ -305,12 +333,17 @@ enum i40e_nvmupd_cmd {
I40E_NVMUPD_CSUM_CON,
I40E_NVMUPD_CSUM_SA,
I40E_NVMUPD_CSUM_LCB,
+ I40E_NVMUPD_STATUS,
+ I40E_NVMUPD_EXEC_AQ,
+ I40E_NVMUPD_GET_AQ_RESULT,
};
enum i40e_nvmupd_state {
I40E_NVMUPD_STATE_INIT,
I40E_NVMUPD_STATE_READING,
- I40E_NVMUPD_STATE_WRITING
+ I40E_NVMUPD_STATE_WRITING,
+ I40E_NVMUPD_STATE_INIT_WAIT,
+ I40E_NVMUPD_STATE_WRITE_WAIT,
};
/* nvm_access definition and its masks/shifts need to be accessible to
@@ -329,6 +362,7 @@ enum i40e_nvmupd_state {
#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
#define I40E_NVM_ERA 0x4
#define I40E_NVM_CSUM 0x8
+#define I40E_NVM_EXEC 0xf
#define I40E_NVM_ADAPT_SHIFT 16
#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT)
@@ -409,6 +443,8 @@ struct i40e_fc_info {
#define I40E_APP_PROTOID_FIP 0x8914
#define I40E_APP_SEL_ETHTYPE 0x1
#define I40E_APP_SEL_TCPIP 0x2
+#define I40E_CEE_APP_SEL_ETHTYPE 0x0
+#define I40E_CEE_APP_SEL_TCPIP 0x1
/* CEE or IEEE 802.1Qaz ETS Configuration data */
struct i40e_dcb_ets_config {
@@ -439,6 +475,8 @@ struct i40e_dcbx_config {
u8 dcbx_mode;
#define I40E_DCBX_MODE_CEE 0x1
#define I40E_DCBX_MODE_IEEE 0x2
+ u8 app_mode;
+#define I40E_DCBX_APPS_NON_WILLING 0x1
u32 numapps;
u32 tlv_status; /* CEE mode TLV status */
struct i40e_dcb_ets_config etscfg;
@@ -492,6 +530,8 @@ struct i40e_hw {
/* state of nvm update process */
enum i40e_nvmupd_state nvmupd_state;
+ struct i40e_aq_desc nvm_wb_desc;
+ struct i40e_virt_mem nvm_buff;
/* HMC info */
struct i40e_hmc_info hmc; /* HMC info struct */
@@ -500,8 +540,12 @@ struct i40e_hw {
u16 dcbx_status;
/* DCBX info */
- struct i40e_dcbx_config local_dcbx_config;
- struct i40e_dcbx_config remote_dcbx_config;
+ struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */
+ struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
+ struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
+
+#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
+ u64 flags;
/* debug mask */
u32 debug_mask;
@@ -1024,8 +1068,8 @@ enum i40e_filter_program_desc_fd_status {
};
#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
-#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \
- BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
@@ -1193,6 +1237,8 @@ struct i40e_hw_port_stats {
#define I40E_SR_EMP_MODULE_PTR 0x0F
#define I40E_SR_PBA_FLAGS 0x15
#define I40E_SR_PBA_BLOCK_PTR 0x16
+#define I40E_SR_BOOT_CONFIG_PTR 0x17
+#define I40E_NVM_OEM_VER_OFF 0x83
#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18
#define I40E_SR_NVM_WAKE_ON_LAN 0x19
#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
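[Editorial sketch] The new module_type[3] field in i40e_link_status carries the module identifier byte plus two Ethernet compliance-code bitmasks, per the defines above. A hedged sketch of how those bytes could be decoded; the constants are copied from the defines, while the helper name is invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define MODULE_TYPE_SFP          0x03
#define MODULE_TYPE_QSFP         0x0D
#define MODULE_TYPE_40G_SR4      0x04
#define MODULE_TYPE_10G_BASE_SR  0x10
#define MODULE_TYPE_10G_BASE_LR  0x20

/* Illustrative decode of the three module_type bytes. */
static void print_module_type(const uint8_t module_type[3])
{
	if (module_type[0] == MODULE_TYPE_SFP)
		printf("SFP/SFP+ module");
	else if (module_type[0] == MODULE_TYPE_QSFP)
		printf("QSFP+ module");
	else
		printf("unknown module id 0x%02x", module_type[0]);

	/* byte 1 holds the 10G/40G compliance bits */
	if (module_type[1] & MODULE_TYPE_10G_BASE_SR)
		printf(", 10GBASE-SR");
	if (module_type[1] & MODULE_TYPE_10G_BASE_LR)
		printf(", 10GBASE-LR");
	if (module_type[1] & MODULE_TYPE_40G_SR4)
		printf(", 40GBASE-SR4");
	printf("\n");
}

int main(void)
{
	uint8_t sfp_sr[3] = { MODULE_TYPE_SFP, MODULE_TYPE_10G_BASE_SR, 0 };

	print_module_type(sfp_sr);
	return 0;
}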
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index 0f8d4156f8b1..ae879826084b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -81,7 +81,6 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_FCOE = 16,
I40E_VIRTCHNL_OP_EVENT = 17,
- I40E_VIRTCHNL_OP_CONFIG_RSS = 18,
};
/* Virtual channel message descriptor. This overlays the admin queue
@@ -151,6 +150,7 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
+#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index d99c116032f3..44462b40f2d7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -536,6 +536,7 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
}
if (type == I40E_VSI_SRIOV) {
u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
vf->lan_vsi_idx = vsi->idx;
vf->lan_vsi_id = vsi->id;
/* If the port VLAN has been configured and then the
@@ -546,6 +547,8 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
*/
if (vf->port_vlan_id)
i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
+
+ spin_lock_bh(&vsi->mac_filter_list_lock);
f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
vf->port_vlan_id ? vf->port_vlan_id : -1,
true, false);
@@ -558,10 +561,11 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
if (!f)
dev_info(&pf->pdev->dev,
"Could not allocate VF broadcast filter\n");
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
}
/* program mac filter */
- ret = i40e_sync_vsi_filters(vsi);
+ ret = i40e_sync_vsi_filters(vsi, false);
if (ret)
dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
@@ -605,6 +609,7 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
/* map PF queues to VF queues */
for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) {
u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j);
+
reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
total_queue_pairs++;
@@ -701,6 +706,7 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
*/
vf->num_queue_pairs = 0;
vf->vf_states = 0;
+ clear_bit(I40E_VF_STAT_INIT, &vf->vf_states);
}
/**
@@ -839,11 +845,11 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
complete_reset:
/* reallocate VF resources to reset the VSI state */
i40e_free_vf_res(vf);
- i40e_alloc_vf_res(vf);
- i40e_enable_vf_mappings(vf);
- set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
- clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
-
+ if (!i40e_alloc_vf_res(vf)) {
+ i40e_enable_vf_mappings(vf);
+ set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+ clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
+ }
/* tell the VF the reset is done */
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
i40e_flush(hw);
@@ -872,6 +878,11 @@ void i40e_free_vfs(struct i40e_pf *pf)
i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
false);
+ for (i = 0; i < pf->num_alloc_vfs; i++)
+ if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
+ i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
+ false);
+
/* Disable IOV before freeing resources. This lets any VF drivers
* running in the host get themselves cleaned up before we yank
* the carpet out from underneath their feet.
@@ -933,6 +944,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
if (ret) {
+ pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
pf->num_alloc_vfs = 0;
goto err_iov;
}
@@ -957,8 +969,6 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
/* VF resources get allocated during reset */
i40e_reset_vf(&vfs[i], false);
- /* enable VF vplan_qtable mappings */
- i40e_enable_vf_mappings(&vfs[i]);
}
pf->num_alloc_vfs = num_alloc_vfs;
@@ -986,24 +996,26 @@ static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
int pre_existing_vfs = pci_num_vf(pdev);
int err = 0;
- if (pf->state & __I40E_TESTING) {
+ if (test_bit(__I40E_TESTING, &pf->state)) {
dev_warn(&pdev->dev,
"Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
err = -EPERM;
goto err_out;
}
- dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
if (pre_existing_vfs && pre_existing_vfs != num_vfs)
i40e_free_vfs(pf);
else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
goto out;
if (num_vfs > pf->num_req_vfs) {
+ dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
+ num_vfs, pf->num_req_vfs);
err = -EPERM;
goto err_out;
}
+ dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
err = i40e_alloc_vfs(pf, num_vfs);
if (err) {
dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
@@ -1094,6 +1106,8 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
}
} else {
vf->num_valid_msgs++;
+ /* reset the invalid counter if a valid message is received */
+ vf->num_invalid_msgs = 0;
}
aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
@@ -1195,16 +1209,22 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
} else {
vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
}
+
+ if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING)
+ vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+
vfres->num_vsis = num_vsis;
vfres->num_queue_pairs = vf->num_queue_pairs;
vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
if (vf->lan_vsi_idx) {
vfres->vsi_res[i].vsi_id = vf->lan_vsi_id;
vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
- vfres->vsi_res[i].num_queue_pairs =
- pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
- memcpy(vfres->vsi_res[i].default_mac_addr,
- vf->default_lan_addr.addr, ETH_ALEN);
+ vfres->vsi_res[i].num_queue_pairs = vsi->alloc_queue_pairs;
+ /* VFs only use TC 0 */
+ vfres->vsi_res[i].qset_handle
+ = le16_to_cpu(vsi->info.qs_handle[0]);
+ ether_addr_copy(vfres->vsi_res[i].default_mac_addr,
+ vf->default_lan_addr.addr);
i++;
}
set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
@@ -1582,6 +1602,11 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
}
vsi = pf->vsi[vf->lan_vsi_idx];
+ /* Lock once: every function called in the loop below accesses the
+ * VSI's MAC filter list, which must be protected by the same lock.
+ */
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+
/* add new addresses to the list */
for (i = 0; i < al->num_elements; i++) {
struct i40e_mac_filter *f;
@@ -1600,12 +1625,14 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
dev_err(&pf->pdev->dev,
"Unable to add VF MAC filter\n");
ret = I40E_ERR_PARAM;
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
goto error_param;
}
}
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
/* program the updated filter list */
- if (i40e_sync_vsi_filters(vsi))
+ if (i40e_sync_vsi_filters(vsi, false))
dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
error_param:
@@ -1650,13 +1677,15 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
}
vsi = pf->vsi[vf->lan_vsi_idx];
+ spin_lock_bh(&vsi->mac_filter_list_lock);
/* delete addresses from the list */
for (i = 0; i < al->num_elements; i++)
i40e_del_filter(vsi, al->list[i].addr,
I40E_VLAN_ANY, true, false);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
/* program the updated filter list */
- if (i40e_sync_vsi_filters(vsi))
+ if (i40e_sync_vsi_filters(vsi, false))
dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
error_param:
@@ -1708,6 +1737,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
for (i = 0; i < vfl->num_elements; i++) {
/* add new VLAN filter */
int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
+
if (ret)
dev_err(&pf->pdev->dev,
"Unable to add VF vlan filter %d, error %d\n",
@@ -1759,6 +1789,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
for (i = 0; i < vfl->num_elements; i++) {
int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
+
if (ret)
dev_err(&pf->pdev->dev,
"Unable to delete VF vlan filter %d, error %d\n",
@@ -1870,7 +1901,6 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
case I40E_VIRTCHNL_OP_UNKNOWN:
default:
return -EPERM;
- break;
}
/* few more checks */
if ((valid_len != msglen) || (err_msg_format)) {
@@ -2049,6 +2079,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
goto error_param;
}
+ /* Lock once: the add/del_filter calls invoked below require
+ * mac_filter_list_lock to be held.
+ */
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+
/* delete the temporary mac address */
i40e_del_filter(vsi, vf->default_lan_addr.addr,
vf->port_vlan_id ? vf->port_vlan_id : -1,
@@ -2060,9 +2095,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
list_for_each_entry(f, &vsi->mac_filter_list, list)
i40e_del_filter(vsi, f->macaddr, f->vlan, true, false);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+
dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
/* program mac filter */
- if (i40e_sync_vsi_filters(vsi)) {
+ if (i40e_sync_vsi_filters(vsi, false)) {
dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
ret = -EIO;
goto error_param;
@@ -2089,8 +2126,10 @@ error_param:
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
int vf_id, u16 vlan_id, u8 qos)
{
+ u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
+ bool is_vsi_in_vlan = false;
struct i40e_vsi *vsi;
struct i40e_vf *vf;
int ret = 0;
@@ -2116,12 +2155,15 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
goto error_pvid;
}
- if (le16_to_cpu(vsi->info.pvid) ==
- (vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT)))
+ if (le16_to_cpu(vsi->info.pvid) == vlanprio)
/* duplicate request, so just return success */
goto error_pvid;
- if (le16_to_cpu(vsi->info.pvid) == 0 && i40e_is_vsi_in_vlan(vsi)) {
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+ is_vsi_in_vlan = i40e_is_vsi_in_vlan(vsi);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+ if (le16_to_cpu(vsi->info.pvid) == 0 && is_vsi_in_vlan) {
dev_err(&pf->pdev->dev,
"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
vf_id);
@@ -2141,7 +2183,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
* MAC addresses deleted.
*/
if ((!(vlan_id || qos) ||
- (vlan_id | qos) != le16_to_cpu(vsi->info.pvid)) &&
+ vlanprio != le16_to_cpu(vsi->info.pvid)) &&
vsi->info.pvid)
ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
@@ -2156,8 +2198,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
}
}
if (vlan_id || qos)
- ret = i40e_vsi_add_pvid(vsi,
- vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
+ ret = i40e_vsi_add_pvid(vsi, vlanprio);
else
i40e_vsi_remove_pvid(vsi);
@@ -2310,7 +2351,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
ivi->vf = vf_id;
- memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);
+ ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
ivi->max_tx_rate = vf->tx_rate;
ivi->min_tx_rate = 0;
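[Editorial sketch] The recurring change in this file is that every walk or mutation of the VSI MAC filter list is now bracketed by mac_filter_list_lock, taken once around the whole loop rather than per element, and i40e_sync_vsi_filters() is only called after the lock is dropped. A minimal user-space sketch of that pattern with a pthread mutex standing in for the spinlock; all names are invented for illustration:

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for the VSI filter list and its lock. */
static pthread_mutex_t filter_list_lock = PTHREAD_MUTEX_INITIALIZER;
static int num_filters;

static void add_filter_locked(const char *mac)
{
	/* caller must hold filter_list_lock, as with i40e_add_filter() */
	num_filters++;
	printf("added %s (total %d)\n", mac, num_filters);
}

static void add_filters(const char *const *macs, int n)
{
	int i;

	/* Lock once around the whole loop, mirroring
	 * i40e_vc_add_mac_addr_msg(): every add touches the same list,
	 * so one critical section covers them all.
	 */
	pthread_mutex_lock(&filter_list_lock);
	for (i = 0; i < n; i++)
		add_filter_locked(macs[i]);
	pthread_mutex_unlock(&filter_list_lock);

	/* Filters are pushed to hardware only after the lock is dropped,
	 * analogous to i40e_sync_vsi_filters(vsi, false).
	 */
}

int main(void)
{
	const char *macs[] = { "aa:bb:cc:dd:ee:01", "aa:bb:cc:dd:ee:02" };

	add_filters(macs, 2);
	return 0;
}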
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 736f6f08b4f2..da44995def42 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -29,8 +29,6 @@
#include "i40e.h"
-#define I40E_MAX_MACVLAN_FILTERS 256
-#define I40E_MAX_VLAN_FILTERS 256
#define I40E_MAX_VLANID 4095
#define I40E_VIRTCHNL_SUPPORTED_QTYPES 2
@@ -98,7 +96,8 @@ struct i40e_vf {
u8 num_queue_pairs; /* num of qps assigned to VF vsis */
u64 num_mdd_events; /* num of mdd events detected */
- u64 num_invalid_msgs; /* num of malformed or invalid msgs detected */
+ /* num of continuous malformed or invalid msgs detected */
+ u64 num_invalid_msgs;
u64 num_valid_msgs; /* num of valid msgs detected */
unsigned long vf_caps; /* vf's adv. capabilities */
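[Editorial sketch] The reworded comment makes explicit that num_invalid_msgs counts consecutive malformed messages: the PF-side change earlier in this patch zeroes it whenever a valid message arrives, so only an unbroken run of bad requests can trip a threshold. A tiny sketch of that counting policy; the threshold value is illustrative only:

#include <stdbool.h>
#include <stdio.h>

#define MAX_CONSECUTIVE_INVALID 10 /* illustrative threshold */

struct vf_counters {
	unsigned long num_valid_msgs;
	unsigned long num_invalid_msgs; /* consecutive invalid messages */
};

/* Returns true once the run of invalid messages reaches the threshold. */
static bool account_msg(struct vf_counters *c, bool valid)
{
	if (valid) {
		c->num_valid_msgs++;
		c->num_invalid_msgs = 0; /* a valid message resets the run */
		return false;
	}
	return ++c->num_invalid_msgs >= MAX_CONSECUTIVE_INVALID;
}

int main(void)
{
	struct vf_counters c = { 0, 0 };
	int i;

	for (i = 0; i < 9; i++)
		account_msg(&c, false);
	account_msg(&c, true); /* one good message clears the run */
	printf("misbehaving: %d\n", account_msg(&c, false));
	return 0;
}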
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index a23ebfd5cd25..fd123ca60761 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -469,8 +469,12 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
- if (hw->aq.asq.count == 0)
- return I40E_ERR_NOT_READY;
+ mutex_lock(&hw->aq.asq_mutex);
+
+ if (hw->aq.asq.count == 0) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto shutdown_asq_out;
+ }
/* Stop firmware AdminQ processing */
wr32(hw, hw->aq.asq.head, 0);
@@ -479,16 +483,13 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
wr32(hw, hw->aq.asq.bal, 0);
wr32(hw, hw->aq.asq.bah, 0);
- /* make sure lock is available */
- mutex_lock(&hw->aq.asq_mutex);
-
hw->aq.asq.count = 0; /* to indicate uninitialized queue */
/* free ring buffers */
i40e_free_asq_bufs(hw);
+shutdown_asq_out:
mutex_unlock(&hw->aq.asq_mutex);
-
return ret_code;
}
@@ -502,8 +503,12 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
- if (hw->aq.arq.count == 0)
- return I40E_ERR_NOT_READY;
+ mutex_lock(&hw->aq.arq_mutex);
+
+ if (hw->aq.arq.count == 0) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto shutdown_arq_out;
+ }
/* Stop firmware AdminQ processing */
wr32(hw, hw->aq.arq.head, 0);
@@ -512,16 +517,13 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
wr32(hw, hw->aq.arq.bal, 0);
wr32(hw, hw->aq.arq.bah, 0);
- /* make sure lock is available */
- mutex_lock(&hw->aq.arq_mutex);
-
hw->aq.arq.count = 0; /* to indicate uninitialized queue */
/* free ring buffers */
i40e_free_arq_bufs(hw);
+shutdown_arq_out:
mutex_unlock(&hw->aq.arq_mutex);
-
return ret_code;
}
@@ -596,6 +598,9 @@ i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw)
/* destroy the locks */
+ if (hw->nvm_buff.va)
+ i40e_free_virt_mem(hw, &hw->nvm_buff);
+
return ret_code;
}
@@ -617,8 +622,7 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
details = I40E_ADMINQ_DETAILS(*asq, ntc);
while (rd32(hw, hw->aq.asq.head) != ntc) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
- "%s: ntc %d head %d.\n", __func__, ntc,
- rd32(hw, hw->aq.asq.head));
+ "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
if (details->callback) {
I40E_ADMINQ_CALLBACK cb_func =
@@ -682,19 +686,23 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
u16 retval = 0;
u32 val = 0;
- val = rd32(hw, hw->aq.asq.head);
- if (val >= hw->aq.num_asq_entries) {
+ mutex_lock(&hw->aq.asq_mutex);
+
+ if (hw->aq.asq.count == 0) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
- "AQTX: head overrun at %d\n", val);
+ "AQTX: Admin queue not initialized.\n");
status = I40E_ERR_QUEUE_EMPTY;
- goto asq_send_command_exit;
+ goto asq_send_command_error;
}
- if (hw->aq.asq.count == 0) {
+ hw->aq.asq_last_status = I40E_AQ_RC_OK;
+
+ val = rd32(hw, hw->aq.asq.head);
+ if (val >= hw->aq.num_asq_entries) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
- "AQTX: Admin queue not initialized.\n");
+ "AQTX: head overrun at %d\n", val);
status = I40E_ERR_QUEUE_EMPTY;
- goto asq_send_command_exit;
+ goto asq_send_command_error;
}
details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
@@ -719,8 +727,6 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
desc->flags &= ~cpu_to_le16(details->flags_dis);
desc->flags |= cpu_to_le16(details->flags_ena);
- mutex_lock(&hw->aq.asq_mutex);
-
if (buff_size > hw->aq.asq_buf_size) {
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
@@ -830,6 +836,10 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff,
buff_size);
+ /* save writeback aq if requested */
+ if (details->wb_desc)
+ *details->wb_desc = *desc_on_ring;
+
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
@@ -841,7 +851,6 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
asq_send_command_error:
mutex_unlock(&hw->aq.asq_mutex);
-asq_send_command_exit:
return status;
}
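[Editorial sketch] Both shutdown paths and the send path above now take the queue mutex before testing or touching asq/arq state, instead of checking count first and locking afterwards; that closes the window where a command could be issued against a queue that is concurrently being torn down. A condensed user-space sketch of the resulting locking shape; the structures and error values are stand-ins:

#include <pthread.h>
#include <stdio.h>

struct queue {
	pthread_mutex_t lock;
	int count; /* 0 means "not initialized" */
};

/* After the change: the state check happens under the lock, so a
 * concurrent shutdown cannot zero 'count' between check and use.
 */
static int send_command(struct queue *q)
{
	int ret = 0;

	pthread_mutex_lock(&q->lock);
	if (q->count == 0) {
		ret = -1; /* analogue of I40E_ERR_QUEUE_EMPTY */
		goto out;
	}
	/* ... build the descriptor and ring the doorbell here ... */
out:
	pthread_mutex_unlock(&q->lock);
	return ret;
}

static void shutdown_queue(struct queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->count = 0; /* mark uninitialized while holding the lock */
	/* ... free ring buffers ... */
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct queue q = { .lock = PTHREAD_MUTEX_INITIALIZER, .count = 32 };

	shutdown_queue(&q);
	printf("send after shutdown: %d\n", send_command(&q));
	return 0;
}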
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index ef43d68f67b3..a3eae5d9a2bd 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -69,6 +69,7 @@ struct i40e_asq_cmd_details {
u16 flags_dis;
bool async;
bool postpone;
+ struct i40e_aq_desc *wb_desc;
};
#define I40E_ADMINQ_DETAILS(R, i) \
@@ -108,9 +109,10 @@ struct i40e_adminq_info {
/**
* i40e_aq_rc_to_posix - convert errors to user-land codes
- * aq_rc: AdminQ error code to convert
+ * aq_ret: AdminQ handler error code, which can override aq_rc
+ * aq_rc: AdminQ firmware error code to convert
**/
-static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc)
+static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
{
int aq_to_posix[] = {
0, /* I40E_AQ_RC_OK */
@@ -142,8 +144,9 @@ static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc)
if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT)
return -EAGAIN;
- if (aq_rc >= ARRAY_SIZE(aq_to_posix))
+ if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
return -ERANGE;
+
return aq_to_posix[aq_rc];
}
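[Editorial sketch] i40e_aq_rc_to_posix() maps firmware AdminQ return codes onto errno values, lets a driver-level timeout force -EAGAIN regardless of aq_rc, and now range-checks the firmware code so a bogus value cannot index past the table. A hedged, self-contained usage sketch; the FAKE_* constants stand in for the real i40e enums:

#include <errno.h>
#include <stdio.h>

#define FAKE_ERR_ADMIN_QUEUE_TIMEOUT (-54) /* stand-in status code */
#define FAKE_AQ_RC_OK     0
#define FAKE_AQ_RC_EPERM  1
#define FAKE_AQ_RC_ENOENT 2

static int aq_rc_to_posix(int aq_ret, int aq_rc)
{
	static const int aq_to_posix[] = { 0, -EPERM, -ENOENT };

	if (aq_ret == FAKE_ERR_ADMIN_QUEUE_TIMEOUT)
		return -EAGAIN;
	/* the unsigned compare rejects both negative and too-large codes */
	if (!((unsigned int)aq_rc < sizeof(aq_to_posix) / sizeof(aq_to_posix[0])))
		return -ERANGE;
	return aq_to_posix[aq_rc];
}

int main(void)
{
	printf("%d %d %d\n",
	       aq_rc_to_posix(0, FAKE_AQ_RC_EPERM),                     /* -EPERM  */
	       aq_rc_to_posix(FAKE_ERR_ADMIN_QUEUE_TIMEOUT, FAKE_AQ_RC_OK), /* -EAGAIN */
	       aq_rc_to_posix(0, 99));                                  /* -ERANGE */
	return 0;
}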
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index c8022092d369..fcb9ef34cc7a 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -1719,11 +1719,13 @@ struct i40e_aqc_get_link_status {
u8 phy_type; /* i40e_aq_phy_type */
u8 link_speed; /* i40e_aq_link_speed */
u8 link_info;
-#define I40E_AQ_LINK_UP 0x01
+#define I40E_AQ_LINK_UP 0x01 /* obsolete */
+#define I40E_AQ_LINK_UP_FUNCTION 0x01
#define I40E_AQ_LINK_FAULT 0x02
#define I40E_AQ_LINK_FAULT_TX 0x04
#define I40E_AQ_LINK_FAULT_RX 0x08
#define I40E_AQ_LINK_FAULT_REMOTE 0x10
+#define I40E_AQ_LINK_UP_PORT 0x20
#define I40E_AQ_MEDIA_AVAILABLE 0x40
#define I40E_AQ_SIGNAL_DETECT 0x80
u8 an_info;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index d45d0ae6bd3b..72b1942a94aa 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -51,7 +51,9 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_B:
case I40E_DEV_ID_QSFP_C:
case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
case I40E_DEV_ID_20G_KR2:
+ case I40E_DEV_ID_20G_KR2_A:
hw->mac.type = I40E_MAC_XL710;
break;
case I40E_DEV_ID_SFP_X722:
@@ -85,7 +87,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
* @hw: pointer to the HW structure
* @aq_err: the AQ error code to convert
**/
-char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
{
switch (aq_err) {
case I40E_AQ_RC_OK:
@@ -145,7 +147,7 @@ char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
* @hw: pointer to the HW structure
* @stat_err: the status error code to convert
**/
-char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
+const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
{
switch (stat_err) {
case 0:
@@ -329,25 +331,11 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
len = buf_len;
/* write the full 16-byte chunks */
for (i = 0; i < (len - 16); i += 16)
- i40e_debug(hw, mask,
- "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
- i, buf[i], buf[i + 1], buf[i + 2],
- buf[i + 3], buf[i + 4], buf[i + 5],
- buf[i + 6], buf[i + 7], buf[i + 8],
- buf[i + 9], buf[i + 10], buf[i + 11],
- buf[i + 12], buf[i + 13], buf[i + 14],
- buf[i + 15]);
+ i40e_debug(hw, mask, "\t0x%04X %16ph\n", i, buf + i);
/* write whatever's left over without overrunning the buffer */
- if (i < len) {
- char d_buf[80];
- int j = 0;
-
- memset(d_buf, 0, sizeof(d_buf));
- j += sprintf(d_buf, "\t0x%04X ", i);
- while (i < len)
- j += sprintf(&d_buf[j], " %02X", buf[i++]);
- i40e_debug(hw, mask, "%s\n", d_buf);
- }
+ if (i < len)
+ i40e_debug(hw, mask, "\t0x%04X %*ph\n",
+ i, len - i, buf + i);
}
}
@@ -441,9 +429,6 @@ static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
- cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)lut));
- cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)lut));
-
status = i40evf_asq_send_command(hw, &desc, lut, lut_size, NULL);
return status;
@@ -518,8 +503,6 @@ static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
- cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)key));
- cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)key));
status = i40evf_asq_send_command(hw, &desc, key, key_size, NULL);
@@ -990,10 +973,10 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw,
I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0;
for (i = 0; i < msg->num_vsis; i++) {
if (vsi_res->vsi_type == I40E_VSI_SRIOV) {
- memcpy(hw->mac.perm_addr, vsi_res->default_mac_addr,
- ETH_ALEN);
- memcpy(hw->mac.addr, vsi_res->default_mac_addr,
- ETH_ALEN);
+ ether_addr_copy(hw->mac.perm_addr,
+ vsi_res->default_mac_addr);
+ ether_addr_copy(hw->mac.addr,
+ vsi_res->default_mac_addr);
}
vsi_res++;
}
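[Editorial sketch] The hand-rolled hex dump in i40evf_debug_aq() is replaced by the kernel's %16ph/%*ph printf extensions, which print a small buffer as space-separated hex bytes in a single call (the %*ph form takes the byte count as the field-width argument, as in the hunk above). That specifier is kernel-only; the user-space sketch below just reproduces the loop it replaces, for readers unfamiliar with the format:

#include <stdint.h>
#include <stdio.h>

/* User-space equivalent of the kernel's "%*ph": print 'len' bytes of
 * 'buf' as space-separated hex after the offset.  In kernel code the
 * single call i40e_debug(hw, mask, "\t0x%04X %*ph\n", i, len, buf)
 * produces the same dump.
 */
static void print_hex_chunk(unsigned int offset, const uint8_t *buf, int len)
{
	int i;

	printf("\t0x%04X", offset);
	for (i = 0; i < len; i++)
		printf(" %02x", buf[i]);
	printf("\n");
}

int main(void)
{
	uint8_t desc[10] = { 0x00, 0x0a, 0x12, 0x34, 0x56,
			     0x78, 0xde, 0xad, 0xbe, 0xef };

	print_hex_chunk(0, desc, sizeof(desc));
	return 0;
}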
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
new file mode 100644
index 000000000000..e6a39c9862e8
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
@@ -0,0 +1,55 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_DEVIDS_H_
+#define _I40E_DEVIDS_H_
+
+/* Device IDs */
+#define I40E_DEV_ID_SFP_XL710 0x1572
+#define I40E_DEV_ID_QEMU 0x1574
+#define I40E_DEV_ID_KX_A 0x157F
+#define I40E_DEV_ID_KX_B 0x1580
+#define I40E_DEV_ID_KX_C 0x1581
+#define I40E_DEV_ID_QSFP_A 0x1583
+#define I40E_DEV_ID_QSFP_B 0x1584
+#define I40E_DEV_ID_QSFP_C 0x1585
+#define I40E_DEV_ID_10G_BASE_T 0x1586
+#define I40E_DEV_ID_20G_KR2 0x1587
+#define I40E_DEV_ID_20G_KR2_A 0x1588
+#define I40E_DEV_ID_10G_BASE_T4 0x1589
+#define I40E_DEV_ID_VF 0x154C
+#define I40E_DEV_ID_VF_HV 0x1571
+#define I40E_DEV_ID_SFP_X722 0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
+#define I40E_DEV_ID_X722_VF 0x37CD
+#define I40E_DEV_ID_X722_VF_HV 0x37D9
+
+#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
+ (d) == I40E_DEV_ID_QSFP_B || \
+ (d) == I40E_DEV_ID_QSFP_C)
+
+#endif /* _I40E_DEVIDS_H_ */
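[Editorial sketch] The new i40e_devids.h centralizes the PCI device IDs that were previously duplicated in both i40e_type.h copies, and i40e_is_40G_device() keys off the three QSFP variants. A small sketch of the kind of device-ID dispatch this header feeds; the ID values are copied from above, while the lookup function itself is illustrative:

#include <stdint.h>
#include <stdio.h>

#define DEV_ID_SFP_XL710 0x1572
#define DEV_ID_QSFP_A    0x1583
#define DEV_ID_QSFP_B    0x1584
#define DEV_ID_QSFP_C    0x1585
#define DEV_ID_X722_VF   0x37CD

#define is_40G_device(d) ((d) == DEV_ID_QSFP_A || \
			  (d) == DEV_ID_QSFP_B || \
			  (d) == DEV_ID_QSFP_C)

static const char *describe_device(uint16_t device_id)
{
	if (is_40G_device(device_id))
		return "40G QSFP+ port";
	switch (device_id) {
	case DEV_ID_SFP_XL710:
		return "10G SFP+ port";
	case DEV_ID_X722_VF:
		return "X722 virtual function";
	default:
		return "unknown i40e device";
	}
}

int main(void)
{
	printf("%s\n", describe_device(DEV_ID_QSFP_B));
	return 0;
}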
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index 55ae4b0f8192..cbd9a1b078ab 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -60,8 +60,8 @@ void i40e_idle_aq(struct i40e_hw *hw);
void i40evf_resume_aq(struct i40e_hw *hw);
bool i40evf_check_asq_alive(struct i40e_hw *hw);
i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
-char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
-char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err);
+const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err);
i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
bool pf_lut, u8 *lut, u16 lut_size);
@@ -101,4 +101,6 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
u16 vsi_seid, u16 queue, bool is_add,
struct i40e_control_filter_stats *stats,
struct i40e_asq_cmd_details *cmd_details);
+void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+ u16 vsi_seid);
#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 7e91d825c760..47e9a90d6b10 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -140,65 +140,6 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
return le32_to_cpu(*(volatile __le32 *)head);
}
-/**
- * i40e_get_tx_pending - how many tx descriptors not processed
- * @tx_ring: the ring of descriptors
- *
- * Since there is no access to the ring head register
- * in XL710, we need to use our local copies
- **/
-static u32 i40e_get_tx_pending(struct i40e_ring *ring)
-{
- u32 head, tail;
-
- head = i40e_get_head(ring);
- tail = readl(ring->tail);
-
- if (head != tail)
- return (head < tail) ?
- tail - head : (tail + ring->count - head);
-
- return 0;
-}
-
-/**
- * i40e_check_tx_hang - Is there a hang in the Tx queue
- * @tx_ring: the ring of descriptors
- **/
-static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
-{
- u32 tx_done = tx_ring->stats.packets;
- u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
- u32 tx_pending = i40e_get_tx_pending(tx_ring);
- bool ret = false;
-
- clear_check_for_tx_hang(tx_ring);
-
- /* Check for a hung queue, but be thorough. This verifies
- * that a transmit has been completed since the previous
- * check AND there is at least one packet pending. The
- * ARMED bit is set to indicate a potential hang. The
- * bit is cleared if a pause frame is received to remove
- * false hang detection due to PFC or 802.3x frames. By
- * requiring this to fail twice we avoid races with
- * PFC clearing the ARMED bit and conditions where we
- * run the check_tx_hang logic with a transmit completion
- * pending but without time to complete it yet.
- */
- if ((tx_done_old == tx_done) && tx_pending) {
- /* make sure it is true for two checks in a row */
- ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
- &tx_ring->state);
- } else if (tx_done_old == tx_done &&
- (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
- /* update completed stats and disarm the hang check */
- tx_ring->tx_stats.tx_done_old = tx_done;
- clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
- }
-
- return ret;
-}
-
#define WB_STRIDE 0x3
/**
@@ -304,36 +245,15 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_ring->q_vector->tx.total_bytes += total_bytes;
tx_ring->q_vector->tx.total_packets += total_packets;
+ /* check to see if there are any non-cache aligned descriptors
+ * waiting to be written back, and kick the hardware to force
+ * them to be written back in case of napi polling
+ */
if (budget &&
!((i & WB_STRIDE) == WB_STRIDE) &&
!test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true;
- else
- tx_ring->arm_wb = false;
-
- if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
- /* schedule immediate reset if we believe we hung */
- dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
- " VSI <%d>\n"
- " Tx Queue <%d>\n"
- " next_to_use <%x>\n"
- " next_to_clean <%x>\n",
- tx_ring->vsi->seid,
- tx_ring->queue_index,
- tx_ring->next_to_use, i);
-
- netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
-
- dev_info(tx_ring->dev,
- "tx hang detected on queue %d, resetting adapter\n",
- tx_ring->queue_index);
-
- tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);
-
- /* the adapter is about to reset, no point in enabling stuff */
- return true;
- }
netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->queue_index),
@@ -355,16 +275,16 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
}
}
- return budget > 0;
+ return !!budget;
}
/**
- * i40e_force_wb -Arm hardware to do a wb on noncache aligned descriptors
+ * i40evf_force_wb - Arm hardware to do a wb on non-cache-aligned descriptors
* @vsi: the VSI we care about
* @q_vector: the vector on which to force writeback
*
**/
-static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
+static void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
u16 flags = q_vector->tx.ring[0].flags;
@@ -398,6 +318,8 @@ static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
* i40e_set_new_dynamic_itr - Find new ITR level
* @rc: structure containing ring performance data
*
+ * Returns true if ITR changed, false if not
+ *
* Stores a new ITR value based on packets and byte counts during
* the last interrupt. The advantage of per interrupt computation
* is faster updates and more accurate ITR for the current traffic
@@ -406,21 +328,32 @@ static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
* testing data as well as attempting to minimize response time
* while increasing bulk throughput.
**/
-static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
+static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
enum i40e_latency_range new_latency_range = rc->latency_range;
+ struct i40e_q_vector *qv = rc->ring->q_vector;
u32 new_itr = rc->itr;
int bytes_per_int;
+ int usecs;
if (rc->total_packets == 0 || !rc->itr)
- return;
+ return false;
/* simple throttlerate management
- * 0-10MB/s lowest (100000 ints/s)
+ * 0-10MB/s lowest (50000 ints/s)
* 10-20MB/s low (20000 ints/s)
- * 20-1249MB/s bulk (8000 ints/s)
+ * 20-1249MB/s bulk (18000 ints/s)
+ * > 40000 Rx packets per second (8000 ints/s)
+ *
+ * The math works out because the divisor is in 10^(-6) which
+ * turns the bytes/us input value into MB/s values, but
+ * make sure to use usecs, as the register values written
+ * are in 2 usec increments in the ITR registers, and make sure
+ * to use the smoothed values that the countdown timer gives us.
*/
- bytes_per_int = rc->total_bytes / rc->itr;
+ usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
+ bytes_per_int = rc->total_bytes / usecs;
+
switch (new_latency_range) {
case I40E_LOWEST_LATENCY:
if (bytes_per_int > 10)
@@ -433,35 +366,52 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
new_latency_range = I40E_LOWEST_LATENCY;
break;
case I40E_BULK_LATENCY:
- if (bytes_per_int <= 20)
- new_latency_range = I40E_LOW_LATENCY;
- break;
+ case I40E_ULTRA_LATENCY:
default:
if (bytes_per_int <= 20)
new_latency_range = I40E_LOW_LATENCY;
break;
}
+
+ /* this is to adjust RX more aggressively when streaming small
+ * packets. The value of 40000 was picked as it is just beyond
+ * what the hardware can receive per second if in low latency
+ * mode.
+ */
+#define RX_ULTRA_PACKET_RATE 40000
+
+ if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
+ (&qv->rx == rc))
+ new_latency_range = I40E_ULTRA_LATENCY;
+
rc->latency_range = new_latency_range;
switch (new_latency_range) {
case I40E_LOWEST_LATENCY:
- new_itr = I40E_ITR_100K;
+ new_itr = I40E_ITR_50K;
break;
case I40E_LOW_LATENCY:
new_itr = I40E_ITR_20K;
break;
case I40E_BULK_LATENCY:
+ new_itr = I40E_ITR_18K;
+ break;
+ case I40E_ULTRA_LATENCY:
new_itr = I40E_ITR_8K;
break;
default:
break;
}
- if (new_itr != rc->itr)
- rc->itr = new_itr;
-
rc->total_bytes = 0;
rc->total_packets = 0;
+
+ if (new_itr != rc->itr) {
+ rc->itr = new_itr;
+ return true;
+ }
+
+ return false;
}
/*
@@ -822,16 +772,11 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring,
struct sk_buff *skb, u16 vlan_tag)
{
struct i40e_q_vector *q_vector = rx_ring->q_vector;
- struct i40e_vsi *vsi = rx_ring->vsi;
- u64 flags = vsi->back->flags;
if (vlan_tag & VLAN_VID_MASK)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
- if (flags & I40E_FLAG_IN_NETPOLL)
- netif_rx(skb);
- else
- napi_gro_receive(&q_vector->napi, skb);
+ napi_gro_receive(&q_vector->napi, skb);
}
/**
@@ -997,7 +942,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
- const int current_node = numa_node_id();
+ const int current_node = numa_mem_id();
struct i40e_vsi *vsi = rx_ring->vsi;
u16 i = rx_ring->next_to_clean;
union i40e_rx_desc *rx_desc;
@@ -1067,6 +1012,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
cleaned_count++;
if (rx_hbo || rx_sph) {
int len;
+
if (rx_hbo)
len = I40E_RX_HDR_SIZE;
else
@@ -1240,9 +1186,6 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
/* ERR_MASK will only have valid bits if EOP set */
if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
- /* TODO: shouldn't we increment a counter indicating the
- * drop?
- */
continue;
}
@@ -1274,6 +1217,21 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
return total_rx_packets;
}
+static u32 i40e_buildreg_itr(const int type, const u16 itr)
+{
+ u32 val;
+
+ val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+ (type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
+ (itr << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);
+
+ return val;
+}
+
+/* a small macro to shorten up some long lines */
+#define INTREG I40E_VFINT_DYN_CTLN1
+
/**
* i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
* @vsi: the VSI we care about
@@ -1284,55 +1242,67 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
struct i40e_q_vector *q_vector)
{
struct i40e_hw *hw = &vsi->back->hw;
- u16 old_itr;
+ bool rx = false, tx = false;
+ u32 rxval, txval;
int vector;
- u32 val;
vector = (q_vector->v_idx + vsi->base_vector);
+
+ /* avoid dynamic calculation if in countdown mode OR if
+ * all dynamic is disabled
+ */
+ rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
+
+ if (q_vector->itr_countdown > 0 ||
+ (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) &&
+ !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) {
+ goto enable_int;
+ }
+
if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
- old_itr = q_vector->rx.itr;
- i40e_set_new_dynamic_itr(&q_vector->rx);
- if (old_itr != q_vector->rx.itr) {
- val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
- (I40E_RX_ITR <<
- I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
- (q_vector->rx.itr <<
- I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);
- } else {
- val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
- (I40E_ITR_NONE <<
- I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT);
- }
- if (!test_bit(__I40E_DOWN, &vsi->state))
- wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val);
- } else {
- i40evf_irq_enable_queues(vsi->back, 1
- << q_vector->v_idx);
+ rx = i40e_set_new_dynamic_itr(&q_vector->rx);
+ rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
}
if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
- old_itr = q_vector->tx.itr;
- i40e_set_new_dynamic_itr(&q_vector->tx);
- if (old_itr != q_vector->tx.itr) {
- val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
- (I40E_TX_ITR <<
- I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
- (q_vector->tx.itr <<
- I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);
+ tx = i40e_set_new_dynamic_itr(&q_vector->tx);
+ txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
+ }
+ if (rx || tx) {
+ /* get the higher of the two ITR adjustments and
+ * use the same value for both ITR registers
+ * when in adaptive mode (Rx and/or Tx)
+ */
+ u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
- } else {
- val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
- (I40E_ITR_NONE <<
- I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT);
- }
- if (!test_bit(__I40E_DOWN, &vsi->state))
- wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val);
- } else {
- i40evf_irq_enable_queues(vsi->back, BIT(q_vector->v_idx));
+ q_vector->tx.itr = q_vector->rx.itr = itr;
+ txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
+ tx = true;
+ rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
+ rx = true;
}
+
+ /* only need to enable the interrupt once, but need
+ * to possibly update both ITR values
+ */
+ if (rx) {
+ /* set the INTENA_MSK_MASK so that this first write
+ * won't actually enable the interrupt, instead just
+ * updating the ITR (it's bit 31 PF and VF)
+ */
+ rxval |= BIT(31);
+ /* don't check _DOWN because interrupt isn't being enabled */
+ wr32(hw, INTREG(vector - 1), rxval);
+ }
+
+enable_int:
+ if (!test_bit(__I40E_DOWN, &vsi->state))
+ wr32(hw, INTREG(vector - 1), txval);
+
+ if (q_vector->itr_countdown)
+ q_vector->itr_countdown--;
+ else
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
+
}
/**
@@ -1353,7 +1323,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
bool clean_complete = true;
bool arm_wb = false;
int budget_per_ring;
- int cleaned;
+ int work_done = 0;
if (test_bit(__I40E_DOWN, &vsi->state)) {
napi_complete(napi);
@@ -1366,26 +1336,36 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
i40e_for_each_ring(ring, q_vector->tx) {
clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
arm_wb |= ring->arm_wb;
+ ring->arm_wb = false;
}
+ /* Handle case where we are called by netpoll with a budget of 0 */
+ if (budget <= 0)
+ goto tx_only;
+
/* We attempt to distribute budget to each Rx queue fairly, but don't
* allow the budget to go below 1 because that would exit polling early.
*/
budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
i40e_for_each_ring(ring, q_vector->rx) {
+ int cleaned;
+
if (ring_is_ps_enabled(ring))
cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
else
cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
+
+ work_done += cleaned;
/* if we didn't clean as many as budgeted, we must be done */
clean_complete &= (budget_per_ring != cleaned);
}
/* If work not completed, return budget and polling will return */
if (!clean_complete) {
+tx_only:
if (arm_wb)
- i40e_force_wb(vsi, q_vector);
+ i40evf_force_wb(vsi, q_vector);
return budget;
}
@@ -1393,7 +1373,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
q_vector->arm_wb_state = false;
/* Work is done so exit the polling mode and re-enable the interrupt */
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
i40e_update_enable_itr(vsi, q_vector);
return 0;
}
@@ -1437,6 +1417,7 @@ static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb,
/* else if it is a SW VLAN, check the next protocol and store the tag */
} else if (protocol == htons(ETH_P_8021Q)) {
struct vlan_hdr *vhdr, _vhdr;
+
vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
if (!vhdr)
return -EINVAL;
@@ -1979,6 +1960,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
u32 td_cmd = 0;
u8 hdr_len = 0;
int tso;
+
if (0 == i40evf_xmit_descriptor_count(skb, tx_ring))
return NETDEV_TX_BUSY;
@@ -2006,10 +1988,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (tso)
tx_flags |= I40E_TX_FLAGS_TSO;
- if (i40e_chk_linearize(skb, tx_flags))
+ if (i40e_chk_linearize(skb, tx_flags)) {
if (skb_linearize(skb))
goto out_drop;
-
+ tx_ring->tx_stats.tx_linearize++;
+ }
skb_tx_timestamp(skb);
/* always enable CRC insertion offload */
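[Editorial sketch] The reworked i40e_set_new_dynamic_itr() scales the ITR register value (2 usec units) by ITR_COUNTDOWN_START to approximate the elapsed interval in microseconds, computes bytes per microsecond (effectively MB/s) to pick a latency bucket, and adds an "ultra" bucket for Rx rings streaming more than roughly 40000 packets per second. A standalone sketch of that bucket selection; thresholds come from the driver comments above, and the interface is simplified for illustration:

#include <stdio.h>

enum latency_range { LOWEST, LOW, BULK, ULTRA };

#define RX_ULTRA_PACKET_RATE 40000 /* packets/sec, from the driver comment */

/* Illustrative version of the bucket logic: 'bytes'/'packets' were seen
 * over an interval of 'usecs' microseconds on this ring.
 */
static enum latency_range pick_latency_range(enum latency_range cur,
					     unsigned long bytes,
					     unsigned long packets,
					     unsigned long usecs,
					     int is_rx)
{
	unsigned long mb_per_s = bytes / usecs; /* bytes per usec == MB/s */
	enum latency_range next = cur;

	switch (cur) {
	case LOWEST:
		if (mb_per_s > 10)
			next = LOW;
		break;
	case LOW:
		if (mb_per_s > 20)
			next = BULK;
		else if (mb_per_s <= 10)
			next = LOWEST;
		break;
	default: /* BULK and ULTRA fall back together */
		if (mb_per_s <= 20)
			next = LOW;
		break;
	}

	/* small-packet Rx streams get the most aggressive moderation */
	if (is_rx &&
	    (unsigned long long)packets * 1000000ULL / usecs > RX_ULTRA_PACKET_RATE)
		next = ULTRA;

	return next;
}

int main(void)
{
	/* 60k small (~64B) packets over a 1 second window on an Rx ring */
	printf("range = %d\n",
	       pick_latency_range(BULK, 60000UL * 64, 60000, 1000000, 1));
	return 0;
}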
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 9a30f5d8c089..ebc1bf77f036 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -32,11 +32,14 @@
#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
#define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */
#define I40E_ITR_100K 0x0005
+#define I40E_ITR_50K 0x000A
#define I40E_ITR_20K 0x0019
+#define I40E_ITR_18K 0x001B
#define I40E_ITR_8K 0x003E
#define I40E_ITR_4K 0x007A
-#define I40E_ITR_RX_DEF I40E_ITR_8K
-#define I40E_ITR_TX_DEF I40E_ITR_4K
+#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */
+#define I40E_ITR_RX_DEF I40E_ITR_20K
+#define I40E_ITR_TX_DEF I40E_ITR_20K
#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */
#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */
@@ -44,6 +47,15 @@
#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
+/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
+ * the value of the rate limit is non-zero
+ */
+#define INTRL_ENA BIT(6)
+#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
+#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
+#define I40E_INTRL_8K 125 /* 8000 ints/sec */
+#define I40E_INTRL_62K 16 /* 62500 ints/sec */
+#define I40E_INTRL_83K 12 /* 83333 ints/sec */
#define I40E_QUEUE_END_OF_LIST 0x7FF
@@ -79,16 +91,16 @@ enum i40e_dyn_idx_t {
BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
- BIT(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
- BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
- BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
- BIT(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
- BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
- BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
#define i40e_pf_get_default_rss_hena(pf) \
(((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
- I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
+ I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
/* Supported Rx Buffer Sizes */
#define I40E_RXBUFFER_512 512 /* Used for packet split */
@@ -164,6 +176,7 @@ struct i40e_tx_buffer {
};
unsigned int bytecount;
unsigned short gso_segs;
+
DEFINE_DMA_UNMAP_ADDR(dma);
DEFINE_DMA_UNMAP_LEN(len);
u32 tx_flags;
@@ -187,6 +200,7 @@ struct i40e_tx_queue_stats {
u64 restart_queue;
u64 tx_busy;
u64 tx_done_old;
+ u64 tx_linearize;
};
struct i40e_rx_queue_stats {
@@ -198,8 +212,6 @@ struct i40e_rx_queue_stats {
enum i40e_ring_state_t {
__I40E_TX_FDIR_INIT_DONE,
__I40E_TX_XPS_INIT_DONE,
- __I40E_TX_DETECT_HANG,
- __I40E_HANG_CHECK_ARMED,
__I40E_RX_PS_ENABLED,
__I40E_RX_16BYTE_DESC_ENABLED,
};
@@ -210,12 +222,6 @@ enum i40e_ring_state_t {
set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
#define clear_ring_ps_enabled(ring) \
clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define check_for_tx_hang(ring) \
- test_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
-#define set_check_for_tx_hang(ring) \
- set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
-#define clear_check_for_tx_hang(ring) \
- clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
#define ring_is_16byte_desc_enabled(ring) \
test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
#define set_ring_16byte_desc_enabled(ring) \
@@ -287,6 +293,7 @@ enum i40e_latency_range {
I40E_LOWEST_LATENCY = 0,
I40E_LOW_LATENCY = 1,
I40E_BULK_LATENCY = 2,
+ I40E_ULTRA_LATENCY = 3,
};
struct i40e_ring_container {
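[Editorial sketch] Per the comments above, ITR values are programmed in 2 usec units (ITR_TO_REG shifts right by one), while the new interrupt rate-limit register works in 4 usec units with bit 6 as the enable flag; that is exactly what INTRL_USEC_TO_REG/INTRL_REG_TO_USEC encode. A quick sketch exercising those conversions with the same macro definitions:

#include <stdio.h>

#define ITR_DYNAMIC              0x8000
#define ITR_TO_REG(setting)      (((setting) & ~ITR_DYNAMIC) >> 1)
#define ITR_REG_TO_USEC(reg)     ((reg) << 1)

#define INTRL_ENA                (1u << 6) /* BIT(6): rate-limit enable */
#define INTRL_REG_TO_USEC(intrl) (((intrl) & ~INTRL_ENA) << 2)
#define INTRL_USEC_TO_REG(set)   ((set) ? ((set) >> 2) | INTRL_ENA : 0)

int main(void)
{
	unsigned int reg;

	/* 50 usec ITR setting -> register value 25 (2 usec granularity) */
	printf("ITR reg for 50us: 0x%x\n", ITR_TO_REG(50));

	/* 125 usec rate limit (~8000 ints/s) -> 0x5F: 0x1F plus the enable
	 * bit; converting back rounds down to the 4 usec grid (124 usec).
	 */
	reg = INTRL_USEC_TO_REG(125);
	printf("INTRL reg for 125us: 0x%x, back to usecs: %u\n",
	       reg, INTRL_REG_TO_USEC(reg));
	return 0;
}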
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 24a2693869a1..301fe2b6dd03 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -33,29 +33,7 @@
#include "i40e_adminq.h"
#include "i40e_hmc.h"
#include "i40e_lan_hmc.h"
-
-/* Device IDs */
-#define I40E_DEV_ID_SFP_XL710 0x1572
-#define I40E_DEV_ID_QEMU 0x1574
-#define I40E_DEV_ID_KX_A 0x157F
-#define I40E_DEV_ID_KX_B 0x1580
-#define I40E_DEV_ID_KX_C 0x1581
-#define I40E_DEV_ID_QSFP_A 0x1583
-#define I40E_DEV_ID_QSFP_B 0x1584
-#define I40E_DEV_ID_QSFP_C 0x1585
-#define I40E_DEV_ID_10G_BASE_T 0x1586
-#define I40E_DEV_ID_20G_KR2 0x1587
-#define I40E_DEV_ID_VF 0x154C
-#define I40E_DEV_ID_VF_HV 0x1571
-#define I40E_DEV_ID_SFP_X722 0x37D0
-#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
-#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
-#define I40E_DEV_ID_X722_VF 0x37CD
-#define I40E_DEV_ID_X722_VF_HV 0x37D9
-
-#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
- (d) == I40E_DEV_ID_QSFP_B || \
- (d) == I40E_DEV_ID_QSFP_C)
+#include "i40e_devids.h"
/* I40E_MASK is a macro used on 32 bit registers */
#define I40E_MASK(mask, shift) (mask << shift)
@@ -158,14 +136,14 @@ enum i40e_set_fc_aq_failures {
};
enum i40e_vsi_type {
- I40E_VSI_MAIN = 0,
- I40E_VSI_VMDQ1,
- I40E_VSI_VMDQ2,
- I40E_VSI_CTRL,
- I40E_VSI_FCOE,
- I40E_VSI_MIRROR,
- I40E_VSI_SRIOV,
- I40E_VSI_FDIR,
+ I40E_VSI_MAIN = 0,
+ I40E_VSI_VMDQ1 = 1,
+ I40E_VSI_VMDQ2 = 2,
+ I40E_VSI_CTRL = 3,
+ I40E_VSI_FCOE = 4,
+ I40E_VSI_MIRROR = 5,
+ I40E_VSI_SRIOV = 6,
+ I40E_VSI_FDIR = 7,
I40E_VSI_TYPE_UNKNOWN
};
@@ -189,16 +167,65 @@ struct i40e_link_status {
bool crc_enable;
u8 pacing;
u8 requested_speeds;
+ u8 module_type[3];
+ /* 1st byte: module identifier */
+#define I40E_MODULE_TYPE_SFP 0x03
+#define I40E_MODULE_TYPE_QSFP 0x0D
+ /* 2nd byte: ethernet compliance codes for 10/40G */
+#define I40E_MODULE_TYPE_40G_ACTIVE 0x01
+#define I40E_MODULE_TYPE_40G_LR4 0x02
+#define I40E_MODULE_TYPE_40G_SR4 0x04
+#define I40E_MODULE_TYPE_40G_CR4 0x08
+#define I40E_MODULE_TYPE_10G_BASE_SR 0x10
+#define I40E_MODULE_TYPE_10G_BASE_LR 0x20
+#define I40E_MODULE_TYPE_10G_BASE_LRM 0x40
+#define I40E_MODULE_TYPE_10G_BASE_ER 0x80
+ /* 3rd byte: ethernet compliance codes for 1G */
+#define I40E_MODULE_TYPE_1000BASE_SX 0x01
+#define I40E_MODULE_TYPE_1000BASE_LX 0x02
+#define I40E_MODULE_TYPE_1000BASE_CX 0x04
+#define I40E_MODULE_TYPE_1000BASE_T 0x08
+};
+
+enum i40e_aq_capabilities_phy_type {
+ I40E_CAP_PHY_TYPE_SGMII = BIT(I40E_PHY_TYPE_SGMII),
+ I40E_CAP_PHY_TYPE_1000BASE_KX = BIT(I40E_PHY_TYPE_1000BASE_KX),
+ I40E_CAP_PHY_TYPE_10GBASE_KX4 = BIT(I40E_PHY_TYPE_10GBASE_KX4),
+ I40E_CAP_PHY_TYPE_10GBASE_KR = BIT(I40E_PHY_TYPE_10GBASE_KR),
+ I40E_CAP_PHY_TYPE_40GBASE_KR4 = BIT(I40E_PHY_TYPE_40GBASE_KR4),
+ I40E_CAP_PHY_TYPE_XAUI = BIT(I40E_PHY_TYPE_XAUI),
+ I40E_CAP_PHY_TYPE_XFI = BIT(I40E_PHY_TYPE_XFI),
+ I40E_CAP_PHY_TYPE_SFI = BIT(I40E_PHY_TYPE_SFI),
+ I40E_CAP_PHY_TYPE_XLAUI = BIT(I40E_PHY_TYPE_XLAUI),
+ I40E_CAP_PHY_TYPE_XLPPI = BIT(I40E_PHY_TYPE_XLPPI),
+ I40E_CAP_PHY_TYPE_40GBASE_CR4_CU = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU),
+ I40E_CAP_PHY_TYPE_10GBASE_CR1_CU = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU),
+ I40E_CAP_PHY_TYPE_10GBASE_AOC = BIT(I40E_PHY_TYPE_10GBASE_AOC),
+ I40E_CAP_PHY_TYPE_40GBASE_AOC = BIT(I40E_PHY_TYPE_40GBASE_AOC),
+ I40E_CAP_PHY_TYPE_100BASE_TX = BIT(I40E_PHY_TYPE_100BASE_TX),
+ I40E_CAP_PHY_TYPE_1000BASE_T = BIT(I40E_PHY_TYPE_1000BASE_T),
+ I40E_CAP_PHY_TYPE_10GBASE_T = BIT(I40E_PHY_TYPE_10GBASE_T),
+ I40E_CAP_PHY_TYPE_10GBASE_SR = BIT(I40E_PHY_TYPE_10GBASE_SR),
+ I40E_CAP_PHY_TYPE_10GBASE_LR = BIT(I40E_PHY_TYPE_10GBASE_LR),
+ I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU),
+ I40E_CAP_PHY_TYPE_10GBASE_CR1 = BIT(I40E_PHY_TYPE_10GBASE_CR1),
+ I40E_CAP_PHY_TYPE_40GBASE_CR4 = BIT(I40E_PHY_TYPE_40GBASE_CR4),
+ I40E_CAP_PHY_TYPE_40GBASE_SR4 = BIT(I40E_PHY_TYPE_40GBASE_SR4),
+ I40E_CAP_PHY_TYPE_40GBASE_LR4 = BIT(I40E_PHY_TYPE_40GBASE_LR4),
+ I40E_CAP_PHY_TYPE_1000BASE_SX = BIT(I40E_PHY_TYPE_1000BASE_SX),
+ I40E_CAP_PHY_TYPE_1000BASE_LX = BIT(I40E_PHY_TYPE_1000BASE_LX),
+ I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL =
+ BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL),
+ I40E_CAP_PHY_TYPE_20GBASE_KR2 = BIT(I40E_PHY_TYPE_20GBASE_KR2)
};
struct i40e_phy_info {
struct i40e_link_status link_info;
struct i40e_link_status link_info_old;
- u32 autoneg_advertised;
- u32 phy_id;
- u32 module_type;
bool get_link_info;
enum i40e_media_type media_type;
+ /* all the phy types the NVM is capable of */
+ enum i40e_aq_capabilities_phy_type phy_types;
};
#define I40E_HW_CAP_MAX_GPIO 30
@@ -286,6 +313,7 @@ struct i40e_nvm_info {
bool blank_nvm_mode; /* is NVM empty (no FW present)*/
u16 version; /* NVM package version */
u32 eetrack; /* NVM data version */
+ u32 oem_ver; /* OEM version info */
};
/* definitions used in NVM update support */
@@ -304,12 +332,17 @@ enum i40e_nvmupd_cmd {
I40E_NVMUPD_CSUM_CON,
I40E_NVMUPD_CSUM_SA,
I40E_NVMUPD_CSUM_LCB,
+ I40E_NVMUPD_STATUS,
+ I40E_NVMUPD_EXEC_AQ,
+ I40E_NVMUPD_GET_AQ_RESULT,
};
enum i40e_nvmupd_state {
I40E_NVMUPD_STATE_INIT,
I40E_NVMUPD_STATE_READING,
- I40E_NVMUPD_STATE_WRITING
+ I40E_NVMUPD_STATE_WRITING,
+ I40E_NVMUPD_STATE_INIT_WAIT,
+ I40E_NVMUPD_STATE_WRITE_WAIT,
};
/* nvm_access definition and its masks/shifts need to be accessible to
@@ -328,6 +361,7 @@ enum i40e_nvmupd_state {
#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
#define I40E_NVM_ERA 0x4
#define I40E_NVM_CSUM 0x8
+#define I40E_NVM_EXEC 0xf
#define I40E_NVM_ADAPT_SHIFT 16
#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT)
@@ -486,6 +520,8 @@ struct i40e_hw {
/* state of nvm update process */
enum i40e_nvmupd_state nvmupd_state;
+ struct i40e_aq_desc nvm_wb_desc;
+ struct i40e_virt_mem nvm_buff;
/* HMC info */
struct i40e_hmc_info hmc; /* HMC info struct */
@@ -494,8 +530,9 @@ struct i40e_hw {
u16 dcbx_status;
/* DCBX info */
- struct i40e_dcbx_config local_dcbx_config;
- struct i40e_dcbx_config remote_dcbx_config;
+ struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */
+ struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
+ struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
/* debug mask */
u32 debug_mask;
@@ -1018,8 +1055,8 @@ enum i40e_filter_program_desc_fd_status {
};
#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
-#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \
- BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
@@ -1162,6 +1199,7 @@ struct i40e_hw_port_stats {
/* Checksum and Shadow RAM pointers */
#define I40E_SR_NVM_CONTROL_WORD 0x00
#define I40E_SR_EMP_MODULE_PTR 0x0F
+#define I40E_NVM_OEM_VER_OFF 0x83
#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18
#define I40E_SR_NVM_WAKE_ON_LAN 0x19
#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index e6db20e8a395..9f7b279b9d9c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -81,7 +81,6 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_FCOE = 16,
I40E_VIRTCHNL_OP_EVENT = 17,
- I40E_VIRTCHNL_OP_CONFIG_RSS = 18,
};
/* Virtual channel message descriptor. This overlays the admin queue
@@ -151,6 +150,7 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
+#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
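The new I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR bit rides in the same offload-flags word as the existing capability bits: the VF asks for it when requesting resources and only treats the feature as usable if the PF echoes the bit back (both sides of that exchange appear later in this patch). A small standalone sketch of that request/grant handshake on a plain bitmask, with a hypothetical PF response:

#include <stdio.h>
#include <stdint.h>

/* Illustrative copies of a few offload bits from the header above. */
#define OFFLOAD_RSS_AQ     0x00000008u
#define OFFLOAD_RSS_REG    0x00000010u
#define OFFLOAD_WB_ON_ITR  0x00000020u
#define OFFLOAD_VLAN       0x00010000u

int main(void)
{
	/* What the VF asks for ... */
	uint32_t requested = OFFLOAD_RSS_AQ | OFFLOAD_RSS_REG |
			     OFFLOAD_VLAN | OFFLOAD_WB_ON_ITR;
	/* ... and what a hypothetical PF grants back. */
	uint32_t granted = OFFLOAD_RSS_REG | OFFLOAD_VLAN | OFFLOAD_WB_ON_ITR;

	uint32_t usable = requested & granted;

	if (usable & OFFLOAD_WB_ON_ITR)
		printf("write-back on ITR negotiated\n");
	if (!(usable & OFFLOAD_RSS_AQ))
		printf("fall back to register-based RSS\n");
	return 0;
}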
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 3817cbbf45e6..22fc3d49c4b9 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -48,10 +48,6 @@
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
#define PFX "i40evf: "
-#define DPRINTK(nlevel, klevel, fmt, args...) \
- ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
- printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
- __func__ , ## args)))
/* dummy struct to make common code less painful */
struct i40e_vsi {
@@ -70,6 +66,7 @@ struct i40e_vsi {
*/
u16 rx_itr_setting;
u16 tx_itr_setting;
+ u16 qs_handle;
};
/* How many Rx Buffers do we bundle into one write to the hardware ? */
@@ -90,7 +87,7 @@ struct i40e_vsi {
#define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
#define I40EVF_MAX_AQ_BUF_SIZE 4096
#define I40EVF_AQ_LEN 32
-#define I40EVF_AQ_MAX_ERR 10 /* times to try before resetting AQ */
+#define I40EVF_AQ_MAX_ERR 20 /* times to try before resetting AQ */
#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
@@ -115,6 +112,8 @@ struct i40e_q_vector {
struct i40e_ring_container tx;
u32 ring_mask;
u8 num_ringpairs; /* total number of ring pairs in vector */
+#define ITR_COUNTDOWN_START 100
+ u8 itr_countdown; /* when 0 or 1 update ITR */
int v_idx; /* vector index in list */
char name[IFNAMSIZ + 9];
bool arm_wb_state;
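The itr_countdown field added above gates how often the interrupt throttle rate may be rewritten: the vector starts at ITR_COUNTDOWN_START and, per the comment, only applies a new ITR value once the countdown has reached 0 or 1. A standalone sketch of that gating pattern, assuming the countdown is reset after each accepted update (the exact reset policy is not shown in this hunk):

#include <stdio.h>

#define ITR_COUNTDOWN_START 100

struct q_vector {
	unsigned int itr;            /* currently programmed throttle value */
	unsigned char itr_countdown; /* when 0 or 1, allow an ITR rewrite   */
};

/* Hypothetical helper: only push a new ITR value once the countdown has
 * expired; otherwise keep counting down and leave the hardware alone.
 */
static void maybe_update_itr(struct q_vector *q, unsigned int new_itr)
{
	if (q->itr_countdown > 1) {
		q->itr_countdown--;
		return;
	}
	q->itr = new_itr;                    /* would be a register write */
	q->itr_countdown = ITR_COUNTDOWN_START;
}

int main(void)
{
	struct q_vector q = { .itr = 336, .itr_countdown = ITR_COUNTDOWN_START };
	int i;

	for (i = 0; i < 105; i++)
		maybe_update_itr(&q, 400);
	printf("itr now %u, countdown %u\n", q.itr, q.itr_countdown);
	return 0;
}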
@@ -214,7 +213,6 @@ struct i40evf_adapter {
#define I40EVF_FLAG_RX_1BUF_CAPABLE BIT(1)
#define I40EVF_FLAG_RX_PS_CAPABLE BIT(2)
#define I40EVF_FLAG_RX_PS_ENABLED BIT(3)
-#define I40EVF_FLAG_IN_NETPOLL BIT(4)
#define I40EVF_FLAG_IMIR_ENABLED BIT(5)
#define I40EVF_FLAG_MQ_CAPABLE BIT(6)
#define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7)
@@ -223,10 +221,10 @@ struct i40evf_adapter {
#define I40EVF_FLAG_RESET_NEEDED BIT(10)
#define I40EVF_FLAG_WB_ON_ITR_CAPABLE BIT(11)
#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(12)
+#define I40EVF_FLAG_ADDR_SET_BY_PF BIT(13)
/* duplicates for common code */
#define I40E_FLAG_FDIR_ATR_ENABLED 0
#define I40E_FLAG_DCB_ENABLED 0
-#define I40E_FLAG_IN_NETPOLL I40EVF_FLAG_IN_NETPOLL
#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED
#define I40E_FLAG_WB_ON_ITR_CAPABLE I40EVF_FLAG_WB_ON_ITR_CAPABLE
#define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index e85849b9ff98..d962164dfb0f 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -34,7 +34,7 @@ char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
"Intel(R) XL710/X710 Virtual Function Network Driver";
-#define DRV_VERSION "1.3.5"
+#define DRV_VERSION "1.3.33"
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
"Copyright (c) 2013 - 2015 Intel Corporation.";
@@ -282,6 +282,7 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
/**
* i40evf_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
+ * @flush: boolean value whether to run rd32()
**/
void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush)
{
@@ -305,15 +306,14 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data)
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40e_hw *hw = &adapter->hw;
u32 val;
- u32 ena_mask;
/* handle non-queue interrupts */
- val = rd32(hw, I40E_VFINT_ICR01);
- ena_mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
+ rd32(hw, I40E_VFINT_ICR01);
+ rd32(hw, I40E_VFINT_ICR0_ENA1);
- val = rd32(hw, I40E_VFINT_DYN_CTL01);
- val = val | I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
+ val = rd32(hw, I40E_VFINT_DYN_CTL01) |
+ I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
wr32(hw, I40E_VFINT_DYN_CTL01, val);
/* schedule work on the private workqueue */
@@ -334,7 +334,7 @@ static irqreturn_t i40evf_msix_clean_rings(int irq, void *data)
if (!q_vector->tx.ring && !q_vector->rx.ring)
return IRQ_HANDLED;
- napi_schedule(&q_vector->napi);
+ napi_schedule_irqoff(&q_vector->napi);
return IRQ_HANDLED;
}
@@ -357,6 +357,7 @@ i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
q_vector->rx.ring = rx_ring;
q_vector->rx.count++;
q_vector->rx.latency_range = I40E_LOW_LATENCY;
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
}
/**
@@ -377,6 +378,7 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
q_vector->tx.ring = tx_ring;
q_vector->tx.count++;
q_vector->tx.latency_range = I40E_LOW_LATENCY;
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
q_vector->num_ringpairs++;
q_vector->ring_mask |= BIT(t_idx);
}
@@ -444,6 +446,29 @@ out:
return err;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * i40evf_netpoll - A Polling 'interrupt' handler
+ * @netdev: network interface device structure
+ *
+ * This is used by netconsole to send skbs without having to re-enable
+ * interrupts. It's not called while the normal interrupt routine is executing.
+ **/
+static void i40evf_netpoll(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+ int i;
+
+ /* if interface is down do nothing */
+ if (test_bit(__I40E_DOWN, &adapter->vsi.state))
+ return;
+
+ for (i = 0; i < q_vectors; i++)
+ i40evf_msix_clean_rings(0, adapter->q_vector[i]);
+}
+
+#endif
/**
* i40evf_request_traffic_irqs - Initialize MSI-X interrupts
* @adapter: board private structure
@@ -489,8 +514,7 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
q_vector);
if (err) {
dev_info(&adapter->pdev->dev,
- "%s: request_irq failed, error: %d\n",
- __func__, err);
+ "Request_irq failed, error: %d\n", err);
goto free_queue_irqs;
}
/* assign the mask for this irq */
@@ -731,6 +755,8 @@ static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
+ if (!VLAN_ALLOWED(adapter))
+ return -EIO;
if (i40evf_add_vlan(adapter, vid) == NULL)
return -ENOMEM;
return 0;
@@ -746,8 +772,11 @@ static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
- i40evf_del_vlan(adapter, vid);
- return 0;
+ if (VLAN_ALLOWED(adapter)) {
+ i40evf_del_vlan(adapter, vid);
+ return 0;
+ }
+ return -EIO;
}
/**
@@ -837,6 +866,15 @@ static int i40evf_set_mac(struct net_device *netdev, void *p)
if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
return 0;
+ if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF)
+ return -EPERM;
+
+ f = i40evf_find_filter(adapter, hw->mac.addr);
+ if (f) {
+ f->remove = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ }
+
f = i40evf_add_filter(adapter, addr->sa_data);
if (f) {
ether_addr_copy(hw->mac.addr, addr->sa_data);
@@ -856,6 +894,7 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
struct i40evf_mac_filter *f, *ftmp;
struct netdev_hw_addr *uca;
struct netdev_hw_addr *mca;
+ struct netdev_hw_addr *ha;
int count = 50;
/* add addr if not already in the filter list */
@@ -877,29 +916,27 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
}
/* remove filter if not in netdev list */
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
- bool found = false;
-
- if (is_multicast_ether_addr(f->macaddr)) {
- netdev_for_each_mc_addr(mca, netdev) {
- if (ether_addr_equal(mca->addr, f->macaddr)) {
- found = true;
- break;
- }
- }
- } else {
- netdev_for_each_uc_addr(uca, netdev) {
- if (ether_addr_equal(uca->addr, f->macaddr)) {
- found = true;
- break;
- }
- }
- if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr))
- found = true;
- }
- if (!found) {
- f->remove = true;
- adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
- }
+ netdev_for_each_mc_addr(mca, netdev)
+ if (ether_addr_equal(mca->addr, f->macaddr))
+ goto bottom_of_search_loop;
+
+ netdev_for_each_uc_addr(uca, netdev)
+ if (ether_addr_equal(uca->addr, f->macaddr))
+ goto bottom_of_search_loop;
+
+ for_each_dev_addr(netdev, ha)
+ if (ether_addr_equal(ha->addr, f->macaddr))
+ goto bottom_of_search_loop;
+
+ if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr))
+ goto bottom_of_search_loop;
+
+ /* f->macaddr wasn't found in uc, mc, or ha list so delete it */
+ f->remove = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+
+bottom_of_search_loop:
+ continue;
}
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
}
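The rewritten pruning loop above replaces the found flag with a jump to a label at the bottom of the loop body: a filter that matches any of the unicast, multicast, or device address lists skips straight to the next iteration, and only filters that fall through every scan are marked for deletion. A standalone illustration of that goto-to-bottom-of-loop membership test, using plain string lists instead of the netdev address lists:

#include <stdio.h>
#include <string.h>

struct filter { const char *addr; int remove; };

static int in_list(const char *addr, const char **list, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (!strcmp(addr, list[i]))
			return 1;
	return 0;
}

int main(void)
{
	const char *uc[] = { "aa:bb:cc:dd:ee:01" };
	const char *mc[] = { "01:00:5e:00:00:01" };
	struct filter filters[] = {
		{ "aa:bb:cc:dd:ee:01", 0 },
		{ "aa:bb:cc:dd:ee:99", 0 },   /* stale: in no list */
	};
	unsigned int i;

	for (i = 0; i < sizeof(filters) / sizeof(filters[0]); i++) {
		if (in_list(filters[i].addr, uc, 1))
			goto bottom_of_search_loop;
		if (in_list(filters[i].addr, mc, 1))
			goto bottom_of_search_loop;

		/* not found anywhere, so schedule it for deletion */
		filters[i].remove = 1;

bottom_of_search_loop:
		continue;
	}

	for (i = 0; i < sizeof(filters) / sizeof(filters[0]); i++)
		printf("%s remove=%d\n", filters[i].addr, filters[i].remove);
	return 0;
}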
@@ -1111,6 +1148,8 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
tx_ring->netdev = adapter->netdev;
tx_ring->dev = &adapter->pdev->dev;
tx_ring->count = adapter->tx_desc_count;
+ if (adapter->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
+ tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR;
adapter->tx_rings[i] = tx_ring;
rx_ring = &tx_ring[1];
@@ -1165,7 +1204,7 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
for (vector = 0; vector < v_budget; vector++)
adapter->msix_entries[vector].entry = vector;
- i40evf_acquire_msix_vectors(adapter, v_budget);
+ err = i40evf_acquire_msix_vectors(adapter, v_budget);
out:
adapter->netdev->real_num_tx_queues = pairs;
@@ -1421,16 +1460,16 @@ static void i40evf_watchdog_task(struct work_struct *work)
struct i40evf_adapter,
watchdog_task);
struct i40e_hw *hw = &adapter->hw;
- uint32_t rstat_val;
+ u32 reg_val;
if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
goto restart_watchdog;
if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
- rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
- I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if ((rstat_val == I40E_VFR_VFACTIVE) ||
- (rstat_val == I40E_VFR_COMPLETED)) {
+ reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
+ I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ if ((reg_val == I40E_VFR_VFACTIVE) ||
+ (reg_val == I40E_VFR_COMPLETED)) {
/* A chance for redemption! */
dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
adapter->state = __I40EVF_STARTUP;
@@ -1455,11 +1494,8 @@ static void i40evf_watchdog_task(struct work_struct *work)
goto watchdog_done;
/* check for reset */
- rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
- I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) &&
- (rstat_val != I40E_VFR_VFACTIVE) &&
- (rstat_val != I40E_VFR_COMPLETED)) {
+ reg_val = rd32(hw, I40E_VF_ARQLEN1) & I40E_VF_ARQLEN1_ARQENABLE_MASK;
+ if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && !reg_val) {
adapter->state = __I40EVF_RESETTING;
adapter->flags |= I40EVF_FLAG_RESET_PENDING;
dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
@@ -1573,8 +1609,9 @@ static void i40evf_reset_task(struct work_struct *work)
reset_task);
struct net_device *netdev = adapter->netdev;
struct i40e_hw *hw = &adapter->hw;
+ struct i40evf_vlan_filter *vlf;
struct i40evf_mac_filter *f;
- uint32_t rstat_val;
+ u32 reg_val;
int i = 0, err;
while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
@@ -1595,12 +1632,11 @@ static void i40evf_reset_task(struct work_struct *work)
/* poll until we see the reset actually happen */
for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
- rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
- I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if ((rstat_val != I40E_VFR_VFACTIVE) &&
- (rstat_val != I40E_VFR_COMPLETED))
+ reg_val = rd32(hw, I40E_VF_ARQLEN1) &
+ I40E_VF_ARQLEN1_ARQENABLE_MASK;
+ if (!reg_val)
break;
- usleep_range(500, 1000);
+ usleep_range(5000, 10000);
}
if (i == I40EVF_RESET_WAIT_COUNT) {
dev_info(&adapter->pdev->dev, "Never saw reset\n");
@@ -1609,21 +1645,21 @@ static void i40evf_reset_task(struct work_struct *work)
/* wait until the reset is complete and the PF is responding to us */
for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
- rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
- I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if (rstat_val == I40E_VFR_VFACTIVE)
+ reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
+ I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ if (reg_val == I40E_VFR_VFACTIVE)
break;
msleep(I40EVF_RESET_WAIT_MS);
}
/* extra wait to make sure minimum wait is met */
msleep(I40EVF_RESET_WAIT_MS);
if (i == I40EVF_RESET_WAIT_COUNT) {
- struct i40evf_mac_filter *f, *ftmp;
+ struct i40evf_mac_filter *ftmp;
struct i40evf_vlan_filter *fv, *fvtmp;
/* reset never finished */
dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
- rstat_val);
+ reg_val);
adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
if (netif_running(adapter->netdev)) {
@@ -1697,8 +1733,8 @@ continue_reset:
f->add = true;
}
/* re-add all VLAN filters */
- list_for_each_entry(f, &adapter->vlan_filter_list, list) {
- f->add = true;
+ list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
+ vlf->add = true;
}
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
@@ -1853,8 +1889,7 @@ static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
if (!err)
continue;
dev_err(&adapter->pdev->dev,
- "%s: Allocation for Tx Queue %u failed\n",
- __func__, i);
+ "Allocation for Tx Queue %u failed\n", i);
break;
}
@@ -1881,8 +1916,7 @@ static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
if (!err)
continue;
dev_err(&adapter->pdev->dev,
- "%s: Allocation for Rx Queue %u failed\n",
- __func__, i);
+ "Allocation for Rx Queue %u failed\n", i);
break;
}
return err;
@@ -2041,6 +2075,9 @@ static const struct net_device_ops i40evf_netdev_ops = {
.ndo_tx_timeout = i40evf_tx_timeout,
.ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = i40evf_netpoll,
+#endif
};
/**
@@ -2089,7 +2126,10 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
if (adapter->vf_res->vf_offload_flags
& I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
- netdev->vlan_features = netdev->features;
+ netdev->vlan_features = netdev->features &
+ ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER);
netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
@@ -2118,6 +2158,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
adapter->vsi.netdev = adapter->netdev;
+ adapter->vsi.qs_handle = adapter->vsi_res->qset_handle;
return 0;
}
@@ -2246,10 +2287,13 @@ static void i40evf_init_task(struct work_struct *work)
if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
adapter->hw.mac.addr);
- random_ether_addr(adapter->hw.mac.addr);
+ eth_hw_addr_random(netdev);
+ ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
+ } else {
+ adapter->flags |= I40EVF_FLAG_ADDR_SET_BY_PF;
+ ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+ ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
}
- ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
- ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
init_timer(&adapter->watchdog_timer);
adapter->watchdog_timer.function = &i40evf_watchdog_timer;
@@ -2265,6 +2309,9 @@ static void i40evf_init_task(struct work_struct *work)
if (err)
goto err_sw_init;
i40evf_map_rings_to_vectors(adapter);
+ if (adapter->vf_res->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+ adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE;
if (!RSS_AQ(adapter))
i40evf_configure_rss(adapter);
err = i40evf_request_misc_irq(adapter);
@@ -2300,8 +2347,7 @@ static void i40evf_init_task(struct work_struct *work)
}
return;
restart:
- schedule_delayed_work(&adapter->init_task,
- msecs_to_jiffies(50));
+ schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30));
return;
err_register:
@@ -2314,11 +2360,14 @@ err_alloc:
err:
/* Things went into the weeds, so try again later */
if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
- dev_err(&pdev->dev, "Failed to communicate with PF; giving up\n");
+ dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n");
adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
- return; /* do not reschedule */
+ i40evf_shutdown_adminq(hw);
+ adapter->state = __I40EVF_STARTUP;
+ schedule_delayed_work(&adapter->init_task, HZ * 5);
+ return;
}
- schedule_delayed_work(&adapter->init_task, HZ * 3);
+ schedule_delayed_work(&adapter->init_task, HZ);
}
/**
@@ -2434,7 +2483,8 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task);
- schedule_delayed_work(&adapter->init_task, 10);
+ schedule_delayed_work(&adapter->init_task,
+ msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
return 0;
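The probe path now staggers the first run of the init task by the PCI function number (devfn & 0x07), presumably so sibling VFs on one device do not all open their admin queues in the same instant. A trivial standalone sketch of the resulting delays; HZ and the rounding in the jiffies helper are assumptions for illustration only:

#include <stdio.h>

#define HZ 250 /* assumed tick rate, purely for illustration */

/* Rough stand-in for msecs_to_jiffies(): convert, rounding up. */
static unsigned long msecs_to_jiffies_sketch(unsigned int ms)
{
	return ((unsigned long)ms * HZ + 999) / 1000;
}

int main(void)
{
	unsigned int devfn;

	/* devfn packs device (high bits) and function (low 3 bits). */
	for (devfn = 0; devfn < 8; devfn++)
		printf("VF function %u -> initial delay %lu jiffies (%u ms)\n",
		       devfn & 0x07,
		       msecs_to_jiffies_sketch(5 * (devfn & 0x07)),
		       5 * (devfn & 0x07));
	return 0;
}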
@@ -2510,6 +2560,7 @@ static int i40evf_resume(struct pci_dev *pdev)
rtnl_lock();
err = i40evf_set_interrupt_capability(adapter);
if (err) {
+ rtnl_unlock();
dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
return err;
}
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index d4eb1a5e7d42..32e620e1eb5c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -156,7 +156,8 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+ I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
+ I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
if (PF_IS_V11(adapter))
@@ -234,8 +235,8 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
- dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
- __func__, adapter->current_op);
+ dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
+ adapter->current_op);
return;
}
adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
@@ -288,8 +289,8 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter)
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
- dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
- __func__, adapter->current_op);
+ dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
+ adapter->current_op);
return;
}
adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
@@ -313,8 +314,8 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter)
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
- dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
- __func__, adapter->current_op);
+ dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
+ adapter->current_op);
return;
}
adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
@@ -341,8 +342,8 @@ void i40evf_map_queues(struct i40evf_adapter *adapter)
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
- dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
- __func__, adapter->current_op);
+ dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
+ adapter->current_op);
return;
}
adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
@@ -393,8 +394,8 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
- dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
- __func__, adapter->current_op);
+ dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
+ adapter->current_op);
return;
}
list_for_each_entry(f, &adapter->mac_filter_list, list) {
@@ -410,8 +411,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
len = sizeof(struct i40e_virtchnl_ether_addr_list) +
(count * sizeof(struct i40e_virtchnl_ether_addr));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
- __func__);
+ dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_ether_addr_list)) /
sizeof(struct i40e_virtchnl_ether_addr);
@@ -453,8 +453,8 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
- dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
- __func__, adapter->current_op);
+ dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
+ adapter->current_op);
return;
}
list_for_each_entry(f, &adapter->mac_filter_list, list) {
@@ -470,8 +470,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
len = sizeof(struct i40e_virtchnl_ether_addr_list) +
(count * sizeof(struct i40e_virtchnl_ether_addr));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
- __func__);
+ dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_ether_addr_list)) /
sizeof(struct i40e_virtchnl_ether_addr);
@@ -513,8 +512,8 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
- dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
- __func__, adapter->current_op);
+ dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
+ adapter->current_op);
return;
}
@@ -531,8 +530,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
(count * sizeof(u16));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
- __func__);
+ dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_vlan_filter_list)) /
sizeof(u16);
@@ -572,8 +570,8 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
- dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
- __func__, adapter->current_op);
+ dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n",
+ adapter->current_op);
return;
}
@@ -590,8 +588,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
(count * sizeof(u16));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
- __func__);
+ dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_vlan_filter_list)) /
sizeof(u16);
@@ -629,8 +626,8 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
- dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
- __func__, adapter->current_op);
+ dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
+ adapter->current_op);
return;
}
adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
@@ -720,17 +717,16 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
}
break;
default:
- dev_err(&adapter->pdev->dev,
- "%s: Unknown event %d from pf\n",
- __func__, vpe->event);
+ dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n",
+ vpe->event);
break;
}
return;
}
if (v_retval) {
- dev_err(&adapter->pdev->dev, "%s: PF returned error %d (%s) to our request %d\n",
- __func__, v_retval,
- i40evf_stat_str(&adapter->hw, v_retval), v_opcode);
+ dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
+ v_retval, i40evf_stat_str(&adapter->hw, v_retval),
+ v_opcode);
}
switch (v_opcode) {
case I40E_VIRTCHNL_OP_GET_STATS: {
@@ -756,6 +752,8 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
sizeof(struct i40e_virtchnl_vsi_resource);
memcpy(adapter->vf_res, msg, min(msglen, len));
i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
+ /* restore current mac address */
+ ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
i40evf_process_config(adapter);
}
break;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 212d668dabb3..1a2f1cc44b28 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -444,8 +444,8 @@ struct igb_adapter {
struct ptp_pin_desc sdp_config[IGB_N_SDP];
struct {
- struct timespec start;
- struct timespec period;
+ struct timespec64 start;
+ struct timespec64 period;
} perout[IGB_N_PEROUT];
char fw_version[32];
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 74262768b09b..2529bc625de4 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -842,10 +842,6 @@ static void igb_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->n_stats = IGB_STATS_LEN;
- drvinfo->testinfo_len = IGB_TEST_LEN;
- drvinfo->regdump_len = igb_get_regs_len(netdev);
- drvinfo->eedump_len = igb_get_eeprom_len(netdev);
}
static void igb_get_ringparam(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index e174fbbdba40..ea7b09887245 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -151,7 +151,7 @@ static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *);
-static bool igb_clean_rx_irq(struct igb_q_vector *, int);
+static int igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
@@ -2986,6 +2986,9 @@ static int igb_sw_init(struct igb_adapter *adapter)
}
#endif /* CONFIG_PCI_IOV */
+ /* Assume MSI-X interrupts, will be checked during IRQ allocation */
+ adapter->flags |= IGB_FLAG_HAS_MSIX;
+
igb_probe_vfs(adapter);
igb_init_queue_configuration(adapter);
@@ -5389,7 +5392,7 @@ static void igb_tsync_interrupt(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct ptp_clock_event event;
- struct timespec ts;
+ struct timespec64 ts;
u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR);
if (tsicr & TSINTR_SYS_WRAP) {
@@ -5409,10 +5412,11 @@ static void igb_tsync_interrupt(struct igb_adapter *adapter)
if (tsicr & TSINTR_TT0) {
spin_lock(&adapter->tmreg_lock);
- ts = timespec_add(adapter->perout[0].start,
- adapter->perout[0].period);
+ ts = timespec64_add(adapter->perout[0].start,
+ adapter->perout[0].period);
+ /* u32 conversion of tv_sec is safe until y2106 */
wr32(E1000_TRGTTIML0, ts.tv_nsec);
- wr32(E1000_TRGTTIMH0, ts.tv_sec);
+ wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec);
tsauxc = rd32(E1000_TSAUXC);
tsauxc |= TSAUXC_EN_TT0;
wr32(E1000_TSAUXC, tsauxc);
@@ -5423,10 +5427,10 @@ static void igb_tsync_interrupt(struct igb_adapter *adapter)
if (tsicr & TSINTR_TT1) {
spin_lock(&adapter->tmreg_lock);
- ts = timespec_add(adapter->perout[1].start,
- adapter->perout[1].period);
+ ts = timespec64_add(adapter->perout[1].start,
+ adapter->perout[1].period);
wr32(E1000_TRGTTIML1, ts.tv_nsec);
- wr32(E1000_TRGTTIMH1, ts.tv_sec);
+ wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec);
tsauxc = rd32(E1000_TSAUXC);
tsauxc |= TSAUXC_EN_TT1;
wr32(E1000_TSAUXC, tsauxc);
@@ -6360,6 +6364,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
struct igb_q_vector,
napi);
bool clean_complete = true;
+ int work_done = 0;
#ifdef CONFIG_IGB_DCA
if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
@@ -6368,15 +6373,19 @@ static int igb_poll(struct napi_struct *napi, int budget)
if (q_vector->tx.ring)
clean_complete = igb_clean_tx_irq(q_vector);
- if (q_vector->rx.ring)
- clean_complete &= igb_clean_rx_irq(q_vector, budget);
+ if (q_vector->rx.ring) {
+ int cleaned = igb_clean_rx_irq(q_vector, budget);
+
+ work_done += cleaned;
+ clean_complete &= (cleaned < budget);
+ }
/* If all work not completed, return budget and keep polling */
if (!clean_complete)
return budget;
/* If not enough Rx work done, exit the polling mode */
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
igb_ring_irq_enable(q_vector);
return 0;
@@ -6900,7 +6909,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
-static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
+static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
{
struct igb_ring *rx_ring = q_vector->rx.ring;
struct sk_buff *skb = rx_ring->skb;
@@ -6974,7 +6983,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
if (cleaned_count)
igb_alloc_rx_buffers(rx_ring, cleaned_count);
- return total_packets < budget;
+ return total_packets;
}
static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
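The igb_poll() changes above switch the Rx cleanup routine from returning a bool to returning the number of packets it processed, so the poll loop can report its real work count to napi_complete_done() and only leave polling mode when every ring did strictly less work than its budget. A standalone model of that accounting:

#include <stdio.h>

/* Standalone model of the budget accounting: cleanup returns how many
 * packets it handled; the caller keeps polling while a full budget was
 * consumed and "completes" with the final work count otherwise.
 */
static int clean_rx(int pending, int budget, int *left)
{
	int done = pending < budget ? pending : budget;

	*left = pending - done;
	return done;
}

int main(void)
{
	int budget = 64, pending = 200, work_done, left, polls = 0;

	do {
		work_done = clean_rx(pending, budget, &left);
		pending = left;
		polls++;
		/* work_done == budget means "return budget, keep polling";
		 * work_done < budget maps to napi_complete_done(napi, work_done).
		 */
	} while (work_done == budget);

	printf("drained in %d polls, last poll did %d packets\n", polls, work_done);
	return 0;
}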
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 5982f28d521a..c44df87c38de 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -143,7 +143,7 @@ static void igb_ptp_write_i210(struct igb_adapter *adapter,
* sub-nanosecond resolution.
*/
wr32(E1000_SYSTIML, ts->tv_nsec);
- wr32(E1000_SYSTIMH, ts->tv_sec);
+ wr32(E1000_SYSTIMH, (u32)ts->tv_sec);
}
/**
@@ -479,7 +479,7 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
struct e1000_hw *hw = &igb->hw;
u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout;
unsigned long flags;
- struct timespec ts;
+ struct timespec64 ts;
int use_freq = 0, pin = -1;
s64 ns;
@@ -523,14 +523,14 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
}
ts.tv_sec = rq->perout.period.sec;
ts.tv_nsec = rq->perout.period.nsec;
- ns = timespec_to_ns(&ts);
+ ns = timespec64_to_ns(&ts);
ns = ns >> 1;
if (on && ns <= 70000000LL) {
if (ns < 8LL)
return -EINVAL;
use_freq = 1;
}
- ts = ns_to_timespec(ns);
+ ts = ns_to_timespec64(ns);
if (rq->perout.index == 1) {
if (use_freq) {
tsauxc_mask = TSAUXC_EN_CLK1 | TSAUXC_ST1;
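The periodic-output code above is converted to timespec64 and, as before, programs the target-time registers with half of the requested period (ns >> 1), which reads as the hardware toggling the pin each time a target time fires. A standalone sketch of the ns round-trip with minimal stand-ins for timespec64_to_ns()/ns_to_timespec64(); the toggle interpretation is an assumption, not stated in the hunk:

#include <stdio.h>
#include <stdint.h>

struct ts64 { int64_t tv_sec; long tv_nsec; };

/* Minimal stand-ins for the timespec64 <-> ns helpers. */
static int64_t ts64_to_ns(const struct ts64 *ts)
{
	return ts->tv_sec * 1000000000LL + ts->tv_nsec;
}

static struct ts64 ns_to_ts64(int64_t ns)
{
	struct ts64 ts = { ns / 1000000000LL, (long)(ns % 1000000000LL) };
	return ts;
}

int main(void)
{
	struct ts64 period = { 0, 500000000 };   /* 500 ms requested period */
	int64_t half = ts64_to_ns(&period) >> 1; /* program the half cycle  */
	struct ts64 prog = ns_to_ts64(half);

	printf("half cycle: %lld s %ld ns\n",
	       (long long)prog.tv_sec, prog.tv_nsec);
	return 0;
}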
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index c6996feb1cb4..b74ce53d7b52 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -196,8 +196,6 @@ static void igbvf_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->regdump_len = igbvf_get_regs_len(netdev);
- drvinfo->eedump_len = igbvf_get_eeprom_len(netdev);
}
static void igbvf_get_ringparam(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 686fa7184179..297af801f051 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1211,7 +1211,7 @@ static int igbvf_poll(struct napi_struct *napi, int budget)
/* If not enough Rx work done, exit the polling mode */
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
if (adapter->requested_itr & 3)
igbvf_set_itr(adapter);
@@ -2615,6 +2615,7 @@ static const struct net_device_ops igbvf_netdev_ops = {
.ndo_poll_controller = igbvf_netpoll,
#endif
.ndo_set_features = igbvf_set_features,
+ .ndo_features_check = passthru_features_check,
};
/**
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index b311e9e710d2..d2b29b490ae0 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -479,9 +479,6 @@ ixgb_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->n_stats = IXGB_STATS_LEN;
- drvinfo->regdump_len = ixgb_get_regs_len(netdev);
- drvinfo->eedump_len = ixgb_get_eeprom_len(netdev);
}
static void
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index edf1fb913209..1d2174526a4c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -152,9 +152,17 @@ struct vf_data_storage {
u16 vlan_count;
u8 spoofchk_enabled;
bool rss_query_enabled;
+ u8 trusted;
+ int xcast_mode;
unsigned int vf_api;
};
+enum ixgbevf_xcast_modes {
+ IXGBEVF_XCAST_MODE_NONE = 0,
+ IXGBEVF_XCAST_MODE_MULTI,
+ IXGBEVF_XCAST_MODE_ALLMULTI,
+};
+
struct vf_macvlans {
struct list_head l;
int vf;
@@ -539,8 +547,7 @@ struct hwmon_buff {
#define IXGBE_MIN_RSC_ITR 24
#define IXGBE_100K_ITR 40
#define IXGBE_20K_ITR 200
-#define IXGBE_10K_ITR 400
-#define IXGBE_8K_ITR 500
+#define IXGBE_12K_ITR 336
/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */
static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
@@ -595,6 +602,7 @@ struct ixgbe_mac_addr {
/* default to trying for four seconds */
#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
+#define IXGBE_SFP_POLL_JIFFIES (2 * HZ) /* SFP poll every 2 seconds */
/* board specific private data structure */
struct ixgbe_adapter {
@@ -708,6 +716,7 @@ struct ixgbe_adapter {
u32 link_speed;
bool link_up;
+ unsigned long sfp_poll_time;
unsigned long link_check_timeout;
struct timer_list service_timer;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index dd7062fed61a..a39afcf03e2c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -44,9 +44,8 @@
static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
-static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete);
+static void
+ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *, ixgbe_link_speed);
static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
@@ -109,6 +108,9 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
if (hw->phy.multispeed_fiber) {
/* Set up dual speed SFP+ support */
mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
+ mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599;
+ mac->ops.set_rate_select_speed =
+ ixgbe_set_hard_rate_select_speed;
} else {
if ((mac->ops.get_media_type(hw) ==
ixgbe_media_type_backplane) &&
@@ -646,176 +648,32 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
}
/**
- * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
- * @hw: pointer to hardware structure
- * @speed: new link speed
- * @autoneg_wait_to_complete: true when waiting for completion is needed
+ * ixgbe_set_hard_rate_select_speed - Set module link speed
+ * @hw: pointer to hardware structure
+ * @speed: link speed to set
*
- * Set the link speed in the AUTOC register and restarts link.
- **/
-static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+ * Set module link speed via RS0/RS1 rate select pins.
+ */
+static void
+ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed)
{
- s32 status = 0;
- ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
- ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
- u32 speedcnt = 0;
u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
- u32 i = 0;
- bool link_up = false;
- bool autoneg = false;
-
- /* Mask off requested but non-supported speeds */
- status = hw->mac.ops.get_link_capabilities(hw, &link_speed,
- &autoneg);
- if (status != 0)
- return status;
-
- speed &= link_speed;
-
- /*
- * Try each speed one by one, highest priority first. We do this in
- * software because 10gb fiber doesn't support speed autonegotiation.
- */
- if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
- speedcnt++;
- highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
-
- /* If we already have link at this speed, just jump out */
- status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
- false);
- if (status != 0)
- return status;
-
- if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
- goto out;
-
- /* Set the module link speed */
- switch (hw->phy.media_type) {
- case ixgbe_media_type_fiber:
- esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- IXGBE_WRITE_FLUSH(hw);
- break;
- case ixgbe_media_type_fiber_qsfp:
- /* QSFP module automatically detects MAC link speed */
- break;
- default:
- hw_dbg(hw, "Unexpected media type.\n");
- break;
- }
-
- /* Allow module to change analog characteristics (1G->10G) */
- msleep(40);
-
- status = ixgbe_setup_mac_link_82599(hw,
- IXGBE_LINK_SPEED_10GB_FULL,
- autoneg_wait_to_complete);
- if (status != 0)
- return status;
-
- /* Flap the tx laser if it has not already been done */
- if (hw->mac.ops.flap_tx_laser)
- hw->mac.ops.flap_tx_laser(hw);
-
- /*
- * Wait for the controller to acquire link. Per IEEE 802.3ap,
- * Section 73.10.2, we may have to wait up to 500ms if KR is
- * attempted. 82599 uses the same timing for 10g SFI.
- */
- for (i = 0; i < 5; i++) {
- /* Wait for the link partner to also set speed */
- msleep(100);
-
- /* If we have link, just jump out */
- status = hw->mac.ops.check_link(hw, &link_speed,
- &link_up, false);
- if (status != 0)
- return status;
-
- if (link_up)
- goto out;
- }
- }
-
- if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
- speedcnt++;
- if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
- highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
-
- /* If we already have link at this speed, just jump out */
- status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
- false);
- if (status != 0)
- return status;
-
- if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
- goto out;
-
- /* Set the module link speed */
- switch (hw->phy.media_type) {
- case ixgbe_media_type_fiber:
- esdp_reg &= ~IXGBE_ESDP_SDP5;
- esdp_reg |= IXGBE_ESDP_SDP5_DIR;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- IXGBE_WRITE_FLUSH(hw);
- break;
- case ixgbe_media_type_fiber_qsfp:
- /* QSFP module automatically detects MAC link speed */
- break;
- default:
- hw_dbg(hw, "Unexpected media type.\n");
- break;
- }
-
- /* Allow module to change analog characteristics (10G->1G) */
- msleep(40);
-
- status = ixgbe_setup_mac_link_82599(hw,
- IXGBE_LINK_SPEED_1GB_FULL,
- autoneg_wait_to_complete);
- if (status != 0)
- return status;
-
- /* Flap the tx laser if it has not already been done */
- if (hw->mac.ops.flap_tx_laser)
- hw->mac.ops.flap_tx_laser(hw);
- /* Wait for the link partner to also set speed */
- msleep(100);
-
- /* If we have link, just jump out */
- status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
- false);
- if (status != 0)
- return status;
-
- if (link_up)
- goto out;
+ switch (speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ esdp_reg &= ~IXGBE_ESDP_SDP5;
+ esdp_reg |= IXGBE_ESDP_SDP5_DIR;
+ break;
+ default:
+ hw_dbg(hw, "Invalid fixed module speed\n");
+ return;
}
- /*
- * We didn't get link. Configure back to the highest speed we tried,
- * (if there was more than one). We call ourselves back with just the
- * single highest speed that the user requested.
- */
- if (speedcnt > 1)
- status = ixgbe_setup_mac_link_multispeed_fiber(hw,
- highest_link_speed,
- autoneg_wait_to_complete);
-
-out:
- /* Set autoneg_advertised value based on input link speed */
- hw->phy.autoneg_advertised = 0;
-
- if (speed & IXGBE_LINK_SPEED_10GB_FULL)
- hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
-
- if (speed & IXGBE_LINK_SPEED_1GB_FULL)
- hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
-
- return status;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
}
/**
@@ -1766,6 +1624,16 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+ /* also use it for SCTP */
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
+ break;
+ default:
+ break;
+ }
+
 /* store source and destination IP masks (big-endian) */
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
~input_mask->formatted.src_ip[0]);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 3f56a8080118..ce61b36b94f1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -297,13 +297,13 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
/* Setup flow control */
ret_val = ixgbe_setup_fc(hw);
- if (!ret_val)
- return 0;
+ if (ret_val)
+ return ret_val;
/* Clear adapter stopped flag */
hw->adapter_stopped = false;
- return ret_val;
+ return 0;
}
/**
@@ -2164,10 +2164,11 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
/*
* In order to prevent Tx hangs when the internal Tx
* switch is enabled we must set the high water mark
- * to the maximum FCRTH value. This allows the Tx
- * switch to function even under heavy Rx workloads.
+ * to the Rx packet buffer size - 24KB. This allows
+ * the Tx switch to function even under heavy Rx
+ * workloads.
*/
- fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
+ fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
}
IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
@@ -2476,6 +2477,9 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ if (hw->mac.type >= ixgbe_mac_X550)
+ return 0;
+
/*
* Before proceeding, make sure that the PCIe block does not have
* transactions pending.
@@ -3920,3 +3924,213 @@ bool ixgbe_mng_present(struct ixgbe_hw *hw)
fwsm &= IXGBE_FWSM_MODE_MASK;
return fwsm == IXGBE_FWSM_FW_MODE_PT;
}
+
+/**
+ * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Set the link speed in the MAC and/or PHY register and restarts link.
+ */
+s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ s32 status = 0;
+ u32 speedcnt = 0;
+ u32 i = 0;
+ bool autoneg, link_up = false;
+
+ /* Mask off requested but non-supported speeds */
+ status = hw->mac.ops.get_link_capabilities(hw, &link_speed, &autoneg);
+ if (status)
+ return status;
+
+ speed &= link_speed;
+
+ /* Try each speed one by one, highest priority first. We do this in
+ * software because 10Gb fiber doesn't support speed autonegotiation.
+ */
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+ speedcnt++;
+ highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
+
+ /* If we already have link at this speed, just jump out */
+ status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
+ false);
+ if (status)
+ return status;
+
+ if (link_speed == IXGBE_LINK_SPEED_10GB_FULL && link_up)
+ goto out;
+
+ /* Set the module link speed */
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber:
+ hw->mac.ops.set_rate_select_speed(hw,
+ IXGBE_LINK_SPEED_10GB_FULL);
+ break;
+ case ixgbe_media_type_fiber_qsfp:
+ /* QSFP module automatically detects MAC link speed */
+ break;
+ default:
+ hw_dbg(hw, "Unexpected media type\n");
+ break;
+ }
+
+ /* Allow module to change analog characteristics (1G->10G) */
+ msleep(40);
+
+ status = hw->mac.ops.setup_mac_link(hw,
+ IXGBE_LINK_SPEED_10GB_FULL,
+ autoneg_wait_to_complete);
+ if (status)
+ return status;
+
+ /* Flap the Tx laser if it has not already been done */
+ if (hw->mac.ops.flap_tx_laser)
+ hw->mac.ops.flap_tx_laser(hw);
+
+ /* Wait for the controller to acquire link. Per IEEE 802.3ap,
+ * Section 73.10.2, we may have to wait up to 500ms if KR is
+ * attempted. 82599 uses the same timing for 10g SFI.
+ */
+ for (i = 0; i < 5; i++) {
+ /* Wait for the link partner to also set speed */
+ msleep(100);
+
+ /* If we have link, just jump out */
+ status = hw->mac.ops.check_link(hw, &link_speed,
+ &link_up, false);
+ if (status)
+ return status;
+
+ if (link_up)
+ goto out;
+ }
+ }
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+ speedcnt++;
+ if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
+ highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+ /* If we already have link at this speed, just jump out */
+ status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
+ false);
+ if (status)
+ return status;
+
+ if (link_speed == IXGBE_LINK_SPEED_1GB_FULL && link_up)
+ goto out;
+
+ /* Set the module link speed */
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber:
+ hw->mac.ops.set_rate_select_speed(hw,
+ IXGBE_LINK_SPEED_1GB_FULL);
+ break;
+ case ixgbe_media_type_fiber_qsfp:
+ /* QSFP module automatically detects link speed */
+ break;
+ default:
+ hw_dbg(hw, "Unexpected media type\n");
+ break;
+ }
+
+ /* Allow module to change analog characteristics (10G->1G) */
+ msleep(40);
+
+ status = hw->mac.ops.setup_mac_link(hw,
+ IXGBE_LINK_SPEED_1GB_FULL,
+ autoneg_wait_to_complete);
+ if (status)
+ return status;
+
+ /* Flap the Tx laser if it has not already been done */
+ if (hw->mac.ops.flap_tx_laser)
+ hw->mac.ops.flap_tx_laser(hw);
+
+ /* Wait for the link partner to also set speed */
+ msleep(100);
+
+ /* If we have link, just jump out */
+ status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
+ false);
+ if (status)
+ return status;
+
+ if (link_up)
+ goto out;
+ }
+
+ /* We didn't get link. Configure back to the highest speed we tried,
+ * (if there was more than one). We call ourselves back with just the
+ * single highest speed that the user requested.
+ */
+ if (speedcnt > 1)
+ status = ixgbe_setup_mac_link_multispeed_fiber(hw,
+ highest_link_speed,
+ autoneg_wait_to_complete);
+
+out:
+ /* Set autoneg_advertised value based on input link speed */
+ hw->phy.autoneg_advertised = 0;
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+ return status;
+}
+
+/**
+ * ixgbe_set_soft_rate_select_speed - Set module link speed
+ * @hw: pointer to hardware structure
+ * @speed: link speed to set
+ *
+ * Set module link speed via the soft rate select.
+ */
+void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed)
+{
+ s32 status;
+ u8 rs, eeprom_data;
+
+ switch (speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ /* one bit mask same as setting on */
+ rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
+ break;
+ default:
+ hw_dbg(hw, "Invalid fixed module speed\n");
+ return;
+ }
+
+ /* Set RS0 */
+ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ &eeprom_data);
+ if (status) {
+ hw_dbg(hw, "Failed to read Rx Rate Select RS0\n");
+ return;
+ }
+
+ eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
+
+ status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ eeprom_data);
+ if (status) {
+ hw_dbg(hw, "Failed to write Rx Rate Select RS0\n");
+ return;
+ }
+}
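The hunks above move the multispeed-fiber speed walk out of the 82599-specific file into common code and have it call back through mac.ops.set_rate_select_speed and mac.ops.setup_mac_link, so the 82599's hard RS0/RS1 pins and the new soft rate select over I2C can share the same retry loop. A toy model of that ops-table split; the names below are illustrative, not the driver's:

#include <stdio.h>

enum speed { SPEED_1G = 1, SPEED_10G = 2 };

/* Shared code calls back through function pointers; each MAC family plugs
 * in its own rate-select method.
 */
struct mac_ops {
	void (*set_rate_select_speed)(enum speed s);
};

static void hard_rate_select(enum speed s)
{
	printf("toggle RS pins for %s\n", s == SPEED_10G ? "10G" : "1G");
}

static void soft_rate_select(enum speed s)
{
	printf("write SFF-8472 soft rate select for %s\n",
	       s == SPEED_10G ? "10G" : "1G");
}

static void setup_multispeed(const struct mac_ops *ops)
{
	/* Try the highest speed first, then fall back, as the common code does. */
	enum speed order[] = { SPEED_10G, SPEED_1G };
	unsigned int i;

	for (i = 0; i < 2; i++)
		ops->set_rate_select_speed(order[i]);
}

int main(void)
{
	struct mac_ops mac_82599 = { .set_rate_select_speed = hard_rate_select };
	struct mac_ops mac_sfp   = { .set_rate_select_speed = soft_rate_select };

	setup_multispeed(&mac_82599);
	setup_multispeed(&mac_sfp);
	return 0;
}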
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 2f779f35dc4f..a0044e4a8b90 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -135,6 +135,11 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw);
void ixgbe_enable_rx_generic(struct ixgbe_hw *hw);
+s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed);
#define IXGBE_FAILED_READ_REG 0xffffffffU
#define IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 3b932fe64ab6..23277ab153b6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -259,7 +259,13 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
} else {
- reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
+ /* In order to prevent Tx hangs when the internal Tx
+ * switch is enabled we must set the high water mark
+ * to the Rx packet buffer size - 24KB. This allows
+ * the Tx switch to function even under heavy Rx
+ * workloads.
+ */
+ reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index ab2edc8e7703..d681273bd39d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -943,9 +943,6 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->n_stats = IXGBE_STATS_LEN;
- drvinfo->testinfo_len = IXGBE_TEST_LEN;
- drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
}
static void ixgbe_get_ringparam(struct net_device *netdev,
@@ -2286,7 +2283,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
adapter->tx_itr_setting = ec->tx_coalesce_usecs;
if (adapter->tx_itr_setting == 1)
- tx_itr_param = IXGBE_10K_ITR;
+ tx_itr_param = IXGBE_12K_ITR;
else
tx_itr_param = adapter->tx_itr_setting;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 68e1e757ecef..f3168bcc7d87 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -866,7 +866,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
if (txr_count && !rxr_count) {
/* tx only vector */
if (adapter->tx_itr_setting == 1)
- q_vector->itr = IXGBE_10K_ITR;
+ q_vector->itr = IXGBE_12K_ITR;
else
q_vector->itr = adapter->tx_itr_setting;
} else {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 63b2cfe9416b..47395ff5d908 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -79,7 +79,7 @@ char ixgbe_default_device_descr[] =
static char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
#endif
-#define DRV_VERSION "4.0.1-k"
+#define DRV_VERSION "4.2.1-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
"Copyright (c) 1999-2015 Intel Corporation.";
@@ -137,6 +137,7 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
/* required last entry */
{0, }
};
@@ -1244,9 +1245,12 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
int cpu)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
+ u32 txctrl = 0;
u16 reg_offset;
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+ txctrl = dca3_get_tag(tx_ring->dev, cpu);
+
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
@@ -1278,9 +1282,11 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
int cpu)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 rxctrl = dca3_get_tag(rx_ring->dev, cpu);
+ u32 rxctrl = 0;
u8 reg_idx = rx_ring->reg_idx;
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+ rxctrl = dca3_get_tag(rx_ring->dev, cpu);
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
@@ -1297,6 +1303,7 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
* which will cause the DCA tag to be cleared.
*/
rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
+ IXGBE_DCA_RXCTRL_DATA_DCA_EN |
IXGBE_DCA_RXCTRL_DESC_DCA_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
@@ -1326,11 +1333,13 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
int i;
- if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
- return;
-
/* always use CB2 mode, difference is masked in the CB driver */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
+ IXGBE_DCA_CTRL_DCA_MODE_CB2);
+ else
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
+ IXGBE_DCA_CTRL_DCA_DISABLE);
for (i = 0; i < adapter->num_q_vectors; i++) {
adapter->q_vector[i]->cpu = -1;
@@ -1353,7 +1362,8 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
break;
if (dca_add_requester(dev) == 0) {
adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
- ixgbe_setup_dca(adapter);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
+ IXGBE_DCA_CTRL_DCA_MODE_CB2);
break;
}
/* Fall Through since DCA is disabled. */
@@ -1361,7 +1371,8 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
dca_remove_requester(dev);
adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
+ IXGBE_DCA_CTRL_DCA_DISABLE);
}
break;
}
@@ -2261,7 +2272,7 @@ static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
/* simple throttlerate management
* 0-10MB/s lowest (100000 ints/s)
* 10-20MB/s low (20000 ints/s)
- * 20-1249MB/s bulk (8000 ints/s)
+ * 20-1249MB/s bulk (12000 ints/s)
*/
/* what was last interrupt timeslice? */
timepassed_us = q_vector->itr >> 2;
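The comment above defines the throughput tiers that feed interrupt moderation, with the bulk tier retargeted from 8000 to 12000 interrupts per second to match the new IXGBE_12K_ITR. A standalone sketch of that bucketing; the thresholds come from the comment and the targets from the *_ITR defines, while the function itself is only an illustration:

#include <stdio.h>

/* Pick an interrupts/second target from the observed MB/s over the last
 * interval, mirroring the tiers in the comment above.
 */
static unsigned int pick_ints_per_sec(unsigned int mb_per_sec)
{
	if (mb_per_sec <= 10)
		return 100000; /* lowest_latency */
	if (mb_per_sec <= 20)
		return 20000;  /* low_latency    */
	return 12000;          /* bulk_latency   */
}

int main(void)
{
	unsigned int samples[] = { 5, 15, 600 };
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("%4u MB/s -> %u ints/s\n", samples[i],
		       pick_ints_per_sec(samples[i]));
	return 0;
}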
@@ -2350,7 +2361,7 @@ static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
new_itr = IXGBE_20K_ITR;
break;
case bulk_latency:
- new_itr = IXGBE_8K_ITR;
+ new_itr = IXGBE_12K_ITR;
break;
default:
break;
@@ -2495,17 +2506,27 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
struct ixgbe_hw *hw = &adapter->hw;
+ u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw);
+
+ if (!ixgbe_is_sfp(hw))
+ return;
- if (eicr & IXGBE_EICR_GPI_SDP2(hw)) {
+ /* Later MACs use different SDP */
+ if (hw->mac.type >= ixgbe_mac_X540)
+ eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
+
+ if (eicr & eicr_mask) {
/* Clear the interrupt */
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2(hw));
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
+ adapter->sfp_poll_time = 0;
ixgbe_service_event_schedule(adapter);
}
}
- if (eicr & IXGBE_EICR_GPI_SDP1(hw)) {
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
+ (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
/* Clear the interrupt */
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
@@ -2622,6 +2643,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP)
+ mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
mask |= IXGBE_EICR_GPI_SDP0_X540;
mask |= IXGBE_EIMS_ECC;
@@ -2752,7 +2775,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
container_of(napi, struct ixgbe_q_vector, napi);
struct ixgbe_adapter *adapter = q_vector->adapter;
struct ixgbe_ring *ring;
- int per_ring_budget;
+ int per_ring_budget, work_done = 0;
bool clean_complete = true;
#ifdef CONFIG_IXGBE_DCA
@@ -2773,9 +2796,13 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
else
per_ring_budget = budget;
- ixgbe_for_each_ring(ring, q_vector->rx)
- clean_complete &= (ixgbe_clean_rx_irq(q_vector, ring,
- per_ring_budget) < per_ring_budget);
+ ixgbe_for_each_ring(ring, q_vector->rx) {
+ int cleaned = ixgbe_clean_rx_irq(q_vector, ring,
+ per_ring_budget);
+
+ work_done += cleaned;
+ clean_complete &= (cleaned < per_ring_budget);
+ }
ixgbe_qv_unlock_napi(q_vector);
/* If all work not completed, return budget and keep polling */
@@ -2783,7 +2810,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
return budget;
/* all work done, exit the polling mode */
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
if (adapter->rx_itr_setting & 1)
ixgbe_set_itr(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -3700,14 +3727,20 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
adapter->num_vfs);
- /* Ensure LLDP is set for Ethertype Antispoofing if we will be
+ /* Ensure LLDP and FC are set for Ethertype Antispoofing if we will be
* calling set_ethertype_anti_spoofing for each VF in loop below
*/
- if (hw->mac.ops.set_ethertype_anti_spoofing)
+ if (hw->mac.ops.set_ethertype_anti_spoofing) {
IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
- (IXGBE_ETQF_FILTER_EN | /* enable filter */
- IXGBE_ETQF_TX_ANTISPOOF | /* tx antispoof */
- IXGBE_ETH_P_LLDP)); /* LLDP eth type */
+ (IXGBE_ETQF_FILTER_EN |
+ IXGBE_ETQF_TX_ANTISPOOF |
+ IXGBE_ETH_P_LLDP));
+
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
+ (IXGBE_ETQF_FILTER_EN |
+ IXGBE_ETQF_TX_ANTISPOOF |
+ ETH_P_PAUSE));
+ }
/* For VFs that have spoof checking turned off */
for (i = 0; i < adapter->num_vfs; i++) {
@@ -3777,8 +3810,6 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
switch (hw->mac.type) {
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
case ixgbe_mac_82598EB:
/*
* For VMDq support of different descriptor types or
@@ -3792,6 +3823,11 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
*/
rdrxctl |= IXGBE_RDRXCTL_MVMEN;
break;
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ if (adapter->num_vfs)
+ rdrxctl |= IXGBE_RDRXCTL_PSP;
+ /* fall through for older HW */
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
/* Disable RSC for ACK packets */
@@ -4767,6 +4803,12 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
break;
}
+#ifdef CONFIG_IXGBE_DCA
+ /* configure DCA */
+ if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE)
+ ixgbe_setup_dca(adapter);
+#endif /* CONFIG_IXGBE_DCA */
+
#ifdef IXGBE_FCOE
/* configure FCoE L2 filters, redirection table, and Rx control */
ixgbe_configure_fcoe(adapter);
@@ -4793,6 +4835,7 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
+ adapter->sfp_poll_time = 0;
}
/**
@@ -4883,9 +4926,6 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
case ixgbe_mac_82599EB:
gpie |= IXGBE_SDP0_GPIEN_8259X;
break;
- case ixgbe_mac_X540:
- gpie |= IXGBE_EIMS_TS;
- break;
default:
break;
}
@@ -4895,9 +4935,15 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
gpie |= IXGBE_SDP1_GPIEN(hw);
- if (hw->mac.type == ixgbe_mac_82599EB) {
- gpie |= IXGBE_SDP1_GPIEN_8259X;
- gpie |= IXGBE_SDP2_GPIEN_8259X;
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
+ break;
+ case ixgbe_mac_X550EM_x:
+ gpie |= IXGBE_SDP0_GPIEN_X540;
+ break;
+ default:
+ break;
}
IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
@@ -5220,11 +5266,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
ixgbe_clean_all_tx_rings(adapter);
ixgbe_clean_all_rx_rings(adapter);
-
-#ifdef CONFIG_IXGBE_DCA
- /* since we reset the hardware DCA settings were cleared */
- ixgbe_setup_dca(adapter);
-#endif
}
/**
@@ -5270,7 +5311,6 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
adapter->ring_feature[RING_F_RSS].limit = rss;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
- adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
adapter->max_q_vectors = MAX_Q_VECTORS_82599;
adapter->atr_sample_rate = 20;
fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
@@ -5296,7 +5336,6 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
- adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
if (hw->device_id == IXGBE_DEV_ID_82598AT)
adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
@@ -6692,10 +6731,16 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
return;
+ if (adapter->sfp_poll_time &&
+ time_after(adapter->sfp_poll_time, jiffies))
+ return; /* If not yet time to poll for SFP */
+
/* someone else is in init, wait until next service event */
if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
return;
+ adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1;
+
err = hw->phy.ops.identify_sfp(hw);
if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
goto sfp_out;
@@ -8362,6 +8407,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
.ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
.ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
+ .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust,
.ndo_get_vf_config = ixgbe_ndo_get_vf_config,
.ndo_get_stats64 = ixgbe_get_stats64,
#ifdef CONFIG_IXGBE_DCB
@@ -8695,8 +8741,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->phy.reset_if_overtemp = true;
err = hw->mac.ops.reset_hw(hw);
hw->phy.reset_if_overtemp = false;
- if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
- hw->mac.type == ixgbe_mac_82598EB) {
+ if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
err = 0;
} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
@@ -9008,7 +9053,8 @@ static void ixgbe_remove(struct pci_dev *pdev)
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
dca_remove_requester(&pdev->dev);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
+ IXGBE_DCA_CTRL_DCA_DISABLE);
}
#endif
@@ -9019,12 +9065,12 @@ static void ixgbe_remove(struct pci_dev *pdev)
/* remove the added san mac */
ixgbe_del_sanmac_netdev(netdev);
- if (netdev->reg_state == NETREG_REGISTERED)
- unregister_netdev(netdev);
-
#ifdef CONFIG_PCI_IOV
ixgbe_disable_sriov(adapter);
#endif
+ if (netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(netdev);
+
ixgbe_clear_interrupt_scheme(adapter);
ixgbe_release_hw_control(adapter);
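The bulk-latency change above only swaps the ITR constant from 8000 to 12000 ints/s. As a rough, hedged illustration of how such a rate maps onto an EITR-style interval value (assuming the driver's usual 2-usec granularity stored from bit 3 upward; the helper name is illustrative, not part of the driver), a minimal userspace sketch:

#include <stdio.h>

/* Hypothetical helper: convert a target interrupt rate (ints/s) into an
 * EITR-style register value, assuming 2-usec units shifted left by 3.
 */
static unsigned int rate_to_itr_reg(unsigned int ints_per_sec)
{
    unsigned int interval_us = 1000000u / ints_per_sec; /* usecs between interrupts */

    return (interval_us / 2) << 3;
}

int main(void)
{
    printf("8000 ints/s  -> ITR 0x%x\n", rate_to_itr_reg(8000));  /* old bulk setting */
    printf("12000 ints/s -> ITR 0x%x\n", rate_to_itr_reg(12000)); /* new bulk setting */
    printf("20000 ints/s -> ITR 0x%x\n", rate_to_itr_reg(20000));
    return 0;
}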
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index b1e4703ff2a5..8daa95f74548 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -102,6 +102,8 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */
#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */
+#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c
+
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
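The new IXGBE_VF_UPDATE_XCAST_MODE opcode is consumed by the PF handler added in ixgbe_sriov.c further below, which reads the requested mode from msgbuf[1] and echoes the granted mode back in the same word. A minimal sketch of how a VF-side caller might lay out that two-word request (the helper and the local mode names are placeholders, not the ixgbevf API):

#include <stdint.h>
#include <stdio.h>

#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c /* from ixgbe_mbx.h above */

/* Mirrors enum ixgbevf_xcast_modes defined in ixgbevf.h below */
enum { XCAST_MODE_NONE = 0, XCAST_MODE_MULTI, XCAST_MODE_ALLMULTI };

/* Hypothetical illustration: opcode in word 0, requested mode in word 1;
 * the PF writes the mode it actually granted back into word 1.
 */
static void build_xcast_request(uint32_t msgbuf[2], int wanted_mode)
{
    msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
    msgbuf[1] = (uint32_t)wanted_mode;
}

int main(void)
{
    uint32_t msg[2];

    build_xcast_request(msg, XCAST_MODE_ALLMULTI);
    printf("mbx word0=0x%02x word1=%u\n", (unsigned)msg[0], (unsigned)msg[1]);
    return 0;
}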
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 597d0b1c2370..fb8673d63806 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -100,16 +100,17 @@ static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2)
}
/**
- * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
+ * ixgbe_read_i2c_combined_generic_int - Perform I2C read combined operation
* @hw: pointer to the hardware structure
* @addr: I2C bus address to read from
* @reg: I2C device register to read from
* @val: pointer to location to receive read value
+ * @lock: true if the semaphore should be taken and released
*
* Returns an error code on error.
- **/
-s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
- u16 reg, u16 *val)
+ */
+static s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 *val, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
int max_retry = 10;
@@ -124,7 +125,7 @@ s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
csum = ~csum;
do {
- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+ if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
return IXGBE_ERR_SWFW_SYNC;
ixgbe_i2c_start(hw);
/* Device Address and write indication */
@@ -157,13 +158,15 @@ s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
if (ixgbe_clock_out_i2c_bit(hw, false))
goto fail;
ixgbe_i2c_stop(hw);
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ if (lock)
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
*val = (high_bits << 8) | low_bits;
return 0;
fail:
ixgbe_i2c_bus_clear(hw);
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ if (lock)
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
retry++;
if (retry < max_retry)
hw_dbg(hw, "I2C byte read combined error - Retry.\n");
@@ -175,17 +178,49 @@ fail:
}
/**
- * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
+ * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to read from
+ * @reg: I2C device register to read from
+ * @val: pointer to location to receive read value
+ *
+ * Returns an error code on error.
+ */
+s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 *val)
+{
+ return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true);
+}
+
+/**
+ * ixgbe_read_i2c_combined_generic_unlocked - Unlocked I2C read combined
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to read from
+ * @reg: I2C device register to read from
+ * @val: pointer to location to receive read value
+ *
+ * Returns an error code on error.
+ */
+s32 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 *val)
+{
+ return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false);
+}
+
+/**
+ * ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation
* @hw: pointer to the hardware structure
* @addr: I2C bus address to write to
* @reg: I2C device register to write to
* @val: value to write
+ * @lock: true if the semaphore should be taken and released
*
* Returns an error code on error.
- **/
-s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
- u8 addr, u16 reg, u16 val)
+ */
+static s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 val, bool lock)
{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
int max_retry = 1;
int retry = 0;
u8 reg_high;
@@ -197,6 +232,8 @@ s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF);
csum = ~csum;
do {
+ if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+ return IXGBE_ERR_SWFW_SYNC;
ixgbe_i2c_start(hw);
/* Device Address and write indication */
if (ixgbe_out_i2c_byte_ack(hw, addr))
@@ -217,10 +254,14 @@ s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
if (ixgbe_out_i2c_byte_ack(hw, csum))
goto fail;
ixgbe_i2c_stop(hw);
+ if (lock)
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
return 0;
fail:
ixgbe_i2c_bus_clear(hw);
+ if (lock)
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
retry++;
if (retry < max_retry)
hw_dbg(hw, "I2C byte write combined error - Retry.\n");
@@ -232,6 +273,36 @@ fail:
}
/**
+ * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to write to
+ * @reg: I2C device register to write to
+ * @val: value to write
+ *
+ * Returns an error code on error.
+ */
+s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
+ u8 addr, u16 reg, u16 val)
+{
+ return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true);
+}
+
+/**
+ * ixgbe_write_i2c_combined_generic_unlocked - Unlocked I2C write combined
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to write to
+ * @reg: I2C device register to write to
+ * @val: value to write
+ *
+ * Returns an error code on error.
+ */
+s32 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
+ u8 addr, u16 reg, u16 val)
+{
+ return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false);
+}
+
+/**
* ixgbe_identify_phy_generic - Get physical layer module
* @hw: pointer to hardware structure
*
@@ -1100,6 +1171,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
return IXGBE_ERR_SFP_NOT_PRESENT;
}
+ /* LAN ID is needed for sfp_type determination */
+ hw->mac.ops.set_lan_id(hw);
+
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_IDENTIFIER,
&identifier);
@@ -1107,9 +1181,6 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
if (status)
goto err_read_i2c_eeprom;
- /* LAN ID is needed for sfp_type determination */
- hw->mac.ops.set_lan_id(hw);
-
if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
return IXGBE_ERR_SFP_NOT_SUPPORTED;
@@ -1159,7 +1230,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.sfp_type = ixgbe_sfp_type_lr;
else
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
- } else if (hw->mac.type == ixgbe_mac_82599EB) {
+ } else {
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
@@ -1660,26 +1731,46 @@ s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
}
/**
- * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C
+ * ixgbe_is_sfp_probe - Returns true if SFP is being detected
+ * @hw: pointer to hardware structure
+ * @offset: eeprom offset to be read
+ * @addr: I2C address to be read
+ */
+static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr)
+{
+ if (addr == IXGBE_I2C_EEPROM_DEV_ADDR &&
+ offset == IXGBE_SFF_IDENTIFIER &&
+ hw->phy.sfp_type == ixgbe_sfp_type_not_present)
+ return true;
+ return false;
+}
+
+/**
+ * ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
* @data: value read
+ * @lock: true if the semaphore should be taken and released
*
* Performs byte read operation to SFP module's EEPROM over I2C interface at
* a specified device address.
- **/
-s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 *data)
+ */
+static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data, bool lock)
{
s32 status;
u32 max_retry = 10;
u32 retry = 0;
u32 swfw_mask = hw->phy.phy_semaphore_mask;
bool nack = true;
+
+ if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr))
+ max_retry = IXGBE_SFP_DETECT_RETRIES;
+
*data = 0;
do {
- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+ if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
return IXGBE_ERR_SWFW_SYNC;
ixgbe_i2c_start(hw);
@@ -1721,12 +1812,16 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
goto fail;
ixgbe_i2c_stop(hw);
- break;
+ if (lock)
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ return 0;
fail:
ixgbe_i2c_bus_clear(hw);
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
- msleep(100);
+ if (lock) {
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ msleep(100);
+ }
retry++;
if (retry < max_retry)
hw_dbg(hw, "I2C byte read error - Retrying.\n");
@@ -1735,29 +1830,60 @@ fail:
} while (retry < max_retry);
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
-
return status;
}
/**
- * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C
+ * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ */
+s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
+ data, true);
+}
+
+/**
+ * ixgbe_read_i2c_byte_generic_unlocked - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ */
+s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
+ data, false);
+}
+
+/**
+ * ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
* @data: value to write
+ * @lock: true if the semaphore should be taken and released
*
* Performs byte write operation to SFP module's EEPROM over I2C interface at
* a specified device address.
- **/
-s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 data)
+ */
+static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data, bool lock)
{
s32 status;
u32 max_retry = 1;
u32 retry = 0;
u32 swfw_mask = hw->phy.phy_semaphore_mask;
- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+ if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
return IXGBE_ERR_SWFW_SYNC;
do {
@@ -1788,7 +1914,9 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
goto fail;
ixgbe_i2c_stop(hw);
- break;
+ if (lock)
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ return 0;
fail:
ixgbe_i2c_bus_clear(hw);
@@ -1799,21 +1927,57 @@ fail:
hw_dbg(hw, "I2C byte write error.\n");
} while (retry < max_retry);
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ if (lock)
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
return status;
}
/**
+ * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ */
+s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
+ data, true);
+}
+
+/**
+ * ixgbe_write_i2c_byte_generic_unlocked - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ */
+s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
+ data, false);
+}
+
+/**
* ixgbe_i2c_start - Sets I2C start condition
* @hw: pointer to hardware structure
*
* Sets I2C start condition (High -> Low on SDA while SCL is High)
+ * Sets bit-bang mode on X550 hardware.
**/
static void ixgbe_i2c_start(struct ixgbe_hw *hw)
{
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+ i2cctl |= IXGBE_I2C_BB_EN(hw);
+
/* Start condition must begin with data and clock high */
ixgbe_set_i2c_data(hw, &i2cctl, 1);
ixgbe_raise_i2c_clk(hw, &i2cctl);
@@ -1838,10 +2002,15 @@ static void ixgbe_i2c_start(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
*
* Sets I2C stop condition (Low -> High on SDA while SCL is High)
+ * Disables bit-bang mode and negates data output enable on X550
+ * hardware.
**/
static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
{
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
+ u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw);
+ u32 bb_en_bit = IXGBE_I2C_BB_EN(hw);
/* Stop condition must begin with data low and clock high */
ixgbe_set_i2c_data(hw, &i2cctl, 0);
@@ -1854,6 +2023,13 @@ static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
/* bus free time between stop and start (4.7us)*/
udelay(IXGBE_I2C_T_BUF);
+
+ if (bb_en_bit || data_oe_bit || clk_oe_bit) {
+ i2cctl &= ~bb_en_bit;
+ i2cctl |= data_oe_bit | clk_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+ }
}
/**
@@ -1868,6 +2044,7 @@ static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
s32 i;
bool bit = false;
+ *data = 0;
for (i = 7; i >= 0; i--) {
ixgbe_clock_in_i2c_bit(hw, &bit);
*data |= bit << i;
@@ -1901,6 +2078,7 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
/* Release SDA line (set high) */
i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
i2cctl |= IXGBE_I2C_DATA_OUT(hw);
+ i2cctl |= IXGBE_I2C_DATA_OE_N_EN(hw);
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
IXGBE_WRITE_FLUSH(hw);
@@ -1915,15 +2093,21 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
**/
static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
{
+ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
s32 status = 0;
u32 i = 0;
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
u32 timeout = 10;
bool ack = true;
+ if (data_oe_bit) {
+ i2cctl |= IXGBE_I2C_DATA_OUT(hw);
+ i2cctl |= data_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+ }
ixgbe_raise_i2c_clk(hw, &i2cctl);
-
/* Minimum high period of clock is 4us */
udelay(IXGBE_I2C_T_HIGH);
@@ -1961,7 +2145,14 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
{
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
+ if (data_oe_bit) {
+ i2cctl |= IXGBE_I2C_DATA_OUT(hw);
+ i2cctl |= data_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+ }
ixgbe_raise_i2c_clk(hw, &i2cctl);
/* Minimum high period of clock is 4us */
@@ -2016,13 +2207,20 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
* @i2cctl: Current value of I2CCTL register
*
* Raises the I2C clock line '0'->'1'
+ * Negates the I2C clock output enable on X550 hardware.
**/
static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
{
+ u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw);
u32 i = 0;
u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT;
u32 i2cctl_r = 0;
+ if (clk_oe_bit) {
+ *i2cctl |= clk_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
+ }
+
for (i = 0; i < timeout; i++) {
*i2cctl |= IXGBE_I2C_CLK_OUT(hw);
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
@@ -2042,11 +2240,13 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
* @i2cctl: Current value of I2CCTL register
*
* Lowers the I2C clock line '1'->'0'
+ * Asserts the I2C clock output enable on X550 hardware.
**/
static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
{
*i2cctl &= ~IXGBE_I2C_CLK_OUT(hw);
+ *i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN(hw);
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
IXGBE_WRITE_FLUSH(hw);
@@ -2062,13 +2262,17 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
* @data: I2C data value (0 or 1) to set
*
* Sets the I2C data bit
+ * Asserts the I2C data output enable on X550 hardware.
**/
static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
{
+ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
+
if (data)
*i2cctl |= IXGBE_I2C_DATA_OUT(hw);
else
*i2cctl &= ~IXGBE_I2C_DATA_OUT(hw);
+ *i2cctl &= ~data_oe_bit;
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
IXGBE_WRITE_FLUSH(hw);
@@ -2076,6 +2280,14 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
/* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
+ if (!data) /* Can't verify data in this case */
+ return 0;
+ if (data_oe_bit) {
+ *i2cctl |= data_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
/* Verify data was set correctly */
*i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
if (data != ixgbe_get_i2c_data(hw, i2cctl)) {
@@ -2092,9 +2304,19 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
* @i2cctl: Current value of I2CCTL register
*
* Returns the I2C data bit value
+ * Negates the I2C data output enable on X550 hardware.
**/
static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
{
+ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
+
+ if (data_oe_bit) {
+ *i2cctl |= data_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+ udelay(IXGBE_I2C_T_FALL);
+ }
+
if (*i2cctl & IXGBE_I2C_DATA_IN(hw))
return true;
return false;
@@ -2109,10 +2331,11 @@ static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
**/
static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+ u32 i2cctl;
u32 i;
ixgbe_i2c_start(hw);
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
ixgbe_set_i2c_data(hw, &i2cctl, 1);
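The refactoring above converts each I2C accessor into a static *_int worker that takes a lock flag, with thin locked and unlocked wrappers around it so callers that already hold the SWFW semaphore can reuse the same transfer code. A generic sketch of the same pattern outside the driver, using a plain pthread mutex in place of the SWFW semaphore (all names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t bus_lock = PTHREAD_MUTEX_INITIALIZER;

/* Worker does the real transfer; 'lock' says whether it owns acquisition. */
static int bus_read_int(int reg, int *val, int lock)
{
    if (lock)
        pthread_mutex_lock(&bus_lock);

    *val = reg * 2; /* stand-in for the actual bus access */

    if (lock)
        pthread_mutex_unlock(&bus_lock);
    return 0;
}

/* Public variant: takes and releases the lock itself. */
static int bus_read(int reg, int *val)
{
    return bus_read_int(reg, val, 1);
}

/* Unlocked variant: caller already holds the lock, e.g. for a multi-step
 * sequence such as the CS4227 reset flow in ixgbe_x550.c below.
 */
static int bus_read_unlocked(int reg, int *val)
{
    return bus_read_int(reg, val, 0);
}

int main(void)
{
    int v;

    bus_read(3, &v);
    printf("locked read: %d\n", v);

    pthread_mutex_lock(&bus_lock);
    bus_read_unlocked(4, &v);
    pthread_mutex_unlock(&bus_lock);
    printf("unlocked read under caller's lock: %d\n", v);
    return 0;
}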
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index e45988c4dad5..5abd66c84d00 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -66,6 +66,9 @@
#define IXGBE_SFF_1GBASET_CAPABLE 0x8
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
+#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
#define IXGBE_SFF_ADDRESSING_MODE 0x4
#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
@@ -78,9 +81,29 @@
#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
#define IXGBE_CS4227 0xBE /* CS4227 address */
-#define IXGBE_CS4227_SPARE24_LSB 0x12B0 /* Reg to program EDC */
+#define IXGBE_CS4227_SCRATCH 2
+#define IXGBE_CS4227_RESET_PENDING 0x1357
+#define IXGBE_CS4227_RESET_COMPLETE 0x5AA5
+#define IXGBE_CS4227_RETRIES 15
+#define IXGBE_CS4227_EFUSE_STATUS 0x0181
+#define IXGBE_CS4227_LINE_SPARE22_MSB 0x12AD /* Reg to set speed */
+#define IXGBE_CS4227_LINE_SPARE24_LSB 0x12B0 /* Reg to set EDC */
+#define IXGBE_CS4227_HOST_SPARE22_MSB 0x1AAD /* Reg to set speed */
+#define IXGBE_CS4227_HOST_SPARE24_LSB 0x1AB0 /* Reg to program EDC */
+#define IXGBE_CS4227_EEPROM_STATUS 0x5001
+#define IXGBE_CS4227_EEPROM_LOAD_OK 0x0001
+#define IXGBE_CS4227_SPEED_1G 0x8000
+#define IXGBE_CS4227_SPEED_10G 0
#define IXGBE_CS4227_EDC_MODE_CX1 0x0002
#define IXGBE_CS4227_EDC_MODE_SR 0x0004
+#define IXGBE_CS4227_EDC_MODE_DIAG 0x0008
+#define IXGBE_CS4227_RESET_HOLD 500 /* microseconds */
+#define IXGBE_CS4227_RESET_DELAY 500 /* milliseconds */
+#define IXGBE_CS4227_CHECK_DELAY 30 /* milliseconds */
+#define IXGBE_PE 0xE0 /* Port expander addr */
+#define IXGBE_PE_OUTPUT 1 /* Output reg offset */
+#define IXGBE_PE_CONFIG 3 /* Config reg offset */
+#define IXGBE_PE_BIT1 (1 << 1)
/* Flow control defines */
#define IXGBE_TAF_SYM_PAUSE 0x400
@@ -109,6 +132,8 @@
#define IXGBE_I2C_T_SU_STO 4
#define IXGBE_I2C_T_BUF 5
+#define IXGBE_SFP_DETECT_RETRIES 2
+
#define IXGBE_TN_LASI_STATUS_REG 0x9005
#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
@@ -154,8 +179,12 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
+s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data);
+s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data);
s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
@@ -164,6 +193,10 @@ s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 eeprom_data);
s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 *val);
+s32 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 *val);
s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 val);
+s32 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 val);
#endif /* _IXGBE_PHY_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 1d17b5872dd1..fcd8b27a0ccb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -116,6 +116,12 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
* we want to disable the querying by default.
*/
adapter->vfinfo[i].rss_query_enabled = 0;
+
+ /* Untrust all VFs */
+ adapter->vfinfo[i].trusted = false;
+
+ /* set the default xcast mode */
+ adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE;
}
return 0;
@@ -1001,6 +1007,59 @@ static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter,
return 0;
}
+static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int xcast_mode = msgbuf[1];
+ u32 vmolr, disable, enable;
+
+ /* verify the PF supports the correct APIs */
+ switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_12:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (xcast_mode > IXGBEVF_XCAST_MODE_MULTI &&
+ !adapter->vfinfo[vf].trusted) {
+ xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
+ }
+
+ if (adapter->vfinfo[vf].xcast_mode == xcast_mode)
+ goto out;
+
+ switch (xcast_mode) {
+ case IXGBEVF_XCAST_MODE_NONE:
+ disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
+ enable = 0;
+ break;
+ case IXGBEVF_XCAST_MODE_MULTI:
+ disable = IXGBE_VMOLR_MPE;
+ enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE;
+ break;
+ case IXGBEVF_XCAST_MODE_ALLMULTI:
+ disable = 0;
+ enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
+ vmolr &= ~disable;
+ vmolr |= enable;
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+
+ adapter->vfinfo[vf].xcast_mode = xcast_mode;
+
+out:
+ msgbuf[1] = xcast_mode;
+
+ return 0;
+}
+
static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
{
u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
@@ -1063,6 +1122,9 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
case IXGBE_VF_GET_RSS_KEY:
retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf);
break;
+ case IXGBE_VF_UPDATE_XCAST_MODE:
+ retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf);
+ break;
default:
e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
retval = IXGBE_ERR_MBX;
@@ -1124,6 +1186,17 @@ void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
}
+static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 ping;
+
+ ping = IXGBE_PF_CONTROL_MSG;
+ if (adapter->vfinfo[vf].clear_to_send)
+ ping |= IXGBE_VT_MSGTYPE_CTS;
+ ixgbe_write_mbx(hw, &ping, 1, vf);
+}
+
void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -1416,6 +1489,28 @@ int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
return 0;
}
+int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ if (vf >= adapter->num_vfs)
+ return -EINVAL;
+
+ /* nothing to do */
+ if (adapter->vfinfo[vf].trusted == setting)
+ return 0;
+
+ adapter->vfinfo[vf].trusted = setting;
+
+ /* reset VF to reconfigure features */
+ adapter->vfinfo[vf].clear_to_send = false;
+ ixgbe_ping_vf(adapter, vf);
+
+ e_info(drv, "VF %u is %strusted\n", vf, setting ? "" : "not ");
+
+ return 0;
+}
+
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi)
{
@@ -1430,5 +1525,6 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
ivi->qos = adapter->vfinfo[vf].pf_qos;
ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled;
+ ivi->trusted = adapter->vfinfo[vf].trusted;
return 0;
}
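The PF handler above silently downgrades requests from untrusted VFs: anything beyond IXGBEVF_XCAST_MODE_MULTI is clamped to MULTI unless the administrator has marked the VF trusted through the new ndo_set_vf_trust hook (typically toggled from userspace with something like "ip link set <pf> vf <n> trust on", assuming an iproute2 with VF trust support). A standalone sketch of that policy decision, with constants copied from the enum in ixgbevf.h below and an illustrative function name:

#include <stdbool.h>
#include <stdio.h>

enum { XCAST_MODE_NONE = 0, XCAST_MODE_MULTI, XCAST_MODE_ALLMULTI };

/* Clamp a VF's requested xcast mode based on its trust setting, mirroring
 * the check in ixgbe_update_vf_xcast_mode().
 */
static int clamp_xcast_mode(int requested, bool trusted)
{
    if (requested > XCAST_MODE_MULTI && !trusted)
        return XCAST_MODE_MULTI;
    return requested;
}

int main(void)
{
    printf("untrusted VF asks ALLMULTI -> gets %d\n",
           clamp_xcast_mode(XCAST_MODE_ALLMULTI, false)); /* 1 = MULTI */
    printf("trusted VF asks ALLMULTI   -> gets %d\n",
           clamp_xcast_mode(XCAST_MODE_ALLMULTI, true));  /* 2 = ALLMULTI */
    return 0;
}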
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 2c197e6d1fe7..dad925706f4c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -49,6 +49,7 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
bool setting);
+int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting);
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi);
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 63689192b149..995f03107eac 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -402,6 +402,7 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_FDIRSIP4M 0x0EE40
#define IXGBE_FDIRTCPM 0x0EE44
#define IXGBE_FDIRUDPM 0x0EE48
+#define IXGBE_FDIRSCTPM 0x0EE78
#define IXGBE_FDIRIP6M 0x0EE74
#define IXGBE_FDIRM 0x0EE70
@@ -1192,6 +1193,7 @@ struct ixgbe_thermal_sensor_data {
/* RDRXCTL Bit Masks */
#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min Threshold Size */
#define IXGBE_RDRXCTL_CRCSTRIP 0x00000002 /* CRC Strip */
+#define IXGBE_RDRXCTL_PSP 0x00000004 /* Pad small packet */
#define IXGBE_RDRXCTL_MVMEN 0x00000020
#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */
#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */
@@ -1750,6 +1752,9 @@ enum {
* FCoE (0x8906): Filter 2
* 1588 (0x88f7): Filter 3
* FIP (0x8914): Filter 4
+ * LLDP (0x88CC): Filter 5
+ * LACP (0x8809): Filter 6
+ * FC (0x8808): Filter 7
*/
#define IXGBE_ETQF_FILTER_EAPOL 0
#define IXGBE_ETQF_FILTER_FCOE 2
@@ -1757,6 +1762,7 @@ enum {
#define IXGBE_ETQF_FILTER_FIP 4
#define IXGBE_ETQF_FILTER_LLDP 5
#define IXGBE_ETQF_FILTER_LACP 6
+#define IXGBE_ETQF_FILTER_FC 7
/* VLAN Control Bit Masks */
#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */
@@ -1948,6 +1954,7 @@ enum {
#define IXGBE_GSSR_SW_MNG_SM 0x0400
#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys & I2Cs */
#define IXGBE_GSSR_I2C_MASK 0x1800
+#define IXGBE_GSSR_NVM_PHY_MASK 0xF
/* FW Status register bitmask */
#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */
@@ -3255,9 +3262,11 @@ struct ixgbe_mac_operations {
void (*flap_tx_laser)(struct ixgbe_hw *);
void (*stop_link_on_d3)(struct ixgbe_hw *);
s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
+ s32 (*setup_mac_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
bool *);
+ void (*set_rate_select_speed)(struct ixgbe_hw *, ixgbe_link_speed);
/* Packet Buffer Manipulation */
void (*set_rxpba)(struct ixgbe_hw *, int, u32, int);
@@ -3328,6 +3337,10 @@ struct ixgbe_phy_operations {
s32 (*set_phy_power)(struct ixgbe_hw *, bool on);
s32 (*enter_lplu)(struct ixgbe_hw *);
s32 (*handle_lasi)(struct ixgbe_hw *hw);
+ s32 (*read_i2c_combined_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
+ u16 *value);
+ s32 (*write_i2c_combined_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
+ u16 value);
};
struct ixgbe_eeprom_info {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 4e758435ece8..c1d4584f6469 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -567,19 +567,25 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
**/
s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
{
- u32 swfw_sync;
- u32 swmask = mask;
- u32 fwmask = mask << 5;
- u32 hwmask = 0;
+ u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK;
+ u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK;
+ u32 fwmask = swmask << 5;
u32 timeout = 200;
+ u32 hwmask = 0;
+ u32 swfw_sync;
u32 i;
- if (swmask == IXGBE_GSSR_EEP_SM)
+ if (swmask & IXGBE_GSSR_EEP_SM)
hwmask = IXGBE_GSSR_FLASH_SM;
+ /* SW only mask does not have FW bit pair */
+ if (mask & IXGBE_GSSR_SW_MNG_SM)
+ swmask |= IXGBE_GSSR_SW_MNG_SM;
+
+ swmask |= swi2c_mask;
+ fwmask |= swi2c_mask << 2;
for (i = 0; i < timeout; i++) {
- /*
- * SW NVM semaphore bit is used for access to all
+ /* SW NVM semaphore bit is used for access to all
* SW_FW_SYNC bits (not just NVM)
*/
if (ixgbe_get_swfw_sync_semaphore(hw))
@@ -590,39 +596,56 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
swfw_sync |= swmask;
IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
- break;
- } else {
- /*
- * Firmware currently using resource (fwmask),
- * hardware currently using resource (hwmask),
- * or other software thread currently using
- * resource (swmask)
- */
- ixgbe_release_swfw_sync_semaphore(hw);
- usleep_range(5000, 10000);
+ usleep_range(5000, 6000);
+ return 0;
}
+ /* Firmware currently using resource (fwmask), hardware
+ * currently using resource (hwmask), or other software
+ * thread currently using resource (swmask)
+ */
+ ixgbe_release_swfw_sync_semaphore(hw);
+ usleep_range(5000, 10000);
}
- /*
- * If the resource is not released by the FW/HW the SW can assume that
- * the FW/HW malfunctions. In that case the SW should sets the
- * SW bit(s) of the requested resource(s) while ignoring the
- * corresponding FW/HW bits in the SW_FW_SYNC register.
- */
- if (i >= timeout) {
- swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
- if (swfw_sync & (fwmask | hwmask)) {
- if (ixgbe_get_swfw_sync_semaphore(hw))
- return IXGBE_ERR_SWFW_SYNC;
+ /* Failed to get SW only semaphore */
+ if (swmask == IXGBE_GSSR_SW_MNG_SM) {
+ hw_dbg(hw, "Failed to get SW only semaphore\n");
+ return IXGBE_ERR_SWFW_SYNC;
+ }
- swfw_sync |= swmask;
- IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync);
- ixgbe_release_swfw_sync_semaphore(hw);
- }
+ /* If the resource is not released by the FW/HW the SW can assume that
+ * the FW/HW malfunctions. In that case the SW should set the SW bit(s)
+ * of the requested resource(s) while ignoring the corresponding FW/HW
+ * bits in the SW_FW_SYNC register.
+ */
+ if (ixgbe_get_swfw_sync_semaphore(hw))
+ return IXGBE_ERR_SWFW_SYNC;
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
+ if (swfw_sync & (fwmask | hwmask)) {
+ swfw_sync |= swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync);
+ ixgbe_release_swfw_sync_semaphore(hw);
+ usleep_range(5000, 6000);
+ return 0;
}
+ /* If the resource is not released by other SW the SW can assume that
+ * the other SW malfunctions. In that case the SW should clear all SW
+ * flags that it does not own and then repeat the whole process once
+ * again.
+ */
+ if (swfw_sync & swmask) {
+ u32 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM |
+ IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM;
+
+ if (swi2c_mask)
+ rmask |= IXGBE_GSSR_I2C_MASK;
+ ixgbe_release_swfw_sync_X540(hw, rmask);
+ ixgbe_release_swfw_sync_semaphore(hw);
+ return IXGBE_ERR_SWFW_SYNC;
+ }
+ ixgbe_release_swfw_sync_semaphore(hw);
- usleep_range(5000, 10000);
- return 0;
+ return IXGBE_ERR_SWFW_SYNC;
}
/**
@@ -635,9 +658,11 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
**/
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
{
+ u32 swmask = mask & (IXGBE_GSSR_NVM_PHY_MASK | IXGBE_GSSR_SW_MNG_SM);
u32 swfw_sync;
- u32 swmask = mask;
+ if (mask & IXGBE_GSSR_I2C_MASK)
+ swmask |= mask & IXGBE_GSSR_I2C_MASK;
ixgbe_get_swfw_sync_semaphore(hw);
swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
@@ -645,7 +670,7 @@ void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
- usleep_range(5000, 10000);
+ usleep_range(5000, 6000);
}
/**
@@ -686,6 +711,11 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
usleep_range(50, 100);
}
+ /* Release semaphores and return error if SW NVM semaphore
+ * was not granted because we do not have access to the EEPROM
+ */
+ hw_dbg(hw, "REGSMP Software NVM semaphore not granted\n");
+ ixgbe_release_swfw_sync_semaphore(hw);
return IXGBE_ERR_EEPROM;
}
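The reworked ixgbe_acquire_swfw_sync_X540 above derives the FW companion bits from the SW bits instead of blindly shifting the whole mask: the NVM/PHY bits pair with FW bits 5 positions up, the shared I2C bits pair with FW bits 2 positions up, and the SW-manageability bit has no FW pair at all. A small worked sketch of that mask derivation, using the GSSR values visible in the diff (standalone example, not driver code):

#include <stdio.h>

#define GSSR_NVM_PHY_MASK 0x000F /* EEPROM/PHY/MAC_CSR SW bits */
#define GSSR_SW_MNG_SM    0x0400 /* SW-only bit, no FW pair */
#define GSSR_I2C_MASK     0x1800 /* shared I2C SW bits */

/* Mirror the mask split in ixgbe_acquire_swfw_sync_X540(). */
static void derive_masks(unsigned int mask, unsigned int *swmask,
                         unsigned int *fwmask)
{
    unsigned int swi2c = mask & GSSR_I2C_MASK;

    *swmask = mask & GSSR_NVM_PHY_MASK;
    *fwmask = *swmask << 5;

    if (mask & GSSR_SW_MNG_SM)
        *swmask |= GSSR_SW_MNG_SM;

    *swmask |= swi2c;
    *fwmask |= swi2c << 2;
}

int main(void)
{
    unsigned int sw, fw;

    derive_masks(GSSR_I2C_MASK | 0x1 /* EEP_SM-style low bit */, &sw, &fw);
    printf("swmask=0x%04x fwmask=0x%04x\n", sw, fw);
    return 0;
}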
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 9fe9445cd73b..ebe0ac950b14 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -56,6 +56,292 @@ static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
IXGBE_WRITE_FLUSH(hw);
}
+/**
+ * ixgbe_read_cs4227 - Read CS4227 register
+ * @hw: pointer to hardware structure
+ * @reg: register number to read
+ * @value: pointer to receive value read
+ *
+ * Returns status code
+ */
+static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
+{
+ return hw->phy.ops.read_i2c_combined_unlocked(hw, IXGBE_CS4227, reg,
+ value);
+}
+
+/**
+ * ixgbe_write_cs4227 - Write CS4227 register
+ * @hw: pointer to hardware structure
+ * @reg: register number to write
+ * @value: value to write to register
+ *
+ * Returns status code
+ */
+static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
+{
+ return hw->phy.ops.write_i2c_combined_unlocked(hw, IXGBE_CS4227, reg,
+ value);
+}
+
+/**
+ * ixgbe_check_cs4227_reg - Perform diag on a CS4227 register
+ * @hw: pointer to hardware structure
+ * @reg: the register to check
+ *
+ * Performs a diagnostic on a register in the CS4227 chip. Returns an error
+ * if it is not operating correctly.
+ * This function assumes that the caller has acquired the proper semaphore.
+ */
+static s32 ixgbe_check_cs4227_reg(struct ixgbe_hw *hw, u16 reg)
+{
+ s32 status;
+ u32 retry;
+ u16 reg_val;
+
+ reg_val = (IXGBE_CS4227_EDC_MODE_DIAG << 1) | 1;
+ status = ixgbe_write_cs4227(hw, reg, reg_val);
+ if (status)
+ return status;
+ for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
+ msleep(IXGBE_CS4227_CHECK_DELAY);
+ reg_val = 0xFFFF;
+ ixgbe_read_cs4227(hw, reg, &reg_val);
+ if (!reg_val)
+ break;
+ }
+ if (reg_val) {
+ hw_err(hw, "CS4227 reg 0x%04X failed diagnostic\n", reg);
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_cs4227_status - Return CS4227 status
+ * @hw: pointer to hardware structure
+ *
+ * Performs a diagnostic on the CS4227 chip. Returns an error if it is
+ * not operating correctly.
+ * This function assumes that the caller has acquired the proper semaphore.
+ */
+static s32 ixgbe_get_cs4227_status(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u16 value = 0;
+
+ /* Exit if the diagnostic has already been performed. */
+ status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
+ if (status)
+ return status;
+ if (value == IXGBE_CS4227_RESET_COMPLETE)
+ return 0;
+
+ /* Check port 0. */
+ status = ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_LINE_SPARE24_LSB);
+ if (status)
+ return status;
+
+ status = ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_HOST_SPARE24_LSB);
+ if (status)
+ return status;
+
+ /* Check port 1. */
+ status = ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_LINE_SPARE24_LSB +
+ (1 << 12));
+ if (status)
+ return status;
+
+ return ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_HOST_SPARE24_LSB +
+ (1 << 12));
+}
+
+/**
+ * ixgbe_read_pe - Read register from port expander
+ * @hw: pointer to hardware structure
+ * @reg: register number to read
+ * @value: pointer to receive read value
+ *
+ * Returns status code
+ */
+static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
+{
+ s32 status;
+
+ status = ixgbe_read_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE, value);
+ if (status)
+ hw_err(hw, "port expander access failed with %d\n", status);
+ return status;
+}
+
+/**
+ * ixgbe_write_pe - Write register to port expander
+ * @hw: pointer to hardware structure
+ * @reg: register number to write
+ * @value: value to write
+ *
+ * Returns status code
+ */
+static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
+{
+ s32 status;
+
+ status = ixgbe_write_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE,
+ value);
+ if (status)
+ hw_err(hw, "port expander access failed with %d\n", status);
+ return status;
+}
+
+/**
+ * ixgbe_reset_cs4227 - Reset CS4227 using port expander
+ * @hw: pointer to hardware structure
+ *
+ * This function assumes that the caller has acquired the proper semaphore.
+ * Returns error code
+ */
+static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u32 retry;
+ u16 value;
+ u8 reg;
+
+ /* Trigger hard reset. */
+ status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
+ if (status)
+ return status;
+ reg |= IXGBE_PE_BIT1;
+ status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
+ if (status)
+ return status;
+
+ status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, &reg);
+ if (status)
+ return status;
+ reg &= ~IXGBE_PE_BIT1;
+ status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg);
+ if (status)
+ return status;
+
+ status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
+ if (status)
+ return status;
+ reg &= ~IXGBE_PE_BIT1;
+ status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
+ if (status)
+ return status;
+
+ usleep_range(IXGBE_CS4227_RESET_HOLD, IXGBE_CS4227_RESET_HOLD + 100);
+
+ status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
+ if (status)
+ return status;
+ reg |= IXGBE_PE_BIT1;
+ status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
+ if (status)
+ return status;
+
+ /* Wait for the reset to complete. */
+ msleep(IXGBE_CS4227_RESET_DELAY);
+ for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
+ status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS,
+ &value);
+ if (!status && value == IXGBE_CS4227_EEPROM_LOAD_OK)
+ break;
+ msleep(IXGBE_CS4227_CHECK_DELAY);
+ }
+ if (retry == IXGBE_CS4227_RETRIES) {
+ hw_err(hw, "CS4227 reset did not complete\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value);
+ if (status || !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) {
+ hw_err(hw, "CS4227 EEPROM did not load successfully\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_check_cs4227 - Check CS4227 and reset as needed
+ * @hw: pointer to hardware structure
+ */
+static void ixgbe_check_cs4227(struct ixgbe_hw *hw)
+{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ s32 status;
+ u16 value;
+ u8 retry;
+
+ for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status) {
+ hw_err(hw, "semaphore failed with %d\n", status);
+ msleep(IXGBE_CS4227_CHECK_DELAY);
+ continue;
+ }
+
+ /* Get status of reset flow. */
+ status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
+ if (!status && value == IXGBE_CS4227_RESET_COMPLETE)
+ goto out;
+
+ if (status || value != IXGBE_CS4227_RESET_PENDING)
+ break;
+
+ /* Reset is pending. Wait and check again. */
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ msleep(IXGBE_CS4227_CHECK_DELAY);
+ }
+ /* If still pending, assume other instance failed. */
+ if (retry == IXGBE_CS4227_RETRIES) {
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status) {
+ hw_err(hw, "semaphore failed with %d\n", status);
+ return;
+ }
+ }
+
+ /* Reset the CS4227. */
+ status = ixgbe_reset_cs4227(hw);
+ if (status) {
+ hw_err(hw, "CS4227 reset failed: %d", status);
+ goto out;
+ }
+
+ /* Reset takes so long, temporarily release semaphore in case the
+ * other driver instance is waiting for the reset indication.
+ */
+ ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
+ IXGBE_CS4227_RESET_PENDING);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ usleep_range(10000, 12000);
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status) {
+ hw_err(hw, "semaphore failed with %d", status);
+ return;
+ }
+
+ /* Is the CS4227 working correctly? */
+ status = ixgbe_get_cs4227_status(hw);
+ if (status) {
+ hw_err(hw, "CS4227 status failed: %d", status);
+ goto out;
+ }
+
+ /* Record completion for next time. */
+ status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
+ IXGBE_CS4227_RESET_COMPLETE);
+
+out:
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ msleep(hw->eeprom.semaphore_delay);
+}
+
/** ixgbe_identify_phy_x550em - Get PHY type based on device id
* @hw: pointer to hardware structure
*
@@ -68,7 +354,7 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
/* set up for CS4227 usage */
hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
ixgbe_setup_mux_ctl(hw);
-
+ ixgbe_check_cs4227(hw);
return ixgbe_identify_module_generic(hw);
case IXGBE_DEV_ID_X550EM_X_KX4:
hw->phy.type = ixgbe_phy_x550em_kx4;
@@ -910,6 +1196,96 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
}
/**
+ * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported
+ * @hw: pointer to hardware structure
+ * @linear: true if SFP module is linear
+ */
+static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
+{
+ switch (hw->phy.sfp_type) {
+ case ixgbe_sfp_type_not_present:
+ return IXGBE_ERR_SFP_NOT_PRESENT;
+ case ixgbe_sfp_type_da_cu_core0:
+ case ixgbe_sfp_type_da_cu_core1:
+ *linear = true;
+ break;
+ case ixgbe_sfp_type_srlr_core0:
+ case ixgbe_sfp_type_srlr_core1:
+ case ixgbe_sfp_type_da_act_lmt_core0:
+ case ixgbe_sfp_type_da_act_lmt_core1:
+ case ixgbe_sfp_type_1g_sx_core0:
+ case ixgbe_sfp_type_1g_sx_core1:
+ case ixgbe_sfp_type_1g_lx_core0:
+ case ixgbe_sfp_type_1g_lx_core1:
+ *linear = false;
+ break;
+ case ixgbe_sfp_type_unknown:
+ case ixgbe_sfp_type_1g_cu_core0:
+ case ixgbe_sfp_type_1g_cu_core1:
+ default:
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_setup_mac_link_sfp_x550em - Configure the KR PHY for SFP.
+ * @hw: pointer to hardware structure
+ *
+ * Configures the external PHY and the integrated KR PHY for SFP support.
+ */
+static s32
+ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ __always_unused bool autoneg_wait_to_complete)
+{
+ s32 status;
+ u16 slice, value;
+ bool setup_linear = false;
+
+ /* Check if SFP module is supported and linear */
+ status = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
+
+ /* If no SFP module present, then return success. Return success since
+ * there is no reason to configure CS4227, and an SFP-not-present error is
+ * not accepted in the setup MAC link flow.
+ */
+ if (status == IXGBE_ERR_SFP_NOT_PRESENT)
+ return 0;
+
+ if (status)
+ return status;
+
+ /* Configure CS4227 LINE side to 10G SR. */
+ slice = IXGBE_CS4227_LINE_SPARE22_MSB + (hw->bus.lan_id << 12);
+ value = IXGBE_CS4227_SPEED_10G;
+ status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, slice,
+ value);
+
+ /* Configure CS4227 for HOST connection rate then type. */
+ slice = IXGBE_CS4227_HOST_SPARE22_MSB + (hw->bus.lan_id << 12);
+ value = speed & IXGBE_LINK_SPEED_10GB_FULL ?
+ IXGBE_CS4227_SPEED_10G : IXGBE_CS4227_SPEED_1G;
+ status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, slice,
+ value);
+
+ slice = IXGBE_CS4227_HOST_SPARE24_LSB + (hw->bus.lan_id << 12);
+ if (setup_linear)
+ value = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1;
+ else
+ value = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1;
+ status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, slice,
+ value);
+
+ /* If internal link mode is XFI, then setup XFI internal link. */
+ if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE))
+ status = ixgbe_setup_ixfi_x550em(hw, &speed);
+
+ return status;
+}
+
+/**
* ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
* @hw: pointer to hardware structure
* @speed: new link speed
@@ -1003,6 +1379,10 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
mac->ops.disable_tx_laser = NULL;
mac->ops.enable_tx_laser = NULL;
mac->ops.flap_tx_laser = NULL;
+ mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
+ mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_x550em;
+ mac->ops.set_rate_select_speed =
+ ixgbe_set_soft_rate_select_speed;
break;
case ixgbe_media_type_copper:
mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
@@ -1018,53 +1398,18 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
*/
static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
{
- bool setup_linear;
- u16 reg_slice, edc_mode;
- s32 ret_val;
+ s32 status;
+ bool linear;
- switch (hw->phy.sfp_type) {
- case ixgbe_sfp_type_unknown:
- return 0;
- case ixgbe_sfp_type_not_present:
- return IXGBE_ERR_SFP_NOT_PRESENT;
- case ixgbe_sfp_type_da_cu_core0:
- case ixgbe_sfp_type_da_cu_core1:
- setup_linear = true;
- break;
- case ixgbe_sfp_type_srlr_core0:
- case ixgbe_sfp_type_srlr_core1:
- case ixgbe_sfp_type_da_act_lmt_core0:
- case ixgbe_sfp_type_da_act_lmt_core1:
- case ixgbe_sfp_type_1g_sx_core0:
- case ixgbe_sfp_type_1g_sx_core1:
- setup_linear = false;
- break;
- default:
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
- }
+ /* Check if SFP module is supported */
+ status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
+ if (status)
+ return status;
ixgbe_init_mac_link_ops_X550em(hw);
hw->phy.ops.reset = NULL;
- /* The CS4227 slice address is the base address + the port-pair reg
- * offset. I.e. Slice 0 = 0x12B0 and slice 1 = 0x22B0.
- */
- reg_slice = IXGBE_CS4227_SPARE24_LSB + (hw->bus.lan_id << 12);
-
- if (setup_linear)
- edc_mode = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
- else
- edc_mode = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
-
- /* Configure CS4227 for connection type. */
- ret_val = hw->phy.ops.write_i2c_combined(hw, IXGBE_CS4227, reg_slice,
- edc_mode);
-
- if (ret_val)
- ret_val = hw->phy.ops.write_i2c_combined(hw, 0x80, reg_slice,
- edc_mode);
-
- return ret_val;
+ return 0;
}
/** ixgbe_get_link_capabilities_x550em - Determines link capabilities
@@ -1272,7 +1617,7 @@ static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
if (status)
return status;
- if (lsc)
+ if (lsc && phy->ops.setup_internal_link)
return phy->ops.setup_internal_link(hw);
return 0;
@@ -1927,6 +2272,62 @@ static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
}
+/**
+ * ixgbe_set_mux - Set mux for port 1 access with CS4227
+ * @hw: pointer to hardware structure
+ * @state: set mux if 1, clear if 0
+ */
+static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
+{
+ u32 esdp;
+
+ if (!hw->bus.lan_id)
+ return;
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ if (state)
+ esdp |= IXGBE_ESDP_SDP1;
+ else
+ esdp &= ~IXGBE_ESDP_SDP1;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore and sets the I2C MUX
+ */
+static s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
+{
+ s32 status;
+
+ status = ixgbe_acquire_swfw_sync_X540(hw, mask);
+ if (status)
+ return status;
+
+ if (mask & IXGBE_GSSR_I2C_MASK)
+ ixgbe_set_mux(hw, 1);
+
+ return 0;
+}
+
+/**
+ * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore and clears the I2C mux
+ */
+static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
+{
+ if (mask & IXGBE_GSSR_I2C_MASK)
+ ixgbe_set_mux(hw, 0);
+
+ ixgbe_release_swfw_sync_X540(hw, mask);
+}
+
#define X550_COMMON_MAC \
.init_hw = &ixgbe_init_hw_generic, \
.start_hw = &ixgbe_start_hw_X540, \
@@ -1964,8 +2365,6 @@ static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw,
&ixgbe_set_source_address_pruning_X550, \
.set_ethertype_anti_spoofing = \
&ixgbe_set_ethertype_anti_spoofing_X550, \
- .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, \
- .release_swfw_sync = &ixgbe_release_swfw_sync_X540, \
.disable_rx_buff = &ixgbe_disable_rx_buff_generic, \
.enable_rx_buff = &ixgbe_enable_rx_buff_generic, \
.get_thermal_sensor_data = NULL, \
@@ -1985,6 +2384,8 @@ static struct ixgbe_mac_operations mac_ops_X550 = {
.get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic,
.get_bus_info = &ixgbe_get_bus_info_generic,
.setup_sfp = NULL,
+ .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540,
+ .release_swfw_sync = &ixgbe_release_swfw_sync_X540,
};
static struct ixgbe_mac_operations mac_ops_X550EM_x = {
@@ -1997,7 +2398,8 @@ static struct ixgbe_mac_operations mac_ops_X550EM_x = {
.get_link_capabilities = &ixgbe_get_link_capabilities_X550em,
.get_bus_info = &ixgbe_get_bus_info_X550em,
.setup_sfp = ixgbe_setup_sfp_modules_X550em,
-
+ .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em,
+ .release_swfw_sync = &ixgbe_release_swfw_sync_X550em,
};
#define X550_COMMON_EEP \
@@ -2039,14 +2441,17 @@ static struct ixgbe_phy_operations phy_ops_X550 = {
X550_COMMON_PHY
.init = NULL,
.identify = &ixgbe_identify_phy_generic,
- .read_i2c_combined = &ixgbe_read_i2c_combined_generic,
- .write_i2c_combined = &ixgbe_write_i2c_combined_generic,
};
static struct ixgbe_phy_operations phy_ops_X550EM_x = {
X550_COMMON_PHY
.init = &ixgbe_init_phy_ops_X550em,
.identify = &ixgbe_identify_phy_x550em,
+ .read_i2c_combined = &ixgbe_read_i2c_combined_generic,
+ .write_i2c_combined = &ixgbe_write_i2c_combined_generic,
+ .read_i2c_combined_unlocked = &ixgbe_read_i2c_combined_generic_unlocked,
+ .write_i2c_combined_unlocked =
+ &ixgbe_write_i2c_combined_generic_unlocked,
};
static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 04c7ec8446e0..ec3147279621 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -471,6 +471,12 @@ enum ixgbevf_boards {
board_X550EM_x_vf,
};
+enum ixgbevf_xcast_modes {
+ IXGBEVF_XCAST_MODE_NONE = 0,
+ IXGBEVF_XCAST_MODE_MULTI,
+ IXGBEVF_XCAST_MODE_ALLMULTI,
+};
+
extern const struct ixgbevf_info ixgbevf_82599_vf_info;
extern const struct ixgbevf_info ixgbevf_X540_vf_info;
extern const struct ixgbevf_info ixgbevf_X550_vf_info;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 149a0b4489be..592ff237d692 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1008,7 +1008,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
container_of(napi, struct ixgbevf_q_vector, napi);
struct ixgbevf_adapter *adapter = q_vector->adapter;
struct ixgbevf_ring *ring;
- int per_ring_budget;
+ int per_ring_budget, work_done = 0;
bool clean_complete = true;
ixgbevf_for_each_ring(ring, q_vector->tx)
@@ -1027,10 +1027,12 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
else
per_ring_budget = budget;
- ixgbevf_for_each_ring(ring, q_vector->rx)
- clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
- per_ring_budget)
- < per_ring_budget);
+ ixgbevf_for_each_ring(ring, q_vector->rx) {
+ int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
+ per_ring_budget);
+ work_done += cleaned;
+ clean_complete &= (cleaned < per_ring_budget);
+ }
#ifdef CONFIG_NET_RX_BUSY_POLL
ixgbevf_qv_unlock_napi(q_vector);
@@ -1040,7 +1042,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
if (!clean_complete)
return budget;
/* all work done, exit the polling mode */
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
if (adapter->rx_itr_setting & 1)
ixgbevf_set_itr(q_vector);
if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
@@ -1892,9 +1894,17 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ unsigned int flags = netdev->flags;
+ int xcast_mode;
+
+ xcast_mode = (flags & IFF_ALLMULTI) ? IXGBEVF_XCAST_MODE_ALLMULTI :
+ (flags & (IFF_BROADCAST | IFF_MULTICAST)) ?
+ IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE;
spin_lock_bh(&adapter->mbx_lock);
+ hw->mac.ops.update_xcast_mode(hw, netdev, xcast_mode);
+
/* reprogram multicast list */
hw->mac.ops.update_mc_addr_list(hw, netdev);
@@ -3896,6 +3906,7 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbevf_netpoll,
#endif
+ .ndo_features_check = passthru_features_check,
};
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index 82f44e06e5fc..340cdd469455 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -112,6 +112,8 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */
#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS hash key */
+#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c
+
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index d1339b050627..427f3605cbfc 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -469,6 +469,46 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
}
/**
+ * ixgbevf_update_xcast_mode - Update Multicast mode
+ * @hw: pointer to the HW structure
+ * @netdev: pointer to net device structure
+ * @xcast_mode: new multicast mode
+ *
+ * Updates the Multicast Mode of VF.
+ **/
+static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw,
+ struct net_device *netdev, int xcast_mode)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[2];
+ s32 err;
+
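+ /* The xcast (multicast/allmulti) mailbox message is only defined for API v1.2. */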
+ switch (hw->api_version) {
+ case ixgbe_mbox_api_12:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
+ msgbuf[1] = xcast_mode;
+
+ err = mbx->ops.write_posted(hw, msgbuf, 2);
+ if (err)
+ return err;
+
+ err = mbx->ops.read_posted(hw, msgbuf, 2);
+ if (err)
+ return err;
+
+ msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+ if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
+ return -EPERM;
+
+ return 0;
+}
+
+/**
* ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
* @hw: pointer to the HW structure
* @vlan: 12 bit VLAN ID
@@ -727,6 +767,7 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
.check_link = ixgbevf_check_mac_link_vf,
.set_rar = ixgbevf_set_rar_vf,
.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
+ .update_xcast_mode = ixgbevf_update_xcast_mode,
.set_uc_addr = ixgbevf_set_uc_addr_vf,
.set_vfta = ixgbevf_set_vfta_vf,
};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index d40f036b6df0..ef9f7736b4dc 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -63,6 +63,7 @@ struct ixgbe_mac_operations {
s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *);
s32 (*init_rx_addrs)(struct ixgbe_hw *);
s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
+ s32 (*update_xcast_mode)(struct ixgbe_hw *, struct net_device *, int);
s32 (*enable_mc)(struct ixgbe_hw *);
s32 (*disable_mc)(struct ixgbe_hw *);
s32 (*clear_vfta)(struct ixgbe_hw *);
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index dfb6d5f79a10..4182290fdbcf 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1618,7 +1618,6 @@ static void mv643xx_eth_get_drvinfo(struct net_device *dev,
sizeof(drvinfo->version));
strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
- drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
}
static int mv643xx_eth_nway_reset(struct net_device *dev)
@@ -1877,29 +1876,19 @@ static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
struct netdev_hw_addr *ha;
int i;
- if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
- int port_num;
- u32 accept;
+ if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI))
+ goto promiscuous;
-oom:
- port_num = mp->port_num;
- accept = 0x01010101;
- for (i = 0; i < 0x100; i += 4) {
- wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
- wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
- }
- return;
- }
-
- mc_spec = kzalloc(0x200, GFP_ATOMIC);
- if (mc_spec == NULL)
- goto oom;
- mc_other = mc_spec + (0x100 >> 2);
+ /* Allocate both mc_spec and mc_other tables */
+ mc_spec = kcalloc(128, sizeof(u32), GFP_ATOMIC);
+ if (!mc_spec)
+ goto promiscuous;
+ mc_other = &mc_spec[64];
netdev_for_each_mc_addr(ha, dev) {
u8 *a = ha->addr;
u32 *table;
- int entry;
+ u8 entry;
if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
table = mc_spec;
@@ -1912,12 +1901,23 @@ oom:
table[entry >> 2] |= 1 << (8 * (entry & 3));
}
- for (i = 0; i < 0x100; i += 4) {
- wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]);
- wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]);
+ for (i = 0; i < 64; i++) {
+ wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
+ mc_spec[i]);
+ wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
+ mc_other[i]);
}
kfree(mc_spec);
+ return;
+
+promiscuous:
+ for (i = 0; i < 64; i++) {
+ wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
+ 0x01010101u);
+ wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
+ 0x01010101u);
+ }
}
static void mv643xx_eth_set_rx_mode(struct net_device *dev)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 514df76fc70f..a47496a020d9 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -32,6 +32,7 @@
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>
+#include <linux/cpu.h>
/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
@@ -100,6 +101,8 @@
#define MVNETA_TXQ_CMD 0x2448
#define MVNETA_TXQ_DISABLE_SHIFT 8
#define MVNETA_TXQ_ENABLE_MASK 0x000000ff
+#define MVNETA_RX_DISCARD_FRAME_COUNT 0x2484
+#define MVNETA_OVERRUN_FRAME_COUNT 0x2488
#define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4
#define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31)
#define MVNETA_ACC_MODE 0x2500
@@ -191,7 +194,7 @@
#define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
-#define MVNETA_MIB_COUNTERS_BASE 0x3080
+#define MVNETA_MIB_COUNTERS_BASE 0x3000
#define MVNETA_MIB_LATE_COLLISION 0x7c
#define MVNETA_DA_FILT_SPEC_MCAST 0x3400
#define MVNETA_DA_FILT_OTH_MCAST 0x3500
@@ -277,6 +280,50 @@
#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
+struct mvneta_statistic {
+ unsigned short offset;
+ unsigned short type;
+ const char name[ETH_GSTRING_LEN];
+};
+
+#define T_REG_32 32
+#define T_REG_64 64
+
+static const struct mvneta_statistic mvneta_statistics[] = {
+ { 0x3000, T_REG_64, "good_octets_received", },
+ { 0x3010, T_REG_32, "good_frames_received", },
+ { 0x3008, T_REG_32, "bad_octets_received", },
+ { 0x3014, T_REG_32, "bad_frames_received", },
+ { 0x3018, T_REG_32, "broadcast_frames_received", },
+ { 0x301c, T_REG_32, "multicast_frames_received", },
+ { 0x3050, T_REG_32, "unrec_mac_control_received", },
+ { 0x3058, T_REG_32, "good_fc_received", },
+ { 0x305c, T_REG_32, "bad_fc_received", },
+ { 0x3060, T_REG_32, "undersize_received", },
+ { 0x3064, T_REG_32, "fragments_received", },
+ { 0x3068, T_REG_32, "oversize_received", },
+ { 0x306c, T_REG_32, "jabber_received", },
+ { 0x3070, T_REG_32, "mac_receive_error", },
+ { 0x3074, T_REG_32, "bad_crc_event", },
+ { 0x3078, T_REG_32, "collision", },
+ { 0x307c, T_REG_32, "late_collision", },
+ { 0x2484, T_REG_32, "rx_discard", },
+ { 0x2488, T_REG_32, "rx_overrun", },
+ { 0x3020, T_REG_32, "frames_64_octets", },
+ { 0x3024, T_REG_32, "frames_65_to_127_octets", },
+ { 0x3028, T_REG_32, "frames_128_to_255_octets", },
+ { 0x302c, T_REG_32, "frames_256_to_511_octets", },
+ { 0x3030, T_REG_32, "frames_512_to_1023_octets", },
+ { 0x3034, T_REG_32, "frames_1024_to_max_octets", },
+ { 0x3038, T_REG_64, "good_octets_sent", },
+ { 0x3040, T_REG_32, "good_frames_sent", },
+ { 0x3044, T_REG_32, "excessive_collision", },
+ { 0x3048, T_REG_32, "multicast_frames_sent", },
+ { 0x304c, T_REG_32, "broadcast_frames_sent", },
+ { 0x3054, T_REG_32, "fc_sent", },
+ { 0x300c, T_REG_32, "internal_mac_transmit_err", },
+};
+
struct mvneta_pcpu_stats {
struct u64_stats_sync syncp;
u64 rx_packets;
@@ -285,23 +332,34 @@ struct mvneta_pcpu_stats {
u64 tx_bytes;
};
+struct mvneta_pcpu_port {
+ /* Pointer to the shared port */
+ struct mvneta_port *pp;
+
+ /* Pointer to the CPU-local NAPI struct */
+ struct napi_struct napi;
+
+ /* Cause of the previous interrupt */
+ u32 cause_rx_tx;
+};
+
struct mvneta_port {
+ struct mvneta_pcpu_port __percpu *ports;
+ struct mvneta_pcpu_stats __percpu *stats;
+
int pkt_size;
unsigned int frag_size;
void __iomem *base;
struct mvneta_rx_queue *rxqs;
struct mvneta_tx_queue *txqs;
struct net_device *dev;
-
- u32 cause_rx_tx;
- struct napi_struct napi;
+ struct notifier_block cpu_notifier;
/* Core clock */
struct clk *clk;
u8 mcast_count[256];
u16 tx_ring_size;
u16 rx_ring_size;
- struct mvneta_pcpu_stats *stats;
struct mii_bus *mii_bus;
struct phy_device *phy_dev;
@@ -312,6 +370,8 @@ struct mvneta_port {
unsigned int speed;
unsigned int tx_csum_limit;
int use_inband_status:1;
+
+ u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
};
/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
@@ -468,7 +528,7 @@ struct mvneta_rx_queue {
/* The hardware supports eight (8) rx queues, but we are only allowing
* the first one to be used. Therefore, let's just allocate one queue.
*/
-static int rxq_number = 1;
+static int rxq_number = 8;
static int txq_number = 8;
static int rxq_def;
@@ -518,6 +578,8 @@ static void mvneta_mib_counters_clear(struct mvneta_port *pp)
/* Perform dummy reads from MIB counters */
for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
+ dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
+ dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}
/* Get System Network Statistics */
@@ -746,7 +808,6 @@ static void mvneta_port_up(struct mvneta_port *pp)
u32 q_map;
/* Enable all initialized TXs. */
- mvneta_mib_counters_clear(pp);
q_map = 0;
for (queue = 0; queue < txq_number; queue++) {
struct mvneta_tx_queue *txq = &pp->txqs[queue];
@@ -756,14 +817,7 @@ static void mvneta_port_up(struct mvneta_port *pp)
mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
/* Enable all initialized RXQs. */
- q_map = 0;
- for (queue = 0; queue < rxq_number; queue++) {
- struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
- if (rxq->descs != NULL)
- q_map |= (1 << queue);
- }
-
- mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
+ mvreg_write(pp, MVNETA_RXQ_CMD, BIT(rxq_def));
}
/* Stop the Ethernet port activity */
@@ -949,7 +1003,7 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
/* Set CPU queue access map - all CPUs have access to all RX
* queues and to all TX queues
*/
- for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
+ for_each_present_cpu(cpu)
mvreg_write(pp, MVNETA_CPU_MAP(cpu),
(MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
MVNETA_CPU_TXQ_ACCESS_ALL_MASK));
@@ -1030,6 +1084,8 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
mvreg_write(pp, MVNETA_INTR_ENABLE,
(MVNETA_RXQ_INTR_ENABLE_ALL_MASK
| MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
+
+ mvneta_mib_counters_clear(pp);
}
/* Set max sizes for tx queues */
@@ -1426,17 +1482,6 @@ static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
return MVNETA_TX_L4_CSUM_NOT;
}
-/* Returns rx queue pointer (find last set bit) according to causeRxTx
- * value
- */
-static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp,
- u32 cause)
-{
- int queue = fls(cause >> 8) - 1;
-
- return (queue < 0 || queue >= rxq_number) ? NULL : &pp->rxqs[queue];
-}
-
/* Drop packets received by the RXQ and free buffers */
static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
struct mvneta_rx_queue *rxq)
@@ -1461,6 +1506,7 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
struct mvneta_rx_queue *rxq)
{
+ struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
struct net_device *dev = pp->dev;
int rx_done;
u32 rcvd_pkts = 0;
@@ -1515,7 +1561,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
skb->protocol = eth_type_trans(skb, dev);
mvneta_rx_csum(pp, rx_status, skb);
- napi_gro_receive(&pp->napi, skb);
+ napi_gro_receive(&port->napi, skb);
rcvd_pkts++;
rcvd_bytes += rx_bytes;
@@ -1550,7 +1596,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
mvneta_rx_csum(pp, rx_status, skb);
- napi_gro_receive(&pp->napi, skb);
+ napi_gro_receive(&port->napi, skb);
}
if (rcvd_pkts) {
@@ -2061,12 +2107,10 @@ static void mvneta_set_rx_mode(struct net_device *dev)
/* Interrupt handling - the callback for request_irq() */
static irqreturn_t mvneta_isr(int irq, void *dev_id)
{
- struct mvneta_port *pp = (struct mvneta_port *)dev_id;
-
- /* Mask all interrupts */
- mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+ struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;
- napi_schedule(&pp->napi);
+ disable_percpu_irq(port->pp->dev->irq);
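+ /* mvneta_poll() re-enables this per-CPU interrupt when it completes within budget. */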
+ napi_schedule(&port->napi);
return IRQ_HANDLED;
}
@@ -2104,11 +2148,11 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
{
int rx_done = 0;
u32 cause_rx_tx;
- unsigned long flags;
struct mvneta_port *pp = netdev_priv(napi->dev);
+ struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
if (!netif_running(pp->dev)) {
- napi_complete(napi);
+ napi_complete(&port->napi);
return rx_done;
}
@@ -2135,47 +2179,17 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
/* For the case where the last mvneta_poll did not process all
* RX packets
*/
- cause_rx_tx |= pp->cause_rx_tx;
- if (rxq_number > 1) {
- while ((cause_rx_tx & MVNETA_RX_INTR_MASK_ALL) && (budget > 0)) {
- int count;
- struct mvneta_rx_queue *rxq;
- /* get rx queue number from cause_rx_tx */
- rxq = mvneta_rx_policy(pp, cause_rx_tx);
- if (!rxq)
- break;
-
- /* process the packet in that rx queue */
- count = mvneta_rx(pp, budget, rxq);
- rx_done += count;
- budget -= count;
- if (budget > 0) {
- /* set off the rx bit of the
- * corresponding bit in the cause rx
- * tx register, so that next iteration
- * will find the next rx queue where
- * packets are received on
- */
- cause_rx_tx &= ~((1 << rxq->id) << 8);
- }
- }
- } else {
- rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
- budget -= rx_done;
- }
+ cause_rx_tx |= port->cause_rx_tx;
+ rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
+ budget -= rx_done;
if (budget > 0) {
cause_rx_tx = 0;
- napi_complete(napi);
- local_irq_save(flags);
- mvreg_write(pp, MVNETA_INTR_NEW_MASK,
- MVNETA_RX_INTR_MASK(rxq_number) |
- MVNETA_TX_INTR_MASK(txq_number) |
- MVNETA_MISCINTR_INTR_MASK);
- local_irq_restore(flags);
+ napi_complete(&port->napi);
+ enable_percpu_irq(pp->dev->irq, 0);
}
- pp->cause_rx_tx = cause_rx_tx;
+ port->cause_rx_tx = cause_rx_tx;
return rx_done;
}
@@ -2379,26 +2393,19 @@ static void mvneta_cleanup_txqs(struct mvneta_port *pp)
/* Cleanup all Rx queues */
static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{
- int queue;
-
- for (queue = 0; queue < rxq_number; queue++)
- mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
+ mvneta_rxq_deinit(pp, &pp->rxqs[rxq_def]);
}
/* Init all Rx queues */
static int mvneta_setup_rxqs(struct mvneta_port *pp)
{
- int queue;
-
- for (queue = 0; queue < rxq_number; queue++) {
- int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
- if (err) {
- netdev_err(pp->dev, "%s: can't create rxq=%d\n",
- __func__, queue);
- mvneta_cleanup_rxqs(pp);
- return err;
- }
+ int err = mvneta_rxq_init(pp, &pp->rxqs[rxq_def]);
+ if (err) {
+ netdev_err(pp->dev, "%s: can't create rxq=%d\n",
+ __func__, rxq_def);
+ mvneta_cleanup_rxqs(pp);
+ return err;
}
return 0;
@@ -2424,6 +2431,8 @@ static int mvneta_setup_txqs(struct mvneta_port *pp)
static void mvneta_start_dev(struct mvneta_port *pp)
{
+ unsigned int cpu;
+
mvneta_max_rx_size_set(pp, pp->pkt_size);
mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
@@ -2431,7 +2440,11 @@ static void mvneta_start_dev(struct mvneta_port *pp)
mvneta_port_enable(pp);
/* Enable polling on the port */
- napi_enable(&pp->napi);
+ for_each_present_cpu(cpu) {
+ struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+
+ napi_enable(&port->napi);
+ }
/* Unmask interrupts */
mvreg_write(pp, MVNETA_INTR_NEW_MASK,
@@ -2449,9 +2462,15 @@ static void mvneta_start_dev(struct mvneta_port *pp)
static void mvneta_stop_dev(struct mvneta_port *pp)
{
+ unsigned int cpu;
+
phy_stop(pp->phy_dev);
- napi_disable(&pp->napi);
+ for_each_present_cpu(cpu) {
+ struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+
+ napi_disable(&port->napi);
+ }
netif_carrier_off(pp->dev);
@@ -2691,6 +2710,125 @@ static void mvneta_mdio_remove(struct mvneta_port *pp)
pp->phy_dev = NULL;
}
+static void mvneta_percpu_enable(void *arg)
+{
+ struct mvneta_port *pp = arg;
+
+ enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
+}
+
+static void mvneta_percpu_disable(void *arg)
+{
+ struct mvneta_port *pp = arg;
+
+ disable_percpu_irq(pp->dev->irq);
+}
+
+static void mvneta_percpu_elect(struct mvneta_port *pp)
+{
+ int online_cpu_idx, cpu, i = 0;
+
+ online_cpu_idx = rxq_def % num_online_cpus();
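+ /* Pick one online CPU (derived from rxq_def) to service the RX interrupt. */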
+
+ for_each_online_cpu(cpu) {
+ if (i == online_cpu_idx)
+ /* Enable per-CPU interrupt on the one CPU we
+ * just elected
+ */
+ smp_call_function_single(cpu, mvneta_percpu_enable,
+ pp, true);
+ else
+ /* Disable the per-CPU interrupt on all the other CPUs */
+ smp_call_function_single(cpu, mvneta_percpu_disable,
+ pp, true);
+ i++;
+ }
+}
+
+static int mvneta_percpu_notifier(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ struct mvneta_port *pp = container_of(nfb, struct mvneta_port,
+ cpu_notifier);
+ int cpu = (unsigned long)hcpu, other_cpu;
+ struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ netif_tx_stop_all_queues(pp->dev);
+
+ /* We have to synchronise on the napi of each CPU
+ * except the one just being brought online
+ */
+ for_each_online_cpu(other_cpu) {
+ if (other_cpu != cpu) {
+ struct mvneta_pcpu_port *other_port =
+ per_cpu_ptr(pp->ports, other_cpu);
+
+ napi_synchronize(&other_port->napi);
+ }
+ }
+
+ /* Mask all ethernet port interrupts */
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+ mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
+ mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+ napi_enable(&port->napi);
+
+ /* Enable per-CPU interrupt on the one CPU we care
+ * about.
+ */
+ mvneta_percpu_elect(pp);
+
+ /* Unmask all ethernet port interrupts */
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+ MVNETA_RX_INTR_MASK(rxq_number) |
+ MVNETA_TX_INTR_MASK(txq_number) |
+ MVNETA_MISCINTR_INTR_MASK);
+ mvreg_write(pp, MVNETA_INTR_MISC_MASK,
+ MVNETA_CAUSE_PHY_STATUS_CHANGE |
+ MVNETA_CAUSE_LINK_CHANGE |
+ MVNETA_CAUSE_PSC_SYNC_CHANGE);
+ netif_tx_start_all_queues(pp->dev);
+ break;
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ netif_tx_stop_all_queues(pp->dev);
+ /* Mask all ethernet port interrupts */
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+ mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
+ mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+
+ napi_synchronize(&port->napi);
+ napi_disable(&port->napi);
+ /* Disable per-CPU interrupts on the CPU that is
+ * brought down.
+ */
+ smp_call_function_single(cpu, mvneta_percpu_disable,
+ pp, true);
+
+ break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ /* Check if a new CPU must be elected now that this one is down */
+ mvneta_percpu_elect(pp);
+ /* Unmask all ethernet port interrupts */
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+ MVNETA_RX_INTR_MASK(rxq_number) |
+ MVNETA_TX_INTR_MASK(txq_number) |
+ MVNETA_MISCINTR_INTR_MASK);
+ mvreg_write(pp, MVNETA_INTR_MISC_MASK,
+ MVNETA_CAUSE_PHY_STATUS_CHANGE |
+ MVNETA_CAUSE_LINK_CHANGE |
+ MVNETA_CAUSE_PSC_SYNC_CHANGE);
+ netif_tx_start_all_queues(pp->dev);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
static int mvneta_open(struct net_device *dev)
{
struct mvneta_port *pp = netdev_priv(dev);
@@ -2709,13 +2847,29 @@ static int mvneta_open(struct net_device *dev)
goto err_cleanup_rxqs;
/* Connect to port interrupt line */
- ret = request_irq(pp->dev->irq, mvneta_isr, 0,
- MVNETA_DRIVER_NAME, pp);
+ ret = request_percpu_irq(pp->dev->irq, mvneta_isr,
+ MVNETA_DRIVER_NAME, pp->ports);
if (ret) {
netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
goto err_cleanup_txqs;
}
+ /* Even though the documentation says that request_percpu_irq
+ * doesn't enable the interrupts automatically, it actually
+ * does so on the local CPU.
+ *
+ * Make sure it's disabled.
+ */
+ mvneta_percpu_disable(pp);
+
+ /* Elect a CPU to handle our RX queue interrupt */
+ mvneta_percpu_elect(pp);
+
+ /* Register a CPU notifier to handle the case where our CPU
+ * might be taken offline.
+ */
+ register_cpu_notifier(&pp->cpu_notifier);
+
/* In default link is down */
netif_carrier_off(pp->dev);
@@ -2730,7 +2884,7 @@ static int mvneta_open(struct net_device *dev)
return 0;
err_free_irq:
- free_irq(pp->dev->irq, pp);
+ free_percpu_irq(pp->dev->irq, pp->ports);
err_cleanup_txqs:
mvneta_cleanup_txqs(pp);
err_cleanup_rxqs:
@@ -2742,10 +2896,14 @@ err_cleanup_rxqs:
static int mvneta_stop(struct net_device *dev)
{
struct mvneta_port *pp = netdev_priv(dev);
+ int cpu;
mvneta_stop_dev(pp);
mvneta_mdio_remove(pp);
- free_irq(dev->irq, pp);
+ unregister_cpu_notifier(&pp->cpu_notifier);
+ for_each_present_cpu(cpu)
+ smp_call_function_single(cpu, mvneta_percpu_disable, pp, true);
+ free_percpu_irq(dev->irq, pp->ports);
mvneta_cleanup_rxqs(pp);
mvneta_cleanup_txqs(pp);
@@ -2875,6 +3033,65 @@ static int mvneta_ethtool_set_ringparam(struct net_device *dev,
return 0;
}
+static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
+ u8 *data)
+{
+ if (sset == ETH_SS_STATS) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ mvneta_statistics[i].name, ETH_GSTRING_LEN);
+ }
+}
+
+static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
+{
+ const struct mvneta_statistic *s;
+ void __iomem *base = pp->base;
+ u32 high, low;
+ u64 val;
+ int i;
+
+ for (i = 0, s = mvneta_statistics;
+ s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
+ s++, i++) {
+ val = 0;
+
+ switch (s->type) {
+ case T_REG_32:
+ val = readl_relaxed(base + s->offset);
+ break;
+ case T_REG_64:
+ /* Docs say to read low 32-bit then high */
+ low = readl_relaxed(base + s->offset);
+ high = readl_relaxed(base + s->offset + 4);
+ val = (u64)high << 32 | low;
+ break;
+ }
+
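+ /* The MIB counters are clear-on-read, so accumulate into the software copy. */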
+ pp->ethtool_stats[i] += val;
+ }
+}
+
+static void mvneta_ethtool_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ int i;
+
+ mvneta_ethtool_update_stats(pp);
+
+ for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
+ *data++ = pp->ethtool_stats[i];
+}
+
+static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return ARRAY_SIZE(mvneta_statistics);
+ return -EOPNOTSUPP;
+}
+
static const struct net_device_ops mvneta_netdev_ops = {
.ndo_open = mvneta_open,
.ndo_stop = mvneta_stop,
@@ -2896,6 +3113,9 @@ const struct ethtool_ops mvneta_eth_tool_ops = {
.get_drvinfo = mvneta_ethtool_get_drvinfo,
.get_ringparam = mvneta_ethtool_get_ringparam,
.set_ringparam = mvneta_ethtool_set_ringparam,
+ .get_strings = mvneta_ethtool_get_strings,
+ .get_ethtool_stats = mvneta_ethtool_get_stats,
+ .get_sset_count = mvneta_ethtool_get_sset_count,
};
/* Initialize hw */
@@ -3032,14 +3252,7 @@ static int mvneta_probe(struct platform_device *pdev)
const char *managed;
int phy_mode;
int err;
-
- /* Our multiqueue support is not complete, so for now, only
- * allow the usage of the first RX queue
- */
- if (rxq_def != 0) {
- dev_err(&pdev->dev, "Invalid rxq_def argument: %d\n", rxq_def);
- return -EINVAL;
- }
+ int cpu;
dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
if (!dev)
@@ -3091,6 +3304,7 @@ static int mvneta_probe(struct platform_device *pdev)
err = of_property_read_string(dn, "managed", &managed);
pp->use_inband_status = (err == 0 &&
strcmp(managed, "in-band-status") == 0);
+ pp->cpu_notifier.notifier_call = mvneta_percpu_notifier;
pp->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pp->clk)) {
@@ -3107,11 +3321,18 @@ static int mvneta_probe(struct platform_device *pdev)
goto err_clk;
}
+ /* Alloc per-cpu port structure */
+ pp->ports = alloc_percpu(struct mvneta_pcpu_port);
+ if (!pp->ports) {
+ err = -ENOMEM;
+ goto err_clk;
+ }
+
/* Alloc per-cpu stats */
pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
if (!pp->stats) {
err = -ENOMEM;
- goto err_clk;
+ goto err_free_ports;
}
dt_mac_addr = of_get_mac_address(dn);
@@ -3152,7 +3373,12 @@ static int mvneta_probe(struct platform_device *pdev)
if (dram_target_info)
mvneta_conf_mbus_windows(pp, dram_target_info);
- netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
+ for_each_present_cpu(cpu) {
+ struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+
+ netif_napi_add(dev, &port->napi, mvneta_poll, NAPI_POLL_WEIGHT);
+ port->pp = pp;
+ }
dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
dev->hw_features |= dev->features;
@@ -3183,6 +3409,8 @@ static int mvneta_probe(struct platform_device *pdev)
err_free_stats:
free_percpu(pp->stats);
+err_free_ports:
+ free_percpu(pp->ports);
err_clk:
clk_disable_unprepare(pp->clk);
err_put_phy_node:
@@ -3202,6 +3430,7 @@ static int mvneta_remove(struct platform_device *pdev)
unregister_netdev(dev);
clk_disable_unprepare(pp->clk);
+ free_percpu(pp->ports);
free_percpu(pp->stats);
irq_dispose_mapping(dev->irq);
of_node_put(pp->phy_node);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index d9f4498832a1..5606a043063e 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4819,6 +4819,18 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8,
ETH_ALEN);
+ /* if the address is invalid, use a random value */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ struct sockaddr sa = { AF_UNSPEC };
+
+ netdev_warn(dev,
+ "Invalid MAC address, defaulting to random\n");
+ eth_hw_addr_random(dev);
+ memcpy(sa.sa_data, dev->dev_addr, ETH_ALEN);
+ if (sky2_set_mac_address(dev, &sa))
+ netdev_warn(dev, "Failed to set MAC address.\n");
+ }
+
return dev;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index f79d8124321e..ddb5541882f5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -95,9 +95,6 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
(u16) (mdev->dev->caps.fw_ver & 0xffff));
strlcpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->n_stats = 0;
- drvinfo->regdump_len = 0;
- drvinfo->eedump_len = 0;
}
static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 4726122ea76b..886e1bc86374 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -573,10 +573,8 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
- struct mlx4_mac_entry *entry;
int index = 0;
int err = 0;
- u64 reg_id = 0;
int *qpn = &priv->base_qpn;
u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
@@ -600,44 +598,11 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
if (err) {
en_err(priv, "Failed to reserve qp for mac registration\n");
- goto qp_err;
- }
-
- err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
- if (err)
- goto steer_err;
-
- err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
- &priv->tunnel_reg_id);
- if (err)
- goto tunnel_err;
-
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
- err = -ENOMEM;
- goto alloc_err;
+ mlx4_unregister_mac(dev, priv->port, mac);
+ return err;
}
- memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
- memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
- entry->reg_id = reg_id;
-
- hlist_add_head_rcu(&entry->hlist,
- &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);
return 0;
-
-alloc_err:
- if (priv->tunnel_reg_id)
- mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
-tunnel_err:
- mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
-
-steer_err:
- mlx4_qp_release_range(dev, *qpn, 1);
-
-qp_err:
- mlx4_unregister_mac(dev, priv->port, mac);
- return err;
}
static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
@@ -645,39 +610,13 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
int qpn = priv->base_qpn;
- u64 mac;
if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
- mac = mlx4_mac_to_u64(priv->dev->dev_addr);
+ u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
priv->dev->dev_addr);
mlx4_unregister_mac(dev, priv->port, mac);
} else {
- struct mlx4_mac_entry *entry;
- struct hlist_node *tmp;
- struct hlist_head *bucket;
- unsigned int i;
-
- for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
- bucket = &priv->mac_hash[i];
- hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
- mac = mlx4_mac_to_u64(entry->mac);
- en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
- entry->mac);
- mlx4_en_uc_steer_release(priv, entry->mac,
- qpn, entry->reg_id);
-
- mlx4_unregister_mac(dev, priv->port, mac);
- hlist_del_rcu(&entry->hlist);
- kfree_rcu(entry, rcu);
- }
- }
-
- if (priv->tunnel_reg_id) {
- mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
- priv->tunnel_reg_id = 0;
- }
-
en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
priv->port, qpn);
mlx4_qp_release_range(dev, qpn, 1);
@@ -1283,6 +1222,75 @@ static void mlx4_en_netpoll(struct net_device *dev)
}
#endif
+static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
+{
+ u64 reg_id;
+ int err = 0;
+ int *qpn = &priv->base_qpn;
+ struct mlx4_mac_entry *entry;
+
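+ /* Add unicast and tunnel (VXLAN) steering rules for the port MAC and keep the entry for later removal. */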
+ err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
+ if (err)
+ return err;
+
+ err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
+ &priv->tunnel_reg_id);
+ if (err)
+ goto tunnel_err;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
+
+ memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
+ memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
+ entry->reg_id = reg_id;
+ hlist_add_head_rcu(&entry->hlist,
+ &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);
+
+ return 0;
+
+alloc_err:
+ if (priv->tunnel_reg_id)
+ mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+
+tunnel_err:
+ mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
+ return err;
+}
+
+static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv)
+{
+ u64 mac;
+ unsigned int i;
+ int qpn = priv->base_qpn;
+ struct hlist_head *bucket;
+ struct hlist_node *tmp;
+ struct mlx4_mac_entry *entry;
+
+ for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
+ bucket = &priv->mac_hash[i];
+ hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
+ mac = mlx4_mac_to_u64(entry->mac);
+ en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n",
+ entry->mac);
+ mlx4_en_uc_steer_release(priv, entry->mac,
+ qpn, entry->reg_id);
+
+ mlx4_unregister_mac(priv->mdev->dev, priv->port, mac);
+ hlist_del_rcu(&entry->hlist);
+ kfree_rcu(entry, rcu);
+ }
+ }
+
+ if (priv->tunnel_reg_id) {
+ mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+ priv->tunnel_reg_id = 0;
+ }
+}
+
static void mlx4_en_tx_timeout(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -1684,6 +1692,11 @@ int mlx4_en_start_port(struct net_device *dev)
goto tx_err;
}
+ /* Set Unicast and VXLAN steering rules */
+ if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0 &&
+ mlx4_en_set_rss_steer_rules(priv))
+ mlx4_warn(mdev, "Failed setting steering rules\n");
+
/* Attach rx QP to broadcast address */
eth_broadcast_addr(&mc_list[10]);
mc_list[5] = priv->port; /* needed for B0 steering support */
@@ -1831,6 +1844,9 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
for (i = 0; i < priv->tx_ring_num; i++)
mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
+ if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
+ mlx4_en_delete_rss_steer_rules(priv);
+
/* Free RSS qps */
mlx4_en_release_rss_steer(priv);
@@ -2800,7 +2816,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
struct mlx4_en_priv *priv;
int i;
int err;
- u64 mac_u64;
dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
MAX_TX_RINGS, MAX_RX_RINGS);
@@ -2892,17 +2907,17 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->addr_len = ETH_ALEN;
mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
if (!is_valid_ether_addr(dev->dev_addr)) {
- if (mlx4_is_slave(priv->mdev->dev)) {
- eth_hw_addr_random(dev);
- en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
- mac_u64 = mlx4_mac_to_u64(dev->dev_addr);
- mdev->dev->caps.def_mac[priv->port] = mac_u64;
- } else {
- en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
- priv->port, dev->dev_addr);
- err = -EINVAL;
- goto out;
- }
+ en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
+ priv->port, dev->dev_addr);
+ err = -EINVAL;
+ goto out;
+ } else if (mlx4_is_slave(priv->mdev->dev) &&
+ (priv->mdev->dev->port_random_macs & 1 << priv->port)) {
+ /* Random MAC was assigned in mlx4_slave_cap
+ * in mlx4_core module
+ */
+ dev->addr_assign_type |= NET_ADDR_RANDOM;
+ en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
}
memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac));
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index e8ec1dec5789..f13a4d7bbf95 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -2840,3 +2840,19 @@ int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val)
return -EOPNOTSUPP;
}
EXPORT_SYMBOL(set_phv_bit);
+
+void mlx4_replace_zero_macs(struct mlx4_dev *dev)
+{
+ int i;
+ u8 mac_addr[ETH_ALEN];
+
+ dev->port_random_macs = 0;
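+ /* Give every Ethernet port without a burned-in MAC a random one and record which ports were patched. */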
+ for (i = 1; i <= dev->caps.num_ports; ++i)
+ if (!dev->caps.def_mac[i] &&
+ dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) {
+ eth_random_addr(mac_addr);
+ dev->port_random_macs |= 1 << i;
+ dev->caps.def_mac[i] = mlx4_mac_to_u64(mac_addr);
+ }
+}
+EXPORT_SYMBOL_GPL(mlx4_replace_zero_macs);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index cc3a9897574c..85f1b1e7e505 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -863,6 +863,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
return -ENODEV;
}
+ mlx4_replace_zero_macs(dev);
+
dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 232b2b55f23b..e1cf9036af22 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1378,6 +1378,8 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work);
void mlx4_init_quotas(struct mlx4_dev *dev);
+/* for VFs, replace zero MACs with randomly-generated MACs at driver start */
+void mlx4_replace_zero_macs(struct mlx4_dev *dev);
int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
/* Returns the VF index of slave */
int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 78f51e103880..93195191f45b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -318,7 +318,7 @@ int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
key, NULL);
} else {
mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR_OR_NULL(mailbox))
+ if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
err = mlx4_cmd_box(dev, 0, mailbox->dma, key,
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 20268634a9ab..3311f35d08e0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -422,15 +422,15 @@ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
u64 qp_mask = 0;
int err = 0;
+ if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
+ return -EINVAL;
+
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
cmd = (struct mlx4_update_qp_context *)mailbox->buf;
- if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
- return -EINVAL;
-
if (attr & MLX4_UPDATE_QP_SMAC) {
pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 731423ca575d..ac4b99ab1f85 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1238,8 +1238,10 @@ static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
return 0;
undo:
- for (--i; i >= base; --i)
+ for (--i; i >= 0; --i) {
rb_erase(&res_arr[i]->node, root);
+ list_del_init(&res_arr[i]->list);
+ }
spin_unlock_irq(mlx4_tlock(dev));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 75ff58dc1ff5..fabfc9e0a948 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -254,6 +254,156 @@ static void dump_buf(void *buf, int size, int data_only, int offset)
pr_debug("\n");
}
+enum {
+ MLX5_DRIVER_STATUS_ABORTED = 0xfe,
+ MLX5_DRIVER_SYND = 0xbadd00de,
+};
+
+static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
+ u32 *synd, u8 *status)
+{
+ *synd = 0;
+ *status = 0;
+
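+ /* Teardown/destroy/dealloc commands are reported as successful so cleanup can proceed during an internal error; all other commands fail with a driver-generated syndrome. */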
+ switch (op) {
+ case MLX5_CMD_OP_TEARDOWN_HCA:
+ case MLX5_CMD_OP_DISABLE_HCA:
+ case MLX5_CMD_OP_MANAGE_PAGES:
+ case MLX5_CMD_OP_DESTROY_MKEY:
+ case MLX5_CMD_OP_DESTROY_EQ:
+ case MLX5_CMD_OP_DESTROY_CQ:
+ case MLX5_CMD_OP_DESTROY_QP:
+ case MLX5_CMD_OP_DESTROY_PSV:
+ case MLX5_CMD_OP_DESTROY_SRQ:
+ case MLX5_CMD_OP_DESTROY_XRC_SRQ:
+ case MLX5_CMD_OP_DESTROY_DCT:
+ case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
+ case MLX5_CMD_OP_DEALLOC_PD:
+ case MLX5_CMD_OP_DEALLOC_UAR:
+ case MLX5_CMD_OP_DETTACH_FROM_MCG:
+ case MLX5_CMD_OP_DEALLOC_XRCD:
+ case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
+ case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
+ case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_DESTROY_TIR:
+ case MLX5_CMD_OP_DESTROY_SQ:
+ case MLX5_CMD_OP_DESTROY_RQ:
+ case MLX5_CMD_OP_DESTROY_RMP:
+ case MLX5_CMD_OP_DESTROY_TIS:
+ case MLX5_CMD_OP_DESTROY_RQT:
+ case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
+ case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
+ case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
+ return MLX5_CMD_STAT_OK;
+
+ case MLX5_CMD_OP_QUERY_HCA_CAP:
+ case MLX5_CMD_OP_QUERY_ADAPTER:
+ case MLX5_CMD_OP_INIT_HCA:
+ case MLX5_CMD_OP_ENABLE_HCA:
+ case MLX5_CMD_OP_QUERY_PAGES:
+ case MLX5_CMD_OP_SET_HCA_CAP:
+ case MLX5_CMD_OP_QUERY_ISSI:
+ case MLX5_CMD_OP_SET_ISSI:
+ case MLX5_CMD_OP_CREATE_MKEY:
+ case MLX5_CMD_OP_QUERY_MKEY:
+ case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
+ case MLX5_CMD_OP_PAGE_FAULT_RESUME:
+ case MLX5_CMD_OP_CREATE_EQ:
+ case MLX5_CMD_OP_QUERY_EQ:
+ case MLX5_CMD_OP_GEN_EQE:
+ case MLX5_CMD_OP_CREATE_CQ:
+ case MLX5_CMD_OP_QUERY_CQ:
+ case MLX5_CMD_OP_MODIFY_CQ:
+ case MLX5_CMD_OP_CREATE_QP:
+ case MLX5_CMD_OP_RST2INIT_QP:
+ case MLX5_CMD_OP_INIT2RTR_QP:
+ case MLX5_CMD_OP_RTR2RTS_QP:
+ case MLX5_CMD_OP_RTS2RTS_QP:
+ case MLX5_CMD_OP_SQERR2RTS_QP:
+ case MLX5_CMD_OP_2ERR_QP:
+ case MLX5_CMD_OP_2RST_QP:
+ case MLX5_CMD_OP_QUERY_QP:
+ case MLX5_CMD_OP_SQD_RTS_QP:
+ case MLX5_CMD_OP_INIT2INIT_QP:
+ case MLX5_CMD_OP_CREATE_PSV:
+ case MLX5_CMD_OP_CREATE_SRQ:
+ case MLX5_CMD_OP_QUERY_SRQ:
+ case MLX5_CMD_OP_ARM_RQ:
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ case MLX5_CMD_OP_QUERY_XRC_SRQ:
+ case MLX5_CMD_OP_ARM_XRC_SRQ:
+ case MLX5_CMD_OP_CREATE_DCT:
+ case MLX5_CMD_OP_DRAIN_DCT:
+ case MLX5_CMD_OP_QUERY_DCT:
+ case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
+ case MLX5_CMD_OP_QUERY_VPORT_STATE:
+ case MLX5_CMD_OP_MODIFY_VPORT_STATE:
+ case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
+ case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
+ case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
+ case MLX5_CMD_OP_SET_ROCE_ADDRESS:
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
+ case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
+ case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
+ case MLX5_CMD_OP_ALLOC_Q_COUNTER:
+ case MLX5_CMD_OP_QUERY_Q_COUNTER:
+ case MLX5_CMD_OP_ALLOC_PD:
+ case MLX5_CMD_OP_ALLOC_UAR:
+ case MLX5_CMD_OP_CONFIG_INT_MODERATION:
+ case MLX5_CMD_OP_ACCESS_REG:
+ case MLX5_CMD_OP_ATTACH_TO_MCG:
+ case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
+ case MLX5_CMD_OP_MAD_IFC:
+ case MLX5_CMD_OP_QUERY_MAD_DEMUX:
+ case MLX5_CMD_OP_SET_MAD_DEMUX:
+ case MLX5_CMD_OP_NOP:
+ case MLX5_CMD_OP_ALLOC_XRCD:
+ case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
+ case MLX5_CMD_OP_QUERY_CONG_STATUS:
+ case MLX5_CMD_OP_MODIFY_CONG_STATUS:
+ case MLX5_CMD_OP_QUERY_CONG_PARAMS:
+ case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
+ case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
+ case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+ case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_CREATE_TIR:
+ case MLX5_CMD_OP_MODIFY_TIR:
+ case MLX5_CMD_OP_QUERY_TIR:
+ case MLX5_CMD_OP_CREATE_SQ:
+ case MLX5_CMD_OP_MODIFY_SQ:
+ case MLX5_CMD_OP_QUERY_SQ:
+ case MLX5_CMD_OP_CREATE_RQ:
+ case MLX5_CMD_OP_MODIFY_RQ:
+ case MLX5_CMD_OP_QUERY_RQ:
+ case MLX5_CMD_OP_CREATE_RMP:
+ case MLX5_CMD_OP_MODIFY_RMP:
+ case MLX5_CMD_OP_QUERY_RMP:
+ case MLX5_CMD_OP_CREATE_TIS:
+ case MLX5_CMD_OP_MODIFY_TIS:
+ case MLX5_CMD_OP_QUERY_TIS:
+ case MLX5_CMD_OP_CREATE_RQT:
+ case MLX5_CMD_OP_MODIFY_RQT:
+ case MLX5_CMD_OP_QUERY_RQT:
+ case MLX5_CMD_OP_CREATE_FLOW_TABLE:
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE:
+ case MLX5_CMD_OP_CREATE_FLOW_GROUP:
+ case MLX5_CMD_OP_QUERY_FLOW_GROUP:
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
+ *status = MLX5_DRIVER_STATUS_ABORTED;
+ *synd = MLX5_DRIVER_SYND;
+ return -EIO;
+ default:
+ mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
+ return -EINVAL;
+ }
+}
+
const char *mlx5_command_str(int command)
{
switch (command) {
@@ -473,6 +623,7 @@ static void cmd_work_handler(struct work_struct *work)
struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
struct mlx5_cmd_layout *lay;
struct semaphore *sem;
+ unsigned long flags;
sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
down(sem);
@@ -485,6 +636,9 @@ static void cmd_work_handler(struct work_struct *work)
}
} else {
ent->idx = cmd->max_reg_cmds;
+ spin_lock_irqsave(&cmd->alloc_lock, flags);
+ clear_bit(ent->idx, &cmd->bitmask);
+ spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
ent->token = alloc_token(cmd);
@@ -584,6 +738,16 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
return err;
}
+static __be32 *get_synd_ptr(struct mlx5_outbox_hdr *out)
+{
+ return &out->syndrome;
+}
+
+static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
+{
+ return &out->status;
+}
+
/* Notes:
* 1. Callback functions may not sleep
* 2. page queue commands do not support asynchronous completion
@@ -1081,7 +1245,7 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
}
}
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
{
struct mlx5_cmd *cmd = &dev->cmd;
struct mlx5_cmd_work_ent *ent;
@@ -1092,7 +1256,10 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
s64 ds;
struct mlx5_cmd_stats *stats;
unsigned long flags;
+ unsigned long vector;
+ /* there can be at most 32 command queues */
+ vector = vec & 0xffffffff;
for (i = 0; i < (1 << cmd->log_sz); i++) {
if (test_bit(i, &vector)) {
struct semaphore *sem;
@@ -1110,11 +1277,16 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
ent->ret = verify_signature(ent);
else
ent->ret = 0;
- ent->status = ent->lay->status_own >> 1;
+ if (vec & MLX5_TRIGGERED_CMD_COMP)
+ ent->status = MLX5_DRIVER_STATUS_ABORTED;
+ else
+ ent->status = ent->lay->status_own >> 1;
+
mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
ent->ret, deliv_status_to_str(ent->status), ent->status);
}
free_ent(cmd, ent->idx);
+
if (ent->callback) {
ds = ent->ts2 - ent->ts1;
if (ent->op < ARRAY_SIZE(cmd->stats)) {
@@ -1136,6 +1308,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
mlx5_free_cmd_msg(dev, ent->out);
free_msg(dev, ent->in);
+ err = err ? err : ent->status;
free_cmd(ent);
callback(err, context);
} else {
@@ -1183,6 +1356,11 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
return msg;
}
+static u16 opcode_from_in(struct mlx5_inbox_hdr *in)
+{
+ return be16_to_cpu(in->opcode);
+}
+
static int is_manage_pages(struct mlx5_inbox_hdr *in)
{
return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
@@ -1197,6 +1375,15 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
gfp_t gfp;
int err;
u8 status = 0;
+ u32 drv_synd;
+
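+ /* If the PCI channel is offline or the device is in internal error, fake a completion instead of touching the hardware. */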
+ if (pci_channel_offline(dev->pdev) ||
+ dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ err = mlx5_internal_err_ret_value(dev, opcode_from_in(in), &drv_synd, &status);
+ *get_synd_ptr(out) = cpu_to_be32(drv_synd);
+ *get_status_ptr(out) = status;
+ return err;
+ }
pages_queue = is_manage_pages(in);
gfp = callback ? GFP_ATOMIC : GFP_KERNEL;
@@ -1363,6 +1550,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
int err;
int i;
+ memset(cmd, 0, sizeof(*cmd));
cmd_if_rev = cmdif_rev(dev);
if (cmd_if_rev != CMD_IF_REV) {
dev_err(&dev->pdev->dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 04ab7e445eae..b51e42d6fbec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -242,6 +242,7 @@ int mlx5_init_cq_table(struct mlx5_core_dev *dev)
struct mlx5_cq_table *table = &dev->priv.cq_table;
int err;
+ memset(table, 0, sizeof(*table));
spin_lock_init(&table->lock);
INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
err = mlx5_cq_debugfs_init(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 0983a208b299..f2ae62dd8c09 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -617,5 +617,11 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc);
}
+static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
+{
+ return min_t(int, mdev->priv.eq_table.num_comp_vectors,
+ MLX5E_MAX_NUM_CHANNELS);
+}
+
extern const struct ethtool_ops mlx5e_ethtool_ops;
u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index bce912688ca8..2e022e900939 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -345,9 +345,8 @@ static void mlx5e_get_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
- ch->max_combined = ncv;
+ ch->max_combined = mlx5e_get_max_num_channels(priv->mdev);
ch->combined_count = priv->params.num_channels;
}
@@ -355,7 +354,7 @@ static int mlx5e_set_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
+ int ncv = mlx5e_get_max_num_channels(priv->mdev);
unsigned int count = ch->combined_count;
bool was_opened;
int err = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 59874d666cff..5fc4d2d78cdf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -442,12 +442,12 @@ static void mlx5e_disable_rq(struct mlx5e_rq *rq)
static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
+ unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
struct mlx5e_channel *c = rq->channel;
struct mlx5e_priv *priv = c->priv;
struct mlx5_wq_ll *wq = &rq->wq;
- int i;
- for (i = 0; i < 1000; i++) {
+ while (time_before(jiffies, exp_time)) {
if (wq->cur_sz >= priv->params.min_rx_wqes)
return 0;
@@ -1367,13 +1367,13 @@ int mlx5e_open_locked(struct net_device *netdev)
err = mlx5e_set_dev_port_mtu(netdev);
if (err)
- return err;
+ goto err_clear_state_opened_flag;
err = mlx5e_open_channels(priv);
if (err) {
netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
__func__, err);
- return err;
+ goto err_clear_state_opened_flag;
}
mlx5e_update_carrier(priv);
@@ -1382,6 +1382,10 @@ int mlx5e_open_locked(struct net_device *netdev)
schedule_delayed_work(&priv->update_stats_work, 0);
return 0;
+
+err_clear_state_opened_flag:
+ clear_bit(MLX5E_STATE_OPENED, &priv->state);
+ return err;
}
static int mlx5e_open(struct net_device *netdev)
@@ -1400,6 +1404,12 @@ int mlx5e_close_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ /* May already be CLOSED in case a previous configuration operation
+ * (e.g. RX/TX queue size change) that involves close and open failed.
+ */
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return 0;
+
clear_bit(MLX5E_STATE_OPENED, &priv->state);
mlx5e_redirect_rqts(priv);
@@ -1833,7 +1843,7 @@ static int mlx5e_set_features(struct net_device *netdev,
mlx5e_disable_vlan_filter(priv);
}
- return 0;
+ return err;
}
static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
@@ -1994,6 +2004,7 @@ static void mlx5e_build_netdev(struct net_device *netdev)
netdev->vlan_features |= NETIF_F_LRO;
netdev->hw_features = netdev->vlan_features;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
@@ -2037,8 +2048,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
{
struct net_device *netdev;
struct mlx5e_priv *priv;
- int nch = min_t(int, mdev->priv.eq_table.num_comp_vectors,
- MLX5E_MAX_NUM_CHANNELS);
+ int nch = mlx5e_get_max_num_channels(mdev);
int err;
if (mlx5e_check_required_hca_cap(mdev))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index b73672f32e2c..cd8f85a251d7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -116,7 +116,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
* headers and occur before the data gather.
* Therefore these headers must be copied into the WQE
*/
-#define MLX5E_MIN_INLINE (ETH_HLEN + 2/*vlan tag*/)
+#define MLX5E_MIN_INLINE ETH_HLEN
if (bf && (skb_headlen(skb) <= sq->max_inline))
return skb_headlen(skb);
@@ -124,6 +124,21 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
return MLX5E_MIN_INLINE;
}
+static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
+{
+ struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
+ int cpy1_sz = 2 * ETH_ALEN;
+ int cpy2_sz = ihs - cpy1_sz;
+
+ skb_copy_from_linear_data(skb, vhdr, cpy1_sz);
+ skb_pull_inline(skb, cpy1_sz);
+ vhdr->h_vlan_proto = skb->vlan_proto;
+ vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
+ skb_copy_from_linear_data(skb, &vhdr->h_vlan_encapsulated_proto,
+ cpy2_sz);
+ skb_pull_inline(skb, cpy2_sz);
+}
+
static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
{
struct mlx5_wq_cyc *wq = &sq->wq;
@@ -175,8 +190,13 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
ETH_ZLEN);
}
- skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
- skb_pull_inline(skb, ihs);
+ if (skb_vlan_tag_present(skb)) {
+ mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs);
+ ihs += VLAN_HLEN;
+ } else {
+ skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
+ skb_pull_inline(skb, ihs);
+ }
eseg->inline_hdr_sz = cpu_to_be16(ihs);
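For reference, a minimal user-space sketch (not part of the patch) of the two-step copy mlx5e_insert_vlan() performs: the destination and source MACs are copied first, the 802.1Q tag is written in between, and the remaining inlined headers follow. The buffer and function names below are hypothetical.

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define ETH_ALEN_DEMO	6
#define VLAN_HLEN_DEMO	4

/* Copy an untagged frame's inlined headers into "inline_buf" while
 * inserting an 802.1Q tag after the MAC addresses, mirroring the
 * two memcpy()/skb_pull_inline() steps in mlx5e_insert_vlan().
 * Returns the new inline header size (ihs + 4).
 */
static size_t insert_vlan_sketch(uint8_t *inline_buf, const uint8_t *pkt,
				 size_t ihs, uint16_t vlan_tci)
{
	size_t cpy1_sz = 2 * ETH_ALEN_DEMO;	/* dest MAC + src MAC */
	size_t cpy2_sz = ihs - cpy1_sz;		/* rest of the inlined headers */
	uint16_t proto = htons(0x8100);		/* 802.1Q TPID */
	uint16_t tci = htons(vlan_tci);

	memcpy(inline_buf, pkt, cpy1_sz);
	memcpy(inline_buf + cpy1_sz, &proto, sizeof(proto));
	memcpy(inline_buf + cpy1_sz + sizeof(proto), &tci, sizeof(tci));
	memcpy(inline_buf + cpy1_sz + VLAN_HLEN_DEMO, pkt + cpy1_sz, cpy2_sz);

	return ihs + VLAN_HLEN_DEMO;
}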
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index a40b96d4c662..713ead583347 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -346,6 +346,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
int inlen;
eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
+ eq->cons_index = 0;
err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
if (err)
return err;
@@ -381,10 +382,10 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
name, pci_name(dev->pdev));
eq->eqn = out.eq_number;
- eq->irqn = vecidx;
+ eq->irqn = priv->msix_arr[vecidx].vector;
eq->dev = dev;
eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
- err = request_irq(priv->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
+ err = request_irq(eq->irqn, mlx5_msix_handler, 0,
priv->irq_info[vecidx].name, eq);
if (err)
goto err_eq;
@@ -420,12 +421,12 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
int err;
mlx5_debug_eq_remove(dev, eq);
- free_irq(dev->priv.msix_arr[eq->irqn].vector, eq);
+ free_irq(eq->irqn, eq);
err = mlx5_cmd_destroy_eq(dev, eq->eqn);
if (err)
mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
eq->eqn);
- synchronize_irq(dev->priv.msix_arr[eq->irqn].vector);
+ synchronize_irq(eq->irqn);
mlx5_buf_free(dev, &eq->buf);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 292d76f2a904..f5deb642d0d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -34,6 +34,7 @@
#include <linux/module.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
+#include <linux/hardirq.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
@@ -46,39 +47,113 @@ enum {
enum {
MLX5_HEALTH_SYNDR_FW_ERR = 0x1,
MLX5_HEALTH_SYNDR_IRISC_ERR = 0x7,
+ MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR = 0x8,
MLX5_HEALTH_SYNDR_CRC_ERR = 0x9,
MLX5_HEALTH_SYNDR_FETCH_PCI_ERR = 0xa,
MLX5_HEALTH_SYNDR_HW_FTL_ERR = 0xb,
MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR = 0xc,
MLX5_HEALTH_SYNDR_EQ_ERR = 0xd,
+ MLX5_HEALTH_SYNDR_EQ_INV = 0xe,
MLX5_HEALTH_SYNDR_FFSER_ERR = 0xf,
+ MLX5_HEALTH_SYNDR_HIGH_TEMP = 0x10
};
-static DEFINE_SPINLOCK(health_lock);
-static LIST_HEAD(health_list);
-static struct work_struct health_work;
+enum {
+ MLX5_NIC_IFC_FULL = 0,
+ MLX5_NIC_IFC_DISABLED = 1,
+ MLX5_NIC_IFC_NO_DRAM_NIC = 2
+};
-static void health_care(struct work_struct *work)
+static u8 get_nic_interface(struct mlx5_core_dev *dev)
{
- struct mlx5_core_health *health, *n;
- struct mlx5_core_dev *dev;
- struct mlx5_priv *priv;
- LIST_HEAD(tlist);
+ return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3;
+}
+
+static void trigger_cmd_completions(struct mlx5_core_dev *dev)
+{
+ unsigned long flags;
+ u64 vector;
- spin_lock_irq(&health_lock);
- list_splice_init(&health_list, &tlist);
+ /* wait for pending handlers to complete */
+ synchronize_irq(dev->priv.msix_arr[MLX5_EQ_VEC_CMD].vector);
+ spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
+ vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
+ if (!vector)
+ goto no_trig;
+
+ vector |= MLX5_TRIGGERED_CMD_COMP;
+ spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
+
+ mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
+ mlx5_cmd_comp_handler(dev, vector);
+ return;
+
+no_trig:
+ spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
+}
+
+static int in_fatal(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_health *health = &dev->priv.health;
+ struct health_buffer __iomem *h = health->health;
- spin_unlock_irq(&health_lock);
+ if (get_nic_interface(dev) == MLX5_NIC_IFC_DISABLED)
+ return 1;
- list_for_each_entry_safe(health, n, &tlist, list) {
- priv = container_of(health, struct mlx5_priv, health);
- dev = container_of(priv, struct mlx5_core_dev, priv);
- mlx5_core_warn(dev, "handling bad device here\n");
- /* nothing yet */
- spin_lock_irq(&health_lock);
- list_del_init(&health->list);
- spin_unlock_irq(&health_lock);
+ if (ioread32be(&h->fw_ver) == 0xffffffff)
+ return 1;
+
+ return 0;
+}
+
+void mlx5_enter_error_state(struct mlx5_core_dev *dev)
+{
+ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ return;
+
+ mlx5_core_err(dev, "start\n");
+ if (pci_channel_offline(dev->pdev) || in_fatal(dev))
+ dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+
+ mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
+ mlx5_core_err(dev, "end\n");
+}
+
+static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
+{
+ u8 nic_interface = get_nic_interface(dev);
+
+ switch (nic_interface) {
+ case MLX5_NIC_IFC_FULL:
+ mlx5_core_warn(dev, "Expected to see disabled NIC but it is full driver\n");
+ break;
+
+ case MLX5_NIC_IFC_DISABLED:
+ mlx5_core_warn(dev, "starting teardown\n");
+ break;
+
+ case MLX5_NIC_IFC_NO_DRAM_NIC:
+ mlx5_core_warn(dev, "Expected to see disabled NIC but it is no dram nic\n");
+ break;
+ default:
+ mlx5_core_warn(dev, "Expected to see disabled NIC but it is has invalid value %d\n",
+ nic_interface);
}
+
+ mlx5_disable_device(dev);
+}
+
+static void health_care(struct work_struct *work)
+{
+ struct mlx5_core_health *health;
+ struct mlx5_core_dev *dev;
+ struct mlx5_priv *priv;
+
+ health = container_of(work, struct mlx5_core_health, work);
+ priv = container_of(health, struct mlx5_priv, health);
+ dev = container_of(priv, struct mlx5_core_dev, priv);
+ mlx5_core_warn(dev, "handling bad device here\n");
+ mlx5_handle_bad_state(dev);
}
static const char *hsynd_str(u8 synd)
@@ -88,6 +163,8 @@ static const char *hsynd_str(u8 synd)
return "firmware internal error";
case MLX5_HEALTH_SYNDR_IRISC_ERR:
return "irisc not responding";
+ case MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR:
+ return "unrecoverable hardware error";
case MLX5_HEALTH_SYNDR_CRC_ERR:
return "firmware CRC error";
case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR:
@@ -98,48 +175,81 @@ static const char *hsynd_str(u8 synd)
return "async EQ buffer overrun";
case MLX5_HEALTH_SYNDR_EQ_ERR:
return "EQ error";
+ case MLX5_HEALTH_SYNDR_EQ_INV:
+ return "Invalid EQ refrenced";
case MLX5_HEALTH_SYNDR_FFSER_ERR:
return "FFSER error";
+ case MLX5_HEALTH_SYNDR_HIGH_TEMP:
+ return "High temprature";
default:
return "unrecognized error";
}
}
-static u16 read_be16(__be16 __iomem *p)
+static u16 get_maj(u32 fw)
{
- return swab16(readl((__force u16 __iomem *) p));
+ return fw >> 28;
}
-static u32 read_be32(__be32 __iomem *p)
+static u16 get_min(u32 fw)
{
- return swab32(readl((__force u32 __iomem *) p));
+ return fw >> 16 & 0xfff;
+}
+
+static u16 get_sub(u32 fw)
+{
+ return fw & 0xffff;
}
static void print_health_info(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
struct health_buffer __iomem *h = health->health;
+ char fw_str[18];
+ u32 fw;
int i;
+ /* If the syndrome is 0, the device is OK and there is no need to print the buffer */
+ if (!ioread8(&h->synd))
+ return;
+
for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
- pr_info("assert_var[%d] 0x%08x\n", i, read_be32(h->assert_var + i));
+ dev_err(&dev->pdev->dev, "assert_var[%d] 0x%08x\n", i, ioread32be(h->assert_var + i));
+
+ dev_err(&dev->pdev->dev, "assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr));
+ dev_err(&dev->pdev->dev, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra));
+ fw = ioread32be(&h->fw_ver);
+ sprintf(fw_str, "%d.%d.%d", get_maj(fw), get_min(fw), get_sub(fw));
+ dev_err(&dev->pdev->dev, "fw_ver %s\n", fw_str);
+ dev_err(&dev->pdev->dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
+ dev_err(&dev->pdev->dev, "irisc_index %d\n", ioread8(&h->irisc_index));
+ dev_err(&dev->pdev->dev, "synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd)));
+ dev_err(&dev->pdev->dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
+}
+
+static unsigned long get_next_poll_jiffies(void)
+{
+ unsigned long next;
- pr_info("assert_exit_ptr 0x%08x\n", read_be32(&h->assert_exit_ptr));
- pr_info("assert_callra 0x%08x\n", read_be32(&h->assert_callra));
- pr_info("fw_ver 0x%08x\n", read_be32(&h->fw_ver));
- pr_info("hw_id 0x%08x\n", read_be32(&h->hw_id));
- pr_info("irisc_index %d\n", readb(&h->irisc_index));
- pr_info("synd 0x%x: %s\n", readb(&h->synd), hsynd_str(readb(&h->synd)));
- pr_info("ext_sync 0x%04x\n", read_be16(&h->ext_sync));
+ get_random_bytes(&next, sizeof(next));
+ next %= HZ;
+ next += jiffies + MLX5_HEALTH_POLL_INTERVAL;
+
+ return next;
}
static void poll_health(unsigned long data)
{
struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data;
struct mlx5_core_health *health = &dev->priv.health;
- unsigned long next;
u32 count;
+ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ trigger_cmd_completions(dev);
+ mod_timer(&health->timer, get_next_poll_jiffies());
+ return;
+ }
+
count = ioread32be(health->health_counter);
if (count == health->prev)
++health->miss_counter;
@@ -148,18 +258,16 @@ static void poll_health(unsigned long data)
health->prev = count;
if (health->miss_counter == MAX_MISSES) {
- mlx5_core_err(dev, "device's health compromised\n");
+ dev_err(&dev->pdev->dev, "device's health compromised - reached miss count\n");
print_health_info(dev);
- spin_lock_irq(&health_lock);
- list_add_tail(&health->list, &health_list);
- spin_unlock_irq(&health_lock);
-
- queue_work(mlx5_core_wq, &health_work);
} else {
- get_random_bytes(&next, sizeof(next));
- next %= HZ;
- next += jiffies + MLX5_HEALTH_POLL_INTERVAL;
- mod_timer(&health->timer, next);
+ mod_timer(&health->timer, get_next_poll_jiffies());
+ }
+
+ if (in_fatal(dev) && !health->sick) {
+ health->sick = true;
+ print_health_info(dev);
+ queue_work(health->wq, &health->work);
}
}
@@ -167,7 +275,6 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
- INIT_LIST_HEAD(&health->list);
init_timer(&health->timer);
health->health = &dev->iseg->health;
health->health_counter = &dev->iseg->health_counter;
@@ -183,18 +290,33 @@ void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
struct mlx5_core_health *health = &dev->priv.health;
del_timer_sync(&health->timer);
-
- spin_lock_irq(&health_lock);
- if (!list_empty(&health->list))
- list_del_init(&health->list);
- spin_unlock_irq(&health_lock);
}
-void mlx5_health_cleanup(void)
+void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
+ struct mlx5_core_health *health = &dev->priv.health;
+
+ destroy_workqueue(health->wq);
}
-void __init mlx5_health_init(void)
+int mlx5_health_init(struct mlx5_core_dev *dev)
{
- INIT_WORK(&health_work, health_care);
+ struct mlx5_core_health *health;
+ char *name;
+
+ health = &dev->priv.health;
+ name = kmalloc(64, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ strcpy(name, "mlx5_health");
+ strcat(name, dev_name(&dev->pdev->dev));
+ health->wq = create_singlethread_workqueue(name);
+ kfree(name);
+ if (!health->wq)
+ return -ENOMEM;
+
+ INIT_WORK(&health->work, health_care);
+
+ return 0;
}
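For reference, a minimal user-space sketch (not part of the patch) of the jittered re-arm interval computed by get_next_poll_jiffies(): the next poll lands one base interval plus up to one second of random jitter from now. POLL_INTERVAL_MS stands in for MLX5_HEALTH_POLL_INTERVAL and its value here is hypothetical.

#include <stdint.h>
#include <stdlib.h>

#define POLL_INTERVAL_MS 2000	/* hypothetical stand-in for MLX5_HEALTH_POLL_INTERVAL */
#define JITTER_RANGE_MS  1000	/* "% HZ" in the kernel code: up to one second */

static uint64_t next_poll_ms(uint64_t now_ms)
{
	uint64_t jitter = (uint64_t)rand() % JITTER_RANGE_MS;

	return now_ms + POLL_INTERVAL_MS + jitter;
}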
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 03aabdd79abe..2388aec208fa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -39,12 +39,14 @@
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
+#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/debugfs.h>
#include <linux/kmod.h>
+#include <linux/delay.h>
#include <linux/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"
@@ -62,7 +64,6 @@ static int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
-struct workqueue_struct *mlx5_core_wq;
static LIST_HEAD(intf_list);
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(intf_mutex);
@@ -152,6 +153,25 @@ static struct mlx5_profile profile[] = {
},
};
+#define FW_INIT_TIMEOUT_MILI 2000
+#define FW_INIT_WAIT_MS 2
+
+static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
+{
+ unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
+ int err = 0;
+
+ while (fw_initializing(dev)) {
+ if (time_after(jiffies, end)) {
+ err = -EBUSY;
+ break;
+ }
+ msleep(FW_INIT_WAIT_MS);
+ }
+
+ return err;
+}
+
static int set_dma_caps(struct pci_dev *pdev)
{
int err;
@@ -182,6 +202,34 @@ static int set_dma_caps(struct pci_dev *pdev)
return err;
}
+static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+ int err = 0;
+
+ mutex_lock(&dev->pci_status_mutex);
+ if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
+ err = pci_enable_device(pdev);
+ if (!err)
+ dev->pci_status = MLX5_PCI_STATUS_ENABLED;
+ }
+ mutex_unlock(&dev->pci_status_mutex);
+
+ return err;
+}
+
+static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+
+ mutex_lock(&dev->pci_status_mutex);
+ if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
+ pci_disable_device(pdev);
+ dev->pci_status = MLX5_PCI_STATUS_DISABLED;
+ }
+ mutex_unlock(&dev->pci_status_mutex);
+}
+
static int request_bar(struct pci_dev *pdev)
{
int err = 0;
@@ -672,12 +720,126 @@ static void unmap_bf_area(struct mlx5_core_dev *dev)
io_mapping_free(dev->priv.bf_mapping);
}
-static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
+static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+ struct mlx5_device_context *dev_ctx;
+ struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
+ if (!dev_ctx)
+ return;
+
+ dev_ctx->intf = intf;
+ dev_ctx->context = intf->add(dev);
+
+ if (dev_ctx->context) {
+ spin_lock_irq(&priv->ctx_lock);
+ list_add_tail(&dev_ctx->list, &priv->ctx_list);
+ spin_unlock_irq(&priv->ctx_lock);
+ } else {
+ kfree(dev_ctx);
+ }
+}
+
+static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+ struct mlx5_device_context *dev_ctx;
+ struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+ if (dev_ctx->intf == intf) {
+ spin_lock_irq(&priv->ctx_lock);
+ list_del(&dev_ctx->list);
+ spin_unlock_irq(&priv->ctx_lock);
+
+ intf->remove(dev, dev_ctx->context);
+ kfree(dev_ctx);
+ return;
+ }
+}
+
+static int mlx5_register_device(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
- int err;
+ struct mlx5_interface *intf;
+
+ mutex_lock(&intf_mutex);
+ list_add_tail(&priv->dev_list, &dev_list);
+ list_for_each_entry(intf, &intf_list, list)
+ mlx5_add_device(intf, priv);
+ mutex_unlock(&intf_mutex);
+
+ return 0;
+}
+
+static void mlx5_unregister_device(struct mlx5_core_dev *dev)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_interface *intf;
+
+ mutex_lock(&intf_mutex);
+ list_for_each_entry(intf, &intf_list, list)
+ mlx5_remove_device(intf, priv);
+ list_del(&priv->dev_list);
+ mutex_unlock(&intf_mutex);
+}
+
+int mlx5_register_interface(struct mlx5_interface *intf)
+{
+ struct mlx5_priv *priv;
+
+ if (!intf->add || !intf->remove)
+ return -EINVAL;
+
+ mutex_lock(&intf_mutex);
+ list_add_tail(&intf->list, &intf_list);
+ list_for_each_entry(priv, &dev_list, dev_list)
+ mlx5_add_device(intf, priv);
+ mutex_unlock(&intf_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(mlx5_register_interface);
+
+void mlx5_unregister_interface(struct mlx5_interface *intf)
+{
+ struct mlx5_priv *priv;
+
+ mutex_lock(&intf_mutex);
+ list_for_each_entry(priv, &dev_list, dev_list)
+ mlx5_remove_device(intf, priv);
+ list_del(&intf->list);
+ mutex_unlock(&intf_mutex);
+}
+EXPORT_SYMBOL(mlx5_unregister_interface);
+
+void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
+{
+ struct mlx5_priv *priv = &mdev->priv;
+ struct mlx5_device_context *dev_ctx;
+ unsigned long flags;
+ void *result = NULL;
+
+ spin_lock_irqsave(&priv->ctx_lock, flags);
+
+ list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
+ if ((dev_ctx->intf->protocol == protocol) &&
+ dev_ctx->intf->get_dev) {
+ result = dev_ctx->intf->get_dev(dev_ctx->context);
+ break;
+ }
+
+ spin_unlock_irqrestore(&priv->ctx_lock, flags);
+
+ return result;
+}
+EXPORT_SYMBOL(mlx5_get_protocol_dev);
+
+static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+{
+ struct pci_dev *pdev = dev->pdev;
+ int err = 0;
- dev->pdev = pdev;
pci_set_drvdata(dev->pdev, dev);
strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
priv->name[MLX5_MAX_NAME_LEN - 1] = 0;
@@ -694,7 +856,7 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
if (!priv->dbg_root)
return -ENOMEM;
- err = pci_enable_device(pdev);
+ err = mlx5_pci_enable_device(dev);
if (err) {
dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
goto err_dbg;
@@ -721,13 +883,61 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
dev_err(&pdev->dev, "Failed mapping initialization segment, aborting\n");
goto err_clr_master;
}
+
+ return 0;
+
+err_clr_master:
+ pci_clear_master(dev->pdev);
+ release_bar(dev->pdev);
+err_disable:
+ mlx5_pci_disable_device(dev);
+
+err_dbg:
+ debugfs_remove(priv->dbg_root);
+ return err;
+}
+
+static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+{
+ iounmap(dev->iseg);
+ pci_clear_master(dev->pdev);
+ release_bar(dev->pdev);
+ mlx5_pci_disable_device(dev);
+ debugfs_remove(priv->dbg_root);
+}
+
+#define MLX5_IB_MOD "mlx5_ib"
+static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+{
+ struct pci_dev *pdev = dev->pdev;
+ int err;
+
+ mutex_lock(&dev->intf_state_mutex);
+ if (dev->interface_state == MLX5_INTERFACE_STATE_UP) {
+ dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
+ __func__);
+ goto out;
+ }
+
dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
fw_rev_min(dev), fw_rev_sub(dev));
+ /* On load, clear any previous indication of internal error; the
+ * device is up.
+ */
+ dev->state = MLX5_DEVICE_STATE_UP;
+
err = mlx5_cmd_init(dev);
if (err) {
dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
- goto err_unmap;
+ goto out_err;
+ }
+
+ err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
+ if (err) {
+ dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n",
+ FW_INIT_TIMEOUT_MILI);
+ goto out_err;
}
mlx5_pagealloc_init(dev);
@@ -842,8 +1052,29 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
mlx5_init_srq_table(dev);
mlx5_init_mr_table(dev);
+ err = mlx5_register_device(dev);
+ if (err) {
+ dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
+ goto err_reg_dev;
+ }
+
+ err = request_module_nowait(MLX5_IB_MOD);
+ if (err)
+ pr_info("failed request module on %s\n", MLX5_IB_MOD);
+
+ dev->interface_state = MLX5_INTERFACE_STATE_UP;
+out:
+ mutex_unlock(&dev->intf_state_mutex);
+
return 0;
+err_reg_dev:
+ mlx5_cleanup_mr_table(dev);
+ mlx5_cleanup_srq_table(dev);
+ mlx5_cleanup_qp_table(dev);
+ mlx5_cleanup_cq_table(dev);
+ mlx5_irq_clear_affinity_hints(dev);
+
err_unmap_bf_area:
unmap_bf_area(dev);
@@ -865,7 +1096,7 @@ err_stop_poll:
mlx5_stop_health_poll(dev);
if (mlx5_cmd_teardown_hca(dev)) {
dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
- return err;
+ goto out_err;
}
err_pagealloc_stop:
@@ -881,25 +1112,25 @@ err_pagealloc_cleanup:
mlx5_pagealloc_cleanup(dev);
mlx5_cmd_cleanup(dev);
-err_unmap:
- iounmap(dev->iseg);
-
-err_clr_master:
- pci_clear_master(dev->pdev);
- release_bar(dev->pdev);
-
-err_disable:
- pci_disable_device(dev->pdev);
+out_err:
+ dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+ mutex_unlock(&dev->intf_state_mutex);
-err_dbg:
- debugfs_remove(priv->dbg_root);
return err;
}
-static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
+static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
- struct mlx5_priv *priv = &dev->priv;
+ int err = 0;
+ mutex_lock(&dev->intf_state_mutex);
+ if (dev->interface_state == MLX5_INTERFACE_STATE_DOWN) {
+ dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
+ __func__);
+ goto out;
+ }
+ mlx5_unregister_device(dev);
+ mlx5_cleanup_mr_table(dev);
mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
mlx5_cleanup_cq_table(dev);
@@ -911,139 +1142,25 @@ static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
mlx5_eq_cleanup(dev);
mlx5_disable_msix(dev);
mlx5_stop_health_poll(dev);
- if (mlx5_cmd_teardown_hca(dev)) {
+ err = mlx5_cmd_teardown_hca(dev);
+ if (err) {
dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
- return;
+ goto out;
}
mlx5_pagealloc_stop(dev);
mlx5_reclaim_startup_pages(dev);
mlx5_core_disable_hca(dev);
mlx5_pagealloc_cleanup(dev);
mlx5_cmd_cleanup(dev);
- iounmap(dev->iseg);
- pci_clear_master(dev->pdev);
- release_bar(dev->pdev);
- pci_disable_device(dev->pdev);
- debugfs_remove(priv->dbg_root);
-}
-
-static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
-{
- struct mlx5_device_context *dev_ctx;
- struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
-
- dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
- if (!dev_ctx) {
- pr_warn("mlx5_add_device: alloc context failed\n");
- return;
- }
-
- dev_ctx->intf = intf;
- dev_ctx->context = intf->add(dev);
-
- if (dev_ctx->context) {
- spin_lock_irq(&priv->ctx_lock);
- list_add_tail(&dev_ctx->list, &priv->ctx_list);
- spin_unlock_irq(&priv->ctx_lock);
- } else {
- kfree(dev_ctx);
- }
-}
-
-static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
-{
- struct mlx5_device_context *dev_ctx;
- struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
-
- list_for_each_entry(dev_ctx, &priv->ctx_list, list)
- if (dev_ctx->intf == intf) {
- spin_lock_irq(&priv->ctx_lock);
- list_del(&dev_ctx->list);
- spin_unlock_irq(&priv->ctx_lock);
-
- intf->remove(dev, dev_ctx->context);
- kfree(dev_ctx);
- return;
- }
-}
-static int mlx5_register_device(struct mlx5_core_dev *dev)
-{
- struct mlx5_priv *priv = &dev->priv;
- struct mlx5_interface *intf;
-
- mutex_lock(&intf_mutex);
- list_add_tail(&priv->dev_list, &dev_list);
- list_for_each_entry(intf, &intf_list, list)
- mlx5_add_device(intf, priv);
- mutex_unlock(&intf_mutex);
-
- return 0;
-}
-static void mlx5_unregister_device(struct mlx5_core_dev *dev)
-{
- struct mlx5_priv *priv = &dev->priv;
- struct mlx5_interface *intf;
-
- mutex_lock(&intf_mutex);
- list_for_each_entry(intf, &intf_list, list)
- mlx5_remove_device(intf, priv);
- list_del(&priv->dev_list);
- mutex_unlock(&intf_mutex);
-}
-
-int mlx5_register_interface(struct mlx5_interface *intf)
-{
- struct mlx5_priv *priv;
-
- if (!intf->add || !intf->remove)
- return -EINVAL;
-
- mutex_lock(&intf_mutex);
- list_add_tail(&intf->list, &intf_list);
- list_for_each_entry(priv, &dev_list, dev_list)
- mlx5_add_device(intf, priv);
- mutex_unlock(&intf_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL(mlx5_register_interface);
-
-void mlx5_unregister_interface(struct mlx5_interface *intf)
-{
- struct mlx5_priv *priv;
-
- mutex_lock(&intf_mutex);
- list_for_each_entry(priv, &dev_list, dev_list)
- mlx5_remove_device(intf, priv);
- list_del(&intf->list);
- mutex_unlock(&intf_mutex);
-}
-EXPORT_SYMBOL(mlx5_unregister_interface);
-
-void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
-{
- struct mlx5_priv *priv = &mdev->priv;
- struct mlx5_device_context *dev_ctx;
- unsigned long flags;
- void *result = NULL;
-
- spin_lock_irqsave(&priv->ctx_lock, flags);
-
- list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
- if ((dev_ctx->intf->protocol == protocol) &&
- dev_ctx->intf->get_dev) {
- result = dev_ctx->intf->get_dev(dev_ctx->context);
- break;
- }
- spin_unlock_irqrestore(&priv->ctx_lock, flags);
-
- return result;
+out:
+ dev->interface_state = MLX5_INTERFACE_STATE_DOWN;
+ mutex_unlock(&dev->intf_state_mutex);
+ return err;
}
-EXPORT_SYMBOL(mlx5_get_protocol_dev);
-static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
- unsigned long param)
+void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
+ unsigned long param)
{
struct mlx5_priv *priv = &dev->priv;
struct mlx5_device_context *dev_ctx;
@@ -1064,7 +1181,6 @@ struct mlx5_core_event_handler {
void *data);
};
-#define MLX5_IB_MOD "mlx5_ib"
static int init_one(struct pci_dev *pdev,
const struct pci_device_id *id)
@@ -1088,43 +1204,166 @@ static int init_one(struct pci_dev *pdev,
prof_sel = MLX5_DEFAULT_PROF;
}
dev->profile = &profile[prof_sel];
+ dev->pdev = pdev;
dev->event = mlx5_core_event;
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
- err = mlx5_dev_init(dev, pdev);
+ mutex_init(&dev->pci_status_mutex);
+ mutex_init(&dev->intf_state_mutex);
+ err = mlx5_pci_init(dev, priv);
if (err) {
- dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err);
- goto out;
+ dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err);
+ goto clean_dev;
}
- err = mlx5_register_device(dev);
+ err = mlx5_health_init(dev);
if (err) {
- dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
- goto out_init;
+ dev_err(&pdev->dev, "mlx5_health_init failed with error code %d\n", err);
+ goto close_pci;
}
- err = request_module_nowait(MLX5_IB_MOD);
- if (err)
- pr_info("failed request module on %s\n", MLX5_IB_MOD);
+ err = mlx5_load_one(dev, priv);
+ if (err) {
+ dev_err(&pdev->dev, "mlx5_load_one failed with error code %d\n", err);
+ goto clean_health;
+ }
return 0;
-out_init:
- mlx5_dev_cleanup(dev);
-out:
+clean_health:
+ mlx5_health_cleanup(dev);
+close_pci:
+ mlx5_pci_close(dev, priv);
+clean_dev:
+ pci_set_drvdata(pdev, NULL);
kfree(dev);
+
return err;
}
+
static void remove_one(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct mlx5_priv *priv = &dev->priv;
- mlx5_unregister_device(dev);
- mlx5_dev_cleanup(dev);
+ if (mlx5_unload_one(dev, priv)) {
+ dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
+ mlx5_health_cleanup(dev);
+ return;
+ }
+ mlx5_health_cleanup(dev);
+ mlx5_pci_close(dev, priv);
+ pci_set_drvdata(pdev, NULL);
kfree(dev);
}
+static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct mlx5_priv *priv = &dev->priv;
+
+ dev_info(&pdev->dev, "%s was called\n", __func__);
+ mlx5_enter_error_state(dev);
+ mlx5_unload_one(dev, priv);
+ mlx5_pci_disable_device(dev);
+ return state == pci_channel_io_perm_failure ?
+ PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ int err = 0;
+
+ dev_info(&pdev->dev, "%s was called\n", __func__);
+
+ err = mlx5_pci_enable_device(dev);
+ if (err) {
+ dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n"
+ , __func__, err);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ pci_set_master(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
+}
+
+void mlx5_disable_device(struct mlx5_core_dev *dev)
+{
+ mlx5_pci_err_detected(dev->pdev, 0);
+}
+
+/* wait for the device to show vital signs. For now we check
+ * that we can read the device ID and that the health buffer
+ * shows a non-zero value that is different from 0xffffffff
+ */
+static void wait_vital(struct pci_dev *pdev)
+{
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct mlx5_core_health *health = &dev->priv.health;
+ const int niter = 100;
+ u32 count;
+ u16 did;
+ int i;
+
+ /* Wait for firmware to be ready after reset */
+ msleep(1000);
+ for (i = 0; i < niter; i++) {
+ if (pci_read_config_word(pdev, 2, &did)) {
+ dev_warn(&pdev->dev, "failed reading config word\n");
+ break;
+ }
+ if (did == pdev->device) {
+ dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i);
+ break;
+ }
+ msleep(50);
+ }
+ if (i == niter)
+ dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
+
+ for (i = 0; i < niter; i++) {
+ count = ioread32be(health->health_counter);
+ if (count && count != 0xffffffff) {
+ dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
+ break;
+ }
+ msleep(50);
+ }
+
+ if (i == niter)
+ dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
+}
+
+static void mlx5_pci_resume(struct pci_dev *pdev)
+{
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct mlx5_priv *priv = &dev->priv;
+ int err;
+
+ dev_info(&pdev->dev, "%s was called\n", __func__);
+
+ pci_save_state(pdev);
+ wait_vital(pdev);
+
+ err = mlx5_load_one(dev, priv);
+ if (err)
+ dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n"
+ , __func__, err);
+ else
+ dev_info(&pdev->dev, "%s: device recovered\n", __func__);
+}
+
+static const struct pci_error_handlers mlx5_err_handler = {
+ .error_detected = mlx5_pci_err_detected,
+ .slot_reset = mlx5_pci_slot_reset,
+ .resume = mlx5_pci_resume
+};
+
static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */
{ PCI_VDEVICE(MELLANOX, 0x1012) }, /* Connect-IB VF */
@@ -1141,7 +1380,8 @@ static struct pci_driver mlx5_core_driver = {
.name = DRIVER_NAME,
.id_table = mlx5_core_pci_table,
.probe = init_one,
- .remove = remove_one
+ .remove = remove_one,
+ .err_handler = &mlx5_err_handler
};
static int __init init(void)
@@ -1149,16 +1389,10 @@ static int __init init(void)
int err;
mlx5_register_debugfs();
- mlx5_core_wq = create_singlethread_workqueue("mlx5_core_wq");
- if (!mlx5_core_wq) {
- err = -ENOMEM;
- goto err_debug;
- }
- mlx5_health_init();
err = pci_register_driver(&mlx5_core_driver);
if (err)
- goto err_health;
+ goto err_debug;
#ifdef CONFIG_MLX5_CORE_EN
mlx5e_init();
@@ -1166,9 +1400,6 @@ static int __init init(void)
return 0;
-err_health:
- mlx5_health_cleanup();
- destroy_workqueue(mlx5_core_wq);
err_debug:
mlx5_unregister_debugfs();
return err;
@@ -1180,8 +1411,6 @@ static void __exit cleanup(void)
mlx5e_cleanup();
#endif
pci_unregister_driver(&mlx5_core_driver);
- mlx5_health_cleanup();
- destroy_workqueue(mlx5_core_wq);
mlx5_unregister_debugfs();
}
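For reference, a hedged sketch (not part of the patch) of how the PCI/AER core is expected to drive the new error handlers: error_detected quiesces the device, slot_reset re-enables it after the link reset, and resume reloads the driver state. The demo_* driver skeleton below is hypothetical.

#include <linux/pci.h>

static pci_ers_result_t demo_err_detected(struct pci_dev *pdev,
					  pci_channel_state_t state)
{
	/* quiesce the device: unload the data path, disable PCI access */
	return state == pci_channel_io_perm_failure ?
	       PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t demo_slot_reset(struct pci_dev *pdev)
{
	/* re-enable the device and restore config space after the reset */
	return PCI_ERS_RESULT_RECOVERED;
}

static void demo_resume(struct pci_dev *pdev)
{
	/* wait for firmware vital signs, then reload the driver state */
}

static const struct pci_error_handlers demo_err_handler = {
	.error_detected	= demo_err_detected,
	.slot_reset	= demo_slot_reset,
	.resume		= demo_resume,
};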
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 566a70488db1..cee5b7a839bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -43,25 +43,25 @@
extern int mlx5_core_debug_mask;
-#define mlx5_core_dbg(dev, format, ...) \
- pr_debug("%s:%s:%d:(pid %d): " format, \
- (dev)->priv.name, __func__, __LINE__, current->pid, \
+#define mlx5_core_dbg(__dev, format, ...) \
+ dev_dbg(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format, \
+ (__dev)->priv.name, __func__, __LINE__, current->pid, \
##__VA_ARGS__)
-#define mlx5_core_dbg_mask(dev, mask, format, ...) \
+#define mlx5_core_dbg_mask(__dev, mask, format, ...) \
do { \
if ((mask) & mlx5_core_debug_mask) \
- mlx5_core_dbg(dev, format, ##__VA_ARGS__); \
+ mlx5_core_dbg(__dev, format, ##__VA_ARGS__); \
} while (0)
-#define mlx5_core_err(dev, format, ...) \
- pr_err("%s:%s:%d:(pid %d): " format, \
- (dev)->priv.name, __func__, __LINE__, current->pid, \
+#define mlx5_core_err(__dev, format, ...) \
+ dev_err(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format, \
+ (__dev)->priv.name, __func__, __LINE__, current->pid, \
##__VA_ARGS__)
-#define mlx5_core_warn(dev, format, ...) \
- pr_warn("%s:%s:%d:(pid %d): " format, \
- (dev)->priv.name, __func__, __LINE__, current->pid, \
+#define mlx5_core_warn(__dev, format, ...) \
+ dev_warn(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format, \
+ (__dev)->priv.name, __func__, __LINE__, current->pid, \
##__VA_ARGS__)
enum {
@@ -86,6 +86,10 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
+void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
+ unsigned long param);
+void mlx5_enter_error_state(struct mlx5_core_dev *dev);
+void mlx5_disable_device(struct mlx5_core_dev *dev);
void mlx5e_init(void);
void mlx5e_cleanup(void);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 1adb300dd850..6fa22b51e460 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -40,6 +40,7 @@ void mlx5_init_mr_table(struct mlx5_core_dev *dev)
{
struct mlx5_mr_table *table = &dev->priv.mr_table;
+ memset(table, 0, sizeof(*table));
rwlock_init(&table->lock);
INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 8a64542abc16..1cda5d268ec9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -275,12 +275,36 @@ out_alloc:
return err;
}
+
+static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
+{
+ struct mlx5_manage_pages_inbox *in;
+ struct mlx5_manage_pages_outbox out;
+ int err;
+
+ in = kzalloc(sizeof(*in), GFP_KERNEL);
+ if (!in)
+ return;
+
+ memset(&out, 0, sizeof(out));
+ in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
+ in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
+ in->func_id = cpu_to_be16(func_id);
+ err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
+ if (!err)
+ err = mlx5_cmd_status_to_err(&out.hdr);
+
+ if (err)
+ mlx5_core_warn(dev, "page notify failed\n");
+
+ kfree(in);
+}
+
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int notify_fail)
{
struct mlx5_manage_pages_inbox *in;
struct mlx5_manage_pages_outbox out;
- struct mlx5_manage_pages_inbox *nin;
int inlen;
u64 addr;
int err;
@@ -289,8 +313,9 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
in = mlx5_vzalloc(inlen);
if (!in) {
+ err = -ENOMEM;
mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
- return -ENOMEM;
+ goto out_free;
}
memset(&out, 0, sizeof(out));
@@ -316,43 +341,29 @@ retry:
if (err) {
mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
func_id, npages, err);
- goto out_alloc;
+ goto out_4k;
}
dev->priv.fw_pages += npages;
- if (out.hdr.status) {
- err = mlx5_cmd_status_to_err(&out.hdr);
- if (err) {
- mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
- func_id, npages, out.hdr.status);
- goto out_alloc;
- }
+ err = mlx5_cmd_status_to_err(&out.hdr);
+ if (err) {
+ mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
+ func_id, npages, out.hdr.status);
+ goto out_4k;
}
mlx5_core_dbg(dev, "err %d\n", err);
- goto out_free;
-
-out_alloc:
- if (notify_fail) {
- nin = kzalloc(sizeof(*nin), GFP_KERNEL);
- if (!nin) {
- mlx5_core_warn(dev, "allocation failed\n");
- goto out_4k;
- }
- memset(&out, 0, sizeof(out));
- nin->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
- nin->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
- if (mlx5_cmd_exec(dev, nin, sizeof(*nin), &out, sizeof(out)))
- mlx5_core_warn(dev, "page notify failed\n");
- kfree(nin);
- }
+ kvfree(in);
+ return 0;
out_4k:
for (i--; i >= 0; i--)
free_4k(dev, be64_to_cpu(in->pas[i]));
out_free:
kvfree(in);
+ if (notify_fail)
+ page_notify_fail(dev, func_id);
return err;
}
@@ -482,15 +493,20 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
struct fw_page *fwp;
struct rb_node *p;
int nclaimed = 0;
- int err;
+ int err = 0;
do {
p = rb_first(&dev->priv.page_root);
if (p) {
fwp = rb_entry(p, struct fw_page, rb_node);
- err = reclaim_pages(dev, fwp->func_id,
- optimal_reclaimed_pages(),
- &nclaimed);
+ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ free_4k(dev, fwp->addr);
+ nclaimed = 1;
+ } else {
+ err = reclaim_pages(dev, fwp->func_id,
+ optimal_reclaimed_pages(),
+ &nclaimed);
+ }
if (err) {
mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 3b9480fa3403..a87e773e93f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -90,16 +90,13 @@ int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps)
{
struct mlx5_reg_pcap in;
struct mlx5_reg_pcap out;
- int err;
memset(&in, 0, sizeof(in));
in.caps_127_96 = cpu_to_be32(caps);
in.port_num = port_num;
- err = mlx5_core_access_reg(dev, &in, sizeof(in), &out,
- sizeof(out), MLX5_REG_PCAP, 0, 1);
-
- return err;
+ return mlx5_core_access_reg(dev, &in, sizeof(in), &out,
+ sizeof(out), MLX5_REG_PCAP, 0, 1);
}
EXPORT_SYMBOL_GPL(mlx5_set_port_caps);
@@ -107,16 +104,13 @@ int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
int ptys_size, int proto_mask, u8 local_port)
{
u32 in[MLX5_ST_SZ_DW(ptys_reg)];
- int err;
memset(in, 0, sizeof(in));
MLX5_SET(ptys_reg, in, local_port, local_port);
MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
- err = mlx5_core_access_reg(dev, in, sizeof(in), ptys,
- ptys_size, MLX5_REG_PTYS, 0, 0);
-
- return err;
+ return mlx5_core_access_reg(dev, in, sizeof(in), ptys,
+ ptys_size, MLX5_REG_PTYS, 0, 0);
}
EXPORT_SYMBOL_GPL(mlx5_query_port_ptys);
@@ -199,7 +193,6 @@ int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
{
u32 in[MLX5_ST_SZ_DW(ptys_reg)];
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
- int err;
memset(in, 0, sizeof(in));
@@ -210,9 +203,8 @@ int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
else
MLX5_SET(ptys_reg, in, ib_proto_admin, proto_admin);
- err = mlx5_core_access_reg(dev, in, sizeof(in), out,
- sizeof(out), MLX5_REG_PTYS, 0, 1);
- return err;
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PTYS, 0, 1);
}
EXPORT_SYMBOL_GPL(mlx5_set_port_proto);
@@ -250,7 +242,7 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
return err;
*status = MLX5_GET(paos_reg, out, admin_status);
- return err;
+ return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
@@ -308,15 +300,12 @@ static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
int pvlc_size, u8 local_port)
{
u32 in[MLX5_ST_SZ_DW(pvlc_reg)];
- int err;
memset(in, 0, sizeof(in));
MLX5_SET(pvlc_reg, in, local_port, local_port);
- err = mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
- pvlc_size, MLX5_REG_PVLC, 0, 0);
-
- return err;
+ return mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
+ pvlc_size, MLX5_REG_PVLC, 0, 0);
}
int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
@@ -339,16 +328,14 @@ int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause)
{
u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
- int err;
memset(in, 0, sizeof(in));
MLX5_SET(pfcc_reg, in, local_port, 1);
MLX5_SET(pfcc_reg, in, pptx, tx_pause);
MLX5_SET(pfcc_reg, in, pprx, rx_pause);
- err = mlx5_core_access_reg(dev, in, sizeof(in), out,
- sizeof(out), MLX5_REG_PFCC, 0, 1);
- return err;
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PFCC, 0, 1);
}
EXPORT_SYMBOL_GPL(mlx5_set_port_pause);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 8b494b562263..30e2ba3f5f16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -345,6 +345,7 @@ void mlx5_init_qp_table(struct mlx5_core_dev *dev)
{
struct mlx5_qp_table *table = &dev->priv.qp_table;
+ memset(table, 0, sizeof(*table));
spin_lock_init(&table->lock);
INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
mlx5_qp_debugfs_init(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
index c48f504ccbeb..ffada801976b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -531,6 +531,7 @@ void mlx5_init_srq_table(struct mlx5_core_dev *dev)
{
struct mlx5_srq_table *table = &dev->priv.srq_table;
+ memset(table, 0, sizeof(*table));
spin_lock_init(&table->lock);
INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index b4c87c7b0cf0..d7068f54e800 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -177,7 +177,7 @@ int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_tir_out)];
+ u32 in[MLX5_ST_SZ_DW(destroy_tir_in)];
u32 out[MLX5_ST_SZ_DW(destroy_tir_out)];
memset(in, 0, sizeof(in));
@@ -206,7 +206,7 @@ int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_tis_out)];
+ u32 in[MLX5_ST_SZ_DW(destroy_tis_in)];
u32 out[MLX5_ST_SZ_DW(destroy_tis_out)];
memset(in, 0, sizeof(in));
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 2941d9c5ae48..e36e12219c9b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -30,3 +30,14 @@ config MLXSW_SWITCHX2
To compile this driver as a module, choose M here: the
module will be called mlxsw_switchx2.
+
+config MLXSW_SPECTRUM
+ tristate "Mellanox Technologies Spectrum support"
+ depends on MLXSW_CORE && NET_SWITCHDEV
+ default m
+ ---help---
+ This driver supports Mellanox Technologies Spectrum Ethernet
+ Switch ASICs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mlxsw_spectrum.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 0a05f65ee814..af015818fd19 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -4,3 +4,6 @@ obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o
mlxsw_pci-objs := pci.o
obj-$(CONFIG_MLXSW_SWITCHX2) += mlxsw_switchx2.o
mlxsw_switchx2-objs := switchx2.o
+obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o
+mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
+ spectrum_switchdev.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index 770db17eb03f..cd63b8263688 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -464,6 +464,8 @@ MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_rq, 0x10, 0, 8);
* passed in this command must be pinned.
*/
+#define MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX 32
+
static inline int mlxsw_cmd_map_fa(struct mlxsw_core *mlxsw_core,
char *in_mbox, u32 vpm_entries_count)
{
@@ -568,7 +570,7 @@ MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vlan_groups, 0x0C, 6, 1);
*/
MLXSW_ITEM32(cmd_mbox, config_profile, set_max_regions, 0x0C, 7, 1);
-/* cmd_mbox_config_profile_set_fid_based
+/* cmd_mbox_config_profile_set_flood_mode
* Capability bit. Setting a bit to 1 configures the profile
* according to the mailbox contents.
*/
@@ -649,12 +651,8 @@ MLXSW_ITEM32(cmd_mbox, config_profile, max_vlan_groups, 0x28, 0, 12);
MLXSW_ITEM32(cmd_mbox, config_profile, max_regions, 0x2C, 0, 16);
/* cmd_mbox_config_profile_max_flood_tables
- * Maximum number of Flooding Tables. Flooding Tables are associated to
- * the different packet types for the different switch partitions.
- * Note that the table size depends on the fid_based mode.
- * In SwitchX silicon, tables are split equally between the switch
- * partitions. e.g. for 2 swids and 8 tables, the first 4 are associated
- * with swid-1 and the last 4 are associated with swid-2.
+ * Maximum number of single-entry flooding tables. Different flooding tables
+ * can be associated with different packet types.
*/
MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4);
@@ -665,15 +663,42 @@ MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4);
*/
MLXSW_ITEM32(cmd_mbox, config_profile, max_vid_flood_tables, 0x30, 8, 4);
-/* cmd_mbox_config_profile_fid_based
- * FID Based Flood Mode
- * 00 Do not use FID to offset the index into the Port Group Table/Multicast ID
- * 01 Use FID to offset the index to the Port Group Table (pgi)
- * 10 Use FID to offset the index to the Port Group Table (pgi) and
- * the Multicast ID
+/* cmd_mbox_config_profile_flood_mode
+ * Flooding mode to use.
+ * 0-2 - Backward compatible modes for SwitchX devices.
+ * 3 - Mixed mode, where:
+ * max_flood_tables indicates the number of single-entry tables.
+ * max_vid_flood_tables indicates the number of per-VID tables.
+ * max_fid_offset_flood_tables indicates the number of FID-offset tables.
+ * max_fid_flood_tables indicates the number of per-FID tables.
*/
MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 2);
+/* cmd_mbox_config_profile_max_fid_offset_flood_tables
+ * Maximum number of FID-offset flooding tables.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile,
+ max_fid_offset_flood_tables, 0x34, 24, 4);
+
+/* cmd_mbox_config_profile_fid_offset_flood_table_size
+ * The size (number of entries) of each FID-offset flood table.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile,
+ fid_offset_flood_table_size, 0x34, 0, 16);
+
+/* cmd_mbox_config_profile_max_fid_flood_tables
+ * Maximum number of per-FID flooding tables.
+ *
+ * Note: These flooding tables cover special FIDs only (vFIDs), starting at
+ * FID value 4K and higher.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_fid_flood_tables, 0x38, 24, 4);
+
+/* cmd_mbox_config_profile_fid_flood_table_size
+ * The size (number of entries) of each per-FID table.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, fid_flood_table_size, 0x38, 0, 16);
+
/* cmd_mbox_config_profile_max_ib_mc
* Maximum number of multicast FDB records for InfiniBand
* FDB (in 512 chunks) per InfiniBand switch partition.
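For reference, a hedged sketch (not part of the patch) of a profile fragment selecting the new mixed flood mode (flood_mode = 3) described above; the table counts and sizes are hypothetical and other profile members are omitted.

#include "core.h"

static const struct mlxsw_config_profile demo_profile = {
	.used_flood_mode		= 1,
	.flood_mode			= 3,	/* mixed mode */
	.max_fid_offset_flood_tables	= 2,	/* FID-offset tables */
	.fid_offset_flood_table_size	= 4096,
	.max_fid_flood_tables		= 2,	/* per-FID (vFID) tables */
	.fid_flood_table_size		= 4096,
};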
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 28c19cc1a17c..97f0d93caf99 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -287,7 +287,7 @@ static void mlxsw_emad_pack_op_tlv(char *op_tlv,
mlxsw_emad_op_tlv_status_set(op_tlv, 0);
mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
- if (MLXSW_CORE_REG_ACCESS_TYPE_QUERY == type)
+ if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
mlxsw_emad_op_tlv_method_set(op_tlv,
MLXSW_EMAD_OP_TLV_METHOD_QUERY);
else
@@ -362,7 +362,7 @@ static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
char *op_tlv;
op_tlv = mlxsw_emad_op_tlv(skb);
- return (MLXSW_EMAD_OP_TLV_RESPONSE == mlxsw_emad_op_tlv_r_get(op_tlv));
+ return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
}
#define MLXSW_EMAD_TIMEOUT_MS 200
@@ -511,7 +511,6 @@ static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
return err;
mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
- MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
MLXSW_TRAP_ID_ETHEMAD);
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
}
@@ -556,8 +555,8 @@ static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ mlxsw_core->emad.use_emad = false;
mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
- MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
MLXSW_TRAP_ID_ETHEMAD);
mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 165808471188..807827350a89 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -54,6 +54,7 @@
MODULE_ALIAS(MLXSW_MODULE_ALIAS_PREFIX kind)
#define MLXSW_DEVICE_KIND_SWITCHX2 "switchx2"
+#define MLXSW_DEVICE_KIND_SPECTRUM "spectrum"
struct mlxsw_core;
struct mlxsw_driver;
@@ -153,6 +154,10 @@ struct mlxsw_config_profile {
u8 max_flood_tables;
u8 max_vid_flood_tables;
u8 flood_mode;
+ u8 max_fid_offset_flood_tables;
+ u16 fid_offset_flood_table_size;
+ u8 max_fid_flood_tables;
+ u16 fid_flood_table_size;
u16 max_ib_mc;
u16 max_pkey;
u8 ar_sec;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
index 36fb1cec53c9..a94dbda6590b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/item.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -171,15 +171,21 @@ static inline void __mlxsw_item_set64(char *buf, struct mlxsw_item *item,
}
static inline void __mlxsw_item_memcpy_from(char *buf, char *dst,
- struct mlxsw_item *item)
+ struct mlxsw_item *item,
+ unsigned short index)
{
- memcpy(dst, &buf[item->offset], item->size.bytes);
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));
+
+ memcpy(dst, &buf[offset], item->size.bytes);
}
-static inline void __mlxsw_item_memcpy_to(char *buf, char *src,
- struct mlxsw_item *item)
+static inline void __mlxsw_item_memcpy_to(char *buf, const char *src,
+ struct mlxsw_item *item,
+ unsigned short index)
{
- memcpy(&buf[item->offset], src, item->size.bytes);
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));
+
+ memcpy(&buf[offset], src, item->size.bytes);
}
static inline u16
@@ -373,12 +379,40 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
static inline void \
mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, char *dst) \
{ \
- __mlxsw_item_memcpy_from(buf, dst, &__ITEM_NAME(_type, _cname, _iname));\
+ __mlxsw_item_memcpy_from(buf, dst, \
+ &__ITEM_NAME(_type, _cname, _iname), 0); \
+} \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, const char *src) \
+{ \
+ __mlxsw_item_memcpy_to(buf, src, \
+ &__ITEM_NAME(_type, _cname, _iname), 0); \
+}
+
+#define MLXSW_ITEM_BUF_INDEXED(_type, _cname, _iname, _offset, _sizebytes, \
+ _step, _instepoffset) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .step = _step, \
+ .in_step_offset = _instepoffset, \
+ .size = {.bytes = _sizebytes,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, \
+ unsigned short index, \
+ char *dst) \
+{ \
+ __mlxsw_item_memcpy_from(buf, dst, \
+ &__ITEM_NAME(_type, _cname, _iname), index); \
} \
static inline void \
-mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, char *src) \
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, \
+ unsigned short index, \
+ const char *src) \
{ \
- __mlxsw_item_memcpy_to(buf, src, &__ITEM_NAME(_type, _cname, _iname)); \
+ __mlxsw_item_memcpy_to(buf, src, \
+ &__ITEM_NAME(_type, _cname, _iname), index); \
}
#define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes, \
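For reference, a hedged sketch (not part of the patch) of declaring and using an indexed buffer item with the new MLXSW_ITEM_BUF_INDEXED() macro; the register name, field name, offsets and step below are hypothetical.

#include "item.h"

/* A 6-byte field at offset 0x10, repeated every 0x20 bytes per index. */
MLXSW_ITEM_BUF_INDEXED(reg, demo, mac, 0x10, 6, 0x20, 0x00);

static void demo_use(char *payload, const char *mac)
{
	char readback[6];

	mlxsw_reg_demo_mac_memcpy_to(payload, 3, mac);		/* write entry 3 */
	mlxsw_reg_demo_mac_memcpy_from(payload, 3, readback);	/* read it back */
}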
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index cef866c37648..de69e719dc9d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -57,6 +57,7 @@ static const char mlxsw_pci_driver_name[] = "mlxsw_pci";
static const struct pci_device_id mlxsw_pci_id_table[] = {
{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
+ {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
{0, }
};
@@ -67,6 +68,8 @@ static const char *mlxsw_pci_device_kind_get(const struct pci_device_id *id)
switch (id->device) {
case PCI_DEVICE_ID_MELLANOX_SWITCHX2:
return MLXSW_DEVICE_KIND_SWITCHX2;
+ case PCI_DEVICE_ID_MELLANOX_SPECTRUM:
+ return MLXSW_DEVICE_KIND_SPECTRUM;
default:
BUG();
}
@@ -171,8 +174,8 @@ struct mlxsw_pci {
struct msix_entry msix_entry;
struct mlxsw_core *core;
struct {
- u16 num_pages;
struct mlxsw_pci_mem_item *items;
+ unsigned int count;
} fw_area;
struct {
struct mlxsw_pci_mem_item out_mbox;
@@ -431,8 +434,7 @@ static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
- if (net_ratelimit())
- dev_err(&pdev->dev, "failed to dma map tx frag\n");
+ dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
return -EIO;
}
mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
@@ -497,6 +499,7 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
struct mlxsw_pci_queue *q)
{
struct mlxsw_pci_queue_elem_info *elem_info;
+ u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci);
int i;
int err;
@@ -504,9 +507,9 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
q->consumer_counter = 0;
/* Set CQ of same number of this RDQ with base
- * above MLXSW_PCI_SDQS_MAX as the lower ones are assigned to SDQs.
+ * above SDQ count as the lower ones are assigned to SDQs.
*/
- mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num + MLXSW_PCI_SDQS_COUNT);
+ mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num);
mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
@@ -699,8 +702,8 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
put_new_skb:
memset(wqe, 0, q->elem_size);
err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
- if (err && net_ratelimit())
- dev_dbg(&pdev->dev, "Failed to alloc skb for RDQ\n");
+ if (err)
+ dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
/* Everything is set up, ring doorbell to pass elem to HW */
q->producer_counter++;
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
@@ -830,7 +833,8 @@ static void mlxsw_pci_eq_tasklet(unsigned long data)
{
struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
struct mlxsw_pci *mlxsw_pci = q->pci;
- unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_COUNT)];
+ u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
+ unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
char *eqe;
u8 cqn;
bool cq_handle = false;
@@ -866,7 +870,7 @@ static void mlxsw_pci_eq_tasklet(unsigned long data)
if (!cq_handle)
return;
- for_each_set_bit(cqn, active_cqns, MLXSW_PCI_CQS_COUNT) {
+ for_each_set_bit(cqn, active_cqns, cq_count) {
q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
mlxsw_pci_queue_tasklet_schedule(q);
}
@@ -1067,10 +1071,8 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);
- if ((num_sdqs != MLXSW_PCI_SDQS_COUNT) ||
- (num_rdqs != MLXSW_PCI_RDQS_COUNT) ||
- (num_cqs != MLXSW_PCI_CQS_COUNT) ||
- (num_eqs != MLXSW_PCI_EQS_COUNT)) {
+ if (num_sdqs + num_rdqs > num_cqs ||
+ num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
dev_err(&pdev->dev, "Unsupported number of queues\n");
return -EINVAL;
}
@@ -1215,6 +1217,14 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mbox, profile->max_flood_tables);
mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
mbox, profile->max_vid_flood_tables);
+ mlxsw_cmd_mbox_config_profile_max_fid_offset_flood_tables_set(
+ mbox, profile->max_fid_offset_flood_tables);
+ mlxsw_cmd_mbox_config_profile_fid_offset_flood_table_size_set(
+ mbox, profile->fid_offset_flood_table_size);
+ mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set(
+ mbox, profile->max_fid_flood_tables);
+ mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set(
+ mbox, profile->fid_flood_table_size);
}
if (profile->used_flood_mode) {
mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
@@ -1272,6 +1282,7 @@ static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
u16 num_pages)
{
struct mlxsw_pci_mem_item *mem_item;
+ int nent = 0;
int i;
int err;
@@ -1279,7 +1290,7 @@ static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
GFP_KERNEL);
if (!mlxsw_pci->fw_area.items)
return -ENOMEM;
- mlxsw_pci->fw_area.num_pages = num_pages;
+ mlxsw_pci->fw_area.count = num_pages;
mlxsw_cmd_mbox_zero(mbox);
for (i = 0; i < num_pages; i++) {
@@ -1293,13 +1304,22 @@ static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
err = -ENOMEM;
goto err_alloc;
}
- mlxsw_cmd_mbox_map_fa_pa_set(mbox, i, mem_item->mapaddr);
- mlxsw_cmd_mbox_map_fa_log2size_set(mbox, i, 0); /* 1 page */
+ mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);
+ mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */
+ if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {
+ err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
+ if (err)
+ goto err_cmd_map_fa;
+ nent = 0;
+ mlxsw_cmd_mbox_zero(mbox);
+ }
}
- err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, num_pages);
- if (err)
- goto err_cmd_map_fa;
+ if (nent) {
+ err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
+ if (err)
+ goto err_cmd_map_fa;
+ }
return 0;
@@ -1322,7 +1342,7 @@ static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
mlxsw_cmd_unmap_fa(mlxsw_pci->core);
- for (i = 0; i < mlxsw_pci->fw_area.num_pages; i++) {
+ for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
mem_item = &mlxsw_pci->fw_area.items[i];
pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
@@ -1642,8 +1662,9 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
CIR_OUT_PARAM_LO));
memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
}
- } else if (!err && out_mbox)
+ } else if (!err && out_mbox) {
memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
+ }
mutex_unlock(&mlxsw_pci->cmd.lock);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h
index 1ef9664b4512..142f33d978c5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h
@@ -40,6 +40,7 @@
#include "item.h"
#define PCI_DEVICE_ID_MELLANOX_SWITCHX2 0xc738
+#define PCI_DEVICE_ID_MELLANOX_SPECTRUM 0xcb84
#define MLXSW_PCI_BAR0_SIZE (1024 * 1024) /* 1MB */
#define MLXSW_PCI_PAGE_SIZE 4096
@@ -71,9 +72,7 @@
#define MLXSW_PCI_DOORBELL(offset, type_offset, num) \
((offset) + (type_offset) + (num) * 4)
-#define MLXSW_PCI_RDQS_COUNT 24
-#define MLXSW_PCI_SDQS_COUNT 24
-#define MLXSW_PCI_CQS_COUNT (MLXSW_PCI_RDQS_COUNT + MLXSW_PCI_SDQS_COUNT)
+#define MLXSW_PCI_CQS_MAX 96
#define MLXSW_PCI_EQS_COUNT 2
#define MLXSW_PCI_EQ_ASYNC_NUM 0
#define MLXSW_PCI_EQ_COMP_NUM 1
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 096e1c12175a..236fb5d2ad69 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -99,57 +99,6 @@ static const struct mlxsw_reg_info mlxsw_reg_spad = {
*/
MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6);
-/* SMID - Switch Multicast ID
- * --------------------------
- * In multi-chip configuration, each device should maintain mapping between
- * Multicast ID (MID) into a list of local ports. This mapping is used in all
- * the devices other than the ingress device, and is implemented as part of the
- * FDB. The MID record maps from a MID, which is a unique identi- fier of the
- * multicast group within the stacking domain, into a list of local ports into
- * which the packet is replicated.
- */
-#define MLXSW_REG_SMID_ID 0x2007
-#define MLXSW_REG_SMID_LEN 0x420
-
-static const struct mlxsw_reg_info mlxsw_reg_smid = {
- .id = MLXSW_REG_SMID_ID,
- .len = MLXSW_REG_SMID_LEN,
-};
-
-/* reg_smid_swid
- * Switch partition ID.
- * Access: Index
- */
-MLXSW_ITEM32(reg, smid, swid, 0x00, 24, 8);
-
-/* reg_smid_mid
- * Multicast identifier - global identifier that represents the multicast group
- * across all devices
- * Access: Index
- */
-MLXSW_ITEM32(reg, smid, mid, 0x00, 0, 16);
-
-/* reg_smid_port
- * Local port memebership (1 bit per port).
- * Access: RW
- */
-MLXSW_ITEM_BIT_ARRAY(reg, smid, port, 0x20, 0x20, 1);
-
-/* reg_smid_port_mask
- * Local port mask (1 bit per port).
- * Access: W
- */
-MLXSW_ITEM_BIT_ARRAY(reg, smid, port_mask, 0x220, 0x20, 1);
-
-static inline void mlxsw_reg_smid_pack(char *payload, u16 mid)
-{
- MLXSW_REG_ZERO(smid, payload);
- mlxsw_reg_smid_swid_set(payload, 0);
- mlxsw_reg_smid_mid_set(payload, mid);
- mlxsw_reg_smid_port_set(payload, MLXSW_PORT_CPU_PORT, 1);
- mlxsw_reg_smid_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1);
-}
-
/* SSPR - Switch System Port Record Register
* -----------------------------------------
* Configures the system port to local port mapping.
@@ -208,11 +157,359 @@ static inline void mlxsw_reg_sspr_pack(char *payload, u8 local_port)
mlxsw_reg_sspr_system_port_set(payload, local_port);
}
+/* SFDAT - Switch Filtering Database Aging Time
+ * --------------------------------------------
+ * Controls the switch aging time. The aging time can be set per switch
+ * partition.
+ */
+#define MLXSW_REG_SFDAT_ID 0x2009
+#define MLXSW_REG_SFDAT_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_sfdat = {
+ .id = MLXSW_REG_SFDAT_ID,
+ .len = MLXSW_REG_SFDAT_LEN,
+};
+
+/* reg_sfdat_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfdat, swid, 0x00, 24, 8);
+
+/* reg_sfdat_age_time
+ * Aging time in seconds
+ * Min - 10 seconds
+ * Max - 1,000,000 seconds
+ * Default is 300 seconds.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdat, age_time, 0x04, 0, 20);
+
+static inline void mlxsw_reg_sfdat_pack(char *payload, u32 age_time)
+{
+ MLXSW_REG_ZERO(sfdat, payload);
+ mlxsw_reg_sfdat_swid_set(payload, 0);
+ mlxsw_reg_sfdat_age_time_set(payload, age_time);
+}
+
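As an aside, a minimal sketch (not part of the patch) of how SFDAT could be driven from the driver; the helper name is hypothetical, while mlxsw_reg_write() and struct mlxsw_sp appear in the spectrum.c code added further down:

/* Hypothetical helper: program the FDB aging time via SFDAT. */
static int mlxsw_sp_ageing_time_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];

	/* SFDAT accepts 10..1,000,000 seconds; the device default is 300. */
	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
}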
+/* SFD - Switch Filtering Database
+ * -------------------------------
+ * The following register defines the access to the filtering database.
+ * The register supports querying, adding, removing and modifying the database.
+ * The access is optimized for bulk updates in which case more than one
+ * FDB record is present in the same command.
+ */
+#define MLXSW_REG_SFD_ID 0x200A
+#define MLXSW_REG_SFD_BASE_LEN 0x10 /* base length, without records */
+#define MLXSW_REG_SFD_REC_LEN 0x10 /* record length */
+#define MLXSW_REG_SFD_REC_MAX_COUNT 64
+#define MLXSW_REG_SFD_LEN (MLXSW_REG_SFD_BASE_LEN + \
+ MLXSW_REG_SFD_REC_LEN * MLXSW_REG_SFD_REC_MAX_COUNT)
+
+static const struct mlxsw_reg_info mlxsw_reg_sfd = {
+ .id = MLXSW_REG_SFD_ID,
+ .len = MLXSW_REG_SFD_LEN,
+};
+
+/* reg_sfd_swid
+ * Switch partition ID for queries. Reserved on Write.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfd, swid, 0x00, 24, 8);
+
+enum mlxsw_reg_sfd_op {
+ /* Dump entire FDB (process according to record_locator) */
+ MLXSW_REG_SFD_OP_QUERY_DUMP = 0,
+ /* Query records by {MAC, VID/FID} value */
+ MLXSW_REG_SFD_OP_QUERY_QUERY = 1,
+ /* Query and clear activity. Query records by {MAC, VID/FID} value */
+ MLXSW_REG_SFD_OP_QUERY_QUERY_AND_CLEAR_ACTIVITY = 2,
+ /* Test. Response indicates if each of the records could be
+ * added to the FDB.
+ */
+ MLXSW_REG_SFD_OP_WRITE_TEST = 0,
+ /* Add/modify. Aged-out records cannot be added. This command removes
+ * the learning notification of the {MAC, VID/FID}. Response includes
+ * the entries that were added to the FDB.
+ */
+ MLXSW_REG_SFD_OP_WRITE_EDIT = 1,
+ /* Remove record by {MAC, VID/FID}. This command also removes
+ * the learning notification and aged-out notifications
+ * of the {MAC, VID/FID}. The response provides current (pre-removal)
+ * entries as non-aged-out.
+ */
+ MLXSW_REG_SFD_OP_WRITE_REMOVE = 2,
+ /* Remove learned notification by {MAC, VID/FID}. The response provides
+ * the removed learning notification.
+ */
+ MLXSW_REG_SFD_OP_WRITE_REMOVE_NOTIFICATION = 2,
+};
+
+/* reg_sfd_op
+ * Operation.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, sfd, op, 0x04, 30, 2);
+
+/* reg_sfd_record_locator
+ * Used for querying the FDB. Use record_locator=0 to initiate the
+ * query. When a record is returned, a new record_locator is
+ * returned to be used in the subsequent query.
+ * Reserved for database update.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfd, record_locator, 0x04, 0, 30);
+
+/* reg_sfd_num_rec
+ * Request: Number of records to read/add/modify/remove
+ * Response: Number of records read/added/replaced/removed
+ * See above description for more details.
+ * Ranges 0..64
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfd, num_rec, 0x08, 0, 8);
+
+static inline void mlxsw_reg_sfd_pack(char *payload, enum mlxsw_reg_sfd_op op,
+ u32 record_locator)
+{
+ MLXSW_REG_ZERO(sfd, payload);
+ mlxsw_reg_sfd_op_set(payload, op);
+ mlxsw_reg_sfd_record_locator_set(payload, record_locator);
+}
+
+/* reg_sfd_rec_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, rec_swid, MLXSW_REG_SFD_BASE_LEN, 24, 8,
+ MLXSW_REG_SFD_REC_LEN, 0x00, false);
+
+enum mlxsw_reg_sfd_rec_type {
+ MLXSW_REG_SFD_REC_TYPE_UNICAST = 0x0,
+};
+
+/* reg_sfd_rec_type
+ * FDB record type.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, rec_type, MLXSW_REG_SFD_BASE_LEN, 20, 4,
+ MLXSW_REG_SFD_REC_LEN, 0x00, false);
+
+enum mlxsw_reg_sfd_rec_policy {
+ /* Replacement disabled, aging disabled. */
+ MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY = 0,
+ /* (mlag remote): Replacement enabled, aging disabled,
+ * learning notification enabled on this port.
+ */
+ MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG = 1,
+ /* (ingress device): Replacement enabled, aging enabled. */
+ MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS = 3,
+};
+
+/* reg_sfd_rec_policy
+ * Policy.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, rec_policy, MLXSW_REG_SFD_BASE_LEN, 18, 2,
+ MLXSW_REG_SFD_REC_LEN, 0x00, false);
+
+/* reg_sfd_rec_a
+ * Activity. Set for new static entries. Set for static entries if a frame SMAC
+ * lookup hits on the entry.
+ * To clear the a bit, use "query and clear activity" op.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, rec_a, MLXSW_REG_SFD_BASE_LEN, 16, 1,
+ MLXSW_REG_SFD_REC_LEN, 0x00, false);
+
+/* reg_sfd_rec_mac
+ * MAC address.
+ * Access: Index
+ */
+MLXSW_ITEM_BUF_INDEXED(reg, sfd, rec_mac, MLXSW_REG_SFD_BASE_LEN, 6,
+ MLXSW_REG_SFD_REC_LEN, 0x02);
+
+enum mlxsw_reg_sfd_rec_action {
+ /* forward */
+ MLXSW_REG_SFD_REC_ACTION_NOP = 0,
+ /* forward and trap, trap_id is FDB_TRAP */
+ MLXSW_REG_SFD_REC_ACTION_MIRROR_TO_CPU = 1,
+ /* trap and do not forward, trap_id is FDB_TRAP */
+ MLXSW_REG_SFD_REC_ACTION_TRAP = 3,
+ MLXSW_REG_SFD_REC_ACTION_DISCARD_ERROR = 15,
+};
+
+/* reg_sfd_rec_action
+ * Action to apply on the packet.
+ * Note: Dynamic entries can only be configured with NOP action.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, rec_action, MLXSW_REG_SFD_BASE_LEN, 28, 4,
+ MLXSW_REG_SFD_REC_LEN, 0x0C, false);
+
+/* reg_sfd_uc_sub_port
+ * VEPA channel on local port.
+ * Valid only if local port is a non-stacking port. Must be 0 if multichannel
+ * VEPA is not enabled.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8,
+ MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
+/* reg_sfd_uc_fid_vid
+ * Filtering ID or VLAN ID
+ * For SwitchX and SwitchX-2:
+ * - Dynamic entries (policy 2,3) use FID
+ * - Static entries (policy 0) use VID
+ * - When independent learning is configured, VID=FID
+ * For Spectrum: use FID for both Dynamic and Static entries.
+ * VID should not be used.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
+ MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
+/* reg_sfd_uc_system_port
+ * Unique port identifier for the final destination of the packet.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_system_port, MLXSW_REG_SFD_BASE_LEN, 0, 16,
+ MLXSW_REG_SFD_REC_LEN, 0x0C, false);
+
+static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
+ enum mlxsw_reg_sfd_rec_policy policy,
+ const char *mac, u16 vid,
+ enum mlxsw_reg_sfd_rec_action action,
+ u8 local_port)
+{
+ u8 num_rec = mlxsw_reg_sfd_num_rec_get(payload);
+
+ if (rec_index >= num_rec)
+ mlxsw_reg_sfd_num_rec_set(payload, rec_index + 1);
+ mlxsw_reg_sfd_rec_swid_set(payload, rec_index, 0);
+ mlxsw_reg_sfd_rec_type_set(payload, rec_index,
+ MLXSW_REG_SFD_REC_TYPE_UNICAST);
+ mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
+ mlxsw_reg_sfd_rec_mac_memcpy_to(payload, rec_index, mac);
+ mlxsw_reg_sfd_uc_sub_port_set(payload, rec_index, 0);
+ mlxsw_reg_sfd_uc_fid_vid_set(payload, rec_index, vid);
+ mlxsw_reg_sfd_rec_action_set(payload, rec_index, action);
+ mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port);
+}
+
+static inline void mlxsw_reg_sfd_uc_unpack(char *payload, int rec_index,
+ char *mac, u16 *p_vid,
+ u8 *p_local_port)
+{
+ mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac);
+ *p_vid = mlxsw_reg_sfd_uc_fid_vid_get(payload, rec_index);
+ *p_local_port = mlxsw_reg_sfd_uc_system_port_get(payload, rec_index);
+}
+
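For illustration only (not part of the patch), a sketch of programming a single static unicast FDB record with the helpers above; the function name is hypothetical and the large payload is heap-allocated, following the pattern spectrum.c uses for other multi-record registers:

/* Hypothetical helper: add one static unicast FDB entry on a port. */
static int mlxsw_sp_fdb_static_add(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				   const char *mac, u16 fid)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;
	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_WRITE_EDIT, 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY,
			      mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
			      local_port);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);
	return err;
}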
+/* SFN - Switch FDB Notification Register
+ * -------------------------------------------
+ * The switch provides notifications on newly learned FDB entries and
+ * aged out entries. The notifications can be polled by software.
+ */
+#define MLXSW_REG_SFN_ID 0x200B
+#define MLXSW_REG_SFN_BASE_LEN 0x10 /* base length, without records */
+#define MLXSW_REG_SFN_REC_LEN 0x10 /* record length */
+#define MLXSW_REG_SFN_REC_MAX_COUNT 64
+#define MLXSW_REG_SFN_LEN (MLXSW_REG_SFN_BASE_LEN + \
+ MLXSW_REG_SFN_REC_LEN * MLXSW_REG_SFN_REC_MAX_COUNT)
+
+static const struct mlxsw_reg_info mlxsw_reg_sfn = {
+ .id = MLXSW_REG_SFN_ID,
+ .len = MLXSW_REG_SFN_LEN,
+};
+
+/* reg_sfn_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfn, swid, 0x00, 24, 8);
+
+/* reg_sfn_num_rec
+ * Request: Number of learned notifications and aged-out notification
+ * records requested.
+ * Response: Number of notification records returned (must be smaller
+ * than or equal to the value requested)
+ * Ranges 0..64
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, sfn, num_rec, 0x04, 0, 8);
+
+static inline void mlxsw_reg_sfn_pack(char *payload)
+{
+ MLXSW_REG_ZERO(sfn, payload);
+ mlxsw_reg_sfn_swid_set(payload, 0);
+ mlxsw_reg_sfn_num_rec_set(payload, MLXSW_REG_SFN_REC_MAX_COUNT);
+}
+
+/* reg_sfn_rec_swid
+ * Switch partition ID.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, rec_swid, MLXSW_REG_SFN_BASE_LEN, 24, 8,
+ MLXSW_REG_SFN_REC_LEN, 0x00, false);
+
+enum mlxsw_reg_sfn_rec_type {
+ /* MAC addresses learned on a regular port. */
+ MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC = 0x5,
+ /* Aged-out MAC address on a regular port */
+ MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC = 0x7,
+};
+
+/* reg_sfn_rec_type
+ * Notification record type.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, rec_type, MLXSW_REG_SFN_BASE_LEN, 20, 4,
+ MLXSW_REG_SFN_REC_LEN, 0x00, false);
+
+/* reg_sfn_rec_mac
+ * MAC address.
+ * Access: RO
+ */
+MLXSW_ITEM_BUF_INDEXED(reg, sfn, rec_mac, MLXSW_REG_SFN_BASE_LEN, 6,
+ MLXSW_REG_SFN_REC_LEN, 0x02);
+
+/* reg_sfn_mac_sub_port
+ * VEPA channel on the local port.
+ * 0 if multichannel VEPA is not enabled.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, mac_sub_port, MLXSW_REG_SFN_BASE_LEN, 16, 8,
+ MLXSW_REG_SFN_REC_LEN, 0x08, false);
+
+/* reg_sfn_mac_fid
+ * Filtering identifier.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, mac_fid, MLXSW_REG_SFN_BASE_LEN, 0, 16,
+ MLXSW_REG_SFN_REC_LEN, 0x08, false);
+
+/* reg_sfn_mac_system_port
+ * Unique port identifier for the final destination of the packet.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, mac_system_port, MLXSW_REG_SFN_BASE_LEN, 0, 16,
+ MLXSW_REG_SFN_REC_LEN, 0x0C, false);
+
+static inline void mlxsw_reg_sfn_mac_unpack(char *payload, int rec_index,
+ char *mac, u16 *p_vid,
+ u8 *p_local_port)
+{
+ mlxsw_reg_sfn_rec_mac_memcpy_from(payload, rec_index, mac);
+ *p_vid = mlxsw_reg_sfn_mac_fid_get(payload, rec_index);
+ *p_local_port = mlxsw_reg_sfn_mac_system_port_get(payload, rec_index);
+}
+
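A hedged sketch (not part of the patch) of how SFN might be polled; the _get accessors are the ones generated by the MLXSW_ITEM*() definitions above, and the helper name is hypothetical:

/* Hypothetical helper: poll SFN and walk learned-MAC notifications. */
static int mlxsw_sp_fdb_notify(struct mlxsw_sp *mlxsw_sp)
{
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return -ENOMEM;
	mlxsw_reg_sfn_pack(sfn_pl);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
	if (err)
		goto out;
	/* The response may carry fewer records than the 64 requested. */
	num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
	for (i = 0; i < num_rec; i++) {
		char mac[ETH_ALEN];
		u16 fid;
		u8 local_port;

		if (mlxsw_reg_sfn_rec_type_get(sfn_pl, i) !=
		    MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC)
			continue;
		mlxsw_reg_sfn_mac_unpack(sfn_pl, i, mac, &fid, &local_port);
		/* ...reflect {mac, fid} on local_port in the software FDB... */
	}
out:
	kfree(sfn_pl);
	return err;
}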
/* SPMS - Switch Port MSTP/RSTP State Register
* -------------------------------------------
* Configures the spanning tree state of a physical port.
*/
-#define MLXSW_REG_SPMS_ID 0x200d
+#define MLXSW_REG_SPMS_ID 0x200D
#define MLXSW_REG_SPMS_LEN 0x404
static const struct mlxsw_reg_info mlxsw_reg_spms = {
@@ -243,20 +540,166 @@ enum mlxsw_reg_spms_state {
*/
MLXSW_ITEM_BIT_ARRAY(reg, spms, state, 0x04, 0x400, 2);
-static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port, u16 vid,
- enum mlxsw_reg_spms_state state)
+static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port)
{
MLXSW_REG_ZERO(spms, payload);
mlxsw_reg_spms_local_port_set(payload, local_port);
+}
+
+static inline void mlxsw_reg_spms_vid_pack(char *payload, u16 vid,
+ enum mlxsw_reg_spms_state state)
+{
mlxsw_reg_spms_state_set(payload, vid, state);
}
+/* SPVID - Switch Port VID
+ * -----------------------
+ * The switch port VID configures the default VID for a port.
+ */
+#define MLXSW_REG_SPVID_ID 0x200E
+#define MLXSW_REG_SPVID_LEN 0x08
+
+static const struct mlxsw_reg_info mlxsw_reg_spvid = {
+ .id = MLXSW_REG_SPVID_ID,
+ .len = MLXSW_REG_SPVID_LEN,
+};
+
+/* reg_spvid_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spvid, local_port, 0x00, 16, 8);
+
+/* reg_spvid_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spvid, sub_port, 0x00, 8, 8);
+
+/* reg_spvid_pvid
+ * Port default VID
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvid, pvid, 0x04, 0, 12);
+
+static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid)
+{
+ MLXSW_REG_ZERO(spvid, payload);
+ mlxsw_reg_spvid_local_port_set(payload, local_port);
+ mlxsw_reg_spvid_pvid_set(payload, pvid);
+}
+
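A minimal sketch (not part of the patch) showing SPVID usage; the helper name is hypothetical and struct mlxsw_sp_port is the per-port structure used by spectrum.c below:

/* Hypothetical helper: set the port's default VID (PVID). */
static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}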
+/* SPVM - Switch Port VLAN Membership
+ * ----------------------------------
+ * The Switch Port VLAN Membership register configures the VLAN membership
+ * of a port in a VLAN denoted by VID. VLAN membership is managed per
+ * virtual port. The register can be used to add and remove VID(s) from a port.
+ */
+#define MLXSW_REG_SPVM_ID 0x200F
+#define MLXSW_REG_SPVM_BASE_LEN 0x04 /* base length, without records */
+#define MLXSW_REG_SPVM_REC_LEN 0x04 /* record length */
+#define MLXSW_REG_SPVM_REC_MAX_COUNT 256
+#define MLXSW_REG_SPVM_LEN (MLXSW_REG_SPVM_BASE_LEN + \
+ MLXSW_REG_SPVM_REC_LEN * MLXSW_REG_SPVM_REC_MAX_COUNT)
+
+static const struct mlxsw_reg_info mlxsw_reg_spvm = {
+ .id = MLXSW_REG_SPVM_ID,
+ .len = MLXSW_REG_SPVM_LEN,
+};
+
+/* reg_spvm_pt
+ * Priority tagged. If this bit is set, packets forwarded to the port with
+ * untagged VLAN membership (u bit is set) will be tagged with priority tag
+ * (VID=0)
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvm, pt, 0x00, 31, 1);
+
+/* reg_spvm_pte
+ * Priority Tagged Update Enable. On Write operations, if this bit is cleared,
+ * the pt bit will NOT be updated. To update the pt bit, pte must be set.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, spvm, pte, 0x00, 30, 1);
+
+/* reg_spvm_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spvm, local_port, 0x00, 16, 8);
+
+/* reg_spvm_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spvm, sub_port, 0x00, 8, 8);
+
+/* reg_spvm_num_rec
+ * Number of records to update. Each record contains: i, e, u, vid.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, spvm, num_rec, 0x00, 0, 8);
+
+/* reg_spvm_rec_i
+ * Ingress membership in VLAN ID.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, spvm, rec_i,
+ MLXSW_REG_SPVM_BASE_LEN, 14, 1,
+ MLXSW_REG_SPVM_REC_LEN, 0, false);
+
+/* reg_spvm_rec_e
+ * Egress membership in VLAN ID.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, spvm, rec_e,
+ MLXSW_REG_SPVM_BASE_LEN, 13, 1,
+ MLXSW_REG_SPVM_REC_LEN, 0, false);
+
+/* reg_spvm_rec_u
+ * Untagged - port is an untagged member - egress transmission uses untagged
+ * frames on VID<n>
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, spvm, rec_u,
+ MLXSW_REG_SPVM_BASE_LEN, 12, 1,
+ MLXSW_REG_SPVM_REC_LEN, 0, false);
+
+/* reg_spvm_rec_vid
+ * VLAN ID to be added/removed from port or for querying.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, spvm, rec_vid,
+ MLXSW_REG_SPVM_BASE_LEN, 0, 12,
+ MLXSW_REG_SPVM_REC_LEN, 0, false);
+
+static inline void mlxsw_reg_spvm_pack(char *payload, u8 local_port,
+ u16 vid_begin, u16 vid_end,
+ bool is_member, bool untagged)
+{
+ int size = vid_end - vid_begin + 1;
+ int i;
+
+ MLXSW_REG_ZERO(spvm, payload);
+ mlxsw_reg_spvm_local_port_set(payload, local_port);
+ mlxsw_reg_spvm_num_rec_set(payload, size);
+
+ for (i = 0; i < size; i++) {
+ mlxsw_reg_spvm_rec_i_set(payload, i, is_member);
+ mlxsw_reg_spvm_rec_e_set(payload, i, is_member);
+ mlxsw_reg_spvm_rec_u_set(payload, i, untagged);
+ mlxsw_reg_spvm_rec_vid_set(payload, i, vid_begin + i);
+ }
+}
+
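As illustration (not part of the patch), a sketch of adding or removing a port from a contiguous VLAN range in one SPVM write; the helper name is hypothetical and the payload is heap-allocated because SPVM can carry up to 256 records:

/* Hypothetical helper: update VLAN membership for vid_begin..vid_end. */
static int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  u16 vid_begin, u16 vid_end,
				  bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;
	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}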
/* SFGC - Switch Flooding Group Configuration
* ------------------------------------------
* The following register controls the association of flooding tables and MIDs
* to packet types used for flooding.
*/
-#define MLXSW_REG_SFGC_ID 0x2011
+#define MLXSW_REG_SFGC_ID 0x2011
#define MLXSW_REG_SFGC_LEN 0x10
static const struct mlxsw_reg_info mlxsw_reg_sfgc = {
@@ -265,13 +708,15 @@ static const struct mlxsw_reg_info mlxsw_reg_sfgc = {
};
enum mlxsw_reg_sfgc_type {
- MLXSW_REG_SFGC_TYPE_BROADCAST = 0,
- MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST = 1,
- MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4 = 2,
- MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6 = 3,
- MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP = 5,
- MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL = 6,
- MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST = 7,
+ MLXSW_REG_SFGC_TYPE_BROADCAST,
+ MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
+ MLXSW_REG_SFGC_TYPE_RESERVED,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
+ MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL,
+ MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST,
+ MLXSW_REG_SFGC_TYPE_MAX,
};
/* reg_sfgc_type
@@ -408,7 +853,7 @@ static inline void mlxsw_reg_sftr_pack(char *payload,
unsigned int flood_table,
unsigned int index,
enum mlxsw_flood_table_type table_type,
- unsigned int range)
+ unsigned int range, u8 port, bool set)
{
MLXSW_REG_ZERO(sftr, payload);
mlxsw_reg_sftr_swid_set(payload, 0);
@@ -416,8 +861,8 @@ static inline void mlxsw_reg_sftr_pack(char *payload,
mlxsw_reg_sftr_index_set(payload, index);
mlxsw_reg_sftr_table_type_set(payload, table_type);
mlxsw_reg_sftr_range_set(payload, range);
- mlxsw_reg_sftr_port_set(payload, MLXSW_PORT_CPU_PORT, 1);
- mlxsw_reg_sftr_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1);
+ mlxsw_reg_sftr_port_set(payload, port, set);
+ mlxsw_reg_sftr_port_mask_set(payload, port, 1);
}
/* SPMLR - Switch Port MAC Learning Register
@@ -473,6 +918,285 @@ static inline void mlxsw_reg_spmlr_pack(char *payload, u8 local_port,
mlxsw_reg_spmlr_learn_mode_set(payload, mode);
}
+/* SVFA - Switch VID to FID Allocation Register
+ * --------------------------------------------
+ * Controls the VID to FID mapping and {Port, VID} to FID mapping for
+ * virtualized ports.
+ */
+#define MLXSW_REG_SVFA_ID 0x201C
+#define MLXSW_REG_SVFA_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_svfa = {
+ .id = MLXSW_REG_SVFA_ID,
+ .len = MLXSW_REG_SVFA_LEN,
+};
+
+/* reg_svfa_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, svfa, swid, 0x00, 24, 8);
+
+/* reg_svfa_local_port
+ * Local port number.
+ * Access: Index
+ *
+ * Note: Reserved for 802.1Q FIDs.
+ */
+MLXSW_ITEM32(reg, svfa, local_port, 0x00, 16, 8);
+
+enum mlxsw_reg_svfa_mt {
+ MLXSW_REG_SVFA_MT_VID_TO_FID,
+ MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
+};
+
+/* reg_svfa_mapping_table
+ * Mapping table:
+ * 0 - VID to FID
+ * 1 - {Port, VID} to FID
+ * Access: Index
+ *
+ * Note: Reserved for SwitchX-2.
+ */
+MLXSW_ITEM32(reg, svfa, mapping_table, 0x00, 8, 3);
+
+/* reg_svfa_v
+ * Valid.
+ * Valid if set.
+ * Access: RW
+ *
+ * Note: Reserved for SwitchX-2.
+ */
+MLXSW_ITEM32(reg, svfa, v, 0x00, 0, 1);
+
+/* reg_svfa_fid
+ * Filtering ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, svfa, fid, 0x04, 16, 16);
+
+/* reg_svfa_vid
+ * VLAN ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, svfa, vid, 0x04, 0, 12);
+
+/* reg_svfa_counter_set_type
+ * Counter set type for flow counters.
+ * Access: RW
+ *
+ * Note: Reserved for SwitchX-2.
+ */
+MLXSW_ITEM32(reg, svfa, counter_set_type, 0x08, 24, 8);
+
+/* reg_svfa_counter_index
+ * Counter index for flow counters.
+ * Access: RW
+ *
+ * Note: Reserved for SwitchX-2.
+ */
+MLXSW_ITEM32(reg, svfa, counter_index, 0x08, 0, 24);
+
+static inline void mlxsw_reg_svfa_pack(char *payload, u8 local_port,
+ enum mlxsw_reg_svfa_mt mt, bool valid,
+ u16 fid, u16 vid)
+{
+ MLXSW_REG_ZERO(svfa, payload);
+ local_port = mt == MLXSW_REG_SVFA_MT_VID_TO_FID ? 0 : local_port;
+ mlxsw_reg_svfa_swid_set(payload, 0);
+ mlxsw_reg_svfa_local_port_set(payload, local_port);
+ mlxsw_reg_svfa_mapping_table_set(payload, mt);
+ mlxsw_reg_svfa_v_set(payload, valid);
+ mlxsw_reg_svfa_fid_set(payload, fid);
+ mlxsw_reg_svfa_vid_set(payload, vid);
+}
+
+/* SVPE - Switch Virtual-Port Enabling Register
+ * --------------------------------------------
+ * Enables port virtualization.
+ */
+#define MLXSW_REG_SVPE_ID 0x201E
+#define MLXSW_REG_SVPE_LEN 0x4
+
+static const struct mlxsw_reg_info mlxsw_reg_svpe = {
+ .id = MLXSW_REG_SVPE_ID,
+ .len = MLXSW_REG_SVPE_LEN,
+};
+
+/* reg_svpe_local_port
+ * Local port number
+ * Access: Index
+ *
+ * Note: CPU port is not supported (uses VLAN mode only).
+ */
+MLXSW_ITEM32(reg, svpe, local_port, 0x00, 16, 8);
+
+/* reg_svpe_vp_en
+ * Virtual port enable.
+ * 0 - Disable, VLAN mode (VID to FID).
+ * 1 - Enable, Virtual port mode ({Port, VID} to FID).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, svpe, vp_en, 0x00, 8, 1);
+
+static inline void mlxsw_reg_svpe_pack(char *payload, u8 local_port,
+ bool enable)
+{
+ MLXSW_REG_ZERO(svpe, payload);
+ mlxsw_reg_svpe_local_port_set(payload, local_port);
+ mlxsw_reg_svpe_vp_en_set(payload, enable);
+}
+
+/* SFMR - Switch FID Management Register
+ * -------------------------------------
+ * Creates and configures FIDs.
+ */
+#define MLXSW_REG_SFMR_ID 0x201F
+#define MLXSW_REG_SFMR_LEN 0x18
+
+static const struct mlxsw_reg_info mlxsw_reg_sfmr = {
+ .id = MLXSW_REG_SFMR_ID,
+ .len = MLXSW_REG_SFMR_LEN,
+};
+
+enum mlxsw_reg_sfmr_op {
+ MLXSW_REG_SFMR_OP_CREATE_FID,
+ MLXSW_REG_SFMR_OP_DESTROY_FID,
+};
+
+/* reg_sfmr_op
+ * Operation.
+ * 0 - Create or edit FID.
+ * 1 - Destroy FID.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, sfmr, op, 0x00, 24, 4);
+
+/* reg_sfmr_fid
+ * Filtering ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfmr, fid, 0x00, 0, 16);
+
+/* reg_sfmr_fid_offset
+ * FID offset.
+ * Used to point into the flooding table selected by SFGC register if
+ * the table is of type FID-Offset. Otherwise, this field is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfmr, fid_offset, 0x08, 0, 16);
+
+/* reg_sfmr_vtfp
+ * Valid Tunnel Flood Pointer.
+ * If not set, then nve_tunnel_flood_ptr is reserved and considered NULL.
+ * Access: RW
+ *
+ * Note: Reserved for 802.1Q FIDs.
+ */
+MLXSW_ITEM32(reg, sfmr, vtfp, 0x0C, 31, 1);
+
+/* reg_sfmr_nve_tunnel_flood_ptr
+ * Underlay Flooding and BC Pointer.
+ * Used as a pointer to the first entry of the group based link lists of
+ * flooding or BC entries (for NVE tunnels).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfmr, nve_tunnel_flood_ptr, 0x0C, 0, 24);
+
+/* reg_sfmr_vv
+ * VNI Valid.
+ * If not set, then vni is reserved.
+ * Access: RW
+ *
+ * Note: Reserved for 802.1Q FIDs.
+ */
+MLXSW_ITEM32(reg, sfmr, vv, 0x10, 31, 1);
+
+/* reg_sfmr_vni
+ * Virtual Network Identifier.
+ * Access: RW
+ *
+ * Note: A given VNI can only be assigned to one FID.
+ */
+MLXSW_ITEM32(reg, sfmr, vni, 0x10, 0, 24);
+
+static inline void mlxsw_reg_sfmr_pack(char *payload,
+ enum mlxsw_reg_sfmr_op op, u16 fid,
+ u16 fid_offset)
+{
+ MLXSW_REG_ZERO(sfmr, payload);
+ mlxsw_reg_sfmr_op_set(payload, op);
+ mlxsw_reg_sfmr_fid_set(payload, fid);
+ mlxsw_reg_sfmr_fid_offset_set(payload, fid_offset);
+ mlxsw_reg_sfmr_vtfp_set(payload, false);
+ mlxsw_reg_sfmr_vv_set(payload, false);
+}
+
+/* SPVMLR - Switch Port VLAN MAC Learning Register
+ * -----------------------------------------------
+ * Controls the switch MAC learning policy per {Port, VID}.
+ */
+#define MLXSW_REG_SPVMLR_ID 0x2020
+#define MLXSW_REG_SPVMLR_BASE_LEN 0x04 /* base length, without records */
+#define MLXSW_REG_SPVMLR_REC_LEN 0x04 /* record length */
+#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 256
+#define MLXSW_REG_SPVMLR_LEN (MLXSW_REG_SPVMLR_BASE_LEN + \
+ MLXSW_REG_SPVMLR_REC_LEN * \
+ MLXSW_REG_SPVMLR_REC_MAX_COUNT)
+
+static const struct mlxsw_reg_info mlxsw_reg_spvmlr = {
+ .id = MLXSW_REG_SPVMLR_ID,
+ .len = MLXSW_REG_SPVMLR_LEN,
+};
+
+/* reg_spvmlr_local_port
+ * Local ingress port.
+ * Access: Index
+ *
+ * Note: CPU port is not supported.
+ */
+MLXSW_ITEM32(reg, spvmlr, local_port, 0x00, 16, 8);
+
+/* reg_spvmlr_num_rec
+ * Number of records to update.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, spvmlr, num_rec, 0x00, 0, 8);
+
+/* reg_spvmlr_rec_learn_enable
+ * 0 - Disable learning for {Port, VID}.
+ * 1 - Enable learning for {Port, VID}.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_learn_enable, MLXSW_REG_SPVMLR_BASE_LEN,
+ 31, 1, MLXSW_REG_SPVMLR_REC_LEN, 0x00, false);
+
+/* reg_spvmlr_rec_vid
+ * VLAN ID to be added/removed from port or for querying.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_vid, MLXSW_REG_SPVMLR_BASE_LEN, 0, 12,
+ MLXSW_REG_SPVMLR_REC_LEN, 0x00, false);
+
+static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port,
+ u16 vid_begin, u16 vid_end,
+ bool learn_enable)
+{
+ int num_rec = vid_end - vid_begin + 1;
+ int i;
+
+ WARN_ON(num_rec < 1 || num_rec > MLXSW_REG_SPVMLR_REC_MAX_COUNT);
+
+ MLXSW_REG_ZERO(spvmlr, payload);
+ mlxsw_reg_spvmlr_local_port_set(payload, local_port);
+ mlxsw_reg_spvmlr_num_rec_set(payload, num_rec);
+
+ for (i = 0; i < num_rec; i++) {
+ mlxsw_reg_spvmlr_rec_learn_enable_set(payload, i, learn_enable);
+ mlxsw_reg_spvmlr_rec_vid_set(payload, i, vid_begin + i);
+ }
+}
+
/* PMLP - Ports Module to Local Port Register
* ------------------------------------------
* Configures the assignment of modules to local ports.
@@ -1008,12 +1732,88 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port)
mlxsw_reg_ppcnt_prio_tc_set(payload, 0);
}
+/* PBMC - Port Buffer Management Control Register
+ * ----------------------------------------------
+ * The PBMC register configures and retrieves the port packet buffer
+ * allocation for different Prios, and the Pause threshold management.
+ */
+#define MLXSW_REG_PBMC_ID 0x500C
+#define MLXSW_REG_PBMC_LEN 0x68
+
+static const struct mlxsw_reg_info mlxsw_reg_pbmc = {
+ .id = MLXSW_REG_PBMC_ID,
+ .len = MLXSW_REG_PBMC_LEN,
+};
+
+/* reg_pbmc_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pbmc, local_port, 0x00, 16, 8);
+
+/* reg_pbmc_xoff_timer_value
+ * When device generates a pause frame, it uses this value as the pause
+ * timer (time for the peer port to pause in quota-512 bit time).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pbmc, xoff_timer_value, 0x04, 16, 16);
+
+/* reg_pbmc_xoff_refresh
+ * The time before a new pause frame should be sent to refresh the pause
+ * state. Using the same units as xoff_timer_value above (in quota-512 bit
+ * time).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pbmc, xoff_refresh, 0x04, 0, 16);
+
+/* reg_pbmc_buf_lossy
+ * The field indicates if the buffer is lossy.
+ * 0 - Lossless
+ * 1 - Lossy
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pbmc, buf_lossy, 0x0C, 25, 1, 0x08, 0x00, false);
+
+/* reg_pbmc_buf_epsb
+ * Eligible for Port Shared buffer.
+ * If epsb is set, packets assigned to the buffer are allowed to be inserted
+ * into the port shared buffer.
+ * When buf_lossy is MLXSW_REG_PBMC_LOSSY_LOSSY this field is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pbmc, buf_epsb, 0x0C, 24, 1, 0x08, 0x00, false);
+
+/* reg_pbmc_buf_size
+ * The part of the packet buffer array allocated for the specific buffer.
+ * Units are represented in cells.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pbmc, buf_size, 0x0C, 0, 16, 0x08, 0x00, false);
+
+static inline void mlxsw_reg_pbmc_pack(char *payload, u8 local_port,
+ u16 xoff_timer_value, u16 xoff_refresh)
+{
+ MLXSW_REG_ZERO(pbmc, payload);
+ mlxsw_reg_pbmc_local_port_set(payload, local_port);
+ mlxsw_reg_pbmc_xoff_timer_value_set(payload, xoff_timer_value);
+ mlxsw_reg_pbmc_xoff_refresh_set(payload, xoff_refresh);
+}
+
+static inline void mlxsw_reg_pbmc_lossy_buffer_pack(char *payload,
+ int buf_index,
+ u16 size)
+{
+ mlxsw_reg_pbmc_buf_lossy_set(payload, buf_index, 1);
+ mlxsw_reg_pbmc_buf_epsb_set(payload, buf_index, 0);
+ mlxsw_reg_pbmc_buf_size_set(payload, buf_index, size);
+}
+
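A hedged sketch (not part of the patch) of a PBMC read-modify-write: query the current headroom configuration, resize one buffer as lossy, and write back. The helper name and the choice of buffer are hypothetical:

/* Hypothetical helper: configure one headroom buffer as lossy. */
static int mlxsw_sp_port_lossy_buf_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       int buf_index, u16 size_cells)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int err;

	/* Query first so buffers we do not touch keep their settings. */
	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;
	mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, buf_index, size_cells);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}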
/* PSPA - Port Switch Partition Allocation
* ---------------------------------------
* Controls the association of a port with a switch partition and enables
* configuring ports as stacking ports.
*/
-#define MLXSW_REG_PSPA_ID 0x500d
+#define MLXSW_REG_PSPA_ID 0x500D
#define MLXSW_REG_PSPA_LEN 0x8
static const struct mlxsw_reg_info mlxsw_reg_pspa = {
@@ -1074,8 +1874,11 @@ MLXSW_ITEM32(reg, htgt, swid, 0x00, 24, 8);
*/
MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4);
-#define MLXSW_REG_HTGT_TRAP_GROUP_EMAD 0x0
-#define MLXSW_REG_HTGT_TRAP_GROUP_RX 0x1
+enum mlxsw_reg_htgt_trap_group {
+ MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+ MLXSW_REG_HTGT_TRAP_GROUP_RX,
+ MLXSW_REG_HTGT_TRAP_GROUP_CTRL,
+};
/* reg_htgt_trap_group
* Trap group number. User defined number specifying which trap groups
@@ -1142,6 +1945,7 @@ MLXSW_ITEM32(reg, htgt, local_path_cpu_tclass, 0x10, 16, 6);
#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD 0x15
#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX 0x14
+#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_CTRL 0x13
/* reg_htgt_local_path_rdq
* Receive descriptor queue (RDQ) to use for the trap group.
@@ -1149,21 +1953,29 @@ MLXSW_ITEM32(reg, htgt, local_path_cpu_tclass, 0x10, 16, 6);
*/
MLXSW_ITEM32(reg, htgt, local_path_rdq, 0x10, 0, 6);
-static inline void mlxsw_reg_htgt_pack(char *payload, u8 trap_group)
+static inline void mlxsw_reg_htgt_pack(char *payload,
+ enum mlxsw_reg_htgt_trap_group group)
{
u8 swid, rdq;
MLXSW_REG_ZERO(htgt, payload);
- if (MLXSW_REG_HTGT_TRAP_GROUP_EMAD == trap_group) {
+ switch (group) {
+ case MLXSW_REG_HTGT_TRAP_GROUP_EMAD:
swid = MLXSW_PORT_SWID_ALL_SWIDS;
rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD;
- } else {
+ break;
+ case MLXSW_REG_HTGT_TRAP_GROUP_RX:
swid = 0;
rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX;
+ break;
+ case MLXSW_REG_HTGT_TRAP_GROUP_CTRL:
+ swid = 0;
+ rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_CTRL;
+ break;
}
mlxsw_reg_htgt_swid_set(payload, swid);
mlxsw_reg_htgt_type_set(payload, MLXSW_REG_HTGT_PATH_TYPE_LOCAL);
- mlxsw_reg_htgt_trap_group_set(payload, trap_group);
+ mlxsw_reg_htgt_trap_group_set(payload, group);
mlxsw_reg_htgt_pide_set(payload, MLXSW_REG_HTGT_POLICER_DISABLE);
mlxsw_reg_htgt_pid_set(payload, 0);
mlxsw_reg_htgt_mirror_action_set(payload, MLXSW_REG_HTGT_TRAP_TO_CPU);
@@ -1254,17 +2066,290 @@ enum {
*/
MLXSW_ITEM32(reg, hpkt, ctrl, 0x04, 16, 2);
-static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action,
- u8 trap_group, u16 trap_id)
+static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, u16 trap_id)
{
+ enum mlxsw_reg_htgt_trap_group trap_group;
+
MLXSW_REG_ZERO(hpkt, payload);
mlxsw_reg_hpkt_ack_set(payload, MLXSW_REG_HPKT_ACK_NOT_REQUIRED);
mlxsw_reg_hpkt_action_set(payload, action);
+ switch (trap_id) {
+ case MLXSW_TRAP_ID_ETHEMAD:
+ case MLXSW_TRAP_ID_PUDE:
+ trap_group = MLXSW_REG_HTGT_TRAP_GROUP_EMAD;
+ break;
+ default:
+ trap_group = MLXSW_REG_HTGT_TRAP_GROUP_RX;
+ break;
+ }
mlxsw_reg_hpkt_trap_group_set(payload, trap_group);
mlxsw_reg_hpkt_trap_id_set(payload, trap_id);
mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT);
}
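With the reworked mlxsw_reg_hpkt_pack() the trap group is derived from the trap ID, so a caller only supplies the action and trap ID. A hypothetical sketch (not part of the patch), assuming the HPKT action enum and MLXSW_REG_HPKT_LEN defined earlier in this file:

/* Hypothetical helper: set the host action for a single trap ID. */
static int mlxsw_sp_trap_action_set(struct mlxsw_sp *mlxsw_sp, u8 action,
				    u16 trap_id)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	mlxsw_reg_hpkt_pack(hpkt_pl, action, trap_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
}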
+/* SBPR - Shared Buffer Pools Register
+ * -----------------------------------
+ * The SBPR configures and retrieves the shared buffer pools and configuration.
+ */
+#define MLXSW_REG_SBPR_ID 0xB001
+#define MLXSW_REG_SBPR_LEN 0x14
+
+static const struct mlxsw_reg_info mlxsw_reg_sbpr = {
+ .id = MLXSW_REG_SBPR_ID,
+ .len = MLXSW_REG_SBPR_LEN,
+};
+
+enum mlxsw_reg_sbpr_dir {
+ MLXSW_REG_SBPR_DIR_INGRESS,
+ MLXSW_REG_SBPR_DIR_EGRESS,
+};
+
+/* reg_sbpr_dir
+ * Direction.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpr, dir, 0x00, 24, 2);
+
+/* reg_sbpr_pool
+ * Pool index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpr, pool, 0x00, 0, 4);
+
+/* reg_sbpr_size
+ * Pool size in buffer cells.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbpr, size, 0x04, 0, 24);
+
+enum mlxsw_reg_sbpr_mode {
+ MLXSW_REG_SBPR_MODE_STATIC,
+ MLXSW_REG_SBPR_MODE_DYNAMIC,
+};
+
+/* reg_sbpr_mode
+ * Pool quota calculation mode.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbpr, mode, 0x08, 0, 4);
+
+static inline void mlxsw_reg_sbpr_pack(char *payload, u8 pool,
+ enum mlxsw_reg_sbpr_dir dir,
+ enum mlxsw_reg_sbpr_mode mode, u32 size)
+{
+ MLXSW_REG_ZERO(sbpr, payload);
+ mlxsw_reg_sbpr_pool_set(payload, pool);
+ mlxsw_reg_sbpr_dir_set(payload, dir);
+ mlxsw_reg_sbpr_mode_set(payload, mode);
+ mlxsw_reg_sbpr_size_set(payload, size);
+}
+
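A minimal sketch (not part of the patch) of sizing a shared buffer pool; pool index, direction, mode and the helper name are illustrative choices:

/* Hypothetical helper: size an ingress shared buffer pool, dynamic mode. */
static int mlxsw_sp_sb_pool_set(struct mlxsw_sp *mlxsw_sp, u8 pool,
				u32 size_cells)
{
	char sbpr_pl[MLXSW_REG_SBPR_LEN];

	mlxsw_reg_sbpr_pack(sbpr_pl, pool, MLXSW_REG_SBPR_DIR_INGRESS,
			    MLXSW_REG_SBPR_MODE_DYNAMIC, size_cells);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
}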
+/* SBCM - Shared Buffer Class Management Register
+ * ----------------------------------------------
+ * The SBCM register configures and retrieves the shared buffer allocation
+ * and configuration according to Port-PG, including the binding to pool
+ * and definition of the associated quota.
+ */
+#define MLXSW_REG_SBCM_ID 0xB002
+#define MLXSW_REG_SBCM_LEN 0x28
+
+static const struct mlxsw_reg_info mlxsw_reg_sbcm = {
+ .id = MLXSW_REG_SBCM_ID,
+ .len = MLXSW_REG_SBCM_LEN,
+};
+
+/* reg_sbcm_local_port
+ * Local port number.
+ * For Ingress: excludes CPU port and Router port
+ * For Egress: excludes IP Router
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbcm, local_port, 0x00, 16, 8);
+
+/* reg_sbcm_pg_buff
+ * PG buffer - Port PG (dir=ingress) / traffic class (dir=egress)
+ * For PG buffer: range is 0..cap_max_pg_buffers - 1
+ * For traffic class: range is 0..cap_max_tclass - 1
+ * Note that when traffic class is in MC aware mode then the traffic
+ * classes which are MC aware cannot be configured.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbcm, pg_buff, 0x00, 8, 6);
+
+enum mlxsw_reg_sbcm_dir {
+ MLXSW_REG_SBCM_DIR_INGRESS,
+ MLXSW_REG_SBCM_DIR_EGRESS,
+};
+
+/* reg_sbcm_dir
+ * Direction.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbcm, dir, 0x00, 0, 2);
+
+/* reg_sbcm_min_buff
+ * Minimum buffer size for the limiter, in cells.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbcm, min_buff, 0x18, 0, 24);
+
+/* reg_sbcm_max_buff
+ * When the pool associated with the port-pg/tclass is configured as
+ * static, max_buff is the maximum buffer size for the limiter, in cells.
+ * When the pool associated with the port-pg/tclass is configured as
+ * dynamic, max_buff holds the "alpha" parameter, supporting
+ * the following values:
+ * 0: 0
+ * i: (1/128)*2^(i-1), for i=1..14
+ * 0xFF: Infinity
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbcm, max_buff, 0x1C, 0, 24);
+
+/* reg_sbcm_pool
+ * Association of the port-priority to a pool.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbcm, pool, 0x24, 0, 4);
+
+static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff,
+ enum mlxsw_reg_sbcm_dir dir,
+ u32 min_buff, u32 max_buff, u8 pool)
+{
+ MLXSW_REG_ZERO(sbcm, payload);
+ mlxsw_reg_sbcm_local_port_set(payload, local_port);
+ mlxsw_reg_sbcm_pg_buff_set(payload, pg_buff);
+ mlxsw_reg_sbcm_dir_set(payload, dir);
+ mlxsw_reg_sbcm_min_buff_set(payload, min_buff);
+ mlxsw_reg_sbcm_max_buff_set(payload, max_buff);
+ mlxsw_reg_sbcm_pool_set(payload, pool);
+}
+
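As illustration (not part of the patch), a sketch binding ingress PG buffer 0 of a port to pool 0 with a dynamic threshold; for a dynamic pool max_buff encodes alpha, e.g. max_buff = 8 gives alpha = (1/128) * 2^7 = 1. The helper name and values are hypothetical:

/* Hypothetical helper: bind PG buffer 0 to pool 0 with alpha = 1. */
static int mlxsw_sp_port_sb_pg_bind(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sbcm_pl[MLXSW_REG_SBCM_LEN];

	mlxsw_reg_sbcm_pack(sbcm_pl, mlxsw_sp_port->local_port, 0 /* pg_buff */,
			    MLXSW_REG_SBCM_DIR_INGRESS, 0 /* min_buff */,
			    8 /* alpha = 1 */, 0 /* pool */);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
}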
+/* SBPM - Shared Buffer Class Management Register
+ * ----------------------------------------------
+ * The SBPM register configures and retrieves the shared buffer allocation
+ * and configuration according to Port-Pool, including the definition
+ * of the associated quota.
+ */
+#define MLXSW_REG_SBPM_ID 0xB003
+#define MLXSW_REG_SBPM_LEN 0x28
+
+static const struct mlxsw_reg_info mlxsw_reg_sbpm = {
+ .id = MLXSW_REG_SBPM_ID,
+ .len = MLXSW_REG_SBPM_LEN,
+};
+
+/* reg_sbpm_local_port
+ * Local port number.
+ * For Ingress: excludes CPU port and Router port
+ * For Egress: excludes IP Router
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpm, local_port, 0x00, 16, 8);
+
+/* reg_sbpm_pool
+ * The pool associated to quota counting on the local_port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpm, pool, 0x00, 8, 4);
+
+enum mlxsw_reg_sbpm_dir {
+ MLXSW_REG_SBPM_DIR_INGRESS,
+ MLXSW_REG_SBPM_DIR_EGRESS,
+};
+
+/* reg_sbpm_dir
+ * Direction.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpm, dir, 0x00, 0, 2);
+
+/* reg_sbpm_min_buff
+ * Minimum buffer size for the limiter, in cells.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbpm, min_buff, 0x18, 0, 24);
+
+/* reg_sbpm_max_buff
+ * When the pool associated with the port-pg/tclass is configured as
+ * static, max_buff is the maximum buffer size for the limiter, in cells.
+ * When the pool associated with the port-pg/tclass is configured as
+ * dynamic, max_buff holds the "alpha" parameter, supporting
+ * the following values:
+ * 0: 0
+ * i: (1/128)*2^(i-1), for i=1..14
+ * 0xFF: Infinity
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbpm, max_buff, 0x1C, 0, 24);
+
+static inline void mlxsw_reg_sbpm_pack(char *payload, u8 local_port, u8 pool,
+ enum mlxsw_reg_sbpm_dir dir,
+ u32 min_buff, u32 max_buff)
+{
+ MLXSW_REG_ZERO(sbpm, payload);
+ mlxsw_reg_sbpm_local_port_set(payload, local_port);
+ mlxsw_reg_sbpm_pool_set(payload, pool);
+ mlxsw_reg_sbpm_dir_set(payload, dir);
+ mlxsw_reg_sbpm_min_buff_set(payload, min_buff);
+ mlxsw_reg_sbpm_max_buff_set(payload, max_buff);
+}
+
+/* SBMM - Shared Buffer Multicast Management Register
+ * --------------------------------------------------
+ * The SBMM register configures and retrieves the shared buffer allocation
+ * and configuration for MC packets according to Switch-Priority, including
+ * the binding to pool and definition of the associated quota.
+ */
+#define MLXSW_REG_SBMM_ID 0xB004
+#define MLXSW_REG_SBMM_LEN 0x28
+
+static const struct mlxsw_reg_info mlxsw_reg_sbmm = {
+ .id = MLXSW_REG_SBMM_ID,
+ .len = MLXSW_REG_SBMM_LEN,
+};
+
+/* reg_sbmm_prio
+ * Switch Priority.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbmm, prio, 0x00, 8, 4);
+
+/* reg_sbmm_min_buff
+ * Minimum buffer size for the limiter, in cells.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbmm, min_buff, 0x18, 0, 24);
+
+/* reg_sbmm_max_buff
+ * When the pool associated with the port-pg/tclass is configured as
+ * static, max_buff is the maximum buffer size for the limiter, in cells.
+ * When the pool associated with the port-pg/tclass is configured as
+ * dynamic, max_buff holds the "alpha" parameter, supporting
+ * the following values:
+ * 0: 0
+ * i: (1/128)*2^(i-1), for i=1..14
+ * 0xFF: Infinity
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbmm, max_buff, 0x1C, 0, 24);
+
+/* reg_sbmm_pool
+ * Association of the port-priority to a pool.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbmm, pool, 0x24, 0, 4);
+
+static inline void mlxsw_reg_sbmm_pack(char *payload, u8 prio, u32 min_buff,
+ u32 max_buff, u8 pool)
+{
+ MLXSW_REG_ZERO(sbmm, payload);
+ mlxsw_reg_sbmm_prio_set(payload, prio);
+ mlxsw_reg_sbmm_min_buff_set(payload, min_buff);
+ mlxsw_reg_sbmm_max_buff_set(payload, max_buff);
+ mlxsw_reg_sbmm_pool_set(payload, pool);
+}
+
static inline const char *mlxsw_reg_id_str(u16 reg_id)
{
switch (reg_id) {
@@ -1272,18 +2357,34 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
return "SGCR";
case MLXSW_REG_SPAD_ID:
return "SPAD";
- case MLXSW_REG_SMID_ID:
- return "SMID";
case MLXSW_REG_SSPR_ID:
return "SSPR";
+ case MLXSW_REG_SFDAT_ID:
+ return "SFDAT";
+ case MLXSW_REG_SFD_ID:
+ return "SFD";
+ case MLXSW_REG_SFN_ID:
+ return "SFN";
case MLXSW_REG_SPMS_ID:
return "SPMS";
+ case MLXSW_REG_SPVID_ID:
+ return "SPVID";
+ case MLXSW_REG_SPVM_ID:
+ return "SPVM";
case MLXSW_REG_SFGC_ID:
return "SFGC";
case MLXSW_REG_SFTR_ID:
return "SFTR";
case MLXSW_REG_SPMLR_ID:
return "SPMLR";
+ case MLXSW_REG_SVFA_ID:
+ return "SVFA";
+ case MLXSW_REG_SVPE_ID:
+ return "SVPE";
+ case MLXSW_REG_SFMR_ID:
+ return "SFMR";
+ case MLXSW_REG_SPVMLR_ID:
+ return "SPVMLR";
case MLXSW_REG_PMLP_ID:
return "PMLP";
case MLXSW_REG_PMTU_ID:
@@ -1296,12 +2397,22 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
return "PAOS";
case MLXSW_REG_PPCNT_ID:
return "PPCNT";
+ case MLXSW_REG_PBMC_ID:
+ return "PBMC";
case MLXSW_REG_PSPA_ID:
return "PSPA";
case MLXSW_REG_HTGT_ID:
return "HTGT";
case MLXSW_REG_HPKT_ID:
return "HPKT";
+ case MLXSW_REG_SBPR_ID:
+ return "SBPR";
+ case MLXSW_REG_SBCM_ID:
+ return "SBCM";
+ case MLXSW_REG_SBPM_ID:
+ return "SBPM";
+ case MLXSW_REG_SBMM_ID:
+ return "SBMM";
default:
return "*UNKNOWN*";
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
new file mode 100644
index 000000000000..3be4a2355ead
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -0,0 +1,1949 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/bitops.h>
+#include <net/switchdev.h>
+#include <generated/utsrelease.h>
+
+#include "spectrum.h"
+#include "core.h"
+#include "reg.h"
+#include "port.h"
+#include "trap.h"
+#include "txheader.h"
+
+static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
+static const char mlxsw_sp_driver_version[] = "1.0";
+
+/* tx_hdr_version
+ * Tx header version.
+ * Must be set to 1.
+ */
+MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
+
+/* tx_hdr_ctl
+ * Packet control type.
+ * 0 - Ethernet control (e.g. EMADs, LACP)
+ * 1 - Ethernet data
+ */
+MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
+
+/* tx_hdr_proto
+ * Packet protocol type. Must be set to 1 (Ethernet).
+ */
+MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
+
+/* tx_hdr_rx_is_router
+ * Packet is sent from the router. Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
+
+/* tx_hdr_fid_valid
+ * Indicates if the 'fid' field is valid and should be used for
+ * forwarding lookup. Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
+
+/* tx_hdr_swid
+ * Switch partition ID. Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
+
+/* tx_hdr_control_tclass
+ * Indicates if the packet should use the control TClass and not one
+ * of the data TClasses.
+ */
+MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
+
+/* tx_hdr_etclass
+ * Egress TClass to be used on the egress device on the egress port.
+ */
+MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
+
+/* tx_hdr_port_mid
+ * Destination local port for unicast packets.
+ * Destination multicast ID for multicast packets.
+ *
+ * Control packets are directed to a specific egress port, while data
+ * packets are transmitted through the CPU port (0) into the switch partition,
+ * where forwarding rules are applied.
+ */
+MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
+
+/* tx_hdr_fid
+ * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
+ * set, otherwise calculated based on the packet's VID using VID to FID mapping.
+ * Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
+
+/* tx_hdr_type
+ * 0 - Data packets
+ * 6 - Control packets
+ */
+MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
+
+static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
+
+ memset(txhdr, 0, MLXSW_TXHDR_LEN);
+
+ mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
+ mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
+ mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
+ mlxsw_tx_hdr_swid_set(txhdr, 0);
+ mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
+ mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
+ mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
+}
+
+static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
+{
+ char spad_pl[MLXSW_REG_SPAD_LEN];
+ int err;
+
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
+ if (err)
+ return err;
+ mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
+ return 0;
+}
+
+static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool is_up)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char paos_pl[MLXSW_REG_PAOS_LEN];
+
+ mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
+ is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
+ MLXSW_PORT_ADMIN_STATUS_DOWN);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
+}
+
+static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool *p_is_up)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char paos_pl[MLXSW_REG_PAOS_LEN];
+ u8 oper_status;
+ int err;
+
+ mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
+ if (err)
+ return err;
+ oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
+ *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
+ return 0;
+}
+
+static int mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
+{
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
+ int err;
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID,
+ MLXSW_SP_VFID_BASE + vfid, 0);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+
+ if (err)
+ return err;
+
+ set_bit(vfid, mlxsw_sp->active_vfids);
+ return 0;
+}
+
+static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
+{
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
+
+ clear_bit(vfid, mlxsw_sp->active_vfids);
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
+ MLXSW_SP_VFID_BASE + vfid, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+}
+
+static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ unsigned char *addr)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char ppad_pl[MLXSW_REG_PPAD_LEN];
+
+ mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
+ mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
+}
+
+static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
+
+ ether_addr_copy(addr, mlxsw_sp->base_mac);
+ addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
+ return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
+}
+
+static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid, enum mlxsw_reg_spms_state state)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char *spms_pl;
+ int err;
+
+ spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+ if (!spms_pl)
+ return -ENOMEM;
+ mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
+ mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
+ kfree(spms_pl);
+ return err;
+}
+
+static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char pmtu_pl[MLXSW_REG_PMTU_LEN];
+ int max_mtu;
+ int err;
+
+ mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
+ mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
+ if (err)
+ return err;
+ max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
+
+ if (mtu > max_mtu)
+ return -EINVAL;
+
+ mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
+}
+
+static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char pspa_pl[MLXSW_REG_PSPA_LEN];
+
+ mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
+}
+
+static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char svpe_pl[MLXSW_REG_SVPE_LEN];
+
+ mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
+}
+
+int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+
+ mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
+ fid, vid);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid, bool learn_enable)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char *spvmlr_pl;
+ int err;
+
+ spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
+ if (!spvmlr_pl)
+ return -ENOMEM;
+ mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
+ learn_enable);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
+ kfree(spvmlr_pl);
+ return err;
+}
+
+static int
+mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char sspr_pl[MLXSW_REG_SSPR_LEN];
+
+ mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
+}
+
+static int mlxsw_sp_port_module_check(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool *p_usable)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char pmlp_pl[MLXSW_REG_PMLP_LEN];
+ int err;
+
+ mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+ if (err)
+ return err;
+ *p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) ? true : false;
+ return 0;
+}
+
+static int mlxsw_sp_port_open(struct net_device *dev)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err;
+
+ err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
+ if (err)
+ return err;
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int mlxsw_sp_port_stop(struct net_device *dev)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+}
+
+static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
+ const struct mlxsw_tx_info tx_info = {
+ .local_port = mlxsw_sp_port->local_port,
+ .is_emad = false,
+ };
+ u64 len;
+ int err;
+
+ if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info))
+ return NETDEV_TX_BUSY;
+
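+ /* Make room for the Tx header if the headroom provided by the stack
+ * is too small.
+ */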
+ if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
+ struct sk_buff *skb_orig = skb;
+
+ skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
+ if (!skb) {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb_orig);
+ return NETDEV_TX_OK;
+ }
+ }
+
+ if (eth_skb_pad(skb)) {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ return NETDEV_TX_OK;
+ }
+
+ mlxsw_sp_txhdr_construct(skb, &tx_info);
+ len = skb->len;
+ /* Due to a race we might fail here because of a full queue. In that
+ * unlikely case we simply drop the packet.
+ */
+ err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info);
+
+ if (!err) {
+ pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->tx_packets++;
+ pcpu_stats->tx_bytes += len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+ } else {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb);
+ }
+ return NETDEV_TX_OK;
+}
+
+static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct sockaddr *addr = p;
+ int err;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
+ if (err)
+ return err;
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ return 0;
+}
+
+static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err;
+
+ err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
+ if (err)
+ return err;
+ dev->mtu = mtu;
+ return 0;
+}
+
+static struct rtnl_link_stats64 *
+mlxsw_sp_port_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp_port_pcpu_stats *p;
+ u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+ u32 tx_dropped = 0;
+ unsigned int start;
+ int i;
+
+ for_each_possible_cpu(i) {
+ p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
+ do {
+ start = u64_stats_fetch_begin_irq(&p->syncp);
+ rx_packets = p->rx_packets;
+ rx_bytes = p->rx_bytes;
+ tx_packets = p->tx_packets;
+ tx_bytes = p->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ /* tx_dropped is u32, updated without syncp protection. */
+ tx_dropped += p->tx_dropped;
+ }
+ stats->tx_dropped = tx_dropped;
+ return stats;
+}
+
+int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
+ u16 vid_end, bool is_member, bool untagged)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char *spvm_pl;
+ int err;
+
+ spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
+ if (!spvm_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
+ vid_end, is_member, untagged);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
+ kfree(spvm_pl);
+ return err;
+}
+
+static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+ u16 vid, last_visited_vid;
+ int err;
+
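+ /* Install explicit {Port, VID} to FID mappings for all active VLANs
+ * before switching the port to Virtual mode.
+ */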
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
+ err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
+ vid);
+ if (err) {
+ last_visited_vid = vid;
+ goto err_port_vid_to_fid_set;
+ }
+ }
+
+ err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
+ if (err) {
+ last_visited_vid = VLAN_N_VID;
+ goto err_port_vid_to_fid_set;
+ }
+
+ return 0;
+
+err_port_vid_to_fid_set:
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
+ mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
+ vid);
+ return err;
+}
+
+static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+ u16 vid;
+ int err;
+
+ err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
+ if (err)
+ return err;
+
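+ /* Remove the explicit {Port, VID} to FID mappings now that the port
+ * is back in VLAN mode.
+ */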
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
+ err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
+ vid, vid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
+ u16 vid)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char *sftr_pl;
+ int err;
+
+ /* VLAN 0 is added to the HW filter when the device goes up, but it is
+ * reserved in our case, so simply return.
+ */
+ if (!vid)
+ return 0;
+
+ if (test_bit(vid, mlxsw_sp_port->active_vfids)) {
+ netdev_warn(dev, "VID=%d already configured\n", vid);
+ return 0;
+ }
+
+ if (!test_bit(vid, mlxsw_sp->active_vfids)) {
+ err = mlxsw_sp_vfid_create(mlxsw_sp, vid);
+ if (err) {
+ netdev_err(dev, "Failed to create vFID=%d\n",
+ MLXSW_SP_VFID_BASE + vid);
+ return err;
+ }
+
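+ /* Point the vFID's flood table entry at the CPU port */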
+ sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+ if (!sftr_pl) {
+ err = -ENOMEM;
+ goto err_flood_table_alloc;
+ }
+ mlxsw_reg_sftr_pack(sftr_pl, 0, vid,
+ MLXSW_REG_SFGC_TABLE_TYPE_FID, 0,
+ MLXSW_PORT_CPU_PORT, true);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+ kfree(sftr_pl);
+ if (err) {
+ netdev_err(dev, "Failed to configure flood table\n");
+ goto err_flood_table_config;
+ }
+ }
+
+ /* In case we fail in the following steps, we intentionally do not
+ * destroy the associated vFID.
+ */
+
+ /* When adding the first VLAN interface on a bridged port we need to
+ * transition all the active 802.1Q bridge VLANs to use explicit
+ * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
+ */
+ if (!mlxsw_sp_port->nr_vfids) {
+ err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
+ if (err) {
+ netdev_err(dev, "Failed to set to Virtual mode\n");
+ return err;
+ }
+ }
+
+ err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
+ MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
+ true, MLXSW_SP_VFID_BASE + vid, vid);
+ if (err) {
+ netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
+ vid, MLXSW_SP_VFID_BASE + vid);
+ goto err_port_vid_to_fid_set;
+ }
+
+ err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
+ if (err) {
+ netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
+ goto err_port_vid_learning_set;
+ }
+
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, false);
+ if (err) {
+ netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
+ vid);
+ goto err_port_add_vid;
+ }
+
+ err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid,
+ MLXSW_REG_SPMS_STATE_FORWARDING);
+ if (err) {
+ netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
+ goto err_port_stp_state_set;
+ }
+
+ mlxsw_sp_port->nr_vfids++;
+ set_bit(vid, mlxsw_sp_port->active_vfids);
+
+ return 0;
+
+err_flood_table_config:
+err_flood_table_alloc:
+ mlxsw_sp_vfid_destroy(mlxsw_sp, vid);
+ return err;
+
+err_port_stp_state_set:
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
+err_port_add_vid:
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
+err_port_vid_learning_set:
+ mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
+ MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
+ MLXSW_SP_VFID_BASE + vid, vid);
+err_port_vid_to_fid_set:
+ mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
+ return err;
+}
+
+int mlxsw_sp_port_kill_vid(struct net_device *dev,
+ __be16 __always_unused proto, u16 vid)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err;
+
+ /* VLAN 0 is removed from the HW filter when the device goes down, but
+ * it is reserved in our case, so simply return.
+ */
+ if (!vid)
+ return 0;
+
+ if (!test_bit(vid, mlxsw_sp_port->active_vfids)) {
+ netdev_warn(dev, "VID=%d does not exist\n", vid);
+ return 0;
+ }
+
+ err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid,
+ MLXSW_REG_SPMS_STATE_DISCARDING);
+ if (err) {
+ netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
+ return err;
+ }
+
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
+ if (err) {
+ netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
+ vid);
+ return err;
+ }
+
+ err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
+ if (err) {
+ netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
+ return err;
+ }
+
+ err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
+ MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
+ false, MLXSW_SP_VFID_BASE + vid,
+ vid);
+ if (err) {
+ netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
+ vid, MLXSW_SP_VFID_BASE + vid);
+ return err;
+ }
+
+ /* When removing the last VLAN interface on a bridged port we need to
+ * transition all active 802.1Q bridge VLANs to use VID to FID
+ * mappings and set the port's mode to VLAN mode.
+ */
+ if (mlxsw_sp_port->nr_vfids == 1) {
+ err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
+ if (err) {
+ netdev_err(dev, "Failed to set to VLAN mode\n");
+ return err;
+ }
+ }
+
+ mlxsw_sp_port->nr_vfids--;
+ clear_bit(vid, mlxsw_sp_port->active_vfids);
+
+ return 0;
+}
+
+static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
+ .ndo_open = mlxsw_sp_port_open,
+ .ndo_stop = mlxsw_sp_port_stop,
+ .ndo_start_xmit = mlxsw_sp_port_xmit,
+ .ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
+ .ndo_change_mtu = mlxsw_sp_port_change_mtu,
+ .ndo_get_stats64 = mlxsw_sp_port_get_stats64,
+ .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
+ .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
+ .ndo_fdb_add = switchdev_port_fdb_add,
+ .ndo_fdb_del = switchdev_port_fdb_del,
+ .ndo_fdb_dump = switchdev_port_fdb_dump,
+ .ndo_bridge_setlink = switchdev_port_bridge_setlink,
+ .ndo_bridge_getlink = switchdev_port_bridge_getlink,
+ .ndo_bridge_dellink = switchdev_port_bridge_dellink,
+};
+
+static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, mlxsw_sp_driver_version,
+ sizeof(drvinfo->version));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d",
+ mlxsw_sp->bus_info->fw_rev.major,
+ mlxsw_sp->bus_info->fw_rev.minor,
+ mlxsw_sp->bus_info->fw_rev.subminor);
+ strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
+ sizeof(drvinfo->bus_info));
+}
+
+struct mlxsw_sp_port_hw_stats {
+ char str[ETH_GSTRING_LEN];
+ u64 (*getter)(char *payload);
+};
+
+static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
+ {
+ .str = "a_frames_transmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
+ },
+ {
+ .str = "a_frames_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
+ },
+ {
+ .str = "a_frame_check_sequence_errors",
+ .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
+ },
+ {
+ .str = "a_alignment_errors",
+ .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
+ },
+ {
+ .str = "a_octets_transmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
+ },
+ {
+ .str = "a_octets_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
+ },
+ {
+ .str = "a_multicast_frames_xmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
+ },
+ {
+ .str = "a_broadcast_frames_xmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
+ },
+ {
+ .str = "a_multicast_frames_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
+ },
+ {
+ .str = "a_broadcast_frames_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
+ },
+ {
+ .str = "a_in_range_length_errors",
+ .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
+ },
+ {
+ .str = "a_out_of_range_length_field",
+ .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
+ },
+ {
+ .str = "a_frame_too_long_errors",
+ .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
+ },
+ {
+ .str = "a_symbol_error_during_carrier",
+ .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
+ },
+ {
+ .str = "a_mac_control_frames_transmitted",
+ .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
+ },
+ {
+ .str = "a_mac_control_frames_received",
+ .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
+ },
+ {
+ .str = "a_unsupported_opcodes_received",
+ .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
+ },
+ {
+ .str = "a_pause_mac_ctrl_frames_received",
+ .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
+ },
+ {
+ .str = "a_pause_mac_ctrl_frames_xmitted",
+ .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
+
+static void mlxsw_sp_port_get_strings(struct net_device *dev,
+ u32 stringset, u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sp_port_hw_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static void mlxsw_sp_port_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+ int i;
+ int err;
+
+ mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
+ for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
+ data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
+}
+
+static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return MLXSW_SP_PORT_HW_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+struct mlxsw_sp_port_link_mode {
+ u32 mask;
+ u32 supported;
+ u32 advertised;
+ u32 speed;
+};
+
+static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
+ .supported = SUPPORTED_100baseT_Full,
+ .advertised = ADVERTISED_100baseT_Full,
+ .speed = 100,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
+ .speed = 100,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
+ MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
+ .supported = SUPPORTED_1000baseKX_Full,
+ .advertised = ADVERTISED_1000baseKX_Full,
+ .speed = 1000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
+ .supported = SUPPORTED_10000baseT_Full,
+ .advertised = ADVERTISED_10000baseT_Full,
+ .speed = 10000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
+ .supported = SUPPORTED_10000baseKX4_Full,
+ .advertised = ADVERTISED_10000baseKX4_Full,
+ .speed = 10000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
+ .supported = SUPPORTED_10000baseKR_Full,
+ .advertised = ADVERTISED_10000baseKR_Full,
+ .speed = 10000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
+ .supported = SUPPORTED_20000baseKR2_Full,
+ .advertised = ADVERTISED_20000baseKR2_Full,
+ .speed = 20000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
+ .supported = SUPPORTED_40000baseCR4_Full,
+ .advertised = ADVERTISED_40000baseCR4_Full,
+ .speed = 40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
+ .supported = SUPPORTED_40000baseKR4_Full,
+ .advertised = ADVERTISED_40000baseKR4_Full,
+ .speed = 40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
+ .supported = SUPPORTED_40000baseSR4_Full,
+ .advertised = ADVERTISED_40000baseSR4_Full,
+ .speed = 40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
+ .supported = SUPPORTED_40000baseLR4_Full,
+ .advertised = ADVERTISED_40000baseLR4_Full,
+ .speed = 40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
+ MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
+ MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
+ .speed = 25000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
+ MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
+ .speed = 50000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
+ .supported = SUPPORTED_56000baseKR4_Full,
+ .advertised = ADVERTISED_56000baseKR4_Full,
+ .speed = 56000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
+ .speed = 100000,
+ },
+};
+
+#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
+
+static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
+{
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+ return SUPPORTED_FIBRE;
+
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
+ return SUPPORTED_Backplane;
+ return 0;
+}
+
+static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
+{
+ u32 modes = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
+ modes |= mlxsw_sp_port_link_mode[i].supported;
+ }
+ return modes;
+}
+
+static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
+{
+ u32 modes = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
+ modes |= mlxsw_sp_port_link_mode[i].advertised;
+ }
+ return modes;
+}
+
+static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
+ struct ethtool_cmd *cmd)
+{
+ u32 speed = SPEED_UNKNOWN;
+ u8 duplex = DUPLEX_UNKNOWN;
+ int i;
+
+ if (!carrier_ok)
+ goto out;
+
+ for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
+ speed = mlxsw_sp_port_link_mode[i].speed;
+ duplex = DUPLEX_FULL;
+ break;
+ }
+ }
+out:
+ ethtool_cmd_speed_set(cmd, speed);
+ cmd->duplex = duplex;
+}
+
+static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
+{
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+ return PORT_FIBRE;
+
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
+ return PORT_DA;
+
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
+ return PORT_NONE;
+
+ return PORT_OTHER;
+}
+
+static int mlxsw_sp_port_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 eth_proto_cap;
+ u32 eth_proto_admin;
+ u32 eth_proto_oper;
+ int err;
+
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ if (err) {
+ netdev_err(dev, "Failed to get proto");
+ return err;
+ }
+ mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
+ &eth_proto_admin, &eth_proto_oper);
+
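+ /* Derive the reported link modes from the queried PTYS protocol masks */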
+ cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
+ mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
+ mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
+ eth_proto_oper, cmd);
+
+ eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
+ cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
+ cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);
+
+ cmd->transceiver = XCVR_INTERNAL;
+ return 0;
+}
+
+static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
+{
+ u32 ptys_proto = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
+ if (advertising & mlxsw_sp_port_link_mode[i].advertised)
+ ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
+ }
+ return ptys_proto;
+}
+
+static u32 mlxsw_sp_to_ptys_speed(u32 speed)
+{
+ u32 ptys_proto = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
+ if (speed == mlxsw_sp_port_link_mode[i].speed)
+ ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
+ }
+ return ptys_proto;
+}
+
+static int mlxsw_sp_port_set_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 speed;
+ u32 eth_proto_new;
+ u32 eth_proto_cap;
+ u32 eth_proto_admin;
+ bool is_up;
+ int err;
+
+ speed = ethtool_cmd_speed(cmd);
+
+ eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
+ mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
+ mlxsw_sp_to_ptys_speed(speed);
+
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ if (err) {
+ netdev_err(dev, "Failed to get proto");
+ return err;
+ }
+ mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
+
+ eth_proto_new = eth_proto_new & eth_proto_cap;
+ if (!eth_proto_new) {
+ netdev_err(dev, "Not supported proto admin requested");
+ return -EINVAL;
+ }
+ if (eth_proto_new == eth_proto_admin)
+ return 0;
+
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ if (err) {
+ netdev_err(dev, "Failed to set proto admin");
+ return err;
+ }
+
+ err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
+ if (err) {
+ netdev_err(dev, "Failed to get oper status");
+ return err;
+ }
+ if (!is_up)
+ return 0;
+
+ err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+ if (err) {
+ netdev_err(dev, "Failed to set admin status");
+ return err;
+ }
+
+ err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
+ if (err) {
+ netdev_err(dev, "Failed to set admin status");
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
+ .get_drvinfo = mlxsw_sp_port_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = mlxsw_sp_port_get_strings,
+ .get_ethtool_stats = mlxsw_sp_port_get_stats,
+ .get_sset_count = mlxsw_sp_port_get_sset_count,
+ .get_settings = mlxsw_sp_port_get_settings,
+ .set_settings = mlxsw_sp_port_set_settings,
+};
+
+static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct net_device *dev;
+ bool usable;
+ int err;
+
+ dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
+ if (!dev)
+ return -ENOMEM;
+ mlxsw_sp_port = netdev_priv(dev);
+ mlxsw_sp_port->dev = dev;
+ mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
+ mlxsw_sp_port->local_port = local_port;
+ mlxsw_sp_port->learning = 1;
+ mlxsw_sp_port->learning_sync = 1;
+ mlxsw_sp_port->uc_flood = 1;
+ mlxsw_sp_port->pvid = 1;
+
+ mlxsw_sp_port->pcpu_stats =
+ netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
+ if (!mlxsw_sp_port->pcpu_stats) {
+ err = -ENOMEM;
+ goto err_alloc_stats;
+ }
+
+ dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
+ dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
+
+ err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
+ mlxsw_sp_port->local_port);
+ goto err_dev_addr_init;
+ }
+
+ netif_carrier_off(dev);
+
+ dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ /* Each packet needs to have a Tx header (metadata) on top of all other
+ * headers.
+ */
+ dev->hard_header_len += MLXSW_TXHDR_LEN;
+
+ err = mlxsw_sp_port_module_check(mlxsw_sp_port, &usable);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to check module\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_module_check;
+ }
+
+ if (!usable) {
+ dev_dbg(mlxsw_sp->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
+ mlxsw_sp_port->local_port);
+ goto port_not_usable;
+ }
+
+ err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_system_port_mapping_set;
+ }
+
+ err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_swid_set;
+ }
+
+ err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_mtu_set;
+ }
+
+ err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+ if (err)
+ goto err_port_admin_status_set;
+
+ err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_buffers_init;
+ }
+
+ mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
+ mlxsw_sp_port->local_port);
+ goto err_register_netdev;
+ }
+
+ err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
+ if (err)
+ goto err_port_vlan_init;
+
+ mlxsw_sp->ports[local_port] = mlxsw_sp_port;
+ return 0;
+
+err_port_vlan_init:
+ unregister_netdev(dev);
+err_register_netdev:
+err_port_buffers_init:
+err_port_admin_status_set:
+err_port_mtu_set:
+err_port_swid_set:
+err_port_system_port_mapping_set:
+port_not_usable:
+err_port_module_check:
+err_dev_addr_init:
+ free_percpu(mlxsw_sp_port->pcpu_stats);
+err_alloc_stats:
+ free_netdev(dev);
+ return err;
+}
+
+static void mlxsw_sp_vfids_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ u16 vfid;
+
+ for_each_set_bit(vfid, mlxsw_sp->active_vfids, VLAN_N_VID)
+ mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
+}
+
+static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
+
+ if (!mlxsw_sp_port)
+ return;
+ mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
+ unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
+ mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
+ free_percpu(mlxsw_sp_port->pcpu_stats);
+ free_netdev(mlxsw_sp_port->dev);
+}
+
+static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
+ mlxsw_sp_port_remove(mlxsw_sp, i);
+ kfree(mlxsw_sp->ports);
+}
+
+static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
+{
+ size_t alloc_size;
+ int i;
+ int err;
+
+ alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
+ mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
+ if (!mlxsw_sp->ports)
+ return -ENOMEM;
+
+ for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
+ err = mlxsw_sp_port_create(mlxsw_sp, i);
+ if (err)
+ goto err_port_create;
+ }
+ return 0;
+
+err_port_create:
+ for (i--; i >= 1; i--)
+ mlxsw_sp_port_remove(mlxsw_sp, i);
+ kfree(mlxsw_sp->ports);
+ return err;
+}
+
+static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
+ char *pude_pl, void *priv)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ enum mlxsw_reg_pude_oper_status status;
+ u8 local_port;
+
+ local_port = mlxsw_reg_pude_local_port_get(pude_pl);
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ if (!mlxsw_sp_port) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
+ local_port);
+ return;
+ }
+
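+ /* Reflect the reported operational status in the netdev carrier state */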
+ status = mlxsw_reg_pude_oper_status_get(pude_pl);
+ if (status == MLXSW_PORT_OPER_STATUS_UP) {
+ netdev_info(mlxsw_sp_port->dev, "link up\n");
+ netif_carrier_on(mlxsw_sp_port->dev);
+ } else {
+ netdev_info(mlxsw_sp_port->dev, "link down\n");
+ netif_carrier_off(mlxsw_sp_port->dev);
+ }
+}
+
+static struct mlxsw_event_listener mlxsw_sp_pude_event = {
+ .func = mlxsw_sp_pude_event_func,
+ .trap_id = MLXSW_TRAP_ID_PUDE,
+};
+
+static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_event_trap_id trap_id)
+{
+ struct mlxsw_event_listener *el;
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ int err;
+
+ switch (trap_id) {
+ case MLXSW_TRAP_ID_PUDE:
+ el = &mlxsw_sp_pude_event;
+ break;
+ }
+ err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
+ if (err)
+ return err;
+
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
+ if (err)
+ goto err_event_trap_set;
+
+ return 0;
+
+err_event_trap_set:
+ mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
+ return err;
+}
+
+static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_event_trap_id trap_id)
+{
+ struct mlxsw_event_listener *el;
+
+ switch (trap_id) {
+ case MLXSW_TRAP_ID_PUDE:
+ el = &mlxsw_sp_pude_event;
+ break;
+ }
+ mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
+}
+
+static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
+ void *priv)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
+
+ if (unlikely(!mlxsw_sp_port)) {
+ dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
+ local_port);
+ return;
+ }
+
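+ /* Hand the trapped packet to the stack on behalf of the ingress port */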
+ skb->dev = mlxsw_sp_port->dev;
+
+ pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->rx_packets++;
+ pcpu_stats->rx_bytes += skb->len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ netif_receive_skb(skb);
+}
+
+static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_FDB_MC,
+ },
+ /* Traps for specific L2 packet types, not trapped as FDB MC */
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_STP,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_LACP,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_EAPOL,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_LLDP,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_MMRP,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_MVRP,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_RPVST,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_DHCP,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
+ },
+};
+
+static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char htgt_pl[MLXSW_REG_HTGT_LEN];
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ int i;
+ int err;
+
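+ /* Create the RX and control trap groups before registering listeners */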
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
+ err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
+ &mlxsw_sp_rx_listener[i],
+ mlxsw_sp);
+ if (err)
+ goto err_rx_listener_register;
+
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+ mlxsw_sp_rx_listener[i].trap_id);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
+ if (err)
+ goto err_rx_trap_set;
+ }
+ return 0;
+
+err_rx_trap_set:
+ mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
+ &mlxsw_sp_rx_listener[i],
+ mlxsw_sp);
+err_rx_listener_register:
+ for (i--; i >= 0; i--) {
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+ mlxsw_sp_rx_listener[i].trap_id);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
+
+ mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
+ &mlxsw_sp_rx_listener[i],
+ mlxsw_sp);
+ }
+ return err;
+}
+
+static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+ mlxsw_sp_rx_listener[i].trap_id);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
+
+ mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
+ &mlxsw_sp_rx_listener[i],
+ mlxsw_sp);
+ }
+}
+
+static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
+ enum mlxsw_reg_sfgc_type type,
+ enum mlxsw_reg_sfgc_bridge_type bridge_type)
+{
+ enum mlxsw_flood_table_type table_type;
+ enum mlxsw_sp_flood_table flood_table;
+ char sfgc_pl[MLXSW_REG_SFGC_LEN];
+
+ if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID) {
+ table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
+ flood_table = 0;
+ } else {
+ table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
+ if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
+ flood_table = MLXSW_SP_FLOOD_TABLE_UC;
+ else
+ flood_table = MLXSW_SP_FLOOD_TABLE_BM;
+ }
+
+ mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
+ flood_table);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
+}
+
+static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int type, err;
+
+ /* For non-offloaded netdevs, flood all traffic types to the CPU
+ * port.
+ */
+ for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
+ if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
+ continue;
+
+ err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
+ MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
+ if (err)
+ return err;
+ }
+
+ /* For bridged ports, use one flooding table for unknown unicast
+ * traffic and a second table for unregistered multicast and
+ * broadcast.
+ */
+ for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
+ if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
+ continue;
+
+ err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
+ MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ int err;
+
+ mlxsw_sp->core = mlxsw_core;
+ mlxsw_sp->bus_info = mlxsw_bus_info;
+
+ err = mlxsw_sp_base_mac_get(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
+ return err;
+ }
+
+ err = mlxsw_sp_ports_create(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
+ goto err_ports_create;
+ }
+
+ err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
+ goto err_event_register;
+ }
+
+ err = mlxsw_sp_traps_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
+ goto err_rx_listener_register;
+ }
+
+ err = mlxsw_sp_flood_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
+ goto err_flood_init;
+ }
+
+ err = mlxsw_sp_buffers_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
+ goto err_buffers_init;
+ }
+
+ err = mlxsw_sp_switchdev_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
+ goto err_switchdev_init;
+ }
+
+ return 0;
+
+err_switchdev_init:
+err_buffers_init:
+err_flood_init:
+ mlxsw_sp_traps_fini(mlxsw_sp);
+err_rx_listener_register:
+ mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
+err_event_register:
+ mlxsw_sp_ports_remove(mlxsw_sp);
+err_ports_create:
+ mlxsw_sp_vfids_fini(mlxsw_sp);
+ return err;
+}
+
+static void mlxsw_sp_fini(void *priv)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ mlxsw_sp_switchdev_fini(mlxsw_sp);
+ mlxsw_sp_traps_fini(mlxsw_sp);
+ mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
+ mlxsw_sp_ports_remove(mlxsw_sp);
+ mlxsw_sp_vfids_fini(mlxsw_sp);
+}
+
+static struct mlxsw_config_profile mlxsw_sp_config_profile = {
+ .used_max_vepa_channels = 1,
+ .max_vepa_channels = 0,
+ .used_max_lag = 1,
+ .max_lag = 64,
+ .used_max_port_per_lag = 1,
+ .max_port_per_lag = 16,
+ .used_max_mid = 1,
+ .max_mid = 7000,
+ .used_max_pgt = 1,
+ .max_pgt = 0,
+ .used_max_system_port = 1,
+ .max_system_port = 64,
+ .used_max_vlan_groups = 1,
+ .max_vlan_groups = 127,
+ .used_max_regions = 1,
+ .max_regions = 400,
+ .used_flood_tables = 1,
+ .used_flood_mode = 1,
+ .flood_mode = 3,
+ .max_fid_offset_flood_tables = 2,
+ .fid_offset_flood_table_size = VLAN_N_VID - 1,
+ .max_fid_flood_tables = 1,
+ .fid_flood_table_size = VLAN_N_VID,
+ .used_max_ib_mc = 1,
+ .max_ib_mc = 0,
+ .used_max_pkey = 1,
+ .max_pkey = 0,
+ .swid_config = {
+ {
+ .used_type = 1,
+ .type = MLXSW_PORT_SWID_TYPE_ETH,
+ }
+ },
+};
+
+static struct mlxsw_driver mlxsw_sp_driver = {
+ .kind = MLXSW_DEVICE_KIND_SPECTRUM,
+ .owner = THIS_MODULE,
+ .priv_size = sizeof(struct mlxsw_sp),
+ .init = mlxsw_sp_init,
+ .fini = mlxsw_sp_fini,
+ .txhdr_construct = mlxsw_sp_txhdr_construct,
+ .txhdr_len = MLXSW_TXHDR_LEN,
+ .profile = &mlxsw_sp_config_profile,
+};
+
+static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
+}
+
+static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct net_device *dev = mlxsw_sp_port->dev;
+ int err;
+
+ /* When a port is not bridged, untagged packets are tagged with
+ * PVID=VID=1, thereby creating an implicit VLAN interface in
+ * the device. Remove it and let bridge code take care of its
+ * own VLANs.
+ */
+ err = mlxsw_sp_port_kill_vid(dev, 0, 1);
+ if (err)
+ netdev_err(dev, "Failed to remove VID 1\n");
+
+ return err;
+}
+
+static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct net_device *dev = mlxsw_sp_port->dev;
+ int err;
+
+ /* Add an implicit VLAN interface in the device, so that untagged
+ * packets will be classified to the default vFID.
+ */
+ err = mlxsw_sp_port_add_vid(dev, 0, 1);
+ if (err)
+ netdev_err(dev, "Failed to add VID 1\n");
+
+ return err;
+}
+
+static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *br_dev)
+{
+ return !mlxsw_sp->master_bridge.dev ||
+ mlxsw_sp->master_bridge.dev == br_dev;
+}
+
+static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *br_dev)
+{
+ mlxsw_sp->master_bridge.dev = br_dev;
+ mlxsw_sp->master_bridge.ref_count++;
+}
+
+static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *br_dev)
+{
+ if (--mlxsw_sp->master_bridge.ref_count == 0)
+ mlxsw_sp->master_bridge.dev = NULL;
+}
+
+static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct net_device *upper_dev;
+ struct mlxsw_sp *mlxsw_sp;
+ int err;
+
+ if (!mlxsw_sp_port_dev_check(dev))
+ return NOTIFY_DONE;
+
+ mlxsw_sp_port = netdev_priv(dev);
+ mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ info = ptr;
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ upper_dev = info->upper_dev;
+ /* A HW limitation forbids enslaving a port to multiple bridges. */
+ if (info->master && info->linking &&
+ netif_is_bridge_master(upper_dev) &&
+ !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
+ return NOTIFY_BAD;
+ break;
+ case NETDEV_CHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (info->master &&
+ netif_is_bridge_master(upper_dev)) {
+ if (info->linking) {
+ err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
+ if (err)
+ netdev_err(dev, "Failed to join bridge\n");
+ mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
+ mlxsw_sp_port->bridged = 1;
+ } else {
+ err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
+ if (err)
+ netdev_err(dev, "Failed to leave bridge\n");
+ mlxsw_sp_port->bridged = 0;
+ mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
+ }
+ }
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
+ .notifier_call = mlxsw_sp_netdevice_event,
+};
+
+static int __init mlxsw_sp_module_init(void)
+{
+ int err;
+
+ register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
+ err = mlxsw_core_driver_register(&mlxsw_sp_driver);
+ if (err)
+ goto err_core_driver_register;
+ return 0;
+
+err_core_driver_register:
+ unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
+ return err;
+}
+
+static void __exit mlxsw_sp_module_exit(void)
+{
+ mlxsw_core_driver_unregister(&mlxsw_sp_driver);
+ unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
+}
+
+module_init(mlxsw_sp_module_init);
+module_exit(mlxsw_sp_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox Spectrum driver");
+MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
new file mode 100644
index 000000000000..4365c8bccc6d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -0,0 +1,122 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_SPECTRUM_H
+#define _MLXSW_SPECTRUM_H
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+#include <net/switchdev.h>
+
+#include "core.h"
+
+#define MLXSW_SP_VFID_BASE VLAN_N_VID
+
+struct mlxsw_sp_port;
+
+struct mlxsw_sp {
+ unsigned long active_vfids[BITS_TO_LONGS(VLAN_N_VID)];
+ unsigned long active_fids[BITS_TO_LONGS(VLAN_N_VID)];
+ struct mlxsw_sp_port **ports;
+ struct mlxsw_core *core;
+ const struct mlxsw_bus_info *bus_info;
+ unsigned char base_mac[ETH_ALEN];
+ struct {
+ struct delayed_work dw;
+#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
+ unsigned int interval; /* ms */
+ } fdb_notify;
+#define MLXSW_SP_DEFAULT_AGEING_TIME 300
+ u32 ageing_time;
+ struct {
+ struct net_device *dev;
+ unsigned int ref_count;
+ } master_bridge;
+};
+
+struct mlxsw_sp_port_pcpu_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+ u32 tx_dropped;
+};
+
+struct mlxsw_sp_port {
+ struct net_device *dev;
+ struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
+ struct mlxsw_sp *mlxsw_sp;
+ u8 local_port;
+ u8 stp_state;
+ u8 learning:1,
+ learning_sync:1,
+ uc_flood:1,
+ bridged:1;
+ u16 pvid;
+ /* 802.1Q bridge VLANs */
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ /* VLAN interfaces */
+ unsigned long active_vfids[BITS_TO_LONGS(VLAN_N_VID)];
+ u16 nr_vfids;
+};
+
+enum mlxsw_sp_flood_table {
+ MLXSW_SP_FLOOD_TABLE_UC,
+ MLXSW_SP_FLOOD_TABLE_BM,
+};
+
+int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);
+
+int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port);
+int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
+ u16 vid);
+int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
+ u16 vid_end, bool is_member, bool untagged);
+int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
+ u16 vid);
+int mlxsw_sp_port_kill_vid(struct net_device *dev,
+ __be16 __always_unused proto, u16 vid);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
new file mode 100644
index 000000000000..d59195e3f7fb
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -0,0 +1,422 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "spectrum.h"
+#include "core.h"
+#include "port.h"
+#include "reg.h"
+
+struct mlxsw_sp_pb {
+ u8 index;
+ u16 size;
+};
+
+#define MLXSW_SP_PB(_index, _size) \
+ { \
+ .index = _index, \
+ .size = _size, \
+ }
+
+static const struct mlxsw_sp_pb mlxsw_sp_pbs[] = {
+ MLXSW_SP_PB(0, 208),
+ MLXSW_SP_PB(1, 208),
+ MLXSW_SP_PB(2, 208),
+ MLXSW_SP_PB(3, 208),
+ MLXSW_SP_PB(4, 208),
+ MLXSW_SP_PB(5, 208),
+ MLXSW_SP_PB(6, 208),
+ MLXSW_SP_PB(7, 208),
+ MLXSW_SP_PB(9, 208),
+};
+
+#define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs)
+
+static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ char pbmc_pl[MLXSW_REG_PBMC_LEN];
+ int i;
+
+ mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
+ 0xffff, 0xffff / 2);
+ for (i = 0; i < MLXSW_SP_PBS_LEN; i++) {
+ const struct mlxsw_sp_pb *pb;
+
+ pb = &mlxsw_sp_pbs[i];
+ mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pb->index, pb->size);
+ }
+ return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
+ MLXSW_REG(pbmc), pbmc_pl);
+}
+
+#define MLXSW_SP_SB_BYTES_PER_CELL 96
+
+struct mlxsw_sp_sb_pool {
+ u8 pool;
+ enum mlxsw_reg_sbpr_dir dir;
+ enum mlxsw_reg_sbpr_mode mode;
+ u32 size;
+};
+
+#define MLXSW_SP_SB_POOL_INGRESS_SIZE \
+ ((15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS)) / \
+ MLXSW_SP_SB_BYTES_PER_CELL)
+#define MLXSW_SP_SB_POOL_EGRESS_SIZE \
+ ((14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS)) / \
+ MLXSW_SP_SB_BYTES_PER_CELL)
+
+#define MLXSW_SP_SB_POOL(_pool, _dir, _mode, _size) \
+ { \
+ .pool = _pool, \
+ .dir = _dir, \
+ .mode = _mode, \
+ .size = _size, \
+ }
+
+#define MLXSW_SP_SB_POOL_INGRESS(_pool, _size) \
+ MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBPR_DIR_INGRESS, \
+ MLXSW_REG_SBPR_MODE_DYNAMIC, _size)
+
+#define MLXSW_SP_SB_POOL_EGRESS(_pool, _size) \
+ MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBPR_DIR_EGRESS, \
+ MLXSW_REG_SBPR_MODE_DYNAMIC, _size)
+
+static const struct mlxsw_sp_sb_pool mlxsw_sp_sb_pools[] = {
+ MLXSW_SP_SB_POOL_INGRESS(0, MLXSW_SP_SB_POOL_INGRESS_SIZE),
+ MLXSW_SP_SB_POOL_INGRESS(1, 0),
+ MLXSW_SP_SB_POOL_INGRESS(2, 0),
+ MLXSW_SP_SB_POOL_INGRESS(3, 0),
+ MLXSW_SP_SB_POOL_EGRESS(0, MLXSW_SP_SB_POOL_EGRESS_SIZE),
+ MLXSW_SP_SB_POOL_EGRESS(1, 0),
+ MLXSW_SP_SB_POOL_EGRESS(2, 0),
+ MLXSW_SP_SB_POOL_EGRESS(3, MLXSW_SP_SB_POOL_EGRESS_SIZE),
+};
+
+#define MLXSW_SP_SB_POOLS_LEN ARRAY_SIZE(mlxsw_sp_sb_pools)
+
+static int mlxsw_sp_sb_pools_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char sbpr_pl[MLXSW_REG_SBPR_LEN];
+ int i;
+ int err;
+
+ for (i = 0; i < MLXSW_SP_SB_POOLS_LEN; i++) {
+ const struct mlxsw_sp_sb_pool *pool;
+
+ pool = &mlxsw_sp_sb_pools[i];
+ mlxsw_reg_sbpr_pack(sbpr_pl, pool->pool, pool->dir,
+ pool->mode, pool->size);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+struct mlxsw_sp_sb_cm {
+ union {
+ u8 pg;
+ u8 tc;
+ } u;
+ enum mlxsw_reg_sbcm_dir dir;
+ u32 min_buff;
+ u32 max_buff;
+ u8 pool;
+};
+
+#define MLXSW_SP_SB_CM(_pg_tc, _dir, _min_buff, _max_buff, _pool) \
+ { \
+ .u.pg = _pg_tc, \
+ .dir = _dir, \
+ .min_buff = _min_buff, \
+ .max_buff = _max_buff, \
+ .pool = _pool, \
+ }
+
+#define MLXSW_SP_SB_CM_INGRESS(_pg, _min_buff, _max_buff) \
+ MLXSW_SP_SB_CM(_pg, MLXSW_REG_SBCM_DIR_INGRESS, \
+ _min_buff, _max_buff, 0)
+
+#define MLXSW_SP_SB_CM_EGRESS(_tc, _min_buff, _max_buff) \
+ MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS, \
+ _min_buff, _max_buff, 0)
+
+#define MLXSW_SP_CPU_PORT_SB_CM_EGRESS(_tc) \
+ MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS, 104, 2, 3)
+
+static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms[] = {
+ MLXSW_SP_SB_CM_INGRESS(0, 10000 / MLXSW_SP_SB_BYTES_PER_CELL, 8),
+ MLXSW_SP_SB_CM_INGRESS(1, 0, 0),
+ MLXSW_SP_SB_CM_INGRESS(2, 0, 0),
+ MLXSW_SP_SB_CM_INGRESS(3, 0, 0),
+ MLXSW_SP_SB_CM_INGRESS(4, 0, 0),
+ MLXSW_SP_SB_CM_INGRESS(5, 0, 0),
+ MLXSW_SP_SB_CM_INGRESS(6, 0, 0),
+ MLXSW_SP_SB_CM_INGRESS(7, 0, 0),
+ MLXSW_SP_SB_CM_INGRESS(9, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff),
+ MLXSW_SP_SB_CM_EGRESS(0, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+ MLXSW_SP_SB_CM_EGRESS(1, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+ MLXSW_SP_SB_CM_EGRESS(2, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+ MLXSW_SP_SB_CM_EGRESS(3, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+ MLXSW_SP_SB_CM_EGRESS(4, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+ MLXSW_SP_SB_CM_EGRESS(5, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+ MLXSW_SP_SB_CM_EGRESS(6, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+ MLXSW_SP_SB_CM_EGRESS(7, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+ MLXSW_SP_SB_CM_EGRESS(8, 0, 0),
+ MLXSW_SP_SB_CM_EGRESS(9, 0, 0),
+ MLXSW_SP_SB_CM_EGRESS(10, 0, 0),
+ MLXSW_SP_SB_CM_EGRESS(11, 0, 0),
+ MLXSW_SP_SB_CM_EGRESS(12, 0, 0),
+ MLXSW_SP_SB_CM_EGRESS(13, 0, 0),
+ MLXSW_SP_SB_CM_EGRESS(14, 0, 0),
+ MLXSW_SP_SB_CM_EGRESS(15, 0, 0),
+ MLXSW_SP_SB_CM_EGRESS(16, 1, 0xff),
+};
+
+#define MLXSW_SP_SB_CMS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms)
+
+static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(0),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(1),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(2),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(3),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(4),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(5),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(6),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(7),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(8),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(9),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(10),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(11),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(12),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(13),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(14),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(15),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(16),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(17),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(18),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(19),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(20),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(21),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(22),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(23),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(24),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(25),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(26),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(27),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(28),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(29),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(30),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(31),
+};
+
+#define MLXSW_SP_CPU_PORT_SB_CMS_LEN \
+ ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms)
+
+static int mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ const struct mlxsw_sp_sb_cm *cms,
+ size_t cms_len)
+{
+ char sbcm_pl[MLXSW_REG_SBCM_LEN];
+ int i;
+ int err;
+
+ for (i = 0; i < cms_len; i++) {
+ const struct mlxsw_sp_sb_cm *cm;
+
+ cm = &cms[i];
+ mlxsw_reg_sbcm_pack(sbcm_pl, local_port, cm->u.pg, cm->dir,
+ cm->min_buff, cm->max_buff, cm->pool);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ return mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_port->local_port, mlxsw_sp_sb_cms,
+ MLXSW_SP_SB_CMS_LEN);
+}
+
+static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_sp_sb_cms_init(mlxsw_sp, 0, mlxsw_sp_cpu_port_sb_cms,
+ MLXSW_SP_CPU_PORT_SB_MCS_LEN);
+}
+
+struct mlxsw_sp_sb_pm {
+ u8 pool;
+ enum mlxsw_reg_sbpm_dir dir;
+ u32 min_buff;
+ u32 max_buff;
+};
+
+#define MLXSW_SP_SB_PM(_pool, _dir, _min_buff, _max_buff) \
+ { \
+ .pool = _pool, \
+ .dir = _dir, \
+ .min_buff = _min_buff, \
+ .max_buff = _max_buff, \
+ }
+
+#define MLXSW_SP_SB_PM_INGRESS(_pool, _min_buff, _max_buff) \
+ MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBPM_DIR_INGRESS, \
+ _min_buff, _max_buff)
+
+#define MLXSW_SP_SB_PM_EGRESS(_pool, _min_buff, _max_buff) \
+ MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBPM_DIR_EGRESS, \
+ _min_buff, _max_buff)
+
+static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = {
+ MLXSW_SP_SB_PM_INGRESS(0, 0, 0xff),
+ MLXSW_SP_SB_PM_INGRESS(1, 0, 0),
+ MLXSW_SP_SB_PM_INGRESS(2, 0, 0),
+ MLXSW_SP_SB_PM_INGRESS(3, 0, 0),
+ MLXSW_SP_SB_PM_EGRESS(0, 0, 7),
+ MLXSW_SP_SB_PM_EGRESS(1, 0, 0),
+ MLXSW_SP_SB_PM_EGRESS(2, 0, 0),
+ MLXSW_SP_SB_PM_EGRESS(3, 0, 0),
+};
+
+#define MLXSW_SP_SB_PMS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms)
+
+static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ char sbpm_pl[MLXSW_REG_SBPM_LEN];
+ int i;
+ int err;
+
+ for (i = 0; i < MLXSW_SP_SB_PMS_LEN; i++) {
+ const struct mlxsw_sp_sb_pm *pm;
+
+ pm = &mlxsw_sp_sb_pms[i];
+ mlxsw_reg_sbpm_pack(sbpm_pl, mlxsw_sp_port->local_port,
+ pm->pool, pm->dir,
+ pm->min_buff, pm->max_buff);
+ err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
+ MLXSW_REG(sbpm), sbpm_pl);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+struct mlxsw_sp_sb_mm {
+ u8 prio;
+ u32 min_buff;
+ u32 max_buff;
+ u8 pool;
+};
+
+#define MLXSW_SP_SB_MM(_prio, _min_buff, _max_buff, _pool) \
+ { \
+ .prio = _prio, \
+ .min_buff = _min_buff, \
+ .max_buff = _max_buff, \
+ .pool = _pool, \
+ }
+
+static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
+ MLXSW_SP_SB_MM(0, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(1, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(2, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(3, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(4, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(5, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(6, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(7, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(8, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(9, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(10, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(11, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(12, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(13, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(14, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+};
+
+#define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)
+
+static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char sbmm_pl[MLXSW_REG_SBMM_LEN];
+ int i;
+ int err;
+
+ for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) {
+ const struct mlxsw_sp_sb_mm *mc;
+
+ mc = &mlxsw_sp_sb_mms[i];
+ mlxsw_reg_sbmm_pack(sbmm_pl, mc->prio, mc->min_buff,
+ mc->max_buff, mc->pool);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ err = mlxsw_sp_sb_pools_init(mlxsw_sp);
+ if (err)
+ return err;
+ err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
+ if (err)
+ return err;
+ err = mlxsw_sp_sb_mms_init(mlxsw_sp);
+
+ return err;
+}
+
+int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int err;
+
+ err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
+ if (err)
+ return err;
+ err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
+ if (err)
+ return err;
+ err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
+
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
new file mode 100644
index 000000000000..617fb22b5d81
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -0,0 +1,903 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <net/switchdev.h>
+
+#include "spectrum.h"
+#include "core.h"
+#include "reg.h"
+
+static int mlxsw_sp_port_attr_get(struct net_device *dev,
+ struct switchdev_attr *attr)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
+ attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
+ memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
+ attr->u.ppid.id_len);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ attr->u.brport_flags =
+ (mlxsw_sp_port->learning ? BR_LEARNING : 0) |
+ (mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
+ (mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 state)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ enum mlxsw_reg_spms_state spms_state;
+ char *spms_pl;
+ u16 vid;
+ int err;
+
+ switch (state) {
+ case BR_STATE_DISABLED: /* fall-through */
+ case BR_STATE_FORWARDING:
+ spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
+ break;
+ case BR_STATE_LISTENING: /* fall-through */
+ case BR_STATE_LEARNING:
+ spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
+ break;
+ case BR_STATE_BLOCKING:
+ spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
+ break;
+ default:
+ BUG();
+ }
+
+ spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+ if (!spms_pl)
+ return -ENOMEM;
+ mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
+ mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
+ kfree(spms_pl);
+ return err;
+}
+
+static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_trans *trans,
+ u8 state)
+{
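+ /* Nothing to allocate or validate during the prepare phase; the STP
+ * state is applied only in the commit phase.
+ */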
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ mlxsw_sp_port->stp_state = state;
+ return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
+}
+
+static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 fid_begin, u16 fid_end, bool set,
+ bool only_uc)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 range = fid_end - fid_begin + 1;
+ char *sftr_pl;
+ int err;
+
+ sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+ if (!sftr_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, fid_begin,
+ MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range,
+ mlxsw_sp_port->local_port, set);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+ if (err)
+ goto buffer_out;
+
+ /* Flooding control allows one to decide whether a given port will
+ * flood unicast traffic for which there is no FDB entry.
+ */
+ if (only_uc)
+ goto buffer_out;
+
+ mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, fid_begin,
+ MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range,
+ mlxsw_sp_port->local_port, set);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+
+buffer_out:
+ kfree(sftr_pl);
+ return err;
+}
+
+static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool set)
+{
+ struct net_device *dev = mlxsw_sp_port->dev;
+ u16 vid, last_visited_vid;
+ int err;
+
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
+ err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
+ true);
+ if (err) {
+ last_visited_vid = vid;
+ goto err_port_flood_set;
+ }
+ }
+
+ return 0;
+
+err_port_flood_set:
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
+ __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
+ netdev_err(dev, "Failed to configure unicast flooding\n");
+ return err;
+}
+
+static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_trans *trans,
+ unsigned long brport_flags)
+{
+ unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
+ bool set;
+ int err;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ if ((uc_flood ^ brport_flags) & BR_FLOOD) {
+ set = mlxsw_sp_port->uc_flood ? false : true;
+ err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
+ if (err)
+ return err;
+ }
+
+ mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
+ mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
+ mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;
+
+ return 0;
+}
+
+static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
+{
+ char sfdat_pl[MLXSW_REG_SFDAT_LEN];
+ int err;
+
+ mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
+ if (err)
+ return err;
+ mlxsw_sp->ageing_time = ageing_time;
+ return 0;
+}
+
+static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_trans *trans,
+ unsigned long ageing_clock_t)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
+ u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
+}
+
+static int mlxsw_sp_port_attr_set(struct net_device *dev,
+ const struct switchdev_attr *attr,
+ struct switchdev_trans *trans)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err = 0;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
+ attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
+ attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
+ attr->u.ageing_time);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char spvid_pl[MLXSW_REG_SPVID_LEN];
+
+ mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
+}
+
+static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
+{
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
+ int err;
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+
+ if (err)
+ return err;
+
+ set_bit(fid, mlxsw_sp->active_fids);
+ return 0;
+}
+
+static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid)
+{
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
+
+ clear_bit(fid, mlxsw_sp->active_fids);
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
+ fid, fid);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+}
+
+static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
+{
+ enum mlxsw_reg_svfa_mt mt;
+
+ if (mlxsw_sp_port->nr_vfids)
+ mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+ else
+ mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
+
+ return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid);
+}
+
+static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
+{
+ enum mlxsw_reg_svfa_mt mt;
+
+ if (!mlxsw_sp_port->nr_vfids)
+ return 0;
+
+ mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+ return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid);
+}
+
+static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
+ u16 vid_end)
+{
+ u16 vid;
+ int err;
+
+ for (vid = vid_begin; vid <= vid_end; vid++) {
+ err = mlxsw_sp_port_add_vid(dev, 0, vid);
+ if (err)
+ goto err_port_add_vid;
+ }
+ return 0;
+
+err_port_add_vid:
+ for (vid--; vid >= vid_begin; vid--)
+ mlxsw_sp_port_kill_vid(dev, 0, vid);
+ return err;
+}
+
+static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid_begin, u16 vid_end,
+ bool flag_untagged, bool flag_pvid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct net_device *dev = mlxsw_sp_port->dev;
+ enum mlxsw_reg_svfa_mt mt;
+ u16 vid, vid_e;
+ int err;
+
+ /* In case this is invoked with BRIDGE_FLAGS_SELF and the port is
+ * not bridged, then packets ingressing through the port with
+ * the specified VIDs will be directed to the CPU.
+ */
+ if (!mlxsw_sp_port->bridged)
+ return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end);
+
+ for (vid = vid_begin; vid <= vid_end; vid++) {
+ if (!test_bit(vid, mlxsw_sp->active_fids)) {
+ err = mlxsw_sp_fid_create(mlxsw_sp, vid);
+ if (err) {
+ netdev_err(dev, "Failed to create FID=%d\n",
+ vid);
+ return err;
+ }
+
+ /* When creating a FID, we set a VID to FID mapping
+ * regardless of the port's mode.
+ */
+ mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
+ err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt,
+ true, vid, vid);
+ if (err) {
+ netdev_err(dev, "Failed to create FID=VID=%d mapping\n",
+ vid);
+ return err;
+ }
+ }
+
+ /* Set FID mapping according to port's mode */
+ err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid);
+ if (err) {
+ netdev_err(dev, "Failed to map FID=%d", vid);
+ return err;
+ }
+ }
+
+ err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
+ true, false);
+ if (err) {
+ netdev_err(dev, "Failed to configure flooding\n");
+ return err;
+ }
+
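+ /* Program VLAN membership in chunks of at most
+ * MLXSW_REG_SPVM_REC_MAX_COUNT records per SPVM register write.
+ */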
+ for (vid = vid_begin; vid <= vid_end;
+ vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
+ vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
+ vid_end);
+
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, true,
+ flag_untagged);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Unable to add VIDs %d-%d\n",
+ vid, vid_e);
+ return err;
+ }
+ }
+
+ vid = vid_begin;
+ if (flag_pvid && mlxsw_sp_port->pvid != vid) {
+ err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Unable to add PVID %d\n",
+ vid);
+ return err;
+ }
+ mlxsw_sp_port->pvid = vid;
+ }
+
+ /* Change the activity bits only if the HW operation succeeded */
+ for (vid = vid_begin; vid <= vid_end; vid++)
+ set_bit(vid, mlxsw_sp_port->active_vlans);
+
+ return mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
+ mlxsw_sp_port->stp_state);
+}
+
+static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ bool untagged_flag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid_flag = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
+ vlan->vid_begin, vlan->vid_end,
+ untagged_flag, pvid_flag);
+}
+
+static int mlxsw_sp_port_fdb_op(struct mlxsw_sp_port *mlxsw_sp_port,
+ const char *mac, u16 vid, bool adding,
+ bool dynamic)
+{
+ enum mlxsw_reg_sfd_rec_policy policy;
+ enum mlxsw_reg_sfd_op op;
+ char *sfd_pl;
+ int err;
+
+ if (!vid)
+ vid = mlxsw_sp_port->pvid;
+
+ sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
+ if (!sfd_pl)
+ return -ENOMEM;
+
+ policy = dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
+ MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
+ op = adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
+ MLXSW_REG_SFD_OP_WRITE_REMOVE;
+ mlxsw_reg_sfd_pack(sfd_pl, op, 0);
+ mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy,
+ mac, vid, MLXSW_REG_SFD_REC_ACTION_NOP,
+ mlxsw_sp_port->local_port);
+ err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(sfd),
+ sfd_pl);
+ kfree(sfd_pl);
+
+ return err;
+}
+
+static int
+mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
+{
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid,
+ true, false);
+}
+
+static int mlxsw_sp_port_obj_add(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err = 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
+ SWITCHDEV_OBJ_PORT_VLAN(obj),
+ trans);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_FDB:
+ err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
+ SWITCHDEV_OBJ_PORT_FDB(obj),
+ trans);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin,
+ u16 vid_end)
+{
+ u16 vid;
+ int err;
+
+ for (vid = vid_begin; vid <= vid_end; vid++) {
+ err = mlxsw_sp_port_kill_vid(dev, 0, vid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid_begin, u16 vid_end, bool init)
+{
+ struct net_device *dev = mlxsw_sp_port->dev;
+ u16 vid, vid_e;
+ int err;
+
+ /* In case this is invoked with BRIDGE_FLAGS_SELF and the port is
+ * not bridged, then prevent packets ingressing through the
+ * port with the specified VIDs from being trapped to the CPU.
+ */
+ if (!init && !mlxsw_sp_port->bridged)
+ return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end);
+
+ for (vid = vid_begin; vid <= vid_end;
+ vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
+ vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
+ vid_end);
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, false,
+ false);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Unable to del VIDs %d-%d\n",
+ vid, vid_e);
+ return err;
+ }
+ }
+
+ if ((mlxsw_sp_port->pvid >= vid_begin) &&
+ (mlxsw_sp_port->pvid <= vid_end)) {
+ /* Default VLAN is always 1 */
+ mlxsw_sp_port->pvid = 1;
+ err = mlxsw_sp_port_pvid_set(mlxsw_sp_port,
+ mlxsw_sp_port->pvid);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Unable to del PVID %d\n",
+ vid);
+ return err;
+ }
+ }
+
+ if (init)
+ goto out;
+
+ err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
+ false, false);
+ if (err) {
+ netdev_err(dev, "Failed to clear flooding\n");
+ return err;
+ }
+
+ for (vid = vid_begin; vid <= vid_end; vid++) {
+ /* Remove FID mapping in case of Virtual mode */
+ err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
+ if (err) {
+ netdev_err(dev, "Failed to unmap FID=%d", vid);
+ return err;
+ }
+ }
+
+out:
+ /* Change the activity bits only if the HW operation succeeded */
+ for (vid = vid_begin; vid <= vid_end; vid++)
+ clear_bit(vid, mlxsw_sp_port->active_vlans);
+
+ return 0;
+}
+
+static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
+ vlan->vid_begin, vlan->vid_end, false);
+}
+
+static int
+mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_port_fdb *fdb)
+{
+ return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid,
+ false, false);
+}
+
+static int mlxsw_sp_port_obj_del(struct net_device *dev,
+ const struct switchdev_obj *obj)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err = 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_FDB:
+ err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
+ SWITCHDEV_OBJ_PORT_FDB(obj));
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_obj_port_fdb *fdb,
+ switchdev_obj_dump_cb_t *cb)
+{
+ char *sfd_pl;
+ char mac[ETH_ALEN];
+ u16 vid;
+ u8 local_port;
+ u8 num_rec;
+ int stored_err = 0;
+ int i;
+ int err;
+
+ sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
+ if (!sfd_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
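+ /* Dump the FDB in pages of MLXSW_REG_SFD_REC_MAX_COUNT records; a
+ * partial page indicates the end of the dump session.
+ */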
+ do {
+ mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
+ err = mlxsw_reg_query(mlxsw_sp_port->mlxsw_sp->core,
+ MLXSW_REG(sfd), sfd_pl);
+ if (err)
+ goto out;
+
+ num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
+
+ /* Even in case of an error, we still have to run the dump to the
+ * end so that the session in the firmware is finished.
+ */
+ if (stored_err)
+ continue;
+
+ for (i = 0; i < num_rec; i++) {
+ switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
+ case MLXSW_REG_SFD_REC_TYPE_UNICAST:
+ mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &vid,
+ &local_port);
+ if (local_port == mlxsw_sp_port->local_port) {
+ ether_addr_copy(fdb->addr, mac);
+ fdb->ndm_state = NUD_REACHABLE;
+ fdb->vid = vid;
+ err = cb(&fdb->obj);
+ if (err)
+ stored_err = err;
+ }
+ }
+ }
+ } while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);
+
+out:
+ kfree(sfd_pl);
+ return stored_err ? stored_err : err;
+}
+
+static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_obj_port_vlan *vlan,
+ switchdev_obj_dump_cb_t *cb)
+{
+ u16 vid;
+ int err = 0;
+
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
+ vlan->flags = 0;
+ if (vid == mlxsw_sp_port->pvid)
+ vlan->flags |= BRIDGE_VLAN_INFO_PVID;
+ vlan->vid_begin = vid;
+ vlan->vid_end = vid;
+ err = cb(&vlan->obj);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+static int mlxsw_sp_port_obj_dump(struct net_device *dev,
+ struct switchdev_obj *obj,
+ switchdev_obj_dump_cb_t *cb)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err = 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
+ SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_FDB:
+ err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
+ SWITCHDEV_OBJ_PORT_FDB(obj), cb);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
+ .switchdev_port_attr_get = mlxsw_sp_port_attr_get,
+ .switchdev_port_attr_set = mlxsw_sp_port_attr_set,
+ .switchdev_port_obj_add = mlxsw_sp_port_obj_add,
+ .switchdev_port_obj_del = mlxsw_sp_port_obj_del,
+ .switchdev_port_obj_dump = mlxsw_sp_port_obj_dump,
+};
+
+static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
+ char *sfn_pl, int rec_index,
+ bool adding)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ char mac[ETH_ALEN];
+ u8 local_port;
+ u16 vid;
+ int err;
+
+ mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &vid, &local_port);
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ if (!mlxsw_sp_port) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
+ return;
+ }
+
+ err = mlxsw_sp_port_fdb_op(mlxsw_sp_port, mac, vid,
+ adding && mlxsw_sp_port->learning, true);
+ if (err) {
+ if (net_ratelimit())
+ netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
+ return;
+ }
+
+ if (mlxsw_sp_port->learning && mlxsw_sp_port->learning_sync) {
+ struct switchdev_notifier_fdb_info info;
+ unsigned long notifier_type;
+
+ info.addr = mac;
+ info.vid = vid;
+ notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
+ call_switchdev_notifiers(notifier_type, mlxsw_sp_port->dev,
+ &info.info);
+ }
+}
+
+static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
+ char *sfn_pl, int rec_index)
+{
+ switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
+ case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
+ mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
+ rec_index, true);
+ break;
+ case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
+ mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
+ rec_index, false);
+ break;
+ }
+}
+
+static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
+{
+ schedule_delayed_work(&mlxsw_sp->fdb_notify.dw,
+ msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
+}
+
+static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
+{
+ struct mlxsw_sp *mlxsw_sp;
+ char *sfn_pl;
+ u8 num_rec;
+ int i;
+ int err;
+
+ sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
+ if (!sfn_pl)
+ return;
+
+ mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);
+
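+ /* Drain all pending FDB notification records from the device before
+ * re-arming the delayed work.
+ */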
+ do {
+ mlxsw_reg_sfn_pack(sfn_pl);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
+ if (err) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
+ break;
+ }
+ num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
+ for (i = 0; i < num_rec; i++)
+ mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
+
+ } while (num_rec);
+
+ kfree(sfn_pl);
+ mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
+}
+
+static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
+ return err;
+ }
+ INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
+ mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
+ mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
+ return 0;
+}
+
+static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
+}
+
+static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ u16 fid;
+
+ for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID)
+ mlxsw_sp_fid_destroy(mlxsw_sp, fid);
+}
+
+int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_sp_fdb_init(mlxsw_sp);
+}
+
+void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp_fdb_fini(mlxsw_sp);
+ mlxsw_sp_fids_fini(mlxsw_sp);
+}
+
+int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct net_device *dev = mlxsw_sp_port->dev;
+ int err;
+
+ /* Allow only untagged packets to ingress and tag them internally
+ * with VID 1.
+ */
+ mlxsw_sp_port->pvid = 1;
+ err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID, true);
+ if (err) {
+ netdev_err(dev, "Unable to init VLANs\n");
+ return err;
+ }
+
+ /* Add an implicit VLAN interface to the device, so that untagged
+ * packets will be classified to the default vFID.
+ */
+ err = mlxsw_sp_port_add_vid(dev, 0, 1);
+ if (err)
+ netdev_err(dev, "Failed to configure default vFID\n");
+
+ return err;
+}
+
+void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
+}
+
+void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 62cbbd1ada8d..d85960cfb694 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -57,13 +57,11 @@ static const char mlxsw_sx_driver_version[] = "1.0";
struct mlxsw_sx_port;
-#define MLXSW_SW_HW_ID_LEN 6
-
struct mlxsw_sx {
struct mlxsw_sx_port **ports;
struct mlxsw_core *core;
const struct mlxsw_bus_info *bus_info;
- u8 hw_id[MLXSW_SW_HW_ID_LEN];
+ u8 hw_id[ETH_ALEN];
};
struct mlxsw_sx_port_pcpu_stats {
@@ -868,7 +866,7 @@ static int mlxsw_sx_port_attr_get(struct net_device *dev,
struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
switch (attr->id) {
- case SWITCHDEV_ATTR_PORT_PARENT_ID:
+ case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id);
memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len);
break;
@@ -925,7 +923,8 @@ static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port,
spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
if (!spms_pl)
return -ENOMEM;
- mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port, vid, state);
+ mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port);
+ mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl);
kfree(spms_pl);
return err;
@@ -1148,7 +1147,7 @@ static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg,
}
status = mlxsw_reg_pude_oper_status_get(pude_pl);
- if (MLXSW_PORT_OPER_STATUS_UP == status) {
+ if (status == MLXSW_PORT_OPER_STATUS_UP) {
netdev_info(mlxsw_sx_port->dev, "link up\n");
netif_carrier_on(mlxsw_sx_port->dev);
} else {
@@ -1178,8 +1177,7 @@ static int mlxsw_sx_event_register(struct mlxsw_sx *mlxsw_sx,
if (err)
return err;
- mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
- MLXSW_REG_HTGT_TRAP_GROUP_EMAD, trap_id);
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
if (err)
goto err_event_trap_set;
@@ -1212,9 +1210,8 @@ static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
if (unlikely(!mlxsw_sx_port)) {
- if (net_ratelimit())
- dev_warn(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
- local_port);
+ dev_warn_ratelimited(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
+ local_port);
return;
}
@@ -1316,6 +1313,11 @@ static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
if (err)
return err;
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
+ if (err)
+ return err;
+
for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
err = mlxsw_core_rx_listener_register(mlxsw_sx->core,
&mlxsw_sx_rx_listener[i],
@@ -1324,7 +1326,6 @@ static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
goto err_rx_listener_register;
mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
- MLXSW_REG_HTGT_TRAP_GROUP_RX,
mlxsw_sx_rx_listener[i].trap_id);
err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
if (err)
@@ -1339,7 +1340,6 @@ err_rx_trap_set:
err_rx_listener_register:
for (i--; i >= 0; i--) {
mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
- MLXSW_REG_HTGT_TRAP_GROUP_RX,
mlxsw_sx_rx_listener[i].trap_id);
mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
@@ -1357,7 +1357,6 @@ static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx)
for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
- MLXSW_REG_HTGT_TRAP_GROUP_RX,
mlxsw_sx_rx_listener[i].trap_id);
mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
@@ -1371,25 +1370,15 @@ static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
{
char sfgc_pl[MLXSW_REG_SFGC_LEN];
char sgcr_pl[MLXSW_REG_SGCR_LEN];
- char *smid_pl;
char *sftr_pl;
int err;
- /* Due to FW bug, we must configure SMID. */
- smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
- if (!smid_pl)
- return -ENOMEM;
- mlxsw_reg_smid_pack(smid_pl, MLXSW_PORT_MID);
- err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(smid), smid_pl);
- kfree(smid_pl);
- if (err)
- return err;
-
/* Configure a flooding table, which includes only CPU port. */
sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
if (!sftr_pl)
return -ENOMEM;
- mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0);
+ mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0,
+ MLXSW_PORT_CPU_PORT, true);
err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl);
kfree(sftr_pl);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/txheader.h b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
index 06fc46c78a0b..fdf94720ca62 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/txheader.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
@@ -38,6 +38,7 @@
#define MLXSW_TXHDR_LEN 0x10
#define MLXSW_TXHDR_VERSION_0 0
+#define MLXSW_TXHDR_VERSION_1 1
enum {
MLXSW_TXHDR_ETH_CTL,
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index 3fd8ca6d4e7c..36a09d94b368 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -33,4 +33,13 @@ config ENC28J60_WRITEVERIFY
Enable the verify after the buffer write useful for debugging purpose.
If unsure, say N.
+config ENCX24J600
+ tristate "ENCX24J600 support"
+ depends on SPI
+ ---help---
+ Support for the Microchip ENC424J600/624J600 ethernet chip.
+
+ To compile this driver as a module, choose M here. The module will be
+ called encx24j600.
+
endif # NET_VENDOR_MICROCHIP
diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile
index 573d4292b9ea..ff78f621b59a 100644
--- a/drivers/net/ethernet/microchip/Makefile
+++ b/drivers/net/ethernet/microchip/Makefile
@@ -3,3 +3,4 @@
#
obj-$(CONFIG_ENC28J60) += enc28j60.o
+obj-$(CONFIG_ENCX24J600) += encx24j600.o encx24j600-regmap.o
diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c
new file mode 100644
index 000000000000..f3bb9055a292
--- /dev/null
+++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c
@@ -0,0 +1,513 @@
+/**
+ * Register map access API - ENCX24J600 support
+ *
+ * Copyright 2015 Gridpoint
+ *
+ * Author: Jon Ringle <jringle@gridpoint.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "encx24j600_hw.h"
+
+static inline bool is_bits_set(int value, int mask)
+{
+ return (value & mask) == mask;
+}
+
+static int encx24j600_switch_bank(struct encx24j600_context *ctx,
+ int bank)
+{
+ int ret = 0;
+
+ int bank_opcode = BANK_SELECT(bank);
+ ret = spi_write(ctx->spi, &bank_opcode, 1);
+ if (ret == 0)
+ ctx->bank = bank;
+
+ return ret;
+}
+
+static int encx24j600_cmdn(struct encx24j600_context *ctx, u8 opcode,
+ const void *buf, size_t len)
+{
+ struct spi_message m;
+ struct spi_transfer t[2] = { { .tx_buf = &opcode, .len = 1, },
+ { .tx_buf = buf, .len = len }, };
+ spi_message_init(&m);
+ spi_message_add_tail(&t[0], &m);
+ spi_message_add_tail(&t[1], &m);
+
+ return spi_sync(ctx->spi, &m);
+}
+
+static void regmap_lock_mutex(void *context)
+{
+ struct encx24j600_context *ctx = context;
+ mutex_lock(&ctx->mutex);
+}
+
+static void regmap_unlock_mutex(void *context)
+{
+ struct encx24j600_context *ctx = context;
+ mutex_unlock(&ctx->mutex);
+}
+
+static int regmap_encx24j600_sfr_read(void *context, u8 reg, u8 *val,
+ size_t len)
+{
+ struct encx24j600_context *ctx = context;
+ u8 banked_reg = reg & ADDR_MASK;
+ u8 bank = ((reg & BANK_MASK) >> BANK_SHIFT);
+ u8 cmd = RCRU;
+ int ret = 0;
+ int i = 0;
+ u8 tx_buf[2];
+
+ if (reg < 0x80) {
+ cmd = RCRCODE | banked_reg;
+ if ((banked_reg < 0x16) && (ctx->bank != bank))
+ ret = encx24j600_switch_bank(ctx, bank);
+ if (unlikely(ret))
+ return ret;
+ } else {
+ /* Translate registers that are accessed more efficiently using
+ * 3-byte SPI commands
+ */
+ switch (reg) {
+ case EGPRDPT:
+ cmd = RGPRDPT; break;
+ case EGPWRPT:
+ cmd = RGPWRPT; break;
+ case ERXRDPT:
+ cmd = RRXRDPT; break;
+ case ERXWRPT:
+ cmd = RRXWRPT; break;
+ case EUDARDPT:
+ cmd = RUDARDPT; break;
+ case EUDAWRPT:
+ cmd = RUDAWRPT; break;
+ case EGPDATA:
+ case ERXDATA:
+ case EUDADATA:
+ default:
+ return -EINVAL;
+ }
+ }
+
+ tx_buf[i++] = cmd;
+ if (cmd == RCRU)
+ tx_buf[i++] = reg;
+
+ ret = spi_write_then_read(ctx->spi, tx_buf, i, val, len);
+
+ return ret;
+}
+
+static int regmap_encx24j600_sfr_update(struct encx24j600_context *ctx,
+ u8 reg, u8 *val, size_t len,
+ u8 unbanked_cmd, u8 banked_code)
+{
+ u8 banked_reg = reg & ADDR_MASK;
+ u8 bank = ((reg & BANK_MASK) >> BANK_SHIFT);
+ u8 cmd = unbanked_cmd;
+ struct spi_message m;
+ struct spi_transfer t[3] = { { .tx_buf = &cmd, .len = sizeof(cmd), },
+ { .tx_buf = &reg, .len = sizeof(reg), },
+ { .tx_buf = val, .len = len }, };
+
+ if (reg < 0x80) {
+ int ret = 0;
+ cmd = banked_code | banked_reg;
+ if ((banked_reg < 0x16) && (ctx->bank != bank))
+ ret = encx24j600_switch_bank(ctx, bank);
+ if (unlikely(ret))
+ return ret;
+ } else {
+ /* Translate registers that are accessed more efficiently using
+ * 3-byte SPI commands
+ */
+ switch (reg) {
+ case EGPRDPT:
+ cmd = WGPRDPT; break;
+ case EGPWRPT:
+ cmd = WGPWRPT; break;
+ case ERXRDPT:
+ cmd = WRXRDPT; break;
+ case ERXWRPT:
+ cmd = WRXWRPT; break;
+ case EUDARDPT:
+ cmd = WUDARDPT; break;
+ case EUDAWRPT:
+ cmd = WUDAWRPT; break;
+ case EGPDATA:
+ case ERXDATA:
+ case EUDADATA:
+ default:
+ return -EINVAL;
+ }
+ }
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t[0], &m);
+
+ if (cmd == unbanked_cmd) {
+ t[1].tx_buf = &reg;
+ spi_message_add_tail(&t[1], &m);
+ }
+
+ spi_message_add_tail(&t[2], &m);
+ return spi_sync(ctx->spi, &m);
+}
+
+static int regmap_encx24j600_sfr_write(void *context, u8 reg, u8 *val,
+ size_t len)
+{
+ struct encx24j600_context *ctx = context;
+ return regmap_encx24j600_sfr_update(ctx, reg, val, len, WCRU, WCRCODE);
+}
+
+static int regmap_encx24j600_sfr_set_bits(struct encx24j600_context *ctx,
+ u8 reg, u8 val)
+{
+ return regmap_encx24j600_sfr_update(ctx, reg, &val, 1, BFSU, BFSCODE);
+}
+
+static int regmap_encx24j600_sfr_clr_bits(struct encx24j600_context *ctx,
+ u8 reg, u8 val)
+{
+ return regmap_encx24j600_sfr_update(ctx, reg, &val, 1, BFCU, BFCCODE);
+}
+
+static int regmap_encx24j600_reg_update_bits(void *context, unsigned int reg,
+ unsigned int mask,
+ unsigned int val)
+{
+ struct encx24j600_context *ctx = context;
+
+ int ret = 0;
+ unsigned int set_mask = mask & val;
+ unsigned int clr_mask = mask & ~val;
+
+ if ((reg >= 0x40 && reg < 0x6c) || reg >= 0x80)
+ return -EINVAL;
+
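+ /* Registers are 16 bits wide, but the bit set/clear helpers operate on
+ * a single byte, so apply the mask as separate low-byte (reg) and
+ * high-byte (reg + 1) operations.
+ */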
+ if (set_mask & 0xff)
+ ret = regmap_encx24j600_sfr_set_bits(ctx, reg, set_mask);
+
+ set_mask = (set_mask & 0xff00) >> 8;
+
+ if ((set_mask & 0xff) && (ret == 0))
+ ret = regmap_encx24j600_sfr_set_bits(ctx, reg + 1, set_mask);
+
+ if ((clr_mask & 0xff) && (ret == 0))
+ ret = regmap_encx24j600_sfr_clr_bits(ctx, reg, clr_mask);
+
+ clr_mask = (clr_mask & 0xff00) >> 8;
+
+ if ((clr_mask & 0xff) && (ret == 0))
+ ret = regmap_encx24j600_sfr_clr_bits(ctx, reg + 1, clr_mask);
+
+ return ret;
+}
+
+int regmap_encx24j600_spi_write(void *context, u8 reg, const u8 *data,
+ size_t count)
+{
+ struct encx24j600_context *ctx = context;
+
+ if (reg < 0xc0)
+ return encx24j600_cmdn(ctx, reg, data, count);
+ else
+ /* SPI 1-byte command. Ignore data */
+ return spi_write(ctx->spi, &reg, 1);
+}
+EXPORT_SYMBOL_GPL(regmap_encx24j600_spi_write);
+
+int regmap_encx24j600_spi_read(void *context, u8 reg, u8 *data, size_t count)
+{
+ struct encx24j600_context *ctx = context;
+
+ if (reg == RBSEL && count > 1)
+ count = 1;
+
+ return spi_write_then_read(ctx->spi, &reg, sizeof(reg), data, count);
+}
+EXPORT_SYMBOL_GPL(regmap_encx24j600_spi_read);
+
+static int regmap_encx24j600_write(void *context, const void *data,
+ size_t len)
+{
+ u8 *dout = (u8 *)data;
+ u8 reg = dout[0];
+ ++dout;
+ --len;
+
+ if (reg > 0xa0)
+ return regmap_encx24j600_spi_write(context, reg, dout, len);
+
+ if (len > 2)
+ return -EINVAL;
+
+ return regmap_encx24j600_sfr_write(context, reg, dout, len);
+}
+
+static int regmap_encx24j600_read(void *context,
+ const void *reg_buf, size_t reg_size,
+ void *val, size_t val_size)
+{
+ u8 reg = *(const u8 *)reg_buf;
+
+ if (reg_size != 1) {
+ pr_err("%s: reg=%02x reg_size=%zu\n", __func__, reg, reg_size);
+ return -EINVAL;
+ }
+
+ if (reg > 0xa0)
+ return regmap_encx24j600_spi_read(context, reg, val, val_size);
+
+ if (val_size > 2) {
+ pr_err("%s: reg=%02x val_size=%zu\n", __func__, reg, val_size);
+ return -EINVAL;
+ }
+
+ return regmap_encx24j600_sfr_read(context, reg, val, val_size);
+}
+
+static bool encx24j600_regmap_readable(struct device *dev, unsigned int reg)
+{
+ if ((reg < 0x36) ||
+ ((reg >= 0x40) && (reg < 0x4c)) ||
+ ((reg >= 0x52) && (reg < 0x56)) ||
+ ((reg >= 0x60) && (reg < 0x66)) ||
+ ((reg >= 0x68) && (reg < 0x80)) ||
+ ((reg >= 0x86) && (reg < 0x92)) ||
+ (reg == 0xc8))
+ return true;
+ else
+ return false;
+}
+
+static bool encx24j600_regmap_writeable(struct device *dev, unsigned int reg)
+{
+ if ((reg < 0x12) ||
+ ((reg >= 0x14) && (reg < 0x1a)) ||
+ ((reg >= 0x1c) && (reg < 0x36)) ||
+ ((reg >= 0x40) && (reg < 0x4c)) ||
+ ((reg >= 0x52) && (reg < 0x56)) ||
+ ((reg >= 0x60) && (reg < 0x68)) ||
+ ((reg >= 0x6c) && (reg < 0x80)) ||
+ ((reg >= 0x86) && (reg < 0x92)) ||
+ ((reg >= 0xc0) && (reg < 0xc8)) ||
+ ((reg >= 0xca) && (reg < 0xf0)))
+ return true;
+ else
+ return false;
+}
+
+static bool encx24j600_regmap_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ERXHEAD:
+ case EDMACS:
+ case ETXSTAT:
+ case ETXWIRE:
+ case ECON1: /* Can be modified via single byte cmds */
+ case ECON2: /* Can be modified via single byte cmds */
+ case ESTAT:
+ case EIR: /* Can be modified via single byte cmds */
+ case MIRD:
+ case MISTAT:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static bool encx24j600_regmap_precious(struct device *dev, unsigned int reg)
+{
+ /* single byte cmds are precious */
+ if (((reg >= 0xc0) && (reg < 0xc8)) ||
+ ((reg >= 0xca) && (reg < 0xf0)))
+ return true;
+ else
+ return false;
+}
+
+static int regmap_encx24j600_phy_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct encx24j600_context *ctx = context;
+ int ret;
+ unsigned int mistat;
+
+ reg = MIREGADR_VAL | (reg & PHREG_MASK);
+ ret = regmap_write(ctx->regmap, MIREGADR, reg);
+ if (unlikely(ret))
+ goto err_out;
+
+ ret = regmap_write(ctx->regmap, MICMD, MIIRD);
+ if (unlikely(ret))
+ goto err_out;
+
+ usleep_range(26, 100);
+ while (((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0) &&
+ (mistat & BUSY))
+ cpu_relax();
+
+ if (unlikely(ret))
+ goto err_out;
+
+ ret = regmap_write(ctx->regmap, MICMD, 0);
+ if (unlikely(ret))
+ goto err_out;
+
+ ret = regmap_read(ctx->regmap, MIRD, val);
+
+err_out:
+ if (ret)
+ pr_err("%s: error %d reading reg %02x\n", __func__, ret,
+ reg & PHREG_MASK);
+
+ return ret;
+}
+
+static int regmap_encx24j600_phy_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct encx24j600_context *ctx = context;
+ int ret;
+ unsigned int mistat;
+
+ reg = MIREGADR_VAL | (reg & PHREG_MASK);
+ ret = regmap_write(ctx->regmap, MIREGADR, reg);
+ if (unlikely(ret))
+ goto err_out;
+
+ ret = regmap_write(ctx->regmap, MIWR, val);
+ if (unlikely(ret))
+ goto err_out;
+
+ usleep_range(26, 100);
+ while (((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0) &&
+ (mistat & BUSY))
+ cpu_relax();
+
+err_out:
+ if (ret)
+ pr_err("%s: error %d writing reg %02x=%04x\n", __func__, ret,
+ reg & PHREG_MASK, val);
+
+ return ret;
+}
+
+static bool encx24j600_phymap_readable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case PHCON1:
+ case PHSTAT1:
+ case PHANA:
+ case PHANLPA:
+ case PHANE:
+ case PHCON2:
+ case PHSTAT2:
+ case PHSTAT3:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool encx24j600_phymap_writeable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case PHCON1:
+ case PHCON2:
+ case PHANA:
+ return true;
+ case PHSTAT1:
+ case PHSTAT2:
+ case PHSTAT3:
+ case PHANLPA:
+ case PHANE:
+ default:
+ return false;
+ }
+}
+
+static bool encx24j600_phymap_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case PHSTAT1:
+ case PHSTAT2:
+ case PHSTAT3:
+ case PHANLPA:
+ case PHANE:
+ case PHCON2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static struct regmap_config regcfg = {
+ .name = "reg",
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = 0xee,
+ .reg_stride = 2,
+ .cache_type = REGCACHE_RBTREE,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .readable_reg = encx24j600_regmap_readable,
+ .writeable_reg = encx24j600_regmap_writeable,
+ .volatile_reg = encx24j600_regmap_volatile,
+ .precious_reg = encx24j600_regmap_precious,
+ .lock = regmap_lock_mutex,
+ .unlock = regmap_unlock_mutex,
+};
+
+static struct regmap_bus regmap_encx24j600 = {
+ .write = regmap_encx24j600_write,
+ .read = regmap_encx24j600_read,
+ .reg_update_bits = regmap_encx24j600_reg_update_bits,
+};
+
+static struct regmap_config phycfg = {
+ .name = "phy",
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = 0x1f,
+ .cache_type = REGCACHE_RBTREE,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .readable_reg = encx24j600_phymap_readable,
+ .writeable_reg = encx24j600_phymap_writeable,
+ .volatile_reg = encx24j600_phymap_volatile,
+};
+static struct regmap_bus phymap_encx24j600 = {
+ .reg_write = regmap_encx24j600_phy_reg_write,
+ .reg_read = regmap_encx24j600_phy_reg_read,
+};
+
+void devm_regmap_init_encx24j600(struct device *dev,
+ struct encx24j600_context *ctx)
+{
+ mutex_init(&ctx->mutex);
+ regcfg.lock_arg = ctx;
+ ctx->regmap = devm_regmap_init(dev, &regmap_encx24j600, ctx, &regcfg);
+ ctx->phymap = devm_regmap_init(dev, &phymap_encx24j600, ctx, &phycfg);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_init_encx24j600);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
new file mode 100644
index 000000000000..2056b719c262
--- /dev/null
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -0,0 +1,1129 @@
+/**
+ * Microchip ENCX24J600 ethernet driver
+ *
+ * Copyright (C) 2015 Gridpoint
+ * Author: Jon Ringle <jringle@gridpoint.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/regmap.h>
+#include <linux/skbuff.h>
+#include <linux/spi/spi.h>
+
+#include "encx24j600_hw.h"
+
+#define DRV_NAME "encx24j600"
+#define DRV_VERSION "1.0"
+
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+/* SRAM memory layout:
+ *
+ * 0x0000-0x05ff TX buffers 1.5KB (1*1536) reside in the GP area in SRAM
+ * 0x0600-0x5fff RX buffers 22.5KB (15*1536) reside in the RX area in SRAM
+ */
+#define ENC_TX_BUF_START 0x0000U
+#define ENC_RX_BUF_START 0x0600U
+#define ENC_RX_BUF_END 0x5fffU
+#define ENC_SRAM_SIZE 0x6000U
+
+enum {
+ RXFILTER_NORMAL,
+ RXFILTER_MULTI,
+ RXFILTER_PROMISC
+};
+
+struct encx24j600_priv {
+ struct net_device *ndev;
+ struct mutex lock; /* device access lock */
+ struct encx24j600_context ctx;
+ struct sk_buff *tx_skb;
+ struct task_struct *kworker_task;
+ struct kthread_worker kworker;
+ struct kthread_work tx_work;
+ struct kthread_work setrx_work;
+ u16 next_packet;
+ bool hw_enabled;
+ bool full_duplex;
+ bool autoneg;
+ u16 speed;
+ int rxfilter;
+ u32 msg_enable;
+};
+
+static void dump_packet(const char *msg, int len, const char *data)
+{
+ pr_debug(DRV_NAME ": %s - packet len:%d\n", msg, len);
+ print_hex_dump_bytes("pk data: ", DUMP_PREFIX_OFFSET, data, len);
+}
+
+static void encx24j600_dump_rsv(struct encx24j600_priv *priv, const char *msg,
+ struct rsv *rsv)
+{
+ struct net_device *dev = priv->ndev;
+
+ netdev_info(dev, "RX packet Len:%d\n", rsv->len);
+ netdev_dbg(dev, "%s - NextPk: 0x%04x\n", msg,
+ rsv->next_packet);
+ netdev_dbg(dev, "RxOK: %d, DribbleNibble: %d\n",
+ RSV_GETBIT(rsv->rxstat, RSV_RXOK),
+ RSV_GETBIT(rsv->rxstat, RSV_DRIBBLENIBBLE));
+ netdev_dbg(dev, "CRCErr:%d, LenChkErr: %d, LenOutOfRange: %d\n",
+ RSV_GETBIT(rsv->rxstat, RSV_CRCERROR),
+ RSV_GETBIT(rsv->rxstat, RSV_LENCHECKERR),
+ RSV_GETBIT(rsv->rxstat, RSV_LENOUTOFRANGE));
+ netdev_dbg(dev, "Multicast: %d, Broadcast: %d, LongDropEvent: %d, CarrierEvent: %d\n",
+ RSV_GETBIT(rsv->rxstat, RSV_RXMULTICAST),
+ RSV_GETBIT(rsv->rxstat, RSV_RXBROADCAST),
+ RSV_GETBIT(rsv->rxstat, RSV_RXLONGEVDROPEV),
+ RSV_GETBIT(rsv->rxstat, RSV_CARRIEREV));
+ netdev_dbg(dev, "ControlFrame: %d, PauseFrame: %d, UnknownOp: %d, VLanTagFrame: %d\n",
+ RSV_GETBIT(rsv->rxstat, RSV_RXCONTROLFRAME),
+ RSV_GETBIT(rsv->rxstat, RSV_RXPAUSEFRAME),
+ RSV_GETBIT(rsv->rxstat, RSV_RXUNKNOWNOPCODE),
+ RSV_GETBIT(rsv->rxstat, RSV_RXTYPEVLAN));
+}
+
+static u16 encx24j600_read_reg(struct encx24j600_priv *priv, u8 reg)
+{
+ struct net_device *dev = priv->ndev;
+ unsigned int val = 0;
+ int ret = regmap_read(priv->ctx.regmap, reg, &val);
+ if (unlikely(ret))
+ netif_err(priv, drv, dev, "%s: error %d reading reg %02x\n",
+ __func__, ret, reg);
+ return val;
+}
+
+static void encx24j600_write_reg(struct encx24j600_priv *priv, u8 reg, u16 val)
+{
+ struct net_device *dev = priv->ndev;
+ int ret = regmap_write(priv->ctx.regmap, reg, val);
+ if (unlikely(ret))
+ netif_err(priv, drv, dev, "%s: error %d writing reg %02x=%04x\n",
+ __func__, ret, reg, val);
+}
+
+static void encx24j600_update_reg(struct encx24j600_priv *priv, u8 reg,
+ u16 mask, u16 val)
+{
+ struct net_device *dev = priv->ndev;
+ int ret = regmap_update_bits(priv->ctx.regmap, reg, mask, val);
+ if (unlikely(ret))
+ netif_err(priv, drv, dev, "%s: error %d updating reg %02x=%04x~%04x\n",
+ __func__, ret, reg, val, mask);
+}
+
+static u16 encx24j600_read_phy(struct encx24j600_priv *priv, u8 reg)
+{
+ struct net_device *dev = priv->ndev;
+ unsigned int val = 0;
+ int ret = regmap_read(priv->ctx.phymap, reg, &val);
+ if (unlikely(ret))
+ netif_err(priv, drv, dev, "%s: error %d reading %02x\n",
+ __func__, ret, reg);
+ return val;
+}
+
+static void encx24j600_write_phy(struct encx24j600_priv *priv, u8 reg, u16 val)
+{
+ struct net_device *dev = priv->ndev;
+ int ret = regmap_write(priv->ctx.phymap, reg, val);
+ if (unlikely(ret))
+ netif_err(priv, drv, dev, "%s: error %d writing reg %02x=%04x\n",
+ __func__, ret, reg, val);
+}
+
+static void encx24j600_clr_bits(struct encx24j600_priv *priv, u8 reg, u16 mask)
+{
+ encx24j600_update_reg(priv, reg, mask, 0);
+}
+
+static void encx24j600_set_bits(struct encx24j600_priv *priv, u8 reg, u16 mask)
+{
+ encx24j600_update_reg(priv, reg, mask, mask);
+}
+
+static void encx24j600_cmd(struct encx24j600_priv *priv, u8 cmd)
+{
+ struct net_device *dev = priv->ndev;
+ int ret = regmap_write(priv->ctx.regmap, cmd, 0);
+ if (unlikely(ret))
+ netif_err(priv, drv, dev, "%s: error %d with cmd %02x\n",
+ __func__, ret, cmd);
+}
+
+static int encx24j600_raw_read(struct encx24j600_priv *priv, u8 reg, u8 *data,
+ size_t count)
+{
+ int ret;
+ mutex_lock(&priv->ctx.mutex);
+ ret = regmap_encx24j600_spi_read(&priv->ctx, reg, data, count);
+ mutex_unlock(&priv->ctx.mutex);
+
+ return ret;
+}
+
+static int encx24j600_raw_write(struct encx24j600_priv *priv, u8 reg,
+ const u8 *data, size_t count)
+{
+ int ret;
+ mutex_lock(&priv->ctx.mutex);
+ ret = regmap_encx24j600_spi_write(&priv->ctx, reg, data, count);
+ mutex_unlock(&priv->ctx.mutex);
+
+ return ret;
+}
+
+static void encx24j600_update_phcon1(struct encx24j600_priv *priv)
+{
+ u16 phcon1 = encx24j600_read_phy(priv, PHCON1);
+ if (priv->autoneg == AUTONEG_ENABLE) {
+ phcon1 |= ANEN | RENEG;
+ } else {
+ phcon1 &= ~ANEN;
+ if (priv->speed == SPEED_100)
+ phcon1 |= SPD100;
+ else
+ phcon1 &= ~SPD100;
+
+ if (priv->full_duplex)
+ phcon1 |= PFULDPX;
+ else
+ phcon1 &= ~PFULDPX;
+ }
+ encx24j600_write_phy(priv, PHCON1, phcon1);
+}
+
+/* Waits for autonegotiation to complete. */
+static int encx24j600_wait_for_autoneg(struct encx24j600_priv *priv)
+{
+ struct net_device *dev = priv->ndev;
+ unsigned long timeout = jiffies + msecs_to_jiffies(2000);
+ u16 phstat1;
+ u16 estat;
+ int ret = 0;
+
+ phstat1 = encx24j600_read_phy(priv, PHSTAT1);
+ while ((phstat1 & ANDONE) == 0) {
+ if (time_after(jiffies, timeout)) {
+ u16 phstat3;
+
+ netif_notice(priv, drv, dev, "timeout waiting for autoneg done\n");
+
+ priv->autoneg = AUTONEG_DISABLE;
+ phstat3 = encx24j600_read_phy(priv, PHSTAT3);
+ priv->speed = (phstat3 & PHY3SPD100)
+ ? SPEED_100 : SPEED_10;
+ priv->full_duplex = (phstat3 & PHY3DPX) ? 1 : 0;
+ encx24j600_update_phcon1(priv);
+ netif_notice(priv, drv, dev, "Using parallel detection: %s/%s\n",
+ priv->speed == SPEED_100 ? "100" : "10",
+ priv->full_duplex ? "Full" : "Half");
+
+ return -ETIMEDOUT;
+ }
+ cpu_relax();
+ phstat1 = encx24j600_read_phy(priv, PHSTAT1);
+ }
+
+ estat = encx24j600_read_reg(priv, ESTAT);
+ if (estat & PHYDPX) {
+ encx24j600_set_bits(priv, MACON2, FULDPX);
+ encx24j600_write_reg(priv, MABBIPG, 0x15);
+ } else {
+ encx24j600_clr_bits(priv, MACON2, FULDPX);
+ encx24j600_write_reg(priv, MABBIPG, 0x12);
+ /* Max retransmission attempts */
+ encx24j600_write_reg(priv, MACLCON, 0x370f);
+ }
+
+ return ret;
+}
+
+/* Access the PHY to determine link status */
+static void encx24j600_check_link_status(struct encx24j600_priv *priv)
+{
+ struct net_device *dev = priv->ndev;
+ u16 estat;
+
+ estat = encx24j600_read_reg(priv, ESTAT);
+
+ if (estat & PHYLNK) {
+ if (priv->autoneg == AUTONEG_ENABLE)
+ encx24j600_wait_for_autoneg(priv);
+
+ netif_carrier_on(dev);
+ netif_info(priv, ifup, dev, "link up\n");
+ } else {
+ netif_info(priv, ifdown, dev, "link down\n");
+
+ /* Re-enable autoneg since we won't know what we might be
+ * connected to when the link is brought back up again.
+ */
+ priv->autoneg = AUTONEG_ENABLE;
+ priv->full_duplex = true;
+ priv->speed = SPEED_100;
+ netif_carrier_off(dev);
+ }
+}
+
+static void encx24j600_int_link_handler(struct encx24j600_priv *priv)
+{
+ struct net_device *dev = priv->ndev;
+
+ netif_dbg(priv, intr, dev, "%s", __func__);
+ encx24j600_check_link_status(priv);
+ encx24j600_clr_bits(priv, EIR, LINKIF);
+}
+
+static void encx24j600_tx_complete(struct encx24j600_priv *priv, bool err)
+{
+ struct net_device *dev = priv->ndev;
+
+ if (!priv->tx_skb) {
+ BUG();
+ return;
+ }
+
+ mutex_lock(&priv->lock);
+
+ if (err)
+ dev->stats.tx_errors++;
+ else
+ dev->stats.tx_packets++;
+
+ dev->stats.tx_bytes += priv->tx_skb->len;
+
+ encx24j600_clr_bits(priv, EIR, TXIF | TXABTIF);
+
+ netif_dbg(priv, tx_done, dev, "TX Done%s\n", err ? ": Err" : "");
+
+ dev_kfree_skb(priv->tx_skb);
+ priv->tx_skb = NULL;
+
+ netif_wake_queue(dev);
+
+ mutex_unlock(&priv->lock);
+}
+
+static int encx24j600_receive_packet(struct encx24j600_priv *priv,
+ struct rsv *rsv)
+{
+ struct net_device *dev = priv->ndev;
+ struct sk_buff *skb = netdev_alloc_skb(dev, rsv->len + NET_IP_ALIGN);
+ if (!skb) {
+ pr_err_ratelimited("RX: OOM: packet dropped\n");
+ dev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+ skb_reserve(skb, NET_IP_ALIGN);
+ encx24j600_raw_read(priv, RRXDATA, skb_put(skb, rsv->len), rsv->len);
+
+ if (netif_msg_pktdata(priv))
+ dump_packet("RX", skb->len, skb->data);
+
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+
+ /* Maintain stats */
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += rsv->len;
+ priv->next_packet = rsv->next_packet;
+
+ netif_rx(skb);
+
+ return 0;
+}
+
+static void encx24j600_rx_packets(struct encx24j600_priv *priv, u8 packet_count)
+{
+ struct net_device *dev = priv->ndev;
+
+ while (packet_count--) {
+ struct rsv rsv;
+ u16 newrxtail;
+
+ encx24j600_write_reg(priv, ERXRDPT, priv->next_packet);
+ encx24j600_raw_read(priv, RRXDATA, (u8 *)&rsv, sizeof(rsv));
+
+ if (netif_msg_rx_status(priv))
+ encx24j600_dump_rsv(priv, __func__, &rsv);
+
+ if (!RSV_GETBIT(rsv.rxstat, RSV_RXOK) ||
+ (rsv.len > MAX_FRAMELEN)) {
+ netif_err(priv, rx_err, dev, "RX Error %04x\n",
+ rsv.rxstat);
+ dev->stats.rx_errors++;
+
+ if (RSV_GETBIT(rsv.rxstat, RSV_CRCERROR))
+ dev->stats.rx_crc_errors++;
+ if (RSV_GETBIT(rsv.rxstat, RSV_LENCHECKERR))
+ dev->stats.rx_frame_errors++;
+ if (rsv.len > MAX_FRAMELEN)
+ dev->stats.rx_over_errors++;
+ } else {
+ encx24j600_receive_packet(priv, &rsv);
+ }
+
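+ /* Release the packet's space: decrement the packet counter and move
+ * ERXTAIL to just before the next packet, wrapping to the end of SRAM
+ * when the next packet is at the start of the RX buffer
+ */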
+ newrxtail = priv->next_packet - 2;
+ if (newrxtail == ENC_RX_BUF_START)
+ newrxtail = SRAM_SIZE - 2;
+
+ encx24j600_cmd(priv, SETPKTDEC);
+ encx24j600_write_reg(priv, ERXTAIL, newrxtail);
+ }
+}
+
+static irqreturn_t encx24j600_isr(int irq, void *dev_id)
+{
+ struct encx24j600_priv *priv = dev_id;
+ struct net_device *dev = priv->ndev;
+ int eir;
+
+ /* Disable interrupts while the interrupt sources are serviced */
+ encx24j600_cmd(priv, CLREIE);
+
+ eir = encx24j600_read_reg(priv, EIR);
+
+ if (eir & LINKIF)
+ encx24j600_int_link_handler(priv);
+
+ if (eir & TXIF)
+ encx24j600_tx_complete(priv, false);
+
+ if (eir & TXABTIF)
+ encx24j600_tx_complete(priv, true);
+
+ if (eir & RXABTIF) {
+ if (eir & PCFULIF) {
+ /* Packet counter is full */
+ netif_err(priv, rx_err, dev, "Packet counter full\n");
+ }
+ dev->stats.rx_dropped++;
+ encx24j600_clr_bits(priv, EIR, RXABTIF);
+ }
+
+ if (eir & PKTIF) {
+ u8 packet_count;
+
+ mutex_lock(&priv->lock);
+
+ packet_count = encx24j600_read_reg(priv, ESTAT) & 0xff;
+ while (packet_count) {
+ encx24j600_rx_packets(priv, packet_count);
+ packet_count = encx24j600_read_reg(priv, ESTAT) & 0xff;
+ }
+
+ mutex_unlock(&priv->lock);
+ }
+
+ /* Re-enable interrupts */
+ encx24j600_cmd(priv, SETEIE);
+
+ return IRQ_HANDLED;
+}
+
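+/* Reset the chip using the reliable reset sequence: verify SPI
+ * communication by writing and reading back EUDAST, wait for CLKRDY,
+ * issue a system reset and confirm EUDAST returns to 0000h
+ */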
+static int encx24j600_soft_reset(struct encx24j600_priv *priv)
+{
+ int ret = 0;
+ int timeout;
+ u16 eudast;
+
+ /* Write and verify a test value to EUDAST */
+ regcache_cache_bypass(priv->ctx.regmap, true);
+ timeout = 10;
+ do {
+ encx24j600_write_reg(priv, EUDAST, EUDAST_TEST_VAL);
+ eudast = encx24j600_read_reg(priv, EUDAST);
+ usleep_range(25, 100);
+ } while ((eudast != EUDAST_TEST_VAL) && --timeout);
+ regcache_cache_bypass(priv->ctx.regmap, false);
+
+ if (timeout == 0) {
+ ret = -ETIMEDOUT;
+ goto err_out;
+ }
+
+ /* Wait for CLKRDY to become set */
+ timeout = 10;
+ while (!(encx24j600_read_reg(priv, ESTAT) & CLKRDY) && --timeout)
+ usleep_range(25, 100);
+
+ if (timeout == 0) {
+ ret = -ETIMEDOUT;
+ goto err_out;
+ }
+
+ /* Issue a System Reset command */
+ encx24j600_cmd(priv, SETETHRST);
+ usleep_range(25, 100);
+
+ /* Confirm that EUDAST has 0000h after system reset */
+ if (encx24j600_read_reg(priv, EUDAST) != 0) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ /* Wait for PHY register and status bits to become available */
+ usleep_range(256, 1000);
+
+err_out:
+ return ret;
+}
+
+static int encx24j600_hw_reset(struct encx24j600_priv *priv)
+{
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = encx24j600_soft_reset(priv);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static void encx24j600_reset_hw_tx(struct encx24j600_priv *priv)
+{
+ encx24j600_set_bits(priv, ECON2, TXRST);
+ encx24j600_clr_bits(priv, ECON2, TXRST);
+}
+
+static void encx24j600_hw_init_tx(struct encx24j600_priv *priv)
+{
+ /* Reset TX */
+ encx24j600_reset_hw_tx(priv);
+
+ /* Clear the TXIF and TXABTIF flags if they were previously set */
+ encx24j600_clr_bits(priv, EIR, TXIF | TXABTIF);
+
+ /* Write the Tx Buffer pointer */
+ encx24j600_write_reg(priv, EGPWRPT, ENC_TX_BUF_START);
+}
+
+static void encx24j600_hw_init_rx(struct encx24j600_priv *priv)
+{
+ encx24j600_cmd(priv, DISABLERX);
+
+ /* Set up RX packet start address in the SRAM */
+ encx24j600_write_reg(priv, ERXST, ENC_RX_BUF_START);
+
+ /* Preload the RX Data pointer to the beginning of the RX area */
+ encx24j600_write_reg(priv, ERXRDPT, ENC_RX_BUF_START);
+
+ priv->next_packet = ENC_RX_BUF_START;
+
+ /* Set up RX end address in the SRAM */
+ encx24j600_write_reg(priv, ERXTAIL, ENC_SRAM_SIZE - 2);
+
+ /* Reset the user data pointers */
+ encx24j600_write_reg(priv, EUDAST, ENC_SRAM_SIZE);
+ encx24j600_write_reg(priv, EUDAND, ENC_SRAM_SIZE + 1);
+
+ /* Set Max Frame length */
+ encx24j600_write_reg(priv, MAMXFL, MAX_FRAMELEN);
+}
+
+static void encx24j600_dump_config(struct encx24j600_priv *priv,
+ const char *msg)
+{
+ pr_info(DRV_NAME ": %s\n", msg);
+
+ /* CHIP configuration */
+ pr_info(DRV_NAME " ECON1: %04X\n", encx24j600_read_reg(priv, ECON1));
+ pr_info(DRV_NAME " ECON2: %04X\n", encx24j600_read_reg(priv, ECON2));
+ pr_info(DRV_NAME " ERXFCON: %04X\n", encx24j600_read_reg(priv,
+ ERXFCON));
+ pr_info(DRV_NAME " ESTAT: %04X\n", encx24j600_read_reg(priv, ESTAT));
+ pr_info(DRV_NAME " EIR: %04X\n", encx24j600_read_reg(priv, EIR));
+ pr_info(DRV_NAME " EIDLED: %04X\n", encx24j600_read_reg(priv, EIDLED));
+
+ /* MAC layer configuration */
+ pr_info(DRV_NAME " MACON1: %04X\n", encx24j600_read_reg(priv, MACON1));
+ pr_info(DRV_NAME " MACON2: %04X\n", encx24j600_read_reg(priv, MACON2));
+ pr_info(DRV_NAME " MAIPG: %04X\n", encx24j600_read_reg(priv, MAIPG));
+ pr_info(DRV_NAME " MACLCON: %04X\n", encx24j600_read_reg(priv,
+ MACLCON));
+ pr_info(DRV_NAME " MABBIPG: %04X\n", encx24j600_read_reg(priv,
+ MABBIPG));
+
+ /* PHY configuration */
+ pr_info(DRV_NAME " PHCON1: %04X\n", encx24j600_read_phy(priv, PHCON1));
+ pr_info(DRV_NAME " PHCON2: %04X\n", encx24j600_read_phy(priv, PHCON2));
+ pr_info(DRV_NAME " PHANA: %04X\n", encx24j600_read_phy(priv, PHANA));
+ pr_info(DRV_NAME " PHANLPA: %04X\n", encx24j600_read_phy(priv,
+ PHANLPA));
+ pr_info(DRV_NAME " PHANE: %04X\n", encx24j600_read_phy(priv, PHANE));
+ pr_info(DRV_NAME " PHSTAT1: %04X\n", encx24j600_read_phy(priv,
+ PHSTAT1));
+ pr_info(DRV_NAME " PHSTAT2: %04X\n", encx24j600_read_phy(priv,
+ PHSTAT2));
+ pr_info(DRV_NAME " PHSTAT3: %04X\n", encx24j600_read_phy(priv,
+ PHSTAT3));
+}
+
+static void encx24j600_set_rxfilter_mode(struct encx24j600_priv *priv)
+{
+ switch (priv->rxfilter) {
+ case RXFILTER_PROMISC:
+ encx24j600_set_bits(priv, MACON1, PASSALL);
+ encx24j600_write_reg(priv, ERXFCON, UCEN | MCEN | NOTMEEN);
+ break;
+ case RXFILTER_MULTI:
+ encx24j600_clr_bits(priv, MACON1, PASSALL);
+ encx24j600_write_reg(priv, ERXFCON, UCEN | CRCEN | BCEN | MCEN);
+ break;
+ case RXFILTER_NORMAL:
+ default:
+ encx24j600_clr_bits(priv, MACON1, PASSALL);
+ encx24j600_write_reg(priv, ERXFCON, UCEN | CRCEN | BCEN);
+ break;
+ }
+}
+
+static int encx24j600_hw_init(struct encx24j600_priv *priv)
+{
+ struct net_device *dev = priv->ndev;
+ int ret = 0;
+ u16 eidled;
+ u16 macon2;
+
+ priv->hw_enabled = false;
+
+ eidled = encx24j600_read_reg(priv, EIDLED);
+ if (((eidled & DEVID_MASK) >> DEVID_SHIFT) != ENCX24J600_DEV_ID) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ netif_info(priv, drv, dev, "Silicon rev ID: 0x%02x\n",
+ (eidled & REVID_MASK) >> REVID_SHIFT);
+
+ /* PHY Leds: link status,
+ * LEDA: Link State + collision events
+ * LEDB: Link State + transmit/receive events
+ */
+ encx24j600_update_reg(priv, EIDLED, 0xff00, 0xcb00);
+
+ /* Loopback disabled */
+ encx24j600_write_reg(priv, MACON1, 0x9);
+
+ /* interpacket gap value */
+ encx24j600_write_reg(priv, MAIPG, 0x0c12);
+
+ /* Write the auto negotiation pattern */
+ encx24j600_write_phy(priv, PHANA, PHANA_DEFAULT);
+
+ encx24j600_update_phcon1(priv);
+ encx24j600_check_link_status(priv);
+
+ macon2 = MACON2_RSV1 | TXCRCEN | PADCFG0 | PADCFG2 | MACON2_DEFER;
+ if ((priv->autoneg == AUTONEG_DISABLE) && priv->full_duplex)
+ macon2 |= FULDPX;
+
+ encx24j600_set_bits(priv, MACON2, macon2);
+
+ priv->rxfilter = RXFILTER_NORMAL;
+ encx24j600_set_rxfilter_mode(priv);
+
+ /* Program the Maximum frame length */
+ encx24j600_write_reg(priv, MAMXFL, MAX_FRAMELEN);
+
+ /* Init Tx pointers */
+ encx24j600_hw_init_tx(priv);
+
+ /* Init Rx pointers */
+ encx24j600_hw_init_rx(priv);
+
+ if (netif_msg_hw(priv))
+ encx24j600_dump_config(priv, "Hw is initialized");
+
+err_out:
+ return ret;
+}
+
+static void encx24j600_hw_enable(struct encx24j600_priv *priv)
+{
+ /* Clear the interrupt flags in case any were set */
+ encx24j600_clr_bits(priv, EIR, (PCFULIF | RXABTIF | TXABTIF | TXIF |
+ PKTIF | LINKIF));
+
+ /* Enable the interrupts */
+ encx24j600_write_reg(priv, EIE, (PCFULIE | RXABTIE | TXABTIE | TXIE |
+ PKTIE | LINKIE | INTIE));
+
+ /* Enable RX */
+ encx24j600_cmd(priv, ENABLERX);
+
+ priv->hw_enabled = true;
+}
+
+static void encx24j600_hw_disable(struct encx24j600_priv *priv)
+{
+ /* Disable all interrupts */
+ encx24j600_write_reg(priv, EIE, 0);
+
+ /* Disable RX */
+ encx24j600_cmd(priv, DISABLERX);
+
+ priv->hw_enabled = false;
+}
+
+static int encx24j600_setlink(struct net_device *dev, u8 autoneg, u16 speed,
+ u8 duplex)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+ int ret = 0;
+
+ if (!priv->hw_enabled) {
+ /* link is in low power mode now; duplex setting
+ * will take effect on next encx24j600_hw_init()
+ */
+ if (speed == SPEED_10 || speed == SPEED_100) {
+ priv->autoneg = (autoneg == AUTONEG_ENABLE);
+ priv->full_duplex = (duplex == DUPLEX_FULL);
+ priv->speed = speed;
+ } else {
+ netif_warn(priv, link, dev, "unsupported link speed setting\n");
+ /* speeds other than SPEED_10 and SPEED_100 are not
+ * supported by the chip
+ */
+ ret = -EOPNOTSUPP;
+ }
+ } else {
+ netif_warn(priv, link, dev, "Warning: hw must be disabled to set link mode\n");
+ ret = -EBUSY;
+ }
+ return ret;
+}
+
+static void encx24j600_hw_get_macaddr(struct encx24j600_priv *priv,
+ unsigned char *ethaddr)
+{
+ unsigned short val;
+
+ val = encx24j600_read_reg(priv, MAADR1);
+
+ ethaddr[0] = val & 0x00ff;
+ ethaddr[1] = (val & 0xff00) >> 8;
+
+ val = encx24j600_read_reg(priv, MAADR2);
+
+ ethaddr[2] = val & 0x00ffU;
+ ethaddr[3] = (val & 0xff00U) >> 8;
+
+ val = encx24j600_read_reg(priv, MAADR3);
+
+ ethaddr[4] = val & 0x00ffU;
+ ethaddr[5] = (val & 0xff00U) >> 8;
+}
+
+/* Program the hardware MAC address from dev->dev_addr. */
+static int encx24j600_set_hw_macaddr(struct net_device *dev)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+
+ if (priv->hw_enabled) {
+ netif_info(priv, drv, dev, "Hardware must be disabled to set Mac address\n");
+ return -EBUSY;
+ }
+
+ mutex_lock(&priv->lock);
+
+ netif_info(priv, drv, dev, "%s: Setting MAC address to %pM\n",
+ dev->name, dev->dev_addr);
+
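+ /* Each MAADR register holds two address bytes, low byte first:
+ * MAADR1 = bytes 0-1, MAADR2 = bytes 2-3, MAADR3 = bytes 4-5
+ */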
+ encx24j600_write_reg(priv, MAADR3, (dev->dev_addr[4] |
+ dev->dev_addr[5] << 8));
+ encx24j600_write_reg(priv, MAADR2, (dev->dev_addr[2] |
+ dev->dev_addr[3] << 8));
+ encx24j600_write_reg(priv, MAADR1, (dev->dev_addr[0] |
+ dev->dev_addr[1] << 8));
+
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+/* Store the new hardware address in dev->dev_addr, and update the MAC. */
+static int encx24j600_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *address = addr;
+
+ if (netif_running(dev))
+ return -EBUSY;
+ if (!is_valid_ether_addr(address->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
+ return encx24j600_set_hw_macaddr(dev);
+}
+
+static int encx24j600_open(struct net_device *dev)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+
+ int ret = request_threaded_irq(priv->ctx.spi->irq, NULL, encx24j600_isr,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ DRV_NAME, priv);
+ if (unlikely(ret < 0)) {
+ netdev_err(dev, "request irq %d failed (ret = %d)\n",
+ priv->ctx.spi->irq, ret);
+ return ret;
+ }
+
+ encx24j600_hw_disable(priv);
+ encx24j600_hw_init(priv);
+ encx24j600_hw_enable(priv);
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int encx24j600_stop(struct net_device *dev)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ free_irq(priv->ctx.spi->irq, priv);
+ return 0;
+}
+
+static void encx24j600_setrx_proc(struct kthread_work *ws)
+{
+ struct encx24j600_priv *priv =
+ container_of(ws, struct encx24j600_priv, setrx_work);
+
+ mutex_lock(&priv->lock);
+ encx24j600_set_rxfilter_mode(priv);
+ mutex_unlock(&priv->lock);
+}
+
+static void encx24j600_set_multicast_list(struct net_device *dev)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+ int oldfilter = priv->rxfilter;
+
+ if (dev->flags & IFF_PROMISC) {
+ netif_dbg(priv, link, dev, "promiscuous mode\n");
+ priv->rxfilter = RXFILTER_PROMISC;
+ } else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
+ netif_dbg(priv, link, dev, "%smulticast mode\n",
+ (dev->flags & IFF_ALLMULTI) ? "all-" : "");
+ priv->rxfilter = RXFILTER_MULTI;
+ } else {
+ netif_dbg(priv, link, dev, "normal mode\n");
+ priv->rxfilter = RXFILTER_NORMAL;
+ }
+
+ if (oldfilter != priv->rxfilter)
+ queue_kthread_work(&priv->kworker, &priv->setrx_work);
+}
+
+static void encx24j600_hw_tx(struct encx24j600_priv *priv)
+{
+ struct net_device *dev = priv->ndev;
+ netif_info(priv, tx_queued, dev, "TX Packet Len:%d\n",
+ priv->tx_skb->len);
+
+ if (netif_msg_pktdata(priv))
+ dump_packet("TX", priv->tx_skb->len, priv->tx_skb->data);
+
+ if (encx24j600_read_reg(priv, EIR) & TXABTIF)
+ /* Last transmission aborted due to error. Reset TX interface */
+ encx24j600_reset_hw_tx(priv);
+
+ /* Clear the TXIF flag if it was previously set */
+ encx24j600_clr_bits(priv, EIR, TXIF);
+
+ /* Set the data pointer to the TX buffer address in the SRAM */
+ encx24j600_write_reg(priv, EGPWRPT, ENC_TX_BUF_START);
+
+ /* Copy the packet into the SRAM */
+ encx24j600_raw_write(priv, WGPDATA, (u8 *)priv->tx_skb->data,
+ priv->tx_skb->len);
+
+ /* Program the Tx buffer start pointer */
+ encx24j600_write_reg(priv, ETXST, ENC_TX_BUF_START);
+
+ /* Program the packet length */
+ encx24j600_write_reg(priv, ETXLEN, priv->tx_skb->len);
+
+ /* Start the transmission */
+ encx24j600_cmd(priv, SETTXRTS);
+}
+
+static void encx24j600_tx_proc(struct kthread_work *ws)
+{
+ struct encx24j600_priv *priv =
+ container_of(ws, struct encx24j600_priv, tx_work);
+
+ mutex_lock(&priv->lock);
+ encx24j600_hw_tx(priv);
+ mutex_unlock(&priv->lock);
+}
+
+static netdev_tx_t encx24j600_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ /* save the timestamp */
+ dev->trans_start = jiffies;
+
+ /* Remember the skb for deferred processing */
+ priv->tx_skb = skb;
+
+ queue_kthread_work(&priv->kworker, &priv->tx_work);
+
+ return NETDEV_TX_OK;
+}
+
+/* Deal with a transmit timeout */
+static void encx24j600_tx_timeout(struct net_device *dev)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+
+ netif_err(priv, tx_err, dev, "TX timeout at %ld, latency %ld\n",
+ jiffies, jiffies - dev->trans_start);
+
+ dev->stats.tx_errors++;
+ netif_wake_queue(dev);
+ return;
+}
+
+static int encx24j600_get_regs_len(struct net_device *dev)
+{
+ return SFR_REG_COUNT;
+}
+
+static void encx24j600_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+ u16 *buff = p;
+ u8 reg;
+
+ regs->version = 1;
+ mutex_lock(&priv->lock);
+ for (reg = 0; reg < SFR_REG_COUNT; reg += 2) {
+ unsigned int val = 0;
+ /* ignore errors for unreadable registers */
+ regmap_read(priv->ctx.regmap, reg, &val);
+ buff[reg] = val & 0xffff;
+ }
+ mutex_unlock(&priv->lock);
+}
+
+static void encx24j600_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, dev_name(dev->dev.parent),
+ sizeof(info->bus_info));
+}
+
+static int encx24j600_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg | SUPPORTED_TP;
+
+ ethtool_cmd_speed_set(cmd, priv->speed);
+ cmd->duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+ cmd->port = PORT_TP;
+ cmd->autoneg = priv->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ return 0;
+}
+
+static int encx24j600_set_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ return encx24j600_setlink(dev, cmd->autoneg,
+ ethtool_cmd_speed(cmd), cmd->duplex);
+}
+
+static u32 encx24j600_get_msglevel(struct net_device *dev)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+ return priv->msg_enable;
+}
+
+static void encx24j600_set_msglevel(struct net_device *dev, u32 val)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+ priv->msg_enable = val;
+}
+
+static const struct ethtool_ops encx24j600_ethtool_ops = {
+ .get_settings = encx24j600_get_settings,
+ .set_settings = encx24j600_set_settings,
+ .get_drvinfo = encx24j600_get_drvinfo,
+ .get_msglevel = encx24j600_get_msglevel,
+ .set_msglevel = encx24j600_set_msglevel,
+ .get_regs_len = encx24j600_get_regs_len,
+ .get_regs = encx24j600_get_regs,
+};
+
+static const struct net_device_ops encx24j600_netdev_ops = {
+ .ndo_open = encx24j600_open,
+ .ndo_stop = encx24j600_stop,
+ .ndo_start_xmit = encx24j600_tx,
+ .ndo_set_rx_mode = encx24j600_set_multicast_list,
+ .ndo_set_mac_address = encx24j600_set_mac_address,
+ .ndo_tx_timeout = encx24j600_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int encx24j600_spi_probe(struct spi_device *spi)
+{
+ int ret;
+
+ struct net_device *ndev;
+ struct encx24j600_priv *priv;
+
+ ndev = alloc_etherdev(sizeof(struct encx24j600_priv));
+
+ if (!ndev) {
+ ret = -ENOMEM;
+ goto error_out;
+ }
+
+ priv = netdev_priv(ndev);
+ spi_set_drvdata(spi, priv);
+ dev_set_drvdata(&spi->dev, priv);
+ SET_NETDEV_DEV(ndev, &spi->dev);
+
+ priv->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+ priv->ndev = ndev;
+
+ /* Default PHY configuration */
+ priv->full_duplex = true;
+ priv->autoneg = AUTONEG_ENABLE;
+ priv->speed = SPEED_100;
+
+ priv->ctx.spi = spi;
+ devm_regmap_init_encx24j600(&spi->dev, &priv->ctx);
+ ndev->irq = spi->irq;
+ ndev->netdev_ops = &encx24j600_netdev_ops;
+
+ mutex_init(&priv->lock);
+
+ /* Reset device and check if it is connected */
+ if (encx24j600_hw_reset(priv)) {
+ netif_err(priv, probe, ndev,
+ DRV_NAME ": Chip is not detected\n");
+ ret = -EIO;
+ goto out_free;
+ }
+
+ /* Initialize the device HW to the consistent state */
+ if (encx24j600_hw_init(priv)) {
+ netif_err(priv, probe, ndev,
+ DRV_NAME ": HW initialization error\n");
+ ret = -EIO;
+ goto out_free;
+ }
+
+ init_kthread_worker(&priv->kworker);
+ init_kthread_work(&priv->tx_work, encx24j600_tx_proc);
+ init_kthread_work(&priv->setrx_work, encx24j600_setrx_proc);
+
+ priv->kworker_task = kthread_run(kthread_worker_fn, &priv->kworker,
+ "encx24j600");
+
+ if (IS_ERR(priv->kworker_task)) {
+ ret = PTR_ERR(priv->kworker_task);
+ goto out_free;
+ }
+
+ /* Get the MAC address from the chip */
+ encx24j600_hw_get_macaddr(priv, ndev->dev_addr);
+
+ ndev->ethtool_ops = &encx24j600_ethtool_ops;
+
+ ret = register_netdev(ndev);
+ if (unlikely(ret)) {
+ netif_err(priv, probe, ndev, "Error %d initializing card encx24j600 card\n",
+ ret);
+ goto out_free;
+ }
+
+ netif_info(priv, drv, priv->ndev, "MAC address %pM\n", ndev->dev_addr);
+
+ return ret;
+
+out_free:
+ free_netdev(ndev);
+
+error_out:
+ return ret;
+}
+
+static int encx24j600_spi_remove(struct spi_device *spi)
+{
+ struct encx24j600_priv *priv = dev_get_drvdata(&spi->dev);
+
+ unregister_netdev(priv->ndev);
+
+ free_netdev(priv->ndev);
+
+ return 0;
+}
+
+static const struct spi_device_id encx24j600_spi_id_table[] = {
+ { .name = "encx24j600" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, encx24j600_spi_id_table);
+
+static struct spi_driver encx24j600_spi_net_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .bus = &spi_bus_type,
+ },
+ .probe = encx24j600_spi_probe,
+ .remove = encx24j600_spi_remove,
+ .id_table = encx24j600_spi_id_table,
+};
+
+static int __init encx24j600_init(void)
+{
+ return spi_register_driver(&encx24j600_spi_net_driver);
+}
+module_init(encx24j600_init);
+
+static void encx24j600_exit(void)
+{
+ spi_unregister_driver(&encx24j600_spi_net_driver);
+}
+module_exit(encx24j600_exit);
+
+MODULE_DESCRIPTION(DRV_NAME " ethernet driver");
+MODULE_AUTHOR("Jon Ringle <jringle@gridpoint.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/net/ethernet/microchip/encx24j600_hw.h b/drivers/net/ethernet/microchip/encx24j600_hw.h
new file mode 100644
index 000000000000..4be73d5553f8
--- /dev/null
+++ b/drivers/net/ethernet/microchip/encx24j600_hw.h
@@ -0,0 +1,437 @@
+/**
+ * encx24j600_hw.h: Register definitions
+ *
+ */
+
+#ifndef _ENCX24J600_HW_H
+#define _ENCX24J600_HW_H
+
+struct encx24j600_context {
+ struct spi_device *spi;
+ struct regmap *regmap;
+ struct regmap *phymap;
+ struct mutex mutex; /* mutex to protect access to regmap */
+ int bank;
+};
+
+void devm_regmap_init_encx24j600(struct device *dev,
+ struct encx24j600_context *ctx);
+
+/* Single-byte instructions */
+#define BANK_SELECT(bank) (0xC0 | ((bank & (BANK_MASK >> BANK_SHIFT)) << 1))
+#define B0SEL 0xC0 /* Bank 0 Select */
+#define B1SEL 0xC2 /* Bank 1 Select */
+#define B2SEL 0xC4 /* Bank 2 Select */
+#define B3SEL 0xC6 /* Bank 3 Select */
+#define SETETHRST 0xCA /* System Reset */
+#define FCDISABLE 0xE0 /* Flow Control Disable */
+#define FCSINGLE 0xE2 /* Flow Control Single */
+#define FCMULTIPLE 0xE4 /* Flow Control Multiple */
+#define FCCLEAR 0xE6 /* Flow Control Clear */
+#define SETPKTDEC 0xCC /* Decrement Packet Counter */
+#define DMASTOP 0xD2 /* DMA Stop */
+#define DMACKSUM 0xD8 /* DMA Start Checksum */
+#define DMACKSUMS 0xDA /* DMA Start Checksum with Seed */
+#define DMACOPY 0xDC /* DMA Start Copy */
+#define DMACOPYS 0xDE /* DMA Start Copy and Checksum with Seed */
+#define SETTXRTS 0xD4 /* Request Packet Transmission */
+#define ENABLERX 0xE8 /* Enable RX */
+#define DISABLERX 0xEA /* Disable RX */
+#define SETEIE 0xEC /* Enable Interrupts */
+#define CLREIE 0xEE /* Disable Interrupts */
+
+/* Two byte instructions */
+#define RBSEL 0xC8 /* Read Bank Select */
+
+/* Three byte instructions */
+#define WGPRDPT 0x60 /* Write EGPRDPT */
+#define RGPRDPT 0x62 /* Read EGPRDPT */
+#define WRXRDPT 0x64 /* Write ERXRDPT */
+#define RRXRDPT 0x66 /* Read ERXRDPT */
+#define WUDARDPT 0x68 /* Write EUDARDPT */
+#define RUDARDPT 0x6A /* Read EUDARDPT */
+#define WGPWRPT 0x6C /* Write EGPWRPT */
+#define RGPWRPT 0x6E /* Read EGPWRPT */
+#define WRXWRPT 0x70 /* Write ERXWRPT */
+#define RRXWRPT 0x72 /* Read ERXWRPT */
+#define WUDAWRPT 0x74 /* Write EUDAWRPT */
+#define RUDAWRPT 0x76 /* Read EUDAWRPT */
+
+/* n byte instructions */
+#define RCRCODE 0x00
+#define WCRCODE 0x40
+#define BFSCODE 0x80
+#define BFCCODE 0xA0
+#define RCR(addr) (RCRCODE | (addr & ADDR_MASK)) /* Read Control Register */
+#define WCR(addr) (WCRCODE | (addr & ADDR_MASK)) /* Write Control Register */
+#define RCRU 0x20 /* Read Control Register Unbanked */
+#define WCRU 0x22 /* Write Control Register Unbanked */
+#define BFS(addr) (BFSCODE | (addr & ADDR_MASK)) /* Bit Field Set */
+#define BFC(addr) (BFCCODE | (addr & ADDR_MASK)) /* Bit Field Clear */
+#define BFSU 0x24 /* Bit Field Set Unbanked */
+#define BFCU 0x26 /* Bit Field Clear Unbanked */
+#define RGPDATA 0x28 /* Read EGPDATA */
+#define WGPDATA 0x2A /* Write EGPDATA */
+#define RRXDATA 0x2C /* Read ERXDATA */
+#define WRXDATA 0x2E /* Write ERXDATA */
+#define RUDADATA 0x30 /* Read EUDADATA */
+#define WUDADATA 0x32 /* Write EUDADATA */
+
+#define SFR_REG_COUNT 0xA0
+
+/* ENC424J600 Control Registers
+ * Control register definitions are a combination of address
+ * and bank number
+ * - Register address (bits 0-4)
+ * - Bank number (bits 5-6)
+ */
+#define ADDR_MASK 0x1F
+#define BANK_MASK 0x60
+#define BANK_SHIFT 5
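+/* Example: ERXFCON below is (0x14 | 0x20), i.e. register address 0x14 in bank 1 */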
+
+/* All-bank registers */
+#define EUDAST 0x16
+#define EUDAND 0x18
+#define ESTAT 0x1A
+#define EIR 0x1C
+#define ECON1 0x1E
+
+/* Bank 0 registers */
+#define ETXST (0x00 | 0x00)
+#define ETXLEN (0x02 | 0x00)
+#define ERXST (0x04 | 0x00)
+#define ERXTAIL (0x06 | 0x00)
+#define ERXHEAD (0x08 | 0x00)
+#define EDMAST (0x0A | 0x00)
+#define EDMALEN (0x0C | 0x00)
+#define EDMADST (0x0E | 0x00)
+#define EDMACS (0x10 | 0x00)
+#define ETXSTAT (0x12 | 0x00)
+#define ETXWIRE (0x14 | 0x00)
+
+/* Bank 1 registers */
+#define EHT1 (0x00 | 0x20)
+#define EHT2 (0x02 | 0x20)
+#define EHT3 (0x04 | 0x20)
+#define EHT4 (0x06 | 0x20)
+#define EPMM1 (0x08 | 0x20)
+#define EPMM2 (0x0A | 0x20)
+#define EPMM3 (0x0C | 0x20)
+#define EPMM4 (0x0E | 0x20)
+#define EPMCS (0x10 | 0x20)
+#define EPMO (0x12 | 0x20)
+#define ERXFCON (0x14 | 0x20)
+
+/* Bank 2 registers */
+#define MACON1 (0x00 | 0x40)
+#define MACON2 (0x02 | 0x40)
+#define MABBIPG (0x04 | 0x40)
+#define MAIPG (0x06 | 0x40)
+#define MACLCON (0x08 | 0x40)
+#define MAMXFL (0x0A | 0x40)
+#define MICMD (0x12 | 0x40)
+#define MIREGADR (0x14 | 0x40)
+
+/* Bank 3 registers */
+#define MAADR3 (0x00 | 0x60)
+#define MAADR2 (0x02 | 0x60)
+#define MAADR1 (0x04 | 0x60)
+#define MIWR (0x06 | 0x60)
+#define MIRD (0x08 | 0x60)
+#define MISTAT (0x0A | 0x60)
+#define EPAUS (0x0C | 0x60)
+#define ECON2 (0x0E | 0x60)
+#define ERXWM (0x10 | 0x60)
+#define EIE (0x12 | 0x60)
+#define EIDLED (0x14 | 0x60)
+
+/* Unbanked registers */
+#define EGPDATA (0x00 | 0x80)
+#define ERXDATA (0x02 | 0x80)
+#define EUDADATA (0x04 | 0x80)
+#define EGPRDPT (0x06 | 0x80)
+#define EGPWRPT (0x08 | 0x80)
+#define ERXRDPT (0x0A | 0x80)
+#define ERXWRPT (0x0C | 0x80)
+#define EUDARDPT (0x0E | 0x80)
+#define EUDAWRPT (0x10 | 0x80)
+
+
+/* Register bit definitions */
+/* ESTAT */
+#define INT (1 << 15)
+#define FCIDLE (1 << 14)
+#define RXBUSY (1 << 13)
+#define CLKRDY (1 << 12)
+#define PHYDPX (1 << 10)
+#define PHYLNK (1 << 8)
+
+/* EIR */
+#define CRYPTEN (1 << 15)
+#define MODEXIF (1 << 14)
+#define HASHIF (1 << 13)
+#define AESIF (1 << 12)
+#define LINKIF (1 << 11)
+#define PKTIF (1 << 6)
+#define DMAIF (1 << 5)
+#define TXIF (1 << 3)
+#define TXABTIF (1 << 2)
+#define RXABTIF (1 << 1)
+#define PCFULIF (1 << 0)
+
+/* ECON1 */
+#define MODEXST (1 << 15)
+#define HASHEN (1 << 14)
+#define HASHOP (1 << 13)
+#define HASHLST (1 << 12)
+#define AESST (1 << 11)
+#define AESOP1 (1 << 10)
+#define AESOP0 (1 << 9)
+#define PKTDEC (1 << 8)
+#define FCOP1 (1 << 7)
+#define FCOP0 (1 << 6)
+#define DMAST (1 << 5)
+#define DMACPY (1 << 4)
+#define DMACSSD (1 << 3)
+#define DMANOCS (1 << 2)
+#define TXRTS (1 << 1)
+#define RXEN (1 << 0)
+
+/* ETXSTAT */
+#define LATECOL (1 << 10)
+#define MAXCOL (1 << 9)
+#define EXDEFER (1 << 8)
+#define ETXSTATL_DEFER (1 << 7)
+#define CRCBAD (1 << 4)
+#define COLCNT_MASK 0xF
+
+/* ERXFCON */
+#define HTEN (1 << 15)
+#define MPEN (1 << 14)
+#define NOTPM (1 << 12)
+#define PMEN3 (1 << 11)
+#define PMEN2 (1 << 10)
+#define PMEN1 (1 << 9)
+#define PMEN0 (1 << 8)
+#define CRCEEN (1 << 7)
+#define CRCEN (1 << 6)
+#define RUNTEEN (1 << 5)
+#define RUNTEN (1 << 4)
+#define UCEN (1 << 3)
+#define NOTMEEN (1 << 2)
+#define MCEN (1 << 1)
+#define BCEN (1 << 0)
+
+/* MACON1 */
+#define LOOPBK (1 << 4)
+#define RXPAUS (1 << 2)
+#define PASSALL (1 << 1)
+
+/* MACON2 */
+#define MACON2_DEFER (1 << 14)
+#define BPEN (1 << 13)
+#define NOBKOFF (1 << 12)
+#define PADCFG2 (1 << 7)
+#define PADCFG1 (1 << 6)
+#define PADCFG0 (1 << 5)
+#define TXCRCEN (1 << 4)
+#define PHDREN (1 << 3)
+#define HFRMEN (1 << 2)
+#define MACON2_RSV1 (1 << 1)
+#define FULDPX (1 << 0)
+
+/* MAIPG */
+/* The value of the high byte is given by the reserved bits;
+ * the value of the low byte is the recommended setting of the
+ * IPG parameter.
+ */
+#define MAIPGH_VAL 0x0C
+#define MAIPGL_VAL 0x12
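+/* e.g. the driver programs MAIPG = (MAIPGH_VAL << 8) | MAIPGL_VAL = 0x0C12 */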
+
+/* MIREGADRH */
+#define MIREGADR_VAL (1 << 8)
+
+/* MIREGADRL */
+#define PHREG_MASK 0x1F
+
+/* MICMD */
+#define MIISCAN (1 << 1)
+#define MIIRD (1 << 0)
+
+/* MISTAT */
+#define NVALID (1 << 2)
+#define SCAN (1 << 1)
+#define BUSY (1 << 0)
+
+/* ECON2 */
+#define ETHEN (1 << 15)
+#define STRCH (1 << 14)
+#define TXMAC (1 << 13)
+#define SHA1MD5 (1 << 12)
+#define COCON3 (1 << 11)
+#define COCON2 (1 << 10)
+#define COCON1 (1 << 9)
+#define COCON0 (1 << 8)
+#define AUTOFC (1 << 7)
+#define TXRST (1 << 6)
+#define RXRST (1 << 5)
+#define ETHRST (1 << 4)
+#define MODLEN1 (1 << 3)
+#define MODLEN0 (1 << 2)
+#define AESLEN1 (1 << 1)
+#define AESLEN0 (1 << 0)
+
+/* EIE */
+#define INTIE (1 << 15)
+#define MODEXIE (1 << 14)
+#define HASHIE (1 << 13)
+#define AESIE (1 << 12)
+#define LINKIE (1 << 11)
+#define PKTIE (1 << 6)
+#define DMAIE (1 << 5)
+#define TXIE (1 << 3)
+#define TXABTIE (1 << 2)
+#define RXABTIE (1 << 1)
+#define PCFULIE (1 << 0)
+
+/* EIDLED */
+#define LACFG3 (1 << 15)
+#define LACFG2 (1 << 14)
+#define LACFG1 (1 << 13)
+#define LACFG0 (1 << 12)
+#define LBCFG3 (1 << 11)
+#define LBCFG2 (1 << 10)
+#define LBCFG1 (1 << 9)
+#define LBCFG0 (1 << 8)
+#define DEVID_SHIFT 5
+#define DEVID_MASK (0x7 << DEVID_SHIFT)
+#define REVID_SHIFT 0
+#define REVID_MASK (0x1F << REVID_SHIFT)
+
+/* PHY registers */
+#define PHCON1 0x00
+#define PHSTAT1 0x01
+#define PHANA 0x04
+#define PHANLPA 0x05
+#define PHANE 0x06
+#define PHCON2 0x11
+#define PHSTAT2 0x1B
+#define PHSTAT3 0x1F
+
+/* PHCON1 */
+#define PRST (1 << 15)
+#define PLOOPBK (1 << 14)
+#define SPD100 (1 << 13)
+#define ANEN (1 << 12)
+#define PSLEEP (1 << 11)
+#define RENEG (1 << 9)
+#define PFULDPX (1 << 8)
+
+/* PHSTAT1 */
+#define FULL100 (1 << 14)
+#define HALF100 (1 << 13)
+#define FULL10 (1 << 12)
+#define HALF10 (1 << 11)
+#define ANDONE (1 << 5)
+#define LRFAULT (1 << 4)
+#define ANABLE (1 << 3)
+#define LLSTAT (1 << 2)
+#define EXTREGS (1 << 0)
+
+/* PHSTAT2 */
+#define PLRITY (1 << 4)
+
+/* PHSTAT3 */
+#define PHY3SPD100 (1 << 3)
+#define PHY3DPX (1 << 4)
+#define SPDDPX_SHIFT 2
+#define SPDDPX_MASK (0x7 << SPDDPX_SHIFT)
+
+/* PHANA */
+/* Default value for PHY initialization */
+#define PHANA_DEFAULT 0x05E1
+
+/* PHANE */
+#define PDFLT (1 << 4)
+#define LPARCD (1 << 1)
+#define LPANABL (1 << 0)
+
+#define EUDAST_TEST_VAL 0x1234
+
+#define TSV_SIZE 7
+
+#define ENCX24J600_DEV_ID 0x1
+
+/* Configuration */
+
+/* LED is on when the link is present and driven low
+ * temporarily when a packet is TX'd or RX'd
+ */
+#define LED_A_SETTINGS 0xC
+
+/* Led is on if the link is in 100 Mbps mode */
+#define LED_B_SETTINGS 0x8
+
+/* Maximum ethernet frame length.
+ * Programmed into MAMXFL and used to reject oversized
+ * frames in the RX path.
+ */
+#define MAX_FRAMELEN 1518
+
+/* Size in bytes of the receive buffer in enc424j600.
+ * Must be word aligned (even).
+ */
+#define RX_BUFFER_SIZE (15 * MAX_FRAMELEN)
+
+/* Start of the general purpose area in sram */
+#define SRAM_GP_START 0x0
+
+/* SRAM size */
+#define SRAM_SIZE 0x6000
+
+/* Start of the receive buffer */
+#define ERXST_VAL (SRAM_SIZE - RX_BUFFER_SIZE)
+
+#define RSV_RXLONGEVDROPEV 16
+#define RSV_CARRIEREV 18
+#define RSV_CRCERROR 20
+#define RSV_LENCHECKERR 21
+#define RSV_LENOUTOFRANGE 22
+#define RSV_RXOK 23
+#define RSV_RXMULTICAST 24
+#define RSV_RXBROADCAST 25
+#define RSV_DRIBBLENIBBLE 26
+#define RSV_RXCONTROLFRAME 27
+#define RSV_RXPAUSEFRAME 28
+#define RSV_RXUNKNOWNOPCODE 29
+#define RSV_RXTYPEVLAN 30
+
+#define RSV_RUNTFILTERMATCH 31
+#define RSV_NOTMEFILTERMATCH 32
+#define RSV_HASHFILTERMATCH 33
+#define RSV_MAGICPKTFILTERMATCH 34
+#define RSV_PTRNMTCHFILTERMATCH 35
+#define RSV_UNICASTFILTERMATCH 36
+
+#define RSV_SIZE 8
+#define RSV_BITMASK(x) (1 << ((x) - 16))
+#define RSV_GETBIT(x, y) (((x) & RSV_BITMASK(y)) ? 1 : 0)
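+/* e.g. RSV_GETBIT(rsv.rxstat, RSV_RXOK) tests bit (23 - 16) = 7 of rxstat */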
+
+struct rsv {
+ u16 next_packet;
+ u16 len;
+ u32 rxstat;
+};
+
+/* Put RX buffer at 0 as suggested by the Errata datasheet */
+
+#define RXSTART_INIT ERXST_VAL
+#define RXEND_INIT 0x5FFF
+
+int regmap_encx24j600_spi_write(void *context, u8 reg, const u8 *data,
+ size_t count);
+int regmap_encx24j600_spi_read(void *context, u8 reg, u8 *data, size_t count);
+
+
+#endif
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 2d1b94274079..9ba975853ec6 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -5389,8 +5389,6 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev,
strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
strlcpy(info->version, s2io_driver_version, sizeof(info->version));
strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
- info->regdump_len = XENA_REG_SPACE;
- info->eedump_len = XENA_EEPROM_SPACE;
}
/**
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
index be916eb2f2e7..9a2967016c18 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
@@ -105,10 +105,6 @@ static void vxge_ethtool_gdrvinfo(struct net_device *dev,
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->fw_version, vdev->fw_version, sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(vdev->pdev), sizeof(info->bus_info));
- info->regdump_len = sizeof(struct vxge_hw_vpath_reg)
- * vdev->no_of_vpath;
-
- info->n_stats = STAT_LEN;
}
/**
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index 7bf9c028d8d7..c177c7cec13b 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -1344,10 +1344,6 @@ static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
- info->n_stats = 0;
- info->testinfo_len = 0;
- info->regdump_len = 0;
- info->eedump_len = 0;
}
static int octeon_mgmt_get_settings(struct net_device *netdev,
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index f6fcf7450352..b19be7c6c1f4 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -164,7 +164,6 @@ static void pch_gbe_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->version, pch_driver_version, sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->regdump_len = pch_gbe_get_regs_len(netdev);
}
/**
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index f1f0108c275d..30a6f246dfc9 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -91,4 +91,15 @@ config NETXEN_NIC
---help---
This enables the support for NetXen's Gigabit Ethernet card.
+config QED
+ tristate "QLogic QED 25/40/100Gb core driver"
+ depends on PCI
+ ---help---
+ This enables the support for ...
+
+config QEDE
+ tristate "QLogic QED 25/40/100Gb Ethernet NIC"
+ depends on QED
+ ---help---
+ This enables the support for ...
endif # NET_VENDOR_QLOGIC
diff --git a/drivers/net/ethernet/qlogic/Makefile b/drivers/net/ethernet/qlogic/Makefile
index b2a283d9ae60..cee90e05beb8 100644
--- a/drivers/net/ethernet/qlogic/Makefile
+++ b/drivers/net/ethernet/qlogic/Makefile
@@ -6,3 +6,5 @@ obj-$(CONFIG_QLA3XXX) += qla3xxx.o
obj-$(CONFIG_QLCNIC) += qlcnic/
obj-$(CONFIG_QLGE) += qlge/
obj-$(CONFIG_NETXEN_NIC) += netxen/
+obj-$(CONFIG_QED) += qed/
+obj-$(CONFIG_QEDE) += qede/
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 87e073c6e291..f9034467736c 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -93,8 +93,6 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->regdump_len = NETXEN_NIC_REGS_LEN;
- drvinfo->eedump_len = netxen_nic_get_eeprom_len(dev);
}
static int
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
new file mode 100644
index 000000000000..5c2fd57236fe
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_QED) := qed.o
+
+qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
+ qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
new file mode 100644
index 000000000000..ac17d8669b1a
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -0,0 +1,496 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_H
+#define _QED_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <linux/zlib.h>
+#include <linux/hashtable.h>
+#include <linux/qed/qed_if.h>
+#include "qed_hsi.h"
+
+extern const struct qed_common_ops qed_common_ops_pass;
+#define DRV_MODULE_VERSION "8.4.0.0"
+
+#define MAX_HWFNS_PER_DEVICE (4)
+#define NAME_SIZE 16
+#define VER_SIZE 16
+
+/* cau states */
+enum qed_coalescing_mode {
+ QED_COAL_MODE_DISABLE,
+ QED_COAL_MODE_ENABLE
+};
+
+struct qed_eth_cb_ops;
+struct qed_dev_info;
+
+/* helpers */
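+/* Compose a legacy doorbell offset from a connection id (cid) and DEMS value */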
+static inline u32 qed_db_addr(u32 cid, u32 DEMS)
+{
+ u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
+ FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);
+
+ return db_addr;
+}
+
+#define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \
+ ((sizeof(type_name) + (u32)(1 << (p_hwfn->cdev->cache_shift)) - 1) & \
+ ~((1 << (p_hwfn->cdev->cache_shift)) - 1))
+
+#define for_each_hwfn(cdev, i) for (i = 0; i < cdev->num_hwfns; i++)
+
+#define D_TRINE(val, cond1, cond2, true1, true2, def) \
+ (val == (cond1) ? true1 : \
+ (val == (cond2) ? true2 : def))
+
+/* forward */
+struct qed_ptt_pool;
+struct qed_spq;
+struct qed_sb_info;
+struct qed_sb_attn_info;
+struct qed_cxt_mngr;
+struct qed_sb_sp_info;
+struct qed_mcp_info;
+
+struct qed_rt_data {
+ u32 init_val;
+ bool b_valid;
+};
+
+/* The PCI personality is not quite synonymous with the protocol ID:
+ * 1. All personalities need CORE connections
+ * 2. The Ethernet personality may also support the RoCE protocol
+ */
+enum qed_pci_personality {
+ QED_PCI_ETH,
+ QED_PCI_DEFAULT /* default in shmem */
+};
+
+/* All VFs are symmetric, all counters are PF + all VFs */
+struct qed_qm_iids {
+ u32 cids;
+ u32 vf_cids;
+ u32 tids;
+};
+
+enum QED_RESOURCES {
+ QED_SB,
+ QED_L2_QUEUE,
+ QED_VPORT,
+ QED_RSS_ENG,
+ QED_PQ,
+ QED_RL,
+ QED_MAC,
+ QED_VLAN,
+ QED_ILT,
+ QED_MAX_RESC,
+};
+
+enum QED_FEATURE {
+ QED_PF_L2_QUE,
+ QED_MAX_FEATURES,
+};
+
+enum QED_PORT_MODE {
+ QED_PORT_MODE_DE_2X40G,
+ QED_PORT_MODE_DE_2X50G,
+ QED_PORT_MODE_DE_1X100G,
+ QED_PORT_MODE_DE_4X10G_F,
+ QED_PORT_MODE_DE_4X10G_E,
+ QED_PORT_MODE_DE_4X20G,
+ QED_PORT_MODE_DE_1X40G,
+ QED_PORT_MODE_DE_2X25G,
+ QED_PORT_MODE_DE_1X25G
+};
+
+struct qed_hw_info {
+ /* PCI personality */
+ enum qed_pci_personality personality;
+
+ /* Resource Allocation scheme results */
+ u32 resc_start[QED_MAX_RESC];
+ u32 resc_num[QED_MAX_RESC];
+ u32 feat_num[QED_MAX_FEATURES];
+
+#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
+#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
+#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
+
+ u8 num_tc;
+ u8 offload_tc;
+ u8 non_offload_tc;
+
+ u32 concrete_fid;
+ u16 opaque_fid;
+ u16 ovlan;
+ u32 part_num[4];
+
+ u32 vendor_id;
+ u32 device_id;
+
+ unsigned char hw_mac_addr[ETH_ALEN];
+
+ struct qed_igu_info *p_igu_info;
+
+ u32 port_mode;
+ u32 hw_mode;
+};
+
+struct qed_hw_cid_data {
+ u32 cid;
+ bool b_cid_allocated;
+
+ /* Additional identifiers */
+ u16 opaque_fid;
+ u8 vport_id;
+};
+
+/* maximum size of read/write commands (HW limit) */
+#define DMAE_MAX_RW_SIZE 0x2000
+
+struct qed_dmae_info {
+ /* Mutex for synchronizing access to functions */
+ struct mutex mutex;
+
+ u8 channel;
+
+ dma_addr_t completion_word_phys_addr;
+
+ /* The memory location where the DMAE writes the completion
+ * value when an operation is finished on this context.
+ */
+ u32 *p_completion_word;
+
+ dma_addr_t intermediate_buffer_phys_addr;
+
+ /* An intermediate buffer for DMAE operations that use virtual
+ * addresses - data is DMA'd to/from this buffer and then
+ * memcpy'd to/from the virtual address
+ */
+ u32 *p_intermediate_buffer;
+
+ dma_addr_t dmae_cmd_phys_addr;
+ struct dmae_cmd *p_dmae_cmd;
+};
+
+struct qed_qm_info {
+ struct init_qm_pq_params *qm_pq_params;
+ struct init_qm_vport_params *qm_vport_params;
+ struct init_qm_port_params *qm_port_params;
+ u16 start_pq;
+ u8 start_vport;
+ u8 pure_lb_pq;
+ u8 offload_pq;
+ u8 pure_ack_pq;
+ u8 vf_queues_offset;
+ u16 num_pqs;
+ u16 num_vf_pqs;
+ u8 num_vports;
+ u8 max_phys_tcs_per_port;
+ bool pf_rl_en;
+ bool pf_wfq_en;
+ bool vport_rl_en;
+ bool vport_wfq_en;
+ u8 pf_wfq;
+ u32 pf_rl;
+};
+
+struct storm_stats {
+ u32 address;
+ u32 len;
+};
+
+struct qed_storm_stats {
+ struct storm_stats mstats;
+ struct storm_stats pstats;
+ struct storm_stats tstats;
+ struct storm_stats ustats;
+};
+
+struct qed_fw_data {
+ struct fw_ver_info *fw_ver_info;
+ const u8 *modes_tree_buf;
+ union init_op *init_ops;
+ const u32 *arr_data;
+ u32 init_ops_size;
+};
+
+struct qed_simd_fp_handler {
+ void *token;
+ void (*func)(void *);
+};
+
+struct qed_hwfn {
+ struct qed_dev *cdev;
+ u8 my_id; /* ID inside the PF */
+#define IS_LEAD_HWFN(edev) (!((edev)->my_id))
+ u8 rel_pf_id; /* Relative to engine*/
+ u8 abs_pf_id;
+#define QED_PATH_ID(_p_hwfn) ((_p_hwfn)->abs_pf_id & 1)
+ u8 port_id;
+ bool b_active;
+
+ u32 dp_module;
+ u8 dp_level;
+ char name[NAME_SIZE];
+
+ bool first_on_engine;
+ bool hw_init_done;
+
+ /* BAR access */
+ void __iomem *regview;
+ void __iomem *doorbells;
+ u64 db_phys_addr;
+ unsigned long db_size;
+
+ /* PTT pool */
+ struct qed_ptt_pool *p_ptt_pool;
+
+ /* HW info */
+ struct qed_hw_info hw_info;
+
+ /* rt_array (for init-tool) */
+ struct qed_rt_data *rt_data;
+
+ /* SPQ */
+ struct qed_spq *p_spq;
+
+ /* EQ */
+ struct qed_eq *p_eq;
+
+ /* Consolidation queue */
+ struct qed_consq *p_consq;
+
+ /* Slow-Path definitions */
+ struct tasklet_struct *sp_dpc;
+ bool b_sp_dpc_enabled;
+
+ struct qed_ptt *p_main_ptt;
+ struct qed_ptt *p_dpc_ptt;
+
+ struct qed_sb_sp_info *p_sp_sb;
+ struct qed_sb_attn_info *p_sb_attn;
+
+ /* Protocol related */
+ struct qed_pf_params pf_params;
+
+ /* Array of sb_info of all status blocks */
+ struct qed_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
+ u16 num_sbs;
+
+ struct qed_cxt_mngr *p_cxt_mngr;
+
+ /* Flag indicating whether interrupts are enabled or not */
+ bool b_int_enabled;
+
+ struct qed_mcp_info *mcp_info;
+
+ struct qed_hw_cid_data *p_tx_cids;
+ struct qed_hw_cid_data *p_rx_cids;
+
+ struct qed_dmae_info dmae_info;
+
+ /* QM init */
+ struct qed_qm_info qm_info;
+ struct qed_storm_stats storm_stats;
+
+ /* Buffer for unzipping firmware data */
+ void *unzip_buf;
+
+ struct qed_simd_fp_handler simd_proto_handler[64];
+
+ struct z_stream_s *stream;
+};
+
+struct pci_params {
+ int pm_cap;
+
+ unsigned long mem_start;
+ unsigned long mem_end;
+ unsigned int irq;
+ u8 pf_num;
+};
+
+struct qed_int_param {
+ u32 int_mode;
+ u8 num_vectors;
+ u8 min_msix_cnt; /* for minimal functionality */
+};
+
+struct qed_int_params {
+ struct qed_int_param in;
+ struct qed_int_param out;
+ struct msix_entry *msix_table;
+ bool fp_initialized;
+ u8 fp_msix_base;
+ u8 fp_msix_cnt;
+};
+
+struct qed_dev {
+ u32 dp_module;
+ u8 dp_level;
+ char name[NAME_SIZE];
+
+ u8 type;
+#define QED_DEV_TYPE_BB_A0 (0 << 0)
+#define QED_DEV_TYPE_MASK (0x3)
+#define QED_DEV_TYPE_SHIFT (0)
+
+ u16 chip_num;
+#define CHIP_NUM_MASK 0xffff
+#define CHIP_NUM_SHIFT 16
+
+ u16 chip_rev;
+#define CHIP_REV_MASK 0xf
+#define CHIP_REV_SHIFT 12
+
+ u16 chip_metal;
+#define CHIP_METAL_MASK 0xff
+#define CHIP_METAL_SHIFT 4
+
+ u16 chip_bond_id;
+#define CHIP_BOND_ID_MASK 0xf
+#define CHIP_BOND_ID_SHIFT 0
+
+ u8 num_engines;
+ u8 num_ports_in_engines;
+ u8 num_funcs_in_port;
+
+ u8 path_id;
+ enum mf_mode mf_mode;
+#define IS_MF(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode != SF)
+#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == MF_NPAR)
+#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == MF_OVLAN)
+
+ int pcie_width;
+ int pcie_speed;
+ u8 ver_str[VER_SIZE];
+
+ /* Add MF related configuration */
+ u8 mcp_rev;
+ u8 boot_mode;
+
+ u8 wol;
+
+ u32 int_mode;
+ enum qed_coalescing_mode int_coalescing_mode;
+ u8 rx_coalesce_usecs;
+ u8 tx_coalesce_usecs;
+
+ /* Start Bar offset of first hwfn */
+ void __iomem *regview;
+ void __iomem *doorbells;
+ u64 db_phys_addr;
+ unsigned long db_size;
+
+ /* PCI */
+ u8 cache_shift;
+
+ /* Init */
+ const struct iro *iro_arr;
+#define IRO (p_hwfn->cdev->iro_arr)
+
+ /* HW functions */
+ u8 num_hwfns;
+ struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
+
+ u32 drv_type;
+
+ struct qed_eth_stats *reset_stats;
+ struct qed_fw_data *fw_data;
+
+ u32 mcp_nvm_resp;
+
+ /* Linux specific here */
+ struct qede_dev *edev;
+ struct pci_dev *pdev;
+ int msg_enable;
+
+ struct pci_params pci_params;
+
+ struct qed_int_params int_params;
+
+ u8 protocol;
+#define IS_QED_ETH_IF(cdev) ((cdev)->protocol == QED_PROTOCOL_ETH)
+
+ /* Callbacks to protocol driver */
+ union {
+ struct qed_common_cb_ops *common;
+ struct qed_eth_cb_ops *eth;
+ } protocol_ops;
+ void *ops_cookie;
+
+ const struct firmware *firmware;
+};
+
+#define QED_GET_TYPE(dev) (((dev)->type & QED_DEV_TYPE_MASK) >> \
+ QED_DEV_TYPE_SHIFT)
+#define QED_IS_BB_A0(dev) (QED_GET_TYPE(dev) == QED_DEV_TYPE_BB_A0)
+#define QED_IS_BB(dev) (QED_IS_BB_A0(dev))
+
+#define NUM_OF_SBS(dev) MAX_SB_PER_PATH_BB
+#define NUM_OF_ENG_PFS(dev) MAX_NUM_PFS_BB
+
+/**
+ * @brief qed_concrete_to_sw_fid - get the sw function id from
+ * the concrete value.
+ *
+ * @param concrete_fid
+ *
+ * @return u8 - the software function id (pfid)
+ */
+static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
+ u32 concrete_fid)
+{
+ u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
+
+ return pfid;
+}
+
+#define PURE_LB_TC 8
+
+#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
+
+/* Other Linux specific common definitions */
+#define DP_NAME(cdev) ((cdev)->name)
+
+#define REG_ADDR(cdev, offset) (void __iomem *)((u8 __iomem *)\
+ (cdev->regview) + \
+ (offset))
+
+#define REG_RD(cdev, offset) readl(REG_ADDR(cdev, offset))
+#define REG_WR(cdev, offset, val) writel((u32)val, REG_ADDR(cdev, offset))
+#define REG_WR16(cdev, offset, val) writew((u16)val, REG_ADDR(cdev, offset))
+
+#define DOORBELL(cdev, db_addr, val) \
+ writel((u32)val, (void __iomem *)((u8 __iomem *)\
+ (cdev->doorbells) + (db_addr)))
+
+/* Prototypes */
+int qed_fill_dev_info(struct qed_dev *cdev,
+ struct qed_dev_info *dev_info);
+void qed_link_update(struct qed_hwfn *hwfn);
+u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
+ u32 input_len, u8 *input_buf,
+ u32 max_size, u8 *unzip_buf);
+
+#define QED_ETH_INTERFACE_VERSION 300
+
+#endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
new file mode 100644
index 000000000000..7ccdb46c6764
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -0,0 +1,847 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/log2.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_reg_addr.h"
+
+/* Max number of connection types in HW (DQ/CDU etc.) */
+#define MAX_CONN_TYPES PROTOCOLID_COMMON
+#define NUM_TASK_TYPES 2
+#define NUM_TASK_PF_SEGMENTS 4
+
+/* QM constants */
+#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */
+
+/* Doorbell-Queue constants */
+#define DQ_RANGE_SHIFT 4
+#define DQ_RANGE_ALIGN BIT(DQ_RANGE_SHIFT)
+
+/* ILT constants */
+#define ILT_DEFAULT_HW_P_SIZE 3
+#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
+#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
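+/* With the default hw_p_size of 3, ILT_PAGE_IN_BYTES() yields 1 << 15 = 32K pages */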
+
+/* ILT entry structure */
+#define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL
+#define ILT_ENTRY_PHY_ADDR_SHIFT 0
+#define ILT_ENTRY_VALID_MASK 0x1ULL
+#define ILT_ENTRY_VALID_SHIFT 52
+#define ILT_ENTRY_IN_REGS 2
+#define ILT_REG_SIZE_IN_BYTES 4
+
+/* connection context union */
+union conn_context {
+ struct core_conn_context core_ctx;
+ struct eth_conn_context eth_ctx;
+};
+
+#define CONN_CXT_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
+
+/* PF per-protocol configuration object */
+struct qed_conn_type_cfg {
+ u32 cid_count;
+ u32 cid_start;
+};
+
+/* ILT Client configuration, Per connection type (protocol) resources. */
+#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
+#define CDUC_BLK (0)
+
+enum ilt_clients {
+ ILT_CLI_CDUC,
+ ILT_CLI_QM,
+ ILT_CLI_MAX
+};
+
+struct ilt_cfg_pair {
+ u32 reg;
+ u32 val;
+};
+
+struct qed_ilt_cli_blk {
+ u32 total_size; /* 0 means not active */
+ u32 real_size_in_page;
+ u32 start_line;
+};
+
+struct qed_ilt_client_cfg {
+ bool active;
+
+ /* ILT boundaries */
+ struct ilt_cfg_pair first;
+ struct ilt_cfg_pair last;
+ struct ilt_cfg_pair p_size;
+
+ /* ILT client blocks for PF */
+ struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
+ u32 pf_total_lines;
+};
+
+/* Per Path -
+ * ILT shadow table
+ * Protocol acquired CID lists
+ * PF start line in ILT
+ */
+struct qed_dma_mem {
+ dma_addr_t p_phys;
+ void *p_virt;
+ size_t size;
+};
+
+struct qed_cid_acquired_map {
+ u32 start_cid;
+ u32 max_count;
+ unsigned long *cid_map;
+};
+
+struct qed_cxt_mngr {
+ /* Per-protocol configuration */
+ struct qed_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
+
+ /* computed ILT structure */
+ struct qed_ilt_client_cfg clients[ILT_CLI_MAX];
+
+ /* Acquired CIDs */
+ struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
+
+ /* ILT shadow table */
+ struct qed_dma_mem *ilt_shadow;
+ u32 pf_start_line;
+};
+
+static u32 qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr)
+{
+ u32 type, pf_cids = 0;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++)
+ pf_cids += p_mngr->conn_cfg[type].cid_count;
+
+ return pf_cids;
+}
+
+static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
+ struct qed_qm_iids *iids)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ int type;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++)
+ iids->cids += p_mngr->conn_cfg[type].cid_count;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT, "iids: CIDS %08x\n", iids->cids);
+}
+
+/* set the iids count per protocol */
+static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 cid_count)
+{
+ struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+ struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
+
+ p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
+}
+
+static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
+ struct qed_ilt_cli_blk *p_blk,
+ u32 start_line, u32 total_size,
+ u32 elem_size)
+{
+ u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
+
+ /* verify that it's called only once for each block */
+ if (p_blk->total_size)
+ return;
+
+ p_blk->total_size = total_size;
+ p_blk->real_size_in_page = 0;
+ if (elem_size)
+ p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
+ p_blk->start_line = start_line;
+}
+
+static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
+ struct qed_ilt_client_cfg *p_cli,
+ struct qed_ilt_cli_blk *p_blk,
+ u32 *p_line, enum ilt_clients client_id)
+{
+ if (!p_blk->total_size)
+ return;
+
+ if (!p_cli->active)
+ p_cli->first.val = *p_line;
+
+ p_cli->active = true;
+ *p_line += DIV_ROUND_UP(p_blk->total_size,
+ p_blk->real_size_in_page);
+ p_cli->last.val = *p_line - 1;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
+ client_id, p_cli->first.val,
+ p_cli->last.val, p_blk->total_size,
+ p_blk->real_size_in_page, p_blk->start_line);
+}
+
+int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_blk;
+ u32 curr_line, total, pf_cids;
+ struct qed_qm_iids qm_iids;
+
+ memset(&qm_iids, 0, sizeof(qm_iids));
+
+ p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
+ p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
+
+ /* CDUC */
+ p_cli = &p_mngr->clients[ILT_CLI_CDUC];
+ curr_line = p_mngr->pf_start_line;
+ p_cli->pf_total_lines = 0;
+
+ /* get the counters for the CDUC and QM clients */
+ pf_cids = qed_cxt_cdu_iids(p_mngr);
+
+ p_blk = &p_cli->pf_blks[CDUC_BLK];
+
+ total = pf_cids * CONN_CXT_SIZE(p_hwfn);
+
+ qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total, CONN_CXT_SIZE(p_hwfn));
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+ /* QM */
+ p_cli = &p_mngr->clients[ILT_CLI_QM];
+ p_blk = &p_cli->pf_blks[0];
+
+ qed_cxt_qm_iids(p_hwfn, &qm_iids);
+ total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, 0, 0,
+ p_hwfn->qm_info.num_pqs, 0);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "QM ILT Info, (cids=%d, num_pqs=%d, memory_size=%d)\n",
+ qm_iids.cids, p_hwfn->qm_info.num_pqs, total);
+
+ qed_ilt_cli_blk_fill(p_cli, p_blk,
+ curr_line, total * 0x1000,
+ QM_PQ_ELEMENT_SIZE);
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+ if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
+ RESC_NUM(p_hwfn, QED_ILT)) {
+ DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
+ curr_line - p_hwfn->p_cxt_mngr->pf_start_line);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define for_each_ilt_valid_client(pos, clients) \
+ for (pos = 0; pos < ILT_CLI_MAX; pos++)
+
+/* Total number of ILT lines used by this PF */
+static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
+{
+ u32 size = 0;
+ u32 i;
+
+ for_each_ilt_valid_client(i, ilt_clients) {
+ if (!ilt_clients[i].active)
+ continue;
+ size += (ilt_clients[i].last.val -
+ ilt_clients[i].first.val + 1);
+ }
+
+ return size;
+}
+
+static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 ilt_size, i;
+
+ ilt_size = qed_cxt_ilt_shadow_size(p_cli);
+
+ for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
+ struct qed_dma_mem *p_dma = &p_mngr->ilt_shadow[i];
+
+ if (p_dma->p_virt)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_dma->size, p_dma->p_virt,
+ p_dma->p_phys);
+ p_dma->p_virt = NULL;
+ }
+ kfree(p_mngr->ilt_shadow);
+}
+
+static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ilt_cli_blk *p_blk,
+ enum ilt_clients ilt_client,
+ u32 start_line_offset)
+{
+ struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
+ u32 lines, line, sz_left;
+
+ if (!p_blk->total_size)
+ return 0;
+
+ sz_left = p_blk->total_size;
+ lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page);
+ line = p_blk->start_line + start_line_offset -
+ p_hwfn->p_cxt_mngr->pf_start_line;
+
+ for (; lines; lines--) {
+ dma_addr_t p_phys;
+ void *p_virt;
+ u32 size;
+
+ size = min_t(u32, sz_left,
+ p_blk->real_size_in_page);
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ size,
+ &p_phys,
+ GFP_KERNEL);
+ if (!p_virt)
+ return -ENOMEM;
+ memset(p_virt, 0, size);
+
+ ilt_shadow[line].p_phys = p_phys;
+ ilt_shadow[line].p_virt = p_virt;
+ ilt_shadow[line].size = size;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n",
+ line, (u64)p_phys, p_virt, size);
+
+ sz_left -= size;
+ line++;
+ }
+
+ return 0;
+}
+
+static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_ilt_client_cfg *clients = p_mngr->clients;
+ struct qed_ilt_cli_blk *p_blk;
+ u32 size, i, j;
+ int rc;
+
+ size = qed_cxt_ilt_shadow_size(clients);
+ p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
+ GFP_KERNEL);
+ if (!p_mngr->ilt_shadow) {
+ DP_NOTICE(p_hwfn, "Failed to allocate ilt shadow table\n");
+ rc = -ENOMEM;
+ goto ilt_shadow_fail;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "Allocated 0x%x bytes for ilt shadow\n",
+ (u32)(size * sizeof(struct qed_dma_mem)));
+
+ for_each_ilt_valid_client(i, clients) {
+ if (!clients[i].active)
+ continue;
+ for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
+ p_blk = &clients[i].pf_blks[j];
+ rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
+ if (rc != 0)
+ goto ilt_shadow_fail;
+ }
+ }
+
+ return 0;
+
+ilt_shadow_fail:
+ qed_ilt_shadow_free(p_hwfn);
+ return rc;
+}
+
+static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 type;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ kfree(p_mngr->acquired[type].cid_map);
+ p_mngr->acquired[type].max_count = 0;
+ p_mngr->acquired[type].start_cid = 0;
+ }
+}
+
+static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 start_cid = 0;
+ u32 type;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
+ u32 size;
+
+ if (cid_cnt == 0)
+ continue;
+
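+ /* The cid_map is a bitmap with one bit per CID, rounded up to a
+ * whole number of unsigned longs.
+ */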
+ size = DIV_ROUND_UP(cid_cnt,
+ sizeof(unsigned long) * BITS_PER_BYTE) *
+ sizeof(unsigned long);
+ p_mngr->acquired[type].cid_map = kzalloc(size, GFP_KERNEL);
+ if (!p_mngr->acquired[type].cid_map)
+ goto cid_map_fail;
+
+ p_mngr->acquired[type].max_count = cid_cnt;
+ p_mngr->acquired[type].start_cid = start_cid;
+
+ p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_CXT,
+ "Type %08x start: %08x count %08x\n",
+ type, p_mngr->acquired[type].start_cid,
+ p_mngr->acquired[type].max_count);
+ start_cid += cid_cnt;
+ }
+
+ return 0;
+
+cid_map_fail:
+ qed_cid_map_free(p_hwfn);
+ return -ENOMEM;
+}
+
+int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr;
+ u32 i;
+
+ p_mngr = kzalloc(sizeof(*p_mngr), GFP_ATOMIC);
+ if (!p_mngr) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_cxt_mngr'\n");
+ return -ENOMEM;
+ }
+
+ /* Initialize ILT client registers */
+ p_mngr->clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
+ p_mngr->clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
+ p_mngr->clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
+
+ p_mngr->clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
+ p_mngr->clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
+ p_mngr->clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
+
+ /* default ILT page size for all clients is 32K */
+ for (i = 0; i < ILT_CLI_MAX; i++)
+ p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
+
+ /* Set the cxt manager pointer prior to further allocations */
+ p_hwfn->p_cxt_mngr = p_mngr;
+
+ return 0;
+}
+
+int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
+{
+ int rc;
+
+ /* Allocate the ILT shadow table */
+ rc = qed_ilt_shadow_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to allocate ilt memory\n");
+ goto tables_alloc_fail;
+ }
+
+ /* Allocate and initialize the acquired cids bitmaps */
+ rc = qed_cid_map_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to allocate cid maps\n");
+ goto tables_alloc_fail;
+ }
+
+ return 0;
+
+tables_alloc_fail:
+ qed_cxt_mngr_free(p_hwfn);
+ return rc;
+}
+
+void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn->p_cxt_mngr)
+ return;
+
+ qed_cid_map_free(p_hwfn);
+ qed_ilt_shadow_free(p_hwfn);
+ kfree(p_hwfn->p_cxt_mngr);
+
+ p_hwfn->p_cxt_mngr = NULL;
+}
+
+void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ int type;
+
+ /* Reset acquired cids */
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
+
+ if (cid_cnt == 0)
+ continue;
+
+ memset(p_mngr->acquired[type].cid_map, 0,
+ DIV_ROUND_UP(cid_cnt,
+ sizeof(unsigned long) * BITS_PER_BYTE) *
+ sizeof(unsigned long));
+ }
+}
+
+/* CDU Common */
+#define CDUC_CXT_SIZE_SHIFT \
+ CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT
+
+#define CDUC_CXT_SIZE_MASK \
+ (CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)
+
+#define CDUC_BLOCK_WASTE_SHIFT \
+ CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT
+
+#define CDUC_BLOCK_WASTE_MASK \
+ (CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)
+
+#define CDUC_NCIB_SHIFT \
+ CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT
+
+#define CDUC_NCIB_MASK \
+ (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)
+
+static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
+{
+ u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
+
+ /* CDUC - connection configuration */
+ page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
+ cxt_size = CONN_CXT_SIZE(p_hwfn);
+ elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+ block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+ SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
+ SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
+ SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
+ STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
+}
+
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_pf_rt_init_params params;
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ struct qed_qm_iids iids;
+
+ memset(&iids, 0, sizeof(iids));
+ qed_cxt_qm_iids(p_hwfn, &iids);
+
+ memset(&params, 0, sizeof(params));
+ params.port_id = p_hwfn->port_id;
+ params.pf_id = p_hwfn->rel_pf_id;
+ params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
+ params.is_first_pf = p_hwfn->first_on_engine;
+ params.num_pf_cids = iids.cids;
+ params.start_pq = qm_info->start_pq;
+ params.num_pf_pqs = qm_info->num_pqs;
+ params.start_vport = qm_info->num_vports;
+ params.pf_wfq = qm_info->pf_wfq;
+ params.pf_rl = qm_info->pf_rl;
+ params.pq_params = qm_info->qm_pq_params;
+ params.vport_params = qm_info->qm_vport_params;
+
+ qed_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, &params);
+}
+
+/* CM PF */
+static int qed_cm_init_pf(struct qed_hwfn *p_hwfn)
+{
+ union qed_qm_pq_params pq_params;
+ u16 pq;
+
+ /* XCM pure-LB queue */
+ memset(&pq_params, 0, sizeof(pq_params));
+ pq_params.core.tc = LB_TC;
+ pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
+ STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq);
+
+ return 0;
+}
+
+/* DQ PF */
+static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 dq_pf_max_cid = 0;
+
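+ /* Each DORQ_REG_PF_MAX_ICID_N register holds the running total of
+ * CIDs (in DQ_RANGE_SHIFT units) over connection types 0..N.
+ */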
+ dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
+
+ /* 5 - PF */
+ dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
+}
+
+static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *ilt_clients;
+ int i;
+
+ ilt_clients = p_hwfn->p_cxt_mngr->clients;
+ for_each_ilt_valid_client(i, ilt_clients) {
+ if (!ilt_clients[i].active)
+ continue;
+ STORE_RT_REG(p_hwfn,
+ ilt_clients[i].first.reg,
+ ilt_clients[i].first.val);
+ STORE_RT_REG(p_hwfn,
+ ilt_clients[i].last.reg,
+ ilt_clients[i].last.val);
+ STORE_RT_REG(p_hwfn,
+ ilt_clients[i].p_size.reg,
+ ilt_clients[i].p_size.val);
+ }
+}
+
+/* ILT (PSWRQ2) PF */
+static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *clients;
+ struct qed_cxt_mngr *p_mngr;
+ struct qed_dma_mem *p_shdw;
+ u32 line, rt_offst, i;
+
+ qed_ilt_bounds_init(p_hwfn);
+
+ p_mngr = p_hwfn->p_cxt_mngr;
+ p_shdw = p_mngr->ilt_shadow;
+ clients = p_hwfn->p_cxt_mngr->clients;
+
+ for_each_ilt_valid_client(i, clients) {
+ if (!clients[i].active)
+ continue;
+
+ /* The client's first.val and the RT array are absolute ILT line
+ * numbers; the ILT shadow's lines are relative to pf_start_line.
+ */
+ line = clients[i].first.val - p_mngr->pf_start_line;
+ rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
+ clients[i].first.val * ILT_ENTRY_IN_REGS;
+
+ for (; line <= clients[i].last.val - p_mngr->pf_start_line;
+ line++, rt_offst += ILT_ENTRY_IN_REGS) {
+ u64 ilt_hw_entry = 0;
+
+ /* p_virt could be NULL in case of dynamic
+ * allocation
+ */
+ if (p_shdw[line].p_virt) {
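+ /* The ILT entry stores the physical address in 4KB-page
+ * units, hence the shift by 12.
+ */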
+ SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
+ SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
+ (p_shdw[line].p_phys >> 12));
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
+ rt_offst, line, i,
+ (u64)(p_shdw[line].p_phys >> 12));
+ }
+
+ STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
+ }
+ }
+}
+
+void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
+{
+ qed_cdu_init_common(p_hwfn);
+}
+
+void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
+{
+ qed_qm_init_pf(p_hwfn);
+ qed_cm_init_pf(p_hwfn);
+ qed_dq_init_pf(p_hwfn);
+ qed_ilt_init_pf(p_hwfn);
+}
+
+int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *p_cid)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 rel_cid;
+
+ if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
+ DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
+ return -EINVAL;
+ }
+
+ rel_cid = find_first_zero_bit(p_mngr->acquired[type].cid_map,
+ p_mngr->acquired[type].max_count);
+
+ if (rel_cid >= p_mngr->acquired[type].max_count) {
+ DP_NOTICE(p_hwfn, "no CID available for protocol %d\n",
+ type);
+ return -EINVAL;
+ }
+
+ __set_bit(rel_cid, p_mngr->acquired[type].cid_map);
+
+ *p_cid = rel_cid + p_mngr->acquired[type].start_cid;
+
+ return 0;
+}
+
+static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
+ u32 cid,
+ enum protocol_type *p_type)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_cid_acquired_map *p_map;
+ enum protocol_type p;
+ u32 rel_cid;
+
+ /* Iterate over protocols and find matching cid range */
+ for (p = 0; p < MAX_CONN_TYPES; p++) {
+ p_map = &p_mngr->acquired[p];
+
+ if (!p_map->cid_map)
+ continue;
+ if (cid >= p_map->start_cid &&
+ cid < p_map->start_cid + p_map->max_count)
+ break;
+ }
+ *p_type = p;
+
+ if (p == MAX_CONN_TYPES) {
+ DP_NOTICE(p_hwfn, "Invalid CID %d", cid);
+ return false;
+ }
+
+ rel_cid = cid - p_map->start_cid;
+ if (!test_bit(rel_cid, p_map->cid_map)) {
+ DP_NOTICE(p_hwfn, "CID %d not acquired", cid);
+ return false;
+ }
+ return true;
+}
+
+void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
+ u32 cid)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ enum protocol_type type;
+ bool b_acquired;
+ u32 rel_cid;
+
+ /* Test acquired and find matching per-protocol map */
+ b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, &type);
+
+ if (!b_acquired)
+ return;
+
+ rel_cid = cid - p_mngr->acquired[type].start_cid;
+ __clear_bit(rel_cid, p_mngr->acquired[type].cid_map);
+}
+
+int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
+ struct qed_cxt_info *p_info)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
+ enum protocol_type type;
+ bool b_acquired;
+
+ /* Test acquired and find matching per-protocol map */
+ b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);
+
+ if (!b_acquired)
+ return -EINVAL;
+
+ /* set the protocol type */
+ p_info->type = type;
+
+ /* compute context virtual pointer */
+ hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
+
+ conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
+ cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
+ line = p_info->iid / cxts_per_p;
+
+ /* Make sure context is allocated (dynamic allocation) */
+ if (!p_mngr->ilt_shadow[line].p_virt)
+ return -EINVAL;
+
+ p_info->p_cxt = p_mngr->ilt_shadow[line].p_virt +
+ p_info->iid % cxts_per_p * conn_cxt_size;
+
+ DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
+ "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
+ p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid);
+
+ return 0;
+}
+
+int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
+{
+ struct qed_eth_pf_params *p_params = &p_hwfn->pf_params.eth_pf_params;
+
+ /* Set the number of required CORE connections */
+ u32 core_cids = 1; /* SPQ */
+
+ qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids);
+
+ qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+ p_params->num_cons);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
new file mode 100644
index 000000000000..c8e1f5e5c42b
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -0,0 +1,139 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_CXT_H
+#define _QED_CXT_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_if.h>
+#include "qed_hsi.h"
+#include "qed.h"
+
+struct qed_cxt_info {
+ void *p_cxt;
+ u32 iid;
+ enum protocol_type type;
+};
+
+/**
+ * @brief qed_cxt_acquire_cid - Acquire a new cid of a specific protocol type
+ *
+ * @param p_hwfn
+ * @param type
+ * @param p_cid
+ *
+ * @return int
+ */
+int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *p_cid);
+
+/**
+ * @brief qed_cxt_get_cid_info - Returns the context info for a specific cid
+ *
+ *
+ * @param p_hwfn
+ * @param p_info in/out
+ *
+ * @return int
+ */
+int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
+ struct qed_cxt_info *p_info);
+
+enum qed_cxt_elem_type {
+ QED_ELEM_CXT,
+ QED_ELEM_TASK
+};
+
+/**
+ * @brief qed_cxt_set_pf_params - Set the PF params for cxt init
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_cfg_ilt_compute - compute ILT init parameters
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_mngr_alloc - Allocate and init the context manager struct
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_mngr_free
+ *
+ * @param p_hwfn
+ */
+void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired map
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_mngr_setup - Reset the acquired CIDs
+ *
+ * @param p_hwfn
+ */
+void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_hw_init_common - Initialize ILT and DQ, common phase, per path.
+ *
+ *
+ *
+ * @param p_hwfn
+ */
+void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_hw_init_pf - Initialize ILT and DQ, PF phase, per path.
+ *
+ *
+ *
+ * @param p_hwfn
+ */
+void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_qm_init_pf - Initialize the QM PF phase, per path
+ *
+ * @param p_hwfn
+ */
+
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_release_cid - Release a cid
+ *
+ * @param p_hwfn
+ * @param cid
+ */
+void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
+ u32 cid);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
new file mode 100644
index 000000000000..b9b7b7e6fa53
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -0,0 +1,1797 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/etherdevice.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_if.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+/* API common to all protocols */
+void qed_init_dp(struct qed_dev *cdev,
+ u32 dp_module, u8 dp_level)
+{
+ u32 i;
+
+ cdev->dp_level = dp_level;
+ cdev->dp_module = dp_module;
+ for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ p_hwfn->dp_level = dp_level;
+ p_hwfn->dp_module = dp_module;
+ }
+}
+
+void qed_init_struct(struct qed_dev *cdev)
+{
+ u8 i;
+
+ for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ p_hwfn->cdev = cdev;
+ p_hwfn->my_id = i;
+ p_hwfn->b_active = false;
+
+ mutex_init(&p_hwfn->dmae_info.mutex);
+ }
+
+ /* hwfn 0 is always active */
+ cdev->hwfns[0].b_active = true;
+
+ /* set the default cache alignment to 128 */
+ cdev->cache_shift = 7;
+}
+
+static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ kfree(qm_info->qm_pq_params);
+ qm_info->qm_pq_params = NULL;
+ kfree(qm_info->qm_vport_params);
+ qm_info->qm_vport_params = NULL;
+ kfree(qm_info->qm_port_params);
+ qm_info->qm_port_params = NULL;
+}
+
+void qed_resc_free(struct qed_dev *cdev)
+{
+ int i;
+
+ kfree(cdev->fw_data);
+ cdev->fw_data = NULL;
+
+ kfree(cdev->reset_stats);
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ kfree(p_hwfn->p_tx_cids);
+ p_hwfn->p_tx_cids = NULL;
+ kfree(p_hwfn->p_rx_cids);
+ p_hwfn->p_rx_cids = NULL;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ qed_cxt_mngr_free(p_hwfn);
+ qed_qm_info_free(p_hwfn);
+ qed_spq_free(p_hwfn);
+ qed_eq_free(p_hwfn, p_hwfn->p_eq);
+ qed_consq_free(p_hwfn, p_hwfn->p_consq);
+ qed_int_free(p_hwfn);
+ qed_dmae_info_free(p_hwfn);
+ }
+}
+
+static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ struct init_qm_port_params *p_qm_port;
+ u8 num_vports, i, vport_id, num_ports;
+ u16 num_pqs, multi_cos_tcs = 1;
+
+ memset(qm_info, 0, sizeof(*qm_info));
+
+ num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */
+ num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
+
+ /* Sanity checking that setup requires legal number of resources */
+ if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
+ DP_ERR(p_hwfn,
+ "Need too many Physical queues - 0x%04x when only %04x are available\n",
+ num_pqs, RESC_NUM(p_hwfn, QED_PQ));
+ return -EINVAL;
+ }
+
+ /* PQs will be arranged as follows: First per-TC PQ then pure-LB queue.
+ */
+ qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
+ num_pqs, GFP_ATOMIC);
+ if (!qm_info->qm_pq_params)
+ goto alloc_err;
+
+ qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
+ num_vports, GFP_ATOMIC);
+ if (!qm_info->qm_vport_params)
+ goto alloc_err;
+
+ qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
+ MAX_NUM_PORTS, GFP_ATOMIC);
+ if (!qm_info->qm_port_params)
+ goto alloc_err;
+
+ vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
+
+ /* First init per-TC PQs */
+ for (i = 0; i < multi_cos_tcs; i++) {
+ struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];
+
+ params->vport_id = vport_id;
+ params->tc_id = p_hwfn->hw_info.non_offload_tc;
+ params->wrr_group = 1;
+ }
+
+ /* Then init pure-LB PQ */
+ qm_info->pure_lb_pq = i;
+ qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
+ qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
+ qm_info->qm_pq_params[i].wrr_group = 1;
+ i++;
+
+ qm_info->offload_pq = 0;
+ qm_info->num_pqs = num_pqs;
+ qm_info->num_vports = num_vports;
+
+ /* Initialize qm port parameters */
+ num_ports = p_hwfn->cdev->num_ports_in_engines;
+ for (i = 0; i < num_ports; i++) {
+ p_qm_port = &qm_info->qm_port_params[i];
+ p_qm_port->active = 1;
+ p_qm_port->num_active_phys_tcs = 4;
+ p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
+ p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
+ }
+
+ qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
+
+ qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
+
+ qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);
+
+ qm_info->pf_wfq = 0;
+ qm_info->pf_rl = 0;
+ qm_info->vport_rl_en = 1;
+
+ return 0;
+
+alloc_err:
+ DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
+ kfree(qm_info->qm_pq_params);
+ kfree(qm_info->qm_vport_params);
+ kfree(qm_info->qm_port_params);
+
+ return -ENOMEM;
+}
+
+int qed_resc_alloc(struct qed_dev *cdev)
+{
+ struct qed_consq *p_consq;
+ struct qed_eq *p_eq;
+ int i, rc = 0;
+
+ cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
+ if (!cdev->fw_data)
+ return -ENOMEM;
+
+ /* Allocate Memory for the Queue->CID mapping */
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ int tx_size = sizeof(struct qed_hw_cid_data) *
+ RESC_NUM(p_hwfn, QED_L2_QUEUE);
+ int rx_size = sizeof(struct qed_hw_cid_data) *
+ RESC_NUM(p_hwfn, QED_L2_QUEUE);
+
+ p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
+ if (!p_hwfn->p_tx_cids) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate memory for Tx Cids\n");
+ goto alloc_err;
+ }
+
+ p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
+ if (!p_hwfn->p_rx_cids) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate memory for Rx Cids\n");
+ goto alloc_err;
+ }
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ /* First allocate the context manager structure */
+ rc = qed_cxt_mngr_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* Set the HW cid/tid numbers (in the context manager)
+ * Must be done prior to any further computations.
+ */
+ rc = qed_cxt_set_pf_params(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* Prepare and process QM requirements */
+ rc = qed_init_qm_info(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* Compute the ILT client partition */
+ rc = qed_cxt_cfg_ilt_compute(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* CID map / ILT shadow table / T2
+ * The table sizes are determined by the computations above
+ */
+ rc = qed_cxt_tables_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* SPQ, must follow ILT because initializes SPQ context */
+ rc = qed_spq_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* SP status block allocation */
+ p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
+ RESERVED_PTT_DPC);
+
+ rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc)
+ goto alloc_err;
+
+ /* EQ */
+ p_eq = qed_eq_alloc(p_hwfn, 256);
+
+ if (!p_eq)
+ goto alloc_err;
+ p_hwfn->p_eq = p_eq;
+
+ p_consq = qed_consq_alloc(p_hwfn);
+ if (!p_consq)
+ goto alloc_err;
+ p_hwfn->p_consq = p_consq;
+
+ /* DMA info initialization */
+ rc = qed_dmae_info_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate memory for dmae_info structure\n");
+ goto alloc_err;
+ }
+ }
+
+ cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
+ if (!cdev->reset_stats) {
+ DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
+ goto alloc_err;
+ }
+
+ return 0;
+
+alloc_err:
+ qed_resc_free(cdev);
+ return rc;
+}
+
+void qed_resc_setup(struct qed_dev *cdev)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ qed_cxt_mngr_setup(p_hwfn);
+ qed_spq_setup(p_hwfn);
+ qed_eq_setup(p_hwfn, p_hwfn->p_eq);
+ qed_consq_setup(p_hwfn, p_hwfn->p_consq);
+
+ /* Read shadow of current MFW mailbox */
+ qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
+ memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
+ p_hwfn->mcp_info->mfw_mb_cur,
+ p_hwfn->mcp_info->mfw_mb_length);
+
+ qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
+ }
+}
+
+#define FINAL_CLEANUP_CMD_OFFSET (0)
+#define FINAL_CLEANUP_CMD (0x1)
+#define FINAL_CLEANUP_VALID_OFFSET (6)
+#define FINAL_CLEANUP_VFPF_ID_SHIFT (7)
+#define FINAL_CLEANUP_COMP (0x2)
+#define FINAL_CLEANUP_POLL_CNT (100)
+#define FINAL_CLEANUP_POLL_TIME (10)
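+/* The cleanup ACK is polled for up to FINAL_CLEANUP_POLL_CNT iterations of
+ * FINAL_CLEANUP_POLL_TIME ms each, i.e. roughly one second in total.
+ */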
+int qed_final_cleanup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 id)
+{
+ u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
+ int rc = -EBUSY;
+
+ addr = GTT_BAR0_MAP_REG_USDM_RAM + USTORM_FLR_FINAL_ACK_OFFSET;
+
+ command |= FINAL_CLEANUP_CMD << FINAL_CLEANUP_CMD_OFFSET;
+ command |= 1 << FINAL_CLEANUP_VALID_OFFSET;
+ command |= id << FINAL_CLEANUP_VFPF_ID_SHIFT;
+ command |= FINAL_CLEANUP_COMP << SDM_OP_GEN_COMP_TYPE_SHIFT;
+
+ /* Make sure notification is not set before initiating final cleanup */
+ if (REG_RD(p_hwfn, addr)) {
+ DP_NOTICE(p_hwfn,
+ "Unexpected; Found final cleanup notification before initiating final cleanup\n");
+ REG_WR(p_hwfn, addr, 0);
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Sending final cleanup for PFVF[%d] [Command %08x\n]",
+ id, command);
+
+ qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
+
+ /* Poll until completion */
+ while (!REG_RD(p_hwfn, addr) && count--)
+ msleep(FINAL_CLEANUP_POLL_TIME);
+
+ if (REG_RD(p_hwfn, addr))
+ rc = 0;
+ else
+ DP_NOTICE(p_hwfn,
+ "Failed to receive FW final cleanup notification\n");
+
+ /* Cleanup afterwards */
+ REG_WR(p_hwfn, addr, 0);
+
+ return rc;
+}
+
+static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
+{
+ int hw_mode = 0;
+
+ hw_mode = (1 << MODE_BB_A0);
+
+ switch (p_hwfn->cdev->num_ports_in_engines) {
+ case 1:
+ hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
+ break;
+ case 2:
+ hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
+ break;
+ case 4:
+ hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
+ p_hwfn->cdev->num_ports_in_engines);
+ return;
+ }
+
+ switch (p_hwfn->cdev->mf_mode) {
+ case SF:
+ hw_mode |= 1 << MODE_SF;
+ break;
+ case MF_OVLAN:
+ hw_mode |= 1 << MODE_MF_SD;
+ break;
+ case MF_NPAR:
+ hw_mode |= 1 << MODE_MF_SI;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unsupported MF mode, init as SF\n");
+ hw_mode |= 1 << MODE_SF;
+ }
+
+ hw_mode |= 1 << MODE_ASIC;
+
+ p_hwfn->hw_info.hw_mode = hw_mode;
+}
+
+/* Init run time data for all PFs on an engine. */
+static void qed_init_cau_rt_data(struct qed_dev *cdev)
+{
+ u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
+ int i, sb_id;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_igu_info *p_igu_info;
+ struct qed_igu_block *p_block;
+ struct cau_sb_entry sb_entry;
+
+ p_igu_info = p_hwfn->hw_info.p_igu_info;
+
+ for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
+ sb_id++) {
+ p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
+ if (!p_block->is_pf)
+ continue;
+
+ qed_init_cau_sb_entry(p_hwfn, &sb_entry,
+ p_block->function_id,
+ 0, 0);
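+ /* Each cau_sb_entry occupies two 32-bit runtime registers,
+ * hence the sb_id * 2 offset.
+ */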
+ STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
+ sb_entry);
+ }
+ }
+}
+
+static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ int hw_mode)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ struct qed_qm_common_rt_init_params params;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ int rc = 0;
+
+ qed_init_cau_rt_data(cdev);
+
+ /* Program GTT windows */
+ qed_gtt_init(p_hwfn);
+
+ if (p_hwfn->mcp_info) {
+ if (p_hwfn->mcp_info->func_info.bandwidth_max)
+ qm_info->pf_rl_en = 1;
+ if (p_hwfn->mcp_info->func_info.bandwidth_min)
+ qm_info->pf_wfq_en = 1;
+ }
+
+ memset(&params, 0, sizeof(params));
+ params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
+ params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
+ params.pf_rl_en = qm_info->pf_rl_en;
+ params.pf_wfq_en = qm_info->pf_wfq_en;
+ params.vport_rl_en = qm_info->vport_rl_en;
+ params.vport_wfq_en = qm_info->vport_wfq_en;
+ params.port_params = qm_info->qm_port_params;
+
+ qed_qm_common_rt_init(p_hwfn, &params);
+
+ qed_cxt_hw_init_common(p_hwfn);
+
+ /* Close gate from NIG to BRB/Storm; By default they are open, but
+ * we close them to prevent NIG from passing data to reset blocks.
+ * Should have been done in the ENGINE phase, but init-tool lacks
+ * proper port-pretend capabilities.
+ */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
+ qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
+ qed_port_unpretend(p_hwfn, p_ptt);
+
+ rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
+ if (rc != 0)
+ return rc;
+
+ qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
+ qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
+
+ /* Disable relaxed ordering in the PCI config space */
+ qed_wr(p_hwfn, p_ptt, 0x20b4,
+ qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);
+
+ return rc;
+}
+
+static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ int hw_mode)
+{
+ int rc = 0;
+
+ rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
+ hw_mode);
+ return rc;
+}
+
+static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ int hw_mode,
+ bool b_hw_start,
+ enum qed_int_mode int_mode,
+ bool allow_npar_tx_switch)
+{
+ u8 rel_pf_id = p_hwfn->rel_pf_id;
+ int rc = 0;
+
+ if (p_hwfn->mcp_info) {
+ struct qed_mcp_function_info *p_info;
+
+ p_info = &p_hwfn->mcp_info->func_info;
+ if (p_info->bandwidth_min)
+ p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
+
+ /* Update rate limit once we'll actually have a link */
+ p_hwfn->qm_info.pf_rl = 100;
+ }
+
+ qed_cxt_hw_init_pf(p_hwfn);
+
+ qed_int_igu_init_rt(p_hwfn);
+
+ /* Set VLAN in NIG if needed */
+ if (hw_mode & (1 << MODE_MF_SD)) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
+ STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
+ STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
+ p_hwfn->hw_info.ovlan);
+ }
+
+ /* Enable classification by MAC if needed */
+ if (hw_mode & MODE_MF_SI) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "Configuring TAGMAC_CLS_TYPE\n");
+ STORE_RT_REG(p_hwfn,
+ NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
+ }
+
+ /* Protocol Configuration */
+ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
+ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
+ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
+
+ /* Clean up whatever a previous driver instance may have left behind */
+ rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id);
+ if (rc != 0)
+ return rc;
+
+ /* PF Init sequence */
+ rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
+ if (rc)
+ return rc;
+
+ /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
+ rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
+ if (rc)
+ return rc;
+
+ /* Pure runtime initializations - directly to the HW */
+ qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
+
+ if (b_hw_start) {
+ /* enable interrupts */
+ qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
+
+ /* send function start command */
+ rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode);
+ if (rc)
+ DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
+ }
+ return rc;
+}
+
+static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 enable)
+{
+ u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
+
+ /* Change PF in PXP */
+ qed_wr(p_hwfn, p_ptt,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
+
+ /* wait until value is set - try for 1 second every 50us */
+ for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
+ val = qed_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
+ if (val == set_val)
+ break;
+
+ usleep_range(50, 60);
+ }
+
+ if (val != set_val) {
+ DP_NOTICE(p_hwfn,
+ "PFID_ENABLE_MASTER wasn't changed after a second\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_main_ptt)
+{
+ /* Read shadow of current MFW mailbox */
+ qed_mcp_read_mb(p_hwfn, p_main_ptt);
+ memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
+ p_hwfn->mcp_info->mfw_mb_cur,
+ p_hwfn->mcp_info->mfw_mb_length);
+}
+
+int qed_hw_init(struct qed_dev *cdev,
+ bool b_hw_start,
+ enum qed_int_mode int_mode,
+ bool allow_npar_tx_switch,
+ const u8 *bin_fw_data)
+{
+ struct qed_storm_stats *p_stat;
+ u32 load_code, param, *p_address;
+ int rc, mfw_rc, i;
+ u8 fw_vport = 0;
+
+ rc = qed_init_fw_data(cdev, bin_fw_data);
+ if (rc != 0)
+ return rc;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ rc = qed_fw_vport(p_hwfn, 0, &fw_vport);
+ if (rc != 0)
+ return rc;
+
+ /* Enable DMAE in PXP */
+ rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
+
+ qed_calc_hw_mode(p_hwfn);
+
+ rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
+ &load_code);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
+ return rc;
+ }
+
+ qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
+ rc, load_code);
+
+ p_hwfn->first_on_engine = (load_code ==
+ FW_MSG_CODE_DRV_LOAD_ENGINE);
+
+ switch (load_code) {
+ case FW_MSG_CODE_DRV_LOAD_ENGINE:
+ rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->hw_info.hw_mode);
+ if (rc)
+ break;
+ /* Fall into */
+ case FW_MSG_CODE_DRV_LOAD_PORT:
+ rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->hw_info.hw_mode);
+ if (rc)
+ break;
+
+ /* Fall into */
+ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+ rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->hw_info.hw_mode,
+ b_hw_start, int_mode,
+ allow_npar_tx_switch);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc)
+ DP_NOTICE(p_hwfn,
+ "init phase failed for loadcode 0x%x (rc %d)\n",
+ load_code, rc);
+
+ /* ACK mfw regardless of success or failure of initialization */
+ mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_LOAD_DONE,
+ 0, &load_code, &param);
+ if (rc)
+ return rc;
+ if (mfw_rc) {
+ DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
+ return mfw_rc;
+ }
+
+ p_hwfn->hw_init_done = true;
+
+ /* init PF stats */
+ p_stat = &p_hwfn->storm_stats;
+ p_stat->mstats.address = BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_QUEUE_STAT_OFFSET(fw_vport);
+ p_stat->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
+
+ p_stat->ustats.address = BAR0_MAP_REG_USDM_RAM +
+ USTORM_QUEUE_STAT_OFFSET(fw_vport);
+ p_stat->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
+
+ p_stat->pstats.address = BAR0_MAP_REG_PSDM_RAM +
+ PSTORM_QUEUE_STAT_OFFSET(fw_vport);
+ p_stat->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
+
+ p_address = &p_stat->tstats.address;
+ *p_address = BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
+ p_stat->tstats.len = sizeof(struct tstorm_per_port_stat);
+ }
+
+ return 0;
+}
+
+#define QED_HW_STOP_RETRY_LIMIT (10)
+int qed_hw_stop(struct qed_dev *cdev)
+{
+ int rc = 0, t_rc;
+ int i, j;
+
+ for_each_hwfn(cdev, j) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
+ struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
+
+ /* mark the hw as uninitialized... */
+ p_hwfn->hw_init_done = false;
+
+ rc = qed_sp_pf_stop(p_hwfn);
+ if (rc)
+ return rc;
+
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
+
+ qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
+ qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
+ for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
+ if ((!qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_CONN)) &&
+ (!qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_TASK)))
+ break;
+
+ usleep_range(1000, 2000);
+ }
+ if (i == QED_HW_STOP_RETRY_LIMIT)
+ DP_NOTICE(p_hwfn,
+ "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
+ (u8)qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_CONN),
+ (u8)qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_TASK));
+
+ /* Disable Attention Generation */
+ qed_int_igu_disable_int(p_hwfn, p_ptt);
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
+ qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
+
+ qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
+
+ /* Need to wait 1ms to guarantee SBs are cleared */
+ usleep_range(1000, 2000);
+ }
+
+ /* Disable DMAE in PXP - in CMT, this should only be done for
+ * first hw-function, and only after all transactions have
+ * stopped for all active hw-functions.
+ */
+ t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
+ cdev->hwfns[0].p_main_ptt,
+ false);
+ if (t_rc != 0)
+ rc = t_rc;
+
+ return rc;
+}
+
+void qed_hw_stop_fastpath(struct qed_dev *cdev)
+{
+ int i, j;
+
+ for_each_hwfn(cdev, j) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
+ struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_IFDOWN,
+ "Shutting down the fastpath\n");
+
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
+
+ qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
+ qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
+ for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
+ if ((!qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_CONN)) &&
+ (!qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_TASK)))
+ break;
+
+ usleep_range(1000, 2000);
+ }
+ if (i == QED_HW_STOP_RETRY_LIMIT)
+ DP_NOTICE(p_hwfn,
+ "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
+ (u8)qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_CONN),
+ (u8)qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_TASK));
+
+ qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
+
+ /* Need to wait 1ms to guarantee SBs are cleared */
+ usleep_range(1000, 2000);
+ }
+}
+
+void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
+{
+ /* Re-open incoming traffic */
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
+}
+
+static int qed_reg_assert(struct qed_hwfn *hwfn,
+ struct qed_ptt *ptt, u32 reg,
+ bool expected)
+{
+ u32 assert_val = qed_rd(hwfn, ptt, reg);
+
+ if (assert_val != expected) {
+ DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
+ reg, expected);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int qed_hw_reset(struct qed_dev *cdev)
+{
+ int rc = 0;
+ u32 unload_resp, unload_param;
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
+
+ /* Check for incorrect states */
+ qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
+ QM_REG_USG_CNT_PF_TX, 0);
+ qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
+ QM_REG_USG_CNT_PF_OTHER, 0);
+
+ /* Disable PF in HW blocks */
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ TCFC_REG_STRONG_ENABLE_PF, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ CCFC_REG_STRONG_ENABLE_PF, 0);
+
+ /* Send unload command to MCP */
+ rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_UNLOAD_REQ,
+ DRV_MB_PARAM_UNLOAD_WOL_MCP,
+ &unload_resp, &unload_param);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
+ unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
+ }
+
+ rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_UNLOAD_DONE,
+ 0, &unload_resp, &unload_param);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
+static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
+{
+ qed_ptt_pool_free(p_hwfn);
+ kfree(p_hwfn->hw_info.p_igu_info);
+}
+
+/* Setup bar access */
+static int qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
+{
+ int rc;
+
+ /* Allocate PTT pool */
+ rc = qed_ptt_pool_alloc(p_hwfn);
+ if (rc)
+ return rc;
+
+ /* Allocate the main PTT */
+ p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
+
+ /* clear indirect access */
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
+
+ /* Clean Previous errors if such exist */
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
+ 1 << p_hwfn->abs_pf_id);
+
+ /* enable internal target-read */
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+
+ return 0;
+}
+
+static void get_function_id(struct qed_hwfn *p_hwfn)
+{
+ /* ME Register */
+ p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);
+
+ p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
+
+ p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
+ p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
+ PXP_CONCRETE_FID_PFID);
+ p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
+ PXP_CONCRETE_FID_PORT);
+}
+
+static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
+{
+ u32 *feat_num = p_hwfn->hw_info.feat_num;
+ int num_features = 1;
+
+ feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
+ num_features,
+ RESC_NUM(p_hwfn, QED_L2_QUEUE));
+ DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
+ "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
+ feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
+ num_features);
+}
+
+static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
+{
+ u32 *resc_start = p_hwfn->hw_info.resc_start;
+ u32 *resc_num = p_hwfn->hw_info.resc_num;
+ int num_funcs, i;
+
+ num_funcs = IS_MF(p_hwfn) ? MAX_NUM_PFS_BB
+ : p_hwfn->cdev->num_ports_in_engines;
+
+ resc_num[QED_SB] = min_t(u32,
+ (MAX_SB_PER_PATH_BB / num_funcs),
+ qed_int_get_num_sbs(p_hwfn, NULL));
+ resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
+ resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
+ resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
+ resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
+ resc_num[QED_RL] = 8;
+ resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
+ resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
+ num_funcs;
+ resc_num[QED_ILT] = 950;
+
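+ /* Resources are split evenly between the PFs; each PF's range starts
+ * right where the previous PF's range ends.
+ */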
+ for (i = 0; i < QED_MAX_RESC; i++)
+ resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;
+
+ qed_hw_set_feat(p_hwfn);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
+ "The numbers for each resource are:\n"
+ "SB = %d start = %d\n"
+ "L2_QUEUE = %d start = %d\n"
+ "VPORT = %d start = %d\n"
+ "PQ = %d start = %d\n"
+ "RL = %d start = %d\n"
+ "MAC = %d start = %d\n"
+ "VLAN = %d start = %d\n"
+ "ILT = %d start = %d\n",
+ p_hwfn->hw_info.resc_num[QED_SB],
+ p_hwfn->hw_info.resc_start[QED_SB],
+ p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
+ p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
+ p_hwfn->hw_info.resc_num[QED_VPORT],
+ p_hwfn->hw_info.resc_start[QED_VPORT],
+ p_hwfn->hw_info.resc_num[QED_PQ],
+ p_hwfn->hw_info.resc_start[QED_PQ],
+ p_hwfn->hw_info.resc_num[QED_RL],
+ p_hwfn->hw_info.resc_start[QED_RL],
+ p_hwfn->hw_info.resc_num[QED_MAC],
+ p_hwfn->hw_info.resc_start[QED_MAC],
+ p_hwfn->hw_info.resc_num[QED_VLAN],
+ p_hwfn->hw_info.resc_start[QED_VLAN],
+ p_hwfn->hw_info.resc_num[QED_ILT],
+ p_hwfn->hw_info.resc_start[QED_ILT]);
+}
+
+static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
+ u32 port_cfg_addr, link_temp, val, nvm_cfg_addr;
+ struct qed_mcp_link_params *link;
+
+ /* Read global nvm_cfg address */
+ nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
+
+ /* Verify MCP has initialized it */
+ if (!nvm_cfg_addr) {
+ DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
+ return -EINVAL;
+ }
+
+ /* Read nvm_cfg1 (note this is just the offset, not the offsize) (TBD) */
+ nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
+
+ /* Read Vendor Id / Device Id */
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, glob) +
+ offsetof(struct nvm_cfg1_glob, pci_id);
+ p_hwfn->hw_info.vendor_id = qed_rd(p_hwfn, p_ptt, addr) &
+ NVM_CFG1_GLOB_VENDOR_ID_MASK;
+
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, glob) +
+ offsetof(struct nvm_cfg1_glob, core_cfg);
+
+ core_cfg = qed_rd(p_hwfn, p_ptt, addr);
+
+ switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
+ NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
+ core_cfg);
+ break;
+ }
+
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, func[MCP_PF_ID(p_hwfn)]) +
+ offsetof(struct nvm_cfg1_func, device_id);
+ val = qed_rd(p_hwfn, p_ptt, addr);
+
+ if (IS_MF(p_hwfn)) {
+ p_hwfn->hw_info.device_id =
+ (val & NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK) >>
+ NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET;
+ } else {
+ p_hwfn->hw_info.device_id =
+ (val & NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK) >>
+ NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET;
+ }
+
+ /* Read default link configuration */
+ link = &p_hwfn->mcp_info->link_input;
+ port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+ link_temp = qed_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ offsetof(struct nvm_cfg1_port, speed_cap_mask));
+ link->speed.advertised_speeds =
+ link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
+
+ p_hwfn->mcp_info->link_capabilities.speed_capabilities =
+ link->speed.advertised_speeds;
+
+ link_temp = qed_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ offsetof(struct nvm_cfg1_port, link_settings));
+ switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
+ NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
+ link->speed.autoneg = true;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
+ link->speed.forced_speed = 1000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
+ link->speed.forced_speed = 10000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
+ link->speed.forced_speed = 25000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
+ link->speed.forced_speed = 40000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
+ link->speed.forced_speed = 50000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
+ link->speed.forced_speed = 100000;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
+ link_temp);
+ }
+
+ link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
+ link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
+ link->pause.autoneg = !!(link_temp &
+ NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
+ link->pause.forced_rx = !!(link_temp &
+ NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
+ link->pause.forced_tx = !!(link_temp &
+ NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
+ link->loopback_mode = 0;
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
+ link->speed.forced_speed, link->speed.advertised_speeds,
+ link->speed.autoneg, link->pause.autoneg);
+
+ /* Read Multi-function information from shmem */
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, glob) +
+ offsetof(struct nvm_cfg1_glob, generic_cont0);
+
+ generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);
+
+ mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
+ NVM_CFG1_GLOB_MF_MODE_OFFSET;
+
+ switch (mf_mode) {
+ case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
+ p_hwfn->cdev->mf_mode = MF_OVLAN;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
+ p_hwfn->cdev->mf_mode = MF_NPAR;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_FORCED_SF:
+ p_hwfn->cdev->mf_mode = SF;
+ break;
+ }
+ DP_INFO(p_hwfn, "Multi function mode is %08x\n",
+ p_hwfn->cdev->mf_mode);
+
+ return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
+}
+
+static int
+qed_get_hw_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_pci_personality personality)
+{
+ u32 port_mode;
+ int rc;
+
+ /* Read the port mode */
+ port_mode = qed_rd(p_hwfn, p_ptt,
+ CNIG_REG_NW_PORT_MODE_BB_B0);
+
+ if (port_mode < 3) {
+ p_hwfn->cdev->num_ports_in_engines = 1;
+ } else if (port_mode <= 5) {
+ p_hwfn->cdev->num_ports_in_engines = 2;
+ } else {
+ DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
+ p_hwfn->cdev->num_ports_in_engines);
+
+ /* Default num_ports_in_engines to something */
+ p_hwfn->cdev->num_ports_in_engines = 1;
+ }
+
+ qed_hw_get_nvm_info(p_hwfn, p_ptt);
+
+ rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+
+ if (qed_mcp_is_init(p_hwfn))
+ ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
+ p_hwfn->mcp_info->func_info.mac);
+ else
+ eth_random_addr(p_hwfn->hw_info.hw_mac_addr);
+
+ if (qed_mcp_is_init(p_hwfn)) {
+ if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
+ p_hwfn->hw_info.ovlan =
+ p_hwfn->mcp_info->func_info.ovlan;
+
+ qed_mcp_cmd_port_init(p_hwfn, p_ptt);
+ }
+
+ if (qed_mcp_is_init(p_hwfn)) {
+ enum qed_pci_personality protocol;
+
+ protocol = p_hwfn->mcp_info->func_info.protocol;
+ p_hwfn->hw_info.personality = protocol;
+ }
+
+ qed_hw_get_resc(p_hwfn);
+
+ return rc;
+}
+
+static void qed_get_dev_info(struct qed_dev *cdev)
+{
+ u32 tmp;
+
+ cdev->chip_num = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+ MISCS_REG_CHIP_NUM);
+ cdev->chip_rev = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+ MISCS_REG_CHIP_REV);
+ MASK_FIELD(CHIP_REV, cdev->chip_rev);
+
+ /* Learn number of HW-functions */
+ tmp = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+ MISCS_REG_CMT_ENABLED_FOR_PAIR);
+
+ if (tmp & (1 << cdev->hwfns[0].rel_pf_id)) {
+ DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
+ cdev->num_hwfns = 2;
+ } else {
+ cdev->num_hwfns = 1;
+ }
+
+ cdev->chip_bond_id = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+ MISCS_REG_CHIP_TEST_REG) >> 4;
+ MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
+ cdev->chip_metal = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+ MISCS_REG_CHIP_METAL);
+ MASK_FIELD(CHIP_METAL, cdev->chip_metal);
+
+ DP_INFO(cdev->hwfns,
+ "Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
+ cdev->chip_num, cdev->chip_rev,
+ cdev->chip_bond_id, cdev->chip_metal);
+}
+
+static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
+ void __iomem *p_regview,
+ void __iomem *p_doorbells,
+ enum qed_pci_personality personality)
+{
+ int rc = 0;
+
+ /* Split PCI bars evenly between hwfns */
+ p_hwfn->regview = p_regview;
+ p_hwfn->doorbells = p_doorbells;
+
+ /* Validate that chip access is feasible */
+ if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
+ DP_ERR(p_hwfn,
+ "Reading the ME register returns all Fs; Preventing further chip access\n");
+ return -EINVAL;
+ }
+
+ get_function_id(p_hwfn);
+
+ rc = qed_hw_hwfn_prepare(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
+ goto err0;
+ }
+
+ /* First hwfn learns basic information, e.g., number of hwfns */
+ if (!p_hwfn->my_id)
+ qed_get_dev_info(p_hwfn->cdev);
+
+ /* Initialize MCP structure */
+ rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
+ goto err1;
+ }
+
+ /* Read the device configuration information from the HW and SHMEM */
+ rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to get HW information\n");
+ goto err2;
+ }
+
+ /* Allocate the init RT array and initialize the init-ops engine */
+ rc = qed_init_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
+ goto err2;
+ }
+
+ return rc;
+err2:
+ qed_mcp_free(p_hwfn);
+err1:
+ qed_hw_hwfn_free(p_hwfn);
+err0:
+ return rc;
+}
+
+static u32 qed_hw_bar_size(struct qed_dev *cdev,
+ u8 bar_id)
+{
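+ /* bar_id 0 is the register view (PCI BAR 0); any other id refers to
+ * the doorbell window (PCI BAR 2). In CMT the bar is split evenly
+ * between the two hw-functions.
+ */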
+ u32 size = pci_resource_len(cdev->pdev, (bar_id > 0) ? 2 : 0);
+
+ return size / cdev->num_hwfns;
+}
+
+int qed_hw_prepare(struct qed_dev *cdev,
+ int personality)
+{
+ int rc, i;
+
+ /* Store the precompiled init data ptrs */
+ qed_init_iro_array(cdev);
+
+ /* Initialize the first hwfn - will learn number of hwfns */
+ rc = qed_hw_prepare_single(&cdev->hwfns[0], cdev->regview,
+ cdev->doorbells, personality);
+ if (rc)
+ return rc;
+
+ personality = cdev->hwfns[0].hw_info.personality;
+
+ /* Initialize the rest of the hwfns */
+ for (i = 1; i < cdev->num_hwfns; i++) {
+ void __iomem *p_regview, *p_doorbell;
+
+ p_regview = cdev->regview +
+ i * qed_hw_bar_size(cdev, 0);
+ p_doorbell = cdev->doorbells +
+ i * qed_hw_bar_size(cdev, 1);
+ rc = qed_hw_prepare_single(&cdev->hwfns[i], p_regview,
+ p_doorbell, personality);
+ if (rc) {
+ /* Cleanup previously initialized hwfns */
+ while (--i >= 0) {
+ qed_init_free(&cdev->hwfns[i]);
+ qed_mcp_free(&cdev->hwfns[i]);
+ qed_hw_hwfn_free(&cdev->hwfns[i]);
+ }
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+void qed_hw_remove(struct qed_dev *cdev)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ qed_init_free(p_hwfn);
+ qed_hw_hwfn_free(p_hwfn);
+ qed_mcp_free(p_hwfn);
+ }
+}
+
+int qed_chain_alloc(struct qed_dev *cdev,
+ enum qed_chain_use_mode intended_use,
+ enum qed_chain_mode mode,
+ u16 num_elems,
+ size_t elem_size,
+ struct qed_chain *p_chain)
+{
+ dma_addr_t p_pbl_phys = 0;
+ void *p_pbl_virt = NULL;
+ dma_addr_t p_phys = 0;
+ void *p_virt = NULL;
+ u16 page_cnt = 0;
+ size_t size;
+
+ if (mode == QED_CHAIN_MODE_SINGLE)
+ page_cnt = 1;
+ else
+ page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
+
+ size = page_cnt * QED_CHAIN_PAGE_SIZE;
+ p_virt = dma_alloc_coherent(&cdev->pdev->dev,
+ size, &p_phys, GFP_KERNEL);
+ if (!p_virt) {
+ DP_NOTICE(cdev, "Failed to allocate chain mem\n");
+ goto nomem;
+ }
+
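+ /* PBL mode additionally needs a page-base-list table that holds the
+ * physical address of every chain page.
+ */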
+ if (mode == QED_CHAIN_MODE_PBL) {
+ size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
+ p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
+ size, &p_pbl_phys,
+ GFP_KERNEL);
+ if (!p_pbl_virt) {
+ DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
+ goto nomem;
+ }
+
+ qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
+ (u8)elem_size, intended_use,
+ p_pbl_phys, p_pbl_virt);
+ } else {
+ qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
+ (u8)elem_size, intended_use, mode);
+ }
+
+ return 0;
+
+nomem:
+ dma_free_coherent(&cdev->pdev->dev,
+ page_cnt * QED_CHAIN_PAGE_SIZE,
+ p_virt, p_phys);
+ dma_free_coherent(&cdev->pdev->dev,
+ page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
+ p_pbl_virt, p_pbl_phys);
+
+ return -ENOMEM;
+}
+
+void qed_chain_free(struct qed_dev *cdev,
+ struct qed_chain *p_chain)
+{
+ size_t size;
+
+ if (!p_chain->p_virt_addr)
+ return;
+
+ if (p_chain->mode == QED_CHAIN_MODE_PBL) {
+ size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
+ dma_free_coherent(&cdev->pdev->dev, size,
+ p_chain->pbl.p_virt_table,
+ p_chain->pbl.p_phys_table);
+ }
+
+ size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
+ dma_free_coherent(&cdev->pdev->dev, size,
+ p_chain->p_virt_addr,
+ p_chain->p_phys_addr);
+}
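
A hedged usage sketch for the chain helpers above; the element type, element count and use mode below are illustrative, and real callers pick them to match the ring being built.

/* Hypothetical example: allocate a 256-element PBL-mode chain and free it. */
static int example_chain_setup(struct qed_dev *cdev, struct qed_chain *p_chain)
{
	int rc;

	rc = qed_chain_alloc(cdev,
			     QED_CHAIN_USE_TO_PRODUCE,	/* illustrative use mode */
			     QED_CHAIN_MODE_PBL,
			     256,
			     sizeof(union event_ring_element),
			     p_chain);
	if (rc)
		return rc;

	/* ... produce/consume elements on the chain ... */

	qed_chain_free(cdev, p_chain);
	return 0;
}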
+
+static void __qed_get_vport_stats(struct qed_dev *cdev,
+ struct qed_eth_stats *stats)
+{
+ int i, j;
+
+ memset(stats, 0, sizeof(*stats));
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct eth_mstorm_per_queue_stat mstats;
+ struct eth_ustorm_per_queue_stat ustats;
+ struct eth_pstorm_per_queue_stat pstats;
+ struct tstorm_per_port_stat tstats;
+ struct port_stats port_stats;
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ continue;
+ }
+
+ memset(&mstats, 0, sizeof(mstats));
+ qed_memcpy_from(p_hwfn, p_ptt, &mstats,
+ p_hwfn->storm_stats.mstats.address,
+ p_hwfn->storm_stats.mstats.len);
+
+ memset(&ustats, 0, sizeof(ustats));
+ qed_memcpy_from(p_hwfn, p_ptt, &ustats,
+ p_hwfn->storm_stats.ustats.address,
+ p_hwfn->storm_stats.ustats.len);
+
+ memset(&pstats, 0, sizeof(pstats));
+ qed_memcpy_from(p_hwfn, p_ptt, &pstats,
+ p_hwfn->storm_stats.pstats.address,
+ p_hwfn->storm_stats.pstats.len);
+
+ memset(&tstats, 0, sizeof(tstats));
+ qed_memcpy_from(p_hwfn, p_ptt, &tstats,
+ p_hwfn->storm_stats.tstats.address,
+ p_hwfn->storm_stats.tstats.len);
+
+ memset(&port_stats, 0, sizeof(port_stats));
+
+ if (p_hwfn->mcp_info)
+ qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, stats),
+ sizeof(port_stats));
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ stats->no_buff_discards +=
+ HILO_64_REGPAIR(mstats.no_buff_discard);
+ stats->packet_too_big_discard +=
+ HILO_64_REGPAIR(mstats.packet_too_big_discard);
+ stats->ttl0_discard +=
+ HILO_64_REGPAIR(mstats.ttl0_discard);
+ stats->tpa_coalesced_pkts +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
+ stats->tpa_coalesced_events +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_events);
+ stats->tpa_aborts_num +=
+ HILO_64_REGPAIR(mstats.tpa_aborts_num);
+ stats->tpa_coalesced_bytes +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
+
+ stats->rx_ucast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+ stats->rx_mcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+ stats->rx_bcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+ stats->rx_ucast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+ stats->rx_mcast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+ stats->rx_bcast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+
+ stats->mftag_filter_discards +=
+ HILO_64_REGPAIR(tstats.mftag_filter_discard);
+ stats->mac_filter_discards +=
+ HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
+
+ stats->tx_ucast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+ stats->tx_mcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+ stats->tx_bcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+ stats->tx_ucast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+ stats->tx_mcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+ stats->tx_bcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+ stats->tx_err_drop_pkts +=
+ HILO_64_REGPAIR(pstats.error_drop_pkts);
+ stats->rx_64_byte_packets += port_stats.pmm.r64;
+ stats->rx_127_byte_packets += port_stats.pmm.r127;
+ stats->rx_255_byte_packets += port_stats.pmm.r255;
+ stats->rx_511_byte_packets += port_stats.pmm.r511;
+ stats->rx_1023_byte_packets += port_stats.pmm.r1023;
+ stats->rx_1518_byte_packets += port_stats.pmm.r1518;
+ stats->rx_1522_byte_packets += port_stats.pmm.r1522;
+ stats->rx_2047_byte_packets += port_stats.pmm.r2047;
+ stats->rx_4095_byte_packets += port_stats.pmm.r4095;
+ stats->rx_9216_byte_packets += port_stats.pmm.r9216;
+ stats->rx_16383_byte_packets += port_stats.pmm.r16383;
+ stats->rx_crc_errors += port_stats.pmm.rfcs;
+ stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
+ stats->rx_pause_frames += port_stats.pmm.rxpf;
+ stats->rx_pfc_frames += port_stats.pmm.rxpp;
+ stats->rx_align_errors += port_stats.pmm.raln;
+ stats->rx_carrier_errors += port_stats.pmm.rfcr;
+ stats->rx_oversize_packets += port_stats.pmm.rovr;
+ stats->rx_jabbers += port_stats.pmm.rjbr;
+ stats->rx_undersize_packets += port_stats.pmm.rund;
+ stats->rx_fragments += port_stats.pmm.rfrg;
+ stats->tx_64_byte_packets += port_stats.pmm.t64;
+ stats->tx_65_to_127_byte_packets += port_stats.pmm.t127;
+ stats->tx_128_to_255_byte_packets += port_stats.pmm.t255;
+ stats->tx_256_to_511_byte_packets += port_stats.pmm.t511;
+ stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023;
+ stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518;
+ stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047;
+ stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095;
+ stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216;
+ stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383;
+ stats->tx_pause_frames += port_stats.pmm.txpf;
+ stats->tx_pfc_frames += port_stats.pmm.txpp;
+ stats->tx_lpi_entry_count += port_stats.pmm.tlpiec;
+ stats->tx_total_collisions += port_stats.pmm.tncl;
+ stats->rx_mac_bytes += port_stats.pmm.rbyte;
+ stats->rx_mac_uc_packets += port_stats.pmm.rxuca;
+ stats->rx_mac_mc_packets += port_stats.pmm.rxmca;
+ stats->rx_mac_bc_packets += port_stats.pmm.rxbca;
+ stats->rx_mac_frames_ok += port_stats.pmm.rxpok;
+ stats->tx_mac_bytes += port_stats.pmm.tbyte;
+ stats->tx_mac_uc_packets += port_stats.pmm.txuca;
+ stats->tx_mac_mc_packets += port_stats.pmm.txmca;
+ stats->tx_mac_bc_packets += port_stats.pmm.txbca;
+ stats->tx_mac_ctrl_frames += port_stats.pmm.txcf;
+
+ for (j = 0; j < 8; j++) {
+ stats->brb_truncates += port_stats.brb.brb_truncate[j];
+ stats->brb_discards += port_stats.brb.brb_discard[j];
+ }
+ }
+}
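
The HILO_64_REGPAIR() sums above fold each firmware regpair (a 32-bit high/low split) into a single 64-bit counter; a small sketch of the equivalent arithmetic, using a hypothetical stand-in type:

/* Sketch only: combining a hi/lo register pair into one u64 value. */
struct example_regpair {
	u32 lo;
	u32 hi;
};

static inline u64 example_hilo_64(struct example_regpair rp)
{
	return ((u64)rp.hi << 32) | rp.lo;
}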
+
+void qed_get_vport_stats(struct qed_dev *cdev,
+ struct qed_eth_stats *stats)
+{
+ u32 i;
+
+ if (!cdev) {
+ memset(stats, 0, sizeof(*stats));
+ return;
+ }
+
+ __qed_get_vport_stats(cdev, stats);
+
+ if (!cdev->reset_stats)
+ return;
+
+ /* Reduce the statistics baseline */
+ for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
+ ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
+}
+
+/* zeroes V-PORT specific portion of stats (Port stats remain untouched) */

+void qed_reset_vport_stats(struct qed_dev *cdev)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct eth_mstorm_per_queue_stat mstats;
+ struct eth_ustorm_per_queue_stat ustats;
+ struct eth_pstorm_per_queue_stat pstats;
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ continue;
+ }
+
+ memset(&mstats, 0, sizeof(mstats));
+ qed_memcpy_to(p_hwfn, p_ptt,
+ p_hwfn->storm_stats.mstats.address,
+ &mstats,
+ p_hwfn->storm_stats.mstats.len);
+
+ memset(&ustats, 0, sizeof(ustats));
+ qed_memcpy_to(p_hwfn, p_ptt,
+ p_hwfn->storm_stats.ustats.address,
+ &ustats,
+ p_hwfn->storm_stats.ustats.len);
+
+ memset(&pstats, 0, sizeof(pstats));
+ qed_memcpy_to(p_hwfn, p_ptt,
+ p_hwfn->storm_stats.pstats.address,
+ &pstats,
+ p_hwfn->storm_stats.pstats.len);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ }
+
+ /* PORT statistics are not necessarily reset, so we need to
+ * read and create a baseline for future statistics.
+ */
+ if (!cdev->reset_stats)
+ DP_INFO(cdev, "Reset stats not allocated\n");
+ else
+ __qed_get_vport_stats(cdev, cdev->reset_stats);
+}
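
Taken together with qed_get_vport_stats(), this gives a software baseline: a hedged sketch of how a caller might use the pair to read counters accumulated since a chosen point in time (the helper name is hypothetical).

static void example_stats_window(struct qed_dev *cdev,
				 struct qed_eth_stats *delta)
{
	qed_reset_vport_stats(cdev);	/* snapshot becomes the baseline */
	/* ... traffic runs ... */
	qed_get_vport_stats(cdev, delta);	/* counters since the reset */
}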
+
+int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
+ u16 src_id, u16 *dst_id)
+{
+ if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
+ u16 min, max;
+
+ min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
+ max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
+ DP_NOTICE(p_hwfn,
+ "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
+ src_id, min, max);
+
+ return -EINVAL;
+ }
+
+ *dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;
+
+ return 0;
+}
+
+int qed_fw_vport(struct qed_hwfn *p_hwfn,
+ u8 src_id, u8 *dst_id)
+{
+ if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
+ u8 min, max;
+
+ min = (u8)RESC_START(p_hwfn, QED_VPORT);
+ max = min + RESC_NUM(p_hwfn, QED_VPORT);
+ DP_NOTICE(p_hwfn,
+ "vport id [%d] is not valid, available indices [%d - %d]\n",
+ src_id, min, max);
+
+ return -EINVAL;
+ }
+
+ *dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;
+
+ return 0;
+}
+
+int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
+ u8 src_id, u8 *dst_id)
+{
+ if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
+ u8 min, max;
+
+ min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
+ max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
+ DP_NOTICE(p_hwfn,
+ "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
+ src_id, min, max);
+
+ return -EINVAL;
+ }
+
+ *dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;
+
+ return 0;
+}
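
All three helpers above map a hwfn-relative index to an engine-absolute one by adding the hwfn's resource base; a hedged sketch of a typical caller for the L2-queue variant (the helper name and the follow-up step are hypothetical).

static int example_map_queue(struct qed_hwfn *p_hwfn, u16 rel_queue)
{
	u16 abs_queue;
	int rc;

	rc = qed_fw_l2_queue(p_hwfn, rel_queue, &abs_queue);
	if (rc)
		return rc;	/* rel_queue was outside the allocated range */

	/* ... program abs_queue into firmware/registers ... */
	return 0;
}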
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
new file mode 100644
index 000000000000..e29a3ba6c8b0
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -0,0 +1,283 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_DEV_API_H
+#define _QED_DEV_API_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_if.h>
+#include "qed_int.h"
+
+/**
+ * @brief qed_init_dp - initialize the debug level
+ *
+ * @param cdev
+ * @param dp_module
+ * @param dp_level
+ */
+void qed_init_dp(struct qed_dev *cdev,
+ u32 dp_module,
+ u8 dp_level);
+
+/**
+ * @brief qed_init_struct - initialize the device structure to
+ * its defaults
+ *
+ * @param cdev
+ */
+void qed_init_struct(struct qed_dev *cdev);
+
+/**
+ * @brief qed_resc_free -
+ *
+ * @param cdev
+ */
+void qed_resc_free(struct qed_dev *cdev);
+
+/**
+ * @brief qed_resc_alloc -
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_resc_alloc(struct qed_dev *cdev);
+
+/**
+ * @brief qed_resc_setup -
+ *
+ * @param cdev
+ */
+void qed_resc_setup(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_init -
+ *
+ * @param cdev
+ * @param b_hw_start
+ * @param int_mode - interrupt mode [msix, inta, etc.] to use.
+ * @param allow_npar_tx_switch - npar tx switching to be used
+ * for vports configured for tx-switching.
+ * @param bin_fw_data - binary fw data pointer in binary fw file.
+ * Pass NULL if not using binary fw file.
+ *
+ * @return int
+ */
+int qed_hw_init(struct qed_dev *cdev,
+ bool b_hw_start,
+ enum qed_int_mode int_mode,
+ bool allow_npar_tx_switch,
+ const u8 *bin_fw_data);
+
+/**
+ * @brief qed_hw_stop -
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_hw_stop(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_stop_fastpath - should be called in case
+ * slowpath is still required for the device,
+ * but fastpath is not.
+ *
+ * @param cdev
+ *
+ */
+void qed_hw_stop_fastpath(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_start_fastpath - restart fastpath traffic,
+ * only if hw_stop_fastpath was called
+ *
+ * @param p_hwfn
+ *
+ */
+void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_hw_reset -
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_hw_reset(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_prepare -
+ *
+ * @param cdev
+ * @param personality - personality to initialize
+ *
+ * @return int
+ */
+int qed_hw_prepare(struct qed_dev *cdev,
+ int personality);
+
+/**
+ * @brief qed_hw_remove -
+ *
+ * @param cdev
+ */
+void qed_hw_remove(struct qed_dev *cdev);
+
+/**
+ * @brief qed_ptt_acquire - Allocate a PTT window
+ *
+ * Should be called at the entry point to the driver (at the beginning of an
+ * exported function)
+ *
+ * @param p_hwfn
+ *
+ * @return struct qed_ptt
+ */
+struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_release - Release PTT Window
+ *
+ * Should be called at the end of a flow - at the end of the function that
+ * acquired the PTT.
+ *
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_ptt_release(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
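
A minimal sketch of the acquire/use/release discipline the two comments above describe; qed_example_flow() is a hypothetical caller.

static int qed_example_flow(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);

	if (!p_ptt)
		return -EBUSY;	/* no free PTT window right now */

	/* ... register accesses through p_ptt ... */

	qed_ptt_release(p_hwfn, p_ptt);
	return 0;
}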
+void qed_get_vport_stats(struct qed_dev *cdev,
+ struct qed_eth_stats *stats);
+void qed_reset_vport_stats(struct qed_dev *cdev);
+
+enum qed_dmae_address_type_t {
+ QED_DMAE_ADDRESS_HOST_VIRT,
+ QED_DMAE_ADDRESS_HOST_PHYS,
+ QED_DMAE_ADDRESS_GRC
+};
+
+/* Values for the flags field: if the QED_DMAE_FLAG_RW_REPL_SRC flag is set
+ * and the source is a block of length DMAE_MAX_RW_SIZE while the
+ * destination is larger, the source block will be duplicated as
+ * many times as required to fill the destination block. This is
+ * used mostly to write a zeroed buffer to a destination address
+ * using DMA.
+ */
+#define QED_DMAE_FLAG_RW_REPL_SRC 0x00000001
+#define QED_DMAE_FLAG_COMPLETION_DST 0x00000008
+
+struct qed_dmae_params {
+ u32 flags; /* consists of QED_DMAE_FLAG_* values */
+};
+
+/**
+ * @brief qed_dmae_host2grc - copy data from source addr to
+ * dmae registers using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_addr
+ * @param grc_addr (dmae_data_offset)
+ * @param size_in_dwords
+ * @param flags (one of the flags defined above)
+ */
+int
+qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u64 source_addr,
+ u32 grc_addr,
+ u32 size_in_dwords,
+ u32 flags);
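
As the flag comment above explains, QED_DMAE_FLAG_RW_REPL_SRC replicates a small source block across a larger destination; a hedged sketch of using it with qed_dmae_host2grc() to zero a GRC region (the wrapper name and parameters are illustrative).

static int example_zero_grc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			    u64 zero_buf_phys, u32 grc_addr, u32 size_in_dwords)
{
	/* Source is a small zeroed buffer; replication fills the destination. */
	return qed_dmae_host2grc(p_hwfn, p_ptt, zero_buf_phys, grc_addr,
				 size_in_dwords, QED_DMAE_FLAG_RW_REPL_SRC);
}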
+
+/**
+ * @brief qed_chain_alloc - Allocate and initialize a chain
+ *
+ * @param p_hwfn
+ * @param intended_use
+ * @param mode
+ * @param num_elems
+ * @param elem_size
+ * @param p_chain
+ *
+ * @return int
+ */
+int
+qed_chain_alloc(struct qed_dev *cdev,
+ enum qed_chain_use_mode intended_use,
+ enum qed_chain_mode mode,
+ u16 num_elems,
+ size_t elem_size,
+ struct qed_chain *p_chain);
+
+/**
+ * @brief qed_chain_free - Free chain DMA memory
+ *
+ * @param p_hwfn
+ * @param p_chain
+ */
+void qed_chain_free(struct qed_dev *cdev,
+ struct qed_chain *p_chain);
+
+/**
+ * @brief qed_fw_l2_queue - Get absolute L2 queue ID
+ *
+ * @param p_hwfn
+ * @param src_id - relative to p_hwfn
+ * @param dst_id - absolute per engine
+ *
+ * @return int
+ */
+int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
+ u16 src_id,
+ u16 *dst_id);
+
+/**
+ * @brief qed_fw_vport - Get absolute vport ID
+ *
+ * @param p_hwfn
+ * @param src_id - relative to p_hwfn
+ * @param dst_id - absolute per engine
+ *
+ * @return int
+ */
+int qed_fw_vport(struct qed_hwfn *p_hwfn,
+ u8 src_id,
+ u8 *dst_id);
+
+/**
+ * @brief qed_fw_rss_eng - Get absolute RSS engine ID
+ *
+ * @param p_hwfn
+ * @param src_id - relative to p_hwfn
+ * @param dst_id - absolute per engine
+ *
+ * @return int
+ */
+int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
+ u8 src_id,
+ u8 *dst_id);
+
+/**
+ * @brief Cleanup of previous driver remains prior to load
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param id - For PF, engine-relative. For VF, PF-relative.
+ *
+ * @return int
+ */
+int qed_final_cleanup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 id);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
new file mode 100644
index 000000000000..b2f8e854dfd1
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -0,0 +1,5291 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_HSI_H
+#define _QED_HSI_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/eth_common.h>
+
+struct qed_hwfn;
+struct qed_ptt;
+/********************************/
+/* Add include to common target */
+/********************************/
+
+/* opcodes for the event ring */
+enum common_event_opcode {
+ COMMON_EVENT_PF_START,
+ COMMON_EVENT_PF_STOP,
+ COMMON_EVENT_RESERVED,
+ COMMON_EVENT_RESERVED2,
+ COMMON_EVENT_RESERVED3,
+ COMMON_EVENT_RESERVED4,
+ COMMON_EVENT_RESERVED5,
+ MAX_COMMON_EVENT_OPCODE
+};
+
+/* Common Ramrod Command IDs */
+enum common_ramrod_cmd_id {
+ COMMON_RAMROD_UNUSED,
+ COMMON_RAMROD_PF_START /* PF Function Start Ramrod */,
+ COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
+ COMMON_RAMROD_RESERVED,
+ COMMON_RAMROD_RESERVED2,
+ COMMON_RAMROD_RESERVED3,
+ MAX_COMMON_RAMROD_CMD_ID
+};
+
+/* The core storm context for the Ystorm */
+struct ystorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* The core storm context for the Pstorm */
+struct pstorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* Core Slowpath Connection storm context of Xstorm */
+struct xstorm_core_conn_st_ctx {
+ __le32 spq_base_lo /* SPQ Ring Base Address low dword */;
+ __le32 spq_base_hi /* SPQ Ring Base Address high dword */;
+ struct regpair consolid_base_addr;
+ __le16 spq_cons /* SPQ Ring Consumer */;
+ __le16 consolid_cons /* Consolidation Ring Consumer */;
+ __le32 reserved0[55] /* Pad to 15 cycles */;
+};
+
+struct xstorm_core_conn_ag_ctx {
+ u8 reserved0 /* cdu_validation */;
+ u8 core_state /* state */;
+ u8 flags0;
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1 /* bit10 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1 /* bit11 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1 /* bit12 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1 /* bit13 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 /* bit14 */
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 /* bit15 */
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */
+#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */
+#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */
+#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */
+#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */
+#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3 /* cf16 */
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 /* cf19 */
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1 /* cf11en */
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1 /* cf12en */
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1 /* cf13en */
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1 /* cf14en */
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1 /* cf15en */
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1 /* cf16en */
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 /* cf18en */
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 /* cf19en */
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf20en */
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1 /* cf21en */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 /* cf22en */
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1 /* cf23en */
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1 /* rule0en */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1 /* rule1en */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1 /* rule2en */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1 /* rule3en */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 /* rule4en */
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 /* rule8en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1 /* rule9en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1 /* rule10en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1 /* rule11en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 /* rule12en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 /* rule13en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1 /* rule14en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1 /* rule15en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1 /* rule16en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1 /* rule17en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1 /* rule18en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1 /* rule19en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 /* rule20en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 /* rule21en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 /* rule22en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 /* rule23en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 /* rule24en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 /* rule25en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1 /* bit16 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1 /* bit17 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1 /* bit18 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1 /* bit19 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1 /* bit20 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1 /* bit21 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3 /* cf23 */
+#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2 /* byte2 */;
+ __le16 physical_q0 /* physical_q0 */;
+ __le16 consolid_prod /* physical_q1 */;
+ __le16 reserved16 /* physical_q2 */;
+ __le16 tx_bd_cons /* word3 */;
+ __le16 tx_bd_or_spq_prod /* word4 */;
+ __le16 word5 /* word5 */;
+ __le16 conn_dpi /* conn_dpi */;
+ u8 byte3 /* byte3 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ u8 byte6 /* byte6 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* cf_array0 */;
+ __le32 reg6 /* cf_array1 */;
+ __le16 word7 /* word7 */;
+ __le16 word8 /* word8 */;
+ __le16 word9 /* word9 */;
+ __le16 word10 /* word10 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ __le32 reg9 /* reg9 */;
+ u8 byte7 /* byte7 */;
+ u8 byte8 /* byte8 */;
+ u8 byte9 /* byte9 */;
+ u8 byte10 /* byte10 */;
+ u8 byte11 /* byte11 */;
+ u8 byte12 /* byte12 */;
+ u8 byte13 /* byte13 */;
+ u8 byte14 /* byte14 */;
+ u8 byte15 /* byte15 */;
+ u8 byte16 /* byte16 */;
+ __le16 word11 /* word11 */;
+ __le32 reg10 /* reg10 */;
+ __le32 reg11 /* reg11 */;
+ __le32 reg12 /* reg12 */;
+ __le32 reg13 /* reg13 */;
+ __le32 reg14 /* reg14 */;
+ __le32 reg15 /* reg15 */;
+ __le32 reg16 /* reg16 */;
+ __le32 reg17 /* reg17 */;
+ __le32 reg18 /* reg18 */;
+ __le32 reg19 /* reg19 */;
+ __le16 word12 /* word12 */;
+ __le16 word13 /* word13 */;
+ __le16 word14 /* word14 */;
+ __le16 word15 /* word15 */;
+};
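
The *_MASK/*_SHIFT pairs interleaved with the flags bytes above encode sub-byte fields; the driver's common headers normally wrap this in SET_FIELD()/GET_FIELD() style macros, but an open-coded sketch makes the arithmetic explicit (the helper name is hypothetical).

static inline void example_set_flush_q0(struct xstorm_core_conn_ag_ctx *ctx,
					u8 val)
{
	/* Clear the 2-bit FLUSH_Q0 field in flags7, then write the new value. */
	ctx->flags7 &= ~(XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK <<
			 XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT);
	ctx->flags7 |= (val & XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK) <<
		       XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT;
}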
+
+/* The core storm context for the Mstorm */
+struct mstorm_core_conn_st_ctx {
+ __le32 reserved[24];
+};
+
+/* The core storm context for the Ustorm */
+struct ustorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* core connection context */
+struct core_conn_context {
+ struct ystorm_core_conn_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2] /* padding */;
+ struct pstorm_core_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2];
+ struct xstorm_core_conn_st_ctx xstorm_st_context;
+ struct xstorm_core_conn_ag_ctx xstorm_ag_context;
+ struct mstorm_core_conn_st_ctx mstorm_st_context;
+ struct regpair mstorm_st_padding[2];
+ struct ustorm_core_conn_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2] /* padding */;
+};
+
+struct eth_mstorm_per_queue_stat {
+ struct regpair ttl0_discard;
+ struct regpair packet_too_big_discard;
+ struct regpair no_buff_discard;
+ struct regpair not_active_discard;
+ struct regpair tpa_coalesced_pkts;
+ struct regpair tpa_coalesced_events;
+ struct regpair tpa_aborts_num;
+ struct regpair tpa_coalesced_bytes;
+};
+
+struct eth_pstorm_per_queue_stat {
+ struct regpair sent_ucast_bytes;
+ struct regpair sent_mcast_bytes;
+ struct regpair sent_bcast_bytes;
+ struct regpair sent_ucast_pkts;
+ struct regpair sent_mcast_pkts;
+ struct regpair sent_bcast_pkts;
+ struct regpair error_drop_pkts;
+};
+
+struct eth_ustorm_per_queue_stat {
+ struct regpair rcv_ucast_bytes;
+ struct regpair rcv_mcast_bytes;
+ struct regpair rcv_bcast_bytes;
+ struct regpair rcv_ucast_pkts;
+ struct regpair rcv_mcast_pkts;
+ struct regpair rcv_bcast_pkts;
+};
+
+/* Event Ring Next Page Address */
+struct event_ring_next_addr {
+ struct regpair addr /* Next Page Address */;
+ __le32 reserved[2] /* Reserved */;
+};
+
+union event_ring_element {
+ struct event_ring_entry entry /* Event Ring Entry */;
+ struct event_ring_next_addr next_addr;
+};
+
+enum personality_type {
+ PERSONALITY_RESERVED,
+ PERSONALITY_RESERVED2,
+ PERSONALITY_RDMA_AND_ETH /* RoCE or iWARP */,
+ PERSONALITY_RESERVED3,
+ PERSONALITY_ETH /* Ethernet */,
+ PERSONALITY_RESERVED4,
+ MAX_PERSONALITY_TYPE
+};
+
+struct pf_start_tunnel_config {
+ u8 set_vxlan_udp_port_flg;
+ u8 set_geneve_udp_port_flg;
+ u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
+ u8 tx_enable_l2geneve;
+ u8 tx_enable_ipgeneve;
+ u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
+ u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
+ u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
+ u8 tunnel_clss_l2geneve;
+ u8 tunnel_clss_ipgeneve;
+ u8 tunnel_clss_l2gre;
+ u8 tunnel_clss_ipgre;
+ __le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
+ __le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
+};
+
+/* Ramrod data for PF start ramrod */
+struct pf_start_ramrod_data {
+ struct regpair event_ring_pbl_addr;
+ struct regpair consolid_q_pbl_addr;
+ struct pf_start_tunnel_config tunnel_config;
+ __le16 event_ring_sb_id;
+ u8 base_vf_id;
+ u8 num_vfs;
+ u8 event_ring_num_pages;
+ u8 event_ring_sb_index;
+ u8 path_id;
+ u8 warning_as_error;
+ u8 dont_log_ramrods;
+ u8 personality;
+ __le16 log_type_mask;
+ u8 mf_mode /* Multi function mode */;
+ u8 integ_phase /* Integration phase */;
+ u8 allow_npar_tx_switching;
+ u8 inner_to_outer_pri_map[8];
+ u8 pri_map_valid;
+ u32 outer_tag;
+ u8 reserved0[4];
+};
+
+enum ports_mode {
+ ENGX2_PORTX1 /* 2 engines x 1 port */,
+ ENGX2_PORTX2 /* 2 engines x 2 ports */,
+ ENGX1_PORTX1 /* 1 engine x 1 port */,
+ ENGX1_PORTX2 /* 1 engine x 2 ports */,
+ ENGX1_PORTX4 /* 1 engine x 4 ports */,
+ MAX_PORTS_MODE
+};
+
+/* Ramrod Header of SPQE */
+struct ramrod_header {
+ __le32 cid /* Slowpath Connection CID */;
+ u8 cmd_id /* Ramrod Cmd (Per Protocol Type) */;
+ u8 protocol_id /* Ramrod Protocol ID */;
+ __le16 echo /* Ramrod echo */;
+};
+
+/* Slowpath Element (SPQE) */
+struct slow_path_element {
+ struct ramrod_header hdr /* Ramrod Header */;
+ struct regpair data_ptr;
+};
+
+struct tstorm_per_port_stat {
+ struct regpair trunc_error_discard;
+ struct regpair mac_error_discard;
+ struct regpair mftag_filter_discard;
+ struct regpair eth_mac_filter_discard;
+ struct regpair ll2_mac_filter_discard;
+ struct regpair ll2_conn_disabled_discard;
+ struct regpair iscsi_irregular_pkt;
+ struct regpair fcoe_irregular_pkt;
+ struct regpair roce_irregular_pkt;
+ struct regpair eth_irregular_pkt;
+ struct regpair toe_irregular_pkt;
+ struct regpair preroce_irregular_pkt;
+};
+
+struct atten_status_block {
+ __le32 atten_bits;
+ __le32 atten_ack;
+ __le16 reserved0;
+ __le16 sb_index /* status block running index */;
+ __le32 reserved1;
+};
+
+enum block_addr {
+ GRCBASE_GRC = 0x50000,
+ GRCBASE_MISCS = 0x9000,
+ GRCBASE_MISC = 0x8000,
+ GRCBASE_DBU = 0xa000,
+ GRCBASE_PGLUE_B = 0x2a8000,
+ GRCBASE_CNIG = 0x218000,
+ GRCBASE_CPMU = 0x30000,
+ GRCBASE_NCSI = 0x40000,
+ GRCBASE_OPTE = 0x53000,
+ GRCBASE_BMB = 0x540000,
+ GRCBASE_PCIE = 0x54000,
+ GRCBASE_MCP = 0xe00000,
+ GRCBASE_MCP2 = 0x52000,
+ GRCBASE_PSWHST = 0x2a0000,
+ GRCBASE_PSWHST2 = 0x29e000,
+ GRCBASE_PSWRD = 0x29c000,
+ GRCBASE_PSWRD2 = 0x29d000,
+ GRCBASE_PSWWR = 0x29a000,
+ GRCBASE_PSWWR2 = 0x29b000,
+ GRCBASE_PSWRQ = 0x280000,
+ GRCBASE_PSWRQ2 = 0x240000,
+ GRCBASE_PGLCS = 0x0,
+ GRCBASE_PTU = 0x560000,
+ GRCBASE_DMAE = 0xc000,
+ GRCBASE_TCM = 0x1180000,
+ GRCBASE_MCM = 0x1200000,
+ GRCBASE_UCM = 0x1280000,
+ GRCBASE_XCM = 0x1000000,
+ GRCBASE_YCM = 0x1080000,
+ GRCBASE_PCM = 0x1100000,
+ GRCBASE_QM = 0x2f0000,
+ GRCBASE_TM = 0x2c0000,
+ GRCBASE_DORQ = 0x100000,
+ GRCBASE_BRB = 0x340000,
+ GRCBASE_SRC = 0x238000,
+ GRCBASE_PRS = 0x1f0000,
+ GRCBASE_TSDM = 0xfb0000,
+ GRCBASE_MSDM = 0xfc0000,
+ GRCBASE_USDM = 0xfd0000,
+ GRCBASE_XSDM = 0xf80000,
+ GRCBASE_YSDM = 0xf90000,
+ GRCBASE_PSDM = 0xfa0000,
+ GRCBASE_TSEM = 0x1700000,
+ GRCBASE_MSEM = 0x1800000,
+ GRCBASE_USEM = 0x1900000,
+ GRCBASE_XSEM = 0x1400000,
+ GRCBASE_YSEM = 0x1500000,
+ GRCBASE_PSEM = 0x1600000,
+ GRCBASE_RSS = 0x238800,
+ GRCBASE_TMLD = 0x4d0000,
+ GRCBASE_MULD = 0x4e0000,
+ GRCBASE_YULD = 0x4c8000,
+ GRCBASE_XYLD = 0x4c0000,
+ GRCBASE_PRM = 0x230000,
+ GRCBASE_PBF_PB1 = 0xda0000,
+ GRCBASE_PBF_PB2 = 0xda4000,
+ GRCBASE_RPB = 0x23c000,
+ GRCBASE_BTB = 0xdb0000,
+ GRCBASE_PBF = 0xd80000,
+ GRCBASE_RDIF = 0x300000,
+ GRCBASE_TDIF = 0x310000,
+ GRCBASE_CDU = 0x580000,
+ GRCBASE_CCFC = 0x2e0000,
+ GRCBASE_TCFC = 0x2d0000,
+ GRCBASE_IGU = 0x180000,
+ GRCBASE_CAU = 0x1c0000,
+ GRCBASE_UMAC = 0x51000,
+ GRCBASE_XMAC = 0x210000,
+ GRCBASE_DBG = 0x10000,
+ GRCBASE_NIG = 0x500000,
+ GRCBASE_WOL = 0x600000,
+ GRCBASE_BMBN = 0x610000,
+ GRCBASE_IPC = 0x20000,
+ GRCBASE_NWM = 0x800000,
+ GRCBASE_NWS = 0x700000,
+ GRCBASE_MS = 0x6a0000,
+ GRCBASE_PHY_PCIE = 0x618000,
+ GRCBASE_MISC_AEU = 0x8000,
+ GRCBASE_BAR0_MAP = 0x1c00000,
+ MAX_BLOCK_ADDR
+};
+
+enum block_id {
+ BLOCK_GRC,
+ BLOCK_MISCS,
+ BLOCK_MISC,
+ BLOCK_DBU,
+ BLOCK_PGLUE_B,
+ BLOCK_CNIG,
+ BLOCK_CPMU,
+ BLOCK_NCSI,
+ BLOCK_OPTE,
+ BLOCK_BMB,
+ BLOCK_PCIE,
+ BLOCK_MCP,
+ BLOCK_MCP2,
+ BLOCK_PSWHST,
+ BLOCK_PSWHST2,
+ BLOCK_PSWRD,
+ BLOCK_PSWRD2,
+ BLOCK_PSWWR,
+ BLOCK_PSWWR2,
+ BLOCK_PSWRQ,
+ BLOCK_PSWRQ2,
+ BLOCK_PGLCS,
+ BLOCK_PTU,
+ BLOCK_DMAE,
+ BLOCK_TCM,
+ BLOCK_MCM,
+ BLOCK_UCM,
+ BLOCK_XCM,
+ BLOCK_YCM,
+ BLOCK_PCM,
+ BLOCK_QM,
+ BLOCK_TM,
+ BLOCK_DORQ,
+ BLOCK_BRB,
+ BLOCK_SRC,
+ BLOCK_PRS,
+ BLOCK_TSDM,
+ BLOCK_MSDM,
+ BLOCK_USDM,
+ BLOCK_XSDM,
+ BLOCK_YSDM,
+ BLOCK_PSDM,
+ BLOCK_TSEM,
+ BLOCK_MSEM,
+ BLOCK_USEM,
+ BLOCK_XSEM,
+ BLOCK_YSEM,
+ BLOCK_PSEM,
+ BLOCK_RSS,
+ BLOCK_TMLD,
+ BLOCK_MULD,
+ BLOCK_YULD,
+ BLOCK_XYLD,
+ BLOCK_PRM,
+ BLOCK_PBF_PB1,
+ BLOCK_PBF_PB2,
+ BLOCK_RPB,
+ BLOCK_BTB,
+ BLOCK_PBF,
+ BLOCK_RDIF,
+ BLOCK_TDIF,
+ BLOCK_CDU,
+ BLOCK_CCFC,
+ BLOCK_TCFC,
+ BLOCK_IGU,
+ BLOCK_CAU,
+ BLOCK_UMAC,
+ BLOCK_XMAC,
+ BLOCK_DBG,
+ BLOCK_NIG,
+ BLOCK_WOL,
+ BLOCK_BMBN,
+ BLOCK_IPC,
+ BLOCK_NWM,
+ BLOCK_NWS,
+ BLOCK_MS,
+ BLOCK_PHY_PCIE,
+ BLOCK_MISC_AEU,
+ BLOCK_BAR0_MAP,
+ MAX_BLOCK_ID
+};
+
+enum command_type_bit {
+ IGU_COMMAND_TYPE_NOP = 0,
+ IGU_COMMAND_TYPE_SET = 1,
+ MAX_COMMAND_TYPE_BIT
+};
+
+struct dmae_cmd {
+ __le32 opcode;
+#define DMAE_CMD_SRC_MASK 0x1
+#define DMAE_CMD_SRC_SHIFT 0
+#define DMAE_CMD_DST_MASK 0x3
+#define DMAE_CMD_DST_SHIFT 1
+#define DMAE_CMD_C_DST_MASK 0x1
+#define DMAE_CMD_C_DST_SHIFT 3
+#define DMAE_CMD_CRC_RESET_MASK 0x1
+#define DMAE_CMD_CRC_RESET_SHIFT 4
+#define DMAE_CMD_SRC_ADDR_RESET_MASK 0x1
+#define DMAE_CMD_SRC_ADDR_RESET_SHIFT 5
+#define DMAE_CMD_DST_ADDR_RESET_MASK 0x1
+#define DMAE_CMD_DST_ADDR_RESET_SHIFT 6
+#define DMAE_CMD_COMP_FUNC_MASK 0x1
+#define DMAE_CMD_COMP_FUNC_SHIFT 7
+#define DMAE_CMD_COMP_WORD_EN_MASK 0x1
+#define DMAE_CMD_COMP_WORD_EN_SHIFT 8
+#define DMAE_CMD_COMP_CRC_EN_MASK 0x1
+#define DMAE_CMD_COMP_CRC_EN_SHIFT 9
+#define DMAE_CMD_COMP_CRC_OFFSET_MASK 0x7
+#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
+#define DMAE_CMD_RESERVED1_MASK 0x1
+#define DMAE_CMD_RESERVED1_SHIFT 13
+#define DMAE_CMD_ENDIANITY_MODE_MASK 0x3
+#define DMAE_CMD_ENDIANITY_MODE_SHIFT 14
+#define DMAE_CMD_ERR_HANDLING_MASK 0x3
+#define DMAE_CMD_ERR_HANDLING_SHIFT 16
+#define DMAE_CMD_PORT_ID_MASK 0x3
+#define DMAE_CMD_PORT_ID_SHIFT 18
+#define DMAE_CMD_SRC_PF_ID_MASK 0xF
+#define DMAE_CMD_SRC_PF_ID_SHIFT 20
+#define DMAE_CMD_DST_PF_ID_MASK 0xF
+#define DMAE_CMD_DST_PF_ID_SHIFT 24
+#define DMAE_CMD_SRC_VF_ID_VALID_MASK 0x1
+#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
+#define DMAE_CMD_DST_VF_ID_VALID_MASK 0x1
+#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
+#define DMAE_CMD_RESERVED2_MASK 0x3
+#define DMAE_CMD_RESERVED2_SHIFT 30
+ __le32 src_addr_lo;
+ __le32 src_addr_hi;
+ __le32 dst_addr_lo;
+ __le32 dst_addr_hi;
+ __le16 length /* Length in DW */;
+ __le16 opcode_b;
+#define DMAE_CMD_SRC_VF_ID_MASK 0xFF /* Source VF id */
+#define DMAE_CMD_SRC_VF_ID_SHIFT 0
+#define DMAE_CMD_DST_VF_ID_MASK 0xFF /* Destination VF id */
+#define DMAE_CMD_DST_VF_ID_SHIFT 8
+ __le32 comp_addr_lo /* PCIe completion address low or grc address */;
+ __le32 comp_addr_hi;
+ __le32 comp_val /* Value to write to completion address */;
+ __le32 crc32 /* crc32 result */;
+ __le32 crc_32_c /* crc32_c result */;
+ __le16 crc16 /* crc16 result */;
+ __le16 crc16_c /* crc16_c result */;
+ __le16 crc10 /* crc_t10 result */;
+ __le16 reserved;
+ __le16 xsum16 /* checksum16 result */;
+ __le16 xsum8 /* checksum8 result */;
+};
+
+struct igu_cleanup {
+ __le32 sb_id_and_flags;
+#define IGU_CLEANUP_RESERVED0_MASK 0x7FFFFFF
+#define IGU_CLEANUP_RESERVED0_SHIFT 0
+#define IGU_CLEANUP_CLEANUP_SET_MASK 0x1 /* cleanup clear - 0, set - 1 */
+#define IGU_CLEANUP_CLEANUP_SET_SHIFT 27
+#define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7
+#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
+#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1
+#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
+ __le32 reserved1;
+};
+
+union igu_command {
+ struct igu_prod_cons_update prod_cons_update;
+ struct igu_cleanup cleanup;
+};
+
+struct igu_command_reg_ctrl {
+ __le16 opaque_fid;
+ __le16 igu_command_reg_ctrl_fields;
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK 0xFFF
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
+#define IGU_COMMAND_REG_CTRL_RESERVED_MASK 0x7
+#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT 12
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK 0x1
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
+};
+
+struct igu_mapping_line {
+ __le32 igu_mapping_line_fields;
+#define IGU_MAPPING_LINE_VALID_MASK 0x1
+#define IGU_MAPPING_LINE_VALID_SHIFT 0
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK 0xFF
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT 1
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK 0xFF
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
+#define IGU_MAPPING_LINE_PF_VALID_MASK 0x1 /* PF-1, VF-0 */
+#define IGU_MAPPING_LINE_PF_VALID_SHIFT 17
+#define IGU_MAPPING_LINE_IPS_GROUP_MASK 0x3F
+#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT 18
+#define IGU_MAPPING_LINE_RESERVED_MASK 0xFF
+#define IGU_MAPPING_LINE_RESERVED_SHIFT 24
+};
+
+struct igu_msix_vector {
+ struct regpair address;
+ __le32 data;
+ __le32 msix_vector_fields;
+#define IGU_MSIX_VECTOR_MASK_BIT_MASK 0x1
+#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT 0
+#define IGU_MSIX_VECTOR_RESERVED0_MASK 0x7FFF
+#define IGU_MSIX_VECTOR_RESERVED0_SHIFT 1
+#define IGU_MSIX_VECTOR_STEERING_TAG_MASK 0xFF
+#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16
+#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF
+#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24
+};
+
+enum init_modes {
+ MODE_BB_A0,
+ MODE_RESERVED,
+ MODE_RESERVED2,
+ MODE_ASIC,
+ MODE_RESERVED3,
+ MODE_RESERVED4,
+ MODE_RESERVED5,
+ MODE_SF,
+ MODE_MF_SD,
+ MODE_MF_SI,
+ MODE_PORTS_PER_ENG_1,
+ MODE_PORTS_PER_ENG_2,
+ MODE_PORTS_PER_ENG_4,
+ MODE_40G,
+ MODE_100G,
+ MODE_EAGLE_ENG1_WORKAROUND,
+ MAX_INIT_MODES
+};
+
+enum init_phases {
+ PHASE_ENGINE,
+ PHASE_PORT,
+ PHASE_PF,
+ PHASE_RESERVED,
+ PHASE_QM_PF,
+ MAX_INIT_PHASES
+};
+
+struct mstorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
+#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
+#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0 /* word0 */;
+ __le16 word1 /* word1 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+};
+
+/* per encapsulation type enabling flags */
+struct prs_reg_encapsulation_type_en {
+ u8 flags;
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT 0
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT 1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT 2
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT 3
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT 5
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK 0x3
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT 6
+};
+
+enum pxp_tph_st_hint {
+ TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */,
+ TPH_ST_HINT_REQUESTER /* Read/Write access by Device */,
+ TPH_ST_HINT_TARGET,
+ TPH_ST_HINT_TARGET_PRIO,
+ MAX_PXP_TPH_ST_HINT
+};
+
+/* QM hardware structure of enable bypass credit mask */
+struct qm_rf_bypass_mask {
+ u8 flags;
+#define QM_RF_BYPASS_MASK_LINEVOQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT 0
+#define QM_RF_BYPASS_MASK_RESERVED0_MASK 0x1
+#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1
+#define QM_RF_BYPASS_MASK_PFWFQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT 2
+#define QM_RF_BYPASS_MASK_VPWFQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT 3
+#define QM_RF_BYPASS_MASK_PFRL_MASK 0x1
+#define QM_RF_BYPASS_MASK_PFRL_SHIFT 4
+#define QM_RF_BYPASS_MASK_VPQCNRL_MASK 0x1
+#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT 5
+#define QM_RF_BYPASS_MASK_FWPAUSE_MASK 0x1
+#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT 6
+#define QM_RF_BYPASS_MASK_RESERVED1_MASK 0x1
+#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
+};
+
+/* QM hardware structure of opportunistic credit mask */
+struct qm_rf_opportunistic_mask {
+ __le16 flags;
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT 0
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT 1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT 2
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT 3
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT 4
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT 5
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT 6
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT 7
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK 0x7F
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9
+};
+
+/* QM hardware structure of QM map memory */
+struct qm_rf_pq_map {
+ u32 reg;
+#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1 /* PQ active */
+#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0
+#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF /* RL ID */
+#define QM_RF_PQ_MAP_RL_ID_SHIFT 1
+#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF
+#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9
+#define QM_RF_PQ_MAP_VOQ_MASK 0x1F /* VOQ */
+#define QM_RF_PQ_MAP_VOQ_SHIFT 18
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 /* WRR weight */
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
+#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1 /* RL active */
+#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25
+#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F
+#define QM_RF_PQ_MAP_RESERVED_SHIFT 26
+};
+
+/* SDM operation gen command (generate aggregative interrupt) */
+struct sdm_op_gen {
+ __le32 command;
+#define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF /* completion parameters 0-15 */
+#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
+#define SDM_OP_GEN_COMP_TYPE_MASK 0xF /* completion type 16-19 */
+#define SDM_OP_GEN_COMP_TYPE_SHIFT 16
+#define SDM_OP_GEN_RESERVED_MASK 0xFFF /* reserved 20-31 */
+#define SDM_OP_GEN_RESERVED_SHIFT 20
+};
+
+struct tstorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* reg5 */;
+ __le32 reg6 /* reg6 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* word0 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ __le16 word1 /* word1 */;
+ __le16 word2 /* conn_dpi */;
+ __le16 word3 /* word3 */;
+ __le32 reg9 /* reg9 */;
+ __le32 reg10 /* reg10 */;
+};
+
+struct ustorm_core_conn_ag_ctx {
+ u8 reserved /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* conn_dpi */;
+ __le16 word1 /* word1 */;
+ __le32 rx_producers /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le16 word2 /* word2 */;
+ __le16 word3 /* word3 */;
+};
+
+struct ystorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
+#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
+#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* word0 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le16 word1 /* word1 */;
+ __le16 word2 /* word2 */;
+ __le16 word3 /* word3 */;
+ __le16 word4 /* word4 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+};
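+
+/* The MASK/SHIFT pairs above describe sub-fields packed into the flagsN
+ * bytes. A minimal illustrative sketch (not part of the HSI itself),
+ * assuming the driver's generic GET_FIELD()/SET_FIELD() helpers, of how
+ * such a field would typically be read and written:
+ *
+ *	u8 cf0en = GET_FIELD(ctx->flags1, YSTORM_CORE_CONN_AG_CTX_CF0EN);
+ *	SET_FIELD(ctx->flags1, YSTORM_CORE_CONN_AG_CTX_CF0EN, 1);
+ *
+ * where the helpers expand to shift-and-mask operations using the *_MASK
+ * and *_SHIFT definitions.
+ */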
+
+/*********************************** Init ************************************/
+
+/* Width of GRC address in bits (addresses are specified in dwords) */
+#define GRC_ADDR_BITS 23
+#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1)
+
+/* indicates an init that should be applied to any phase ID */
+#define ANY_PHASE_ID 0xffff
+
+/* init pattern size in bytes */
+#define INIT_PATTERN_SIZE_BITS 4
+#define MAX_INIT_PATTERN_SIZE BIT(INIT_PATTERN_SIZE_BITS)
+
+/* Max size in dwords of a zipped array */
+#define MAX_ZIPPED_SIZE 8192
+
+/* Global PXP window */
+#define NUM_OF_PXP_WIN 19
+#define PXP_WIN_DWORD_SIZE_BITS 10
+#define PXP_WIN_DWORD_SIZE BIT(PXP_WIN_DWORD_SIZE_BITS)
+#define PXP_WIN_BYTE_SIZE_BITS (PXP_WIN_DWORD_SIZE_BITS + 2)
+#define PXP_WIN_BYTE_SIZE (PXP_WIN_DWORD_SIZE * 4)
+
+/********************************* GRC Dump **********************************/
+
+/* width of GRC dump register sequence length in bits */
+#define DUMP_SEQ_LEN_BITS 8
+#define DUMP_SEQ_LEN_MAX_VAL ((1 << DUMP_SEQ_LEN_BITS) - 1)
+
+/* width of GRC dump memory length in bits */
+#define DUMP_MEM_LEN_BITS 18
+#define DUMP_MEM_LEN_MAX_VAL ((1 << DUMP_MEM_LEN_BITS) - 1)
+
+/* width of register type ID in bits */
+#define REG_TYPE_ID_BITS 6
+#define REG_TYPE_ID_MAX_VAL ((1 << REG_TYPE_ID_BITS) - 1)
+
+/* width of block ID in bits */
+#define BLOCK_ID_BITS 8
+#define BLOCK_ID_MAX_VAL ((1 << BLOCK_ID_BITS) - 1)
+
+/******************************** Idle Check *********************************/
+
+/* max number of idle check predicate immediates */
+#define MAX_IDLE_CHK_PRED_IMM 3
+
+/* max number of idle check argument registers */
+#define MAX_IDLE_CHK_READ_REGS 3
+
+/* max number of idle check loops */
+#define MAX_IDLE_CHK_LOOPS 0x10000
+
+/* max idle check address increment */
+#define MAX_IDLE_CHK_INCREMENT 0x10000
+
+/* indicates an undefined idle check line index */
+#define IDLE_CHK_UNDEFINED_LINE_IDX 0xffffff
+
+/* max number of register values following the idle check header */
+#define IDLE_CHK_MAX_DUMP_REGS 2
+
+/* arguments for IDLE_CHK_MACRO_TYPE_QM_RD_WR */
+#define IDLE_CHK_QM_RD_WR_PTR 0
+#define IDLE_CHK_QM_RD_WR_BANK 1
+
+/**************************************/
+/* HSI Functions constants and macros */
+/**************************************/
+
+/* Number of VLAN priorities */
+#define NUM_OF_VLAN_PRIORITIES 8
+
+/* The MCP Trace metadata signature is duplicated in the Perl script that
+ * generates the NVRAM images.
+ */
+#define MCP_TRACE_META_IMAGE_SIGNATURE 0x669955aa
+
+/* Binary buffer header */
+struct bin_buffer_hdr {
+ u32 offset;
+ u32 length /* buffer length in bytes */;
+};
+
+/* binary buffer types */
+enum bin_buffer_type {
+ BIN_BUF_FW_VER_INFO /* fw_ver_info struct */,
+ BIN_BUF_INIT_CMD /* init commands */,
+ BIN_BUF_INIT_VAL /* init data */,
+ BIN_BUF_INIT_MODE_TREE /* init modes tree */,
+ BIN_BUF_IRO /* internal RAM offsets array */,
+ MAX_BIN_BUFFER_TYPE
+};
+
+/* Chip IDs */
+enum chip_ids {
+ CHIP_BB_A0 /* BB A0 chip ID */,
+ CHIP_BB_B0 /* BB B0 chip ID */,
+ CHIP_K2 /* AH chip ID */,
+ MAX_CHIP_IDS
+};
+
+enum idle_chk_severity_types {
+ IDLE_CHK_SEVERITY_ERROR /* idle check failure should cause an error */,
+ IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC,
+ IDLE_CHK_SEVERITY_WARNING,
+ MAX_IDLE_CHK_SEVERITY_TYPES
+};
+
+struct init_array_raw_hdr {
+ __le32 data;
+#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF /* init array params */
+#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4
+};
+
+struct init_array_standard_hdr {
+ __le32 data;
+#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF
+#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4
+};
+
+struct init_array_zipped_hdr {
+ __le32 data;
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4
+};
+
+struct init_array_pattern_hdr {
+ __le32 data;
+#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK 0xFFFFFF
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT 8
+};
+
+union init_array_hdr {
+ struct init_array_raw_hdr raw /* raw init array header */;
+ struct init_array_standard_hdr standard;
+ struct init_array_zipped_hdr zipped /* zipped init array header */;
+ struct init_array_pattern_hdr pattern /* pattern init array header */;
+};
+
+enum init_array_types {
+ INIT_ARR_STANDARD /* standard init array */,
+ INIT_ARR_ZIPPED /* zipped init array */,
+ INIT_ARR_PATTERN /* a repeated pattern */,
+ MAX_INIT_ARRAY_TYPES
+};
+
+/* init operation: callback */
+struct init_callback_op {
+ __le32 op_data;
+#define INIT_CALLBACK_OP_OP_MASK 0xF
+#define INIT_CALLBACK_OP_OP_SHIFT 0
+#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF
+#define INIT_CALLBACK_OP_RESERVED_SHIFT 4
+ __le16 callback_id /* Callback ID */;
+ __le16 block_id /* Block ID */;
+};
+
+/* init comparison types */
+enum init_comparison_types {
+ INIT_COMPARISON_EQ /* equal comparison */,
+ INIT_COMPARISON_OR /* OR comparison */,
+ INIT_COMPARISON_AND /* AND comparison */,
+ MAX_INIT_COMPARISON_TYPES
+};
+
+/* init operation: delay */
+struct init_delay_op {
+ __le32 op_data;
+#define INIT_DELAY_OP_OP_MASK 0xF
+#define INIT_DELAY_OP_OP_SHIFT 0
+#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF
+#define INIT_DELAY_OP_RESERVED_SHIFT 4
+ __le32 delay /* delay in us */;
+};
+
+/* init operation: if_mode */
+struct init_if_mode_op {
+ __le32 op_data;
+#define INIT_IF_MODE_OP_OP_MASK 0xF
+#define INIT_IF_MODE_OP_OP_SHIFT 0
+#define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF
+#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4
+#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF
+#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
+ __le16 reserved2;
+ __le16 modes_buf_offset;
+};
+
+/* init operation: if_phase */
+struct init_if_phase_op {
+ __le32 op_data;
+#define INIT_IF_PHASE_OP_OP_MASK 0xF
+#define INIT_IF_PHASE_OP_OP_SHIFT 0
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4
+#define INIT_IF_PHASE_OP_RESERVED1_MASK 0x7FF
+#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5
+#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF
+#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16
+ __le32 phase_data;
+#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF /* Init phase */
+#define INIT_IF_PHASE_OP_PHASE_SHIFT 0
+#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF
+#define INIT_IF_PHASE_OP_RESERVED2_SHIFT 8
+#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF /* Init phase ID */
+#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16
+};
+
+/* init mode operators */
+enum init_mode_ops {
+ INIT_MODE_OP_NOT /* init mode not operator */,
+ INIT_MODE_OP_OR /* init mode or operator */,
+ INIT_MODE_OP_AND /* init mode and operator */,
+ MAX_INIT_MODE_OPS
+};
+
+/* init operation: raw */
+struct init_raw_op {
+ __le32 op_data;
+#define INIT_RAW_OP_OP_MASK 0xF
+#define INIT_RAW_OP_OP_SHIFT 0
+#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF /* init param 1 */
+#define INIT_RAW_OP_PARAM1_SHIFT 4
+ __le32 param2 /* Init param 2 */;
+};
+
+/* init array params */
+struct init_op_array_params {
+ __le16 size /* array size in dwords */;
+ __le16 offset /* array start offset in dwords */;
+};
+
+/* Write init operation arguments */
+union init_write_args {
+ __le32 inline_val;
+ __le32 zeros_count;
+ __le32 array_offset;
+ struct init_op_array_params runtime;
+};
+
+/* init operation: write */
+struct init_write_op {
+ __le32 data;
+#define INIT_WRITE_OP_OP_MASK 0xF
+#define INIT_WRITE_OP_OP_SHIFT 0
+#define INIT_WRITE_OP_SOURCE_MASK 0x7
+#define INIT_WRITE_OP_SOURCE_SHIFT 4
+#define INIT_WRITE_OP_RESERVED_MASK 0x1
+#define INIT_WRITE_OP_RESERVED_SHIFT 7
+#define INIT_WRITE_OP_WIDE_BUS_MASK 0x1
+#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8
+#define INIT_WRITE_OP_ADDRESS_MASK 0x7FFFFF
+#define INIT_WRITE_OP_ADDRESS_SHIFT 9
+ union init_write_args args /* Write init operation arguments */;
+};
+
+/* init operation: read */
+struct init_read_op {
+ __le32 op_data;
+#define INIT_READ_OP_OP_MASK 0xF
+#define INIT_READ_OP_OP_SHIFT 0
+#define INIT_READ_OP_POLL_COMP_MASK 0x7
+#define INIT_READ_OP_POLL_COMP_SHIFT 4
+#define INIT_READ_OP_RESERVED_MASK 0x1
+#define INIT_READ_OP_RESERVED_SHIFT 7
+#define INIT_READ_OP_POLL_MASK 0x1
+#define INIT_READ_OP_POLL_SHIFT 8
+#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF
+#define INIT_READ_OP_ADDRESS_SHIFT 9
+ __le32 expected_val;
+};
+
+/* Init operations union */
+union init_op {
+ struct init_raw_op raw /* raw init operation */;
+ struct init_write_op write /* write init operation */;
+ struct init_read_op read /* read init operation */;
+ struct init_if_mode_op if_mode /* if_mode init operation */;
+ struct init_if_phase_op if_phase /* if_phase init operation */;
+ struct init_callback_op callback /* callback init operation */;
+ struct init_delay_op delay /* delay init operation */;
+};
+
+/* Init command operation types */
+enum init_op_types {
+ INIT_OP_READ /* GRC read init command */,
+ INIT_OP_WRITE /* GRC write init command */,
+ INIT_OP_IF_MODE,
+ INIT_OP_IF_PHASE,
+ INIT_OP_DELAY /* delay init command */,
+ INIT_OP_CALLBACK /* callback init command */,
+ MAX_INIT_OP_TYPES
+};
+
+/* init source types */
+enum init_source_types {
+ INIT_SRC_INLINE /* init value is included in the init command */,
+ INIT_SRC_ZEROS /* init value is all zeros */,
+ INIT_SRC_ARRAY /* init value is an array of values */,
+ INIT_SRC_RUNTIME /* init value is provided during runtime */,
+ MAX_INIT_SOURCE_TYPES
+};
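+
+/* Illustrative note (an assumption based on the definitions above, not a
+ * normative part of the HSI): the INIT_WRITE_OP_SOURCE field of a write
+ * init operation is expected to carry one of the init_source_types values,
+ * which in turn selects the meaningful member of union init_write_args:
+ *
+ *	INIT_SRC_INLINE  -> args.inline_val
+ *	INIT_SRC_ZEROS   -> args.zeros_count
+ *	INIT_SRC_ARRAY   -> args.array_offset
+ *	INIT_SRC_RUNTIME -> args.runtime (size/offset pair)
+ */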
+
+/* Internal RAM Offsets macro data */
+struct iro {
+ u32 base /* RAM field offset */;
+ u16 m1 /* multiplier 1 */;
+ u16 m2 /* multiplier 2 */;
+ u16 m3 /* multiplier 3 */;
+ u16 size /* RAM field size */;
+};
+
+/* QM per-port init parameters */
+struct init_qm_port_params {
+ u8 active /* Indicates if this port is active */;
+ u8 num_active_phys_tcs;
+ u16 num_pbf_cmd_lines;
+ u16 num_btb_blocks;
+ __le16 reserved;
+};
+
+/* QM per-PQ init parameters */
+struct init_qm_pq_params {
+ u8 vport_id /* VPORT ID */;
+ u8 tc_id /* TC ID */;
+ u8 wrr_group /* WRR group */;
+ u8 reserved;
+};
+
+/* QM per-vport init parameters */
+struct init_qm_vport_params {
+ u32 vport_rl;
+ u16 vport_wfq;
+ u16 first_tx_pq_id[NUM_OF_TCS];
+};
+
+/* Win 2 */
+#define GTT_BAR0_MAP_REG_IGU_CMD \
+ 0x00f000UL
+/* Win 3 */
+#define GTT_BAR0_MAP_REG_TSDM_RAM \
+ 0x010000UL
+/* Win 4 */
+#define GTT_BAR0_MAP_REG_MSDM_RAM \
+ 0x011000UL
+/* Win 5 */
+#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 \
+ 0x012000UL
+/* Win 6 */
+#define GTT_BAR0_MAP_REG_USDM_RAM \
+ 0x013000UL
+/* Win 7 */
+#define GTT_BAR0_MAP_REG_USDM_RAM_1024 \
+ 0x014000UL
+/* Win 8 */
+#define GTT_BAR0_MAP_REG_USDM_RAM_2048 \
+ 0x015000UL
+/* Win 9 */
+#define GTT_BAR0_MAP_REG_XSDM_RAM \
+ 0x016000UL
+/* Win 10 */
+#define GTT_BAR0_MAP_REG_YSDM_RAM \
+ 0x017000UL
+/* Win 11 */
+#define GTT_BAR0_MAP_REG_PSDM_RAM \
+ 0x018000UL
+
+/**
+ * @brief qed_qm_pf_mem_size - prepare QM ILT sizes
+ *
+ * Returns the required host memory size in 4KB units.
+ * Must be called before all QM init HSI functions.
+ *
+ * @param pf_id - physical function ID
+ * @param num_pf_cids - number of connections used by this PF
+ * @param num_vf_cids - number of connections used by VFs of this PF
+ * @param num_tids - number of tasks used by this PF
+ * @param num_pf_pqs - number of PQs used by this PF
+ * @param num_vf_pqs - number of PQs used by VFs of this PF
+ *
+ * @return The required host memory size in 4KB units.
+ */
+u32 qed_qm_pf_mem_size(u8 pf_id,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs);
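+
+/* A hedged, caller-side usage sketch (illustrative values only): the value
+ * returned here would typically size the PF's QM ILT allocation before any
+ * of the QM init HSI functions are called, e.g.
+ *
+ *	u32 qm_pages = qed_qm_pf_mem_size(pf_id, num_pf_cids, num_vf_cids,
+ *					  num_tids, num_pf_pqs, num_vf_pqs);
+ *	(allocate qm_pages * 4KB of host memory for the QM ILT)
+ */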
+
+struct qed_qm_common_rt_init_params {
+ u8 max_ports_per_engine;
+ u8 max_phys_tcs_per_port;
+ bool pf_rl_en;
+ bool pf_wfq_en;
+ bool vport_rl_en;
+ bool vport_wfq_en;
+ struct init_qm_port_params *port_params;
+};
+
+/**
+ * @brief qed_qm_common_rt_init - Prepare QM runtime init values for the
+ * engine phase.
+ *
+ * @param p_hwfn
+ * @param max_ports_per_engine - max number of ports per engine in HW
+ * @param max_phys_tcs_per_port - max number of physical TCs per port in HW
+ * @param pf_rl_en - enable per-PF rate limiters
+ * @param pf_wfq_en - enable per-PF WFQ
+ * @param vport_rl_en - enable per-VPORT rate limiters
+ * @param vport_wfq_en - enable per-VPORT WFQ
+ * @param port_params - array of size MAX_NUM_PORTS with
+ * parameters for each port
+ *
+ * @return 0 on success, -1 on error.
+ */
+int qed_qm_common_rt_init(
+ struct qed_hwfn *p_hwfn,
+ struct qed_qm_common_rt_init_params *p_params);
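+
+/* Illustrative call sequence (a sketch with hypothetical values): the
+ * engine-phase QM runtime init is driven by filling the params struct and
+ * passing it in, e.g.
+ *
+ *	struct qed_qm_common_rt_init_params params = {
+ *		.max_ports_per_engine	= 2,
+ *		.max_phys_tcs_per_port	= 4,
+ *		.pf_rl_en		= true,
+ *		.vport_rl_en		= true,
+ *		.port_params		= port_params,
+ *	};
+ *	rc = qed_qm_common_rt_init(p_hwfn, &params);
+ */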
+
+struct qed_qm_pf_rt_init_params {
+ u8 port_id;
+ u8 pf_id;
+ u8 max_phys_tcs_per_port;
+ bool is_first_pf;
+ u32 num_pf_cids;
+ u32 num_vf_cids;
+ u32 num_tids;
+ u16 start_pq;
+ u16 num_pf_pqs;
+ u16 num_vf_pqs;
+ u8 start_vport;
+ u8 num_vports;
+ u8 pf_wfq;
+ u32 pf_rl;
+ struct init_qm_pq_params *pq_params;
+ struct init_qm_vport_params *vport_params;
+};
+
+int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params);
+
+/**
+ * @brief qed_init_pf_rl - Initializes the rate limit of the specified PF
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param pf_id - PF ID
+ * @param pf_rl - rate limit in Mb/sec units
+ *
+ * @return 0 on success, -1 on error.
+ */
+int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 pf_id,
+ u32 pf_rl);
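+
+/* Usage sketch (illustrative values): limiting a PF to 10 Gb/s would be
+ * expressed in Mb/sec units; qed_init_vport_rl() below follows the same
+ * pattern per VPORT:
+ *
+ *	rc = qed_init_pf_rl(p_hwfn, p_ptt, pf_id, 10000);
+ */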
+
+/**
+ * @brief qed_init_vport_rl - Initializes the rate limit of the specified VPORT
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param vport_id - VPORT ID
+ * @param vport_rl - rate limit in Mb/sec units
+ *
+ * @return 0 on success, -1 on error.
+ */
+int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 vport_id,
+ u32 vport_rl);
+
+/**
+ * @brief qed_send_qm_stop_cmd - Sends a stop command to the QM
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param is_release_cmd - true for release, false for stop.
+ * @param is_tx_pq - true for Tx PQs, false for Other PQs.
+ * @param start_pq - first PQ ID to stop
+ * @param num_pqs - Number of PQs to stop, starting from start_pq.
+ *
+ * @return bool, true if successful, false if timeout occurred while waiting
+ * for QM command done.
+ */
+bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool is_release_cmd,
+ bool is_tx_pq,
+ u16 start_pq,
+ u16 num_pqs);
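+
+/* Usage sketch (illustrative): a typical flow is to first stop a range of
+ * Tx PQs and later release them, checking the boolean result for a QM
+ * command timeout:
+ *
+ *	if (!qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
+ *				  start_pq, num_pqs))
+ *		(the stop command timed out)
+ *	...
+ *	qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, start_pq, num_pqs);
+ */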
+
+/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
+#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
+#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
+/* Tstorm port statistics */
+#define TSTORM_PORT_STAT_OFFSET(port_id) (IRO[1].base + \
+ ((port_id) * \
+ IRO[1].m1))
+#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
+/* Ustorm VF-PF Channel ready flag */
+#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) (IRO[2].base + \
+ ((vf_id) * \
+ IRO[2].m1))
+#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[2].size)
+/* Ustorm Final flr cleanup ack */
+#define USTORM_FLR_FINAL_ACK_OFFSET (IRO[3].base)
+#define USTORM_FLR_FINAL_ACK_SIZE (IRO[3].size)
+/* Ustorm Event ring consumer */
+#define USTORM_EQE_CONS_OFFSET(pf_id) (IRO[4].base + \
+ ((pf_id) * \
+ IRO[4].m1))
+#define USTORM_EQE_CONS_SIZE (IRO[4].size)
+/* Ustorm Completion ring consumer */
+#define USTORM_CQ_CONS_OFFSET(global_queue_id) (IRO[5].base + \
+ ((global_queue_id) * \
+ IRO[5].m1))
+#define USTORM_CQ_CONS_SIZE (IRO[5].size)
+/* Xstorm Integration Test Data */
+#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[6].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[6].size)
+/* Ystorm Integration Test Data */
+#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[7].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[7].size)
+/* Pstorm Integration Test Data */
+#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size)
+/* Tstorm Integration Test Data */
+#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
+/* Mstorm Integration Test Data */
+#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
+/* Ustorm Integration Test Data */
+#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
+#define USTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
+/* Tstorm producers */
+#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) (IRO[12].base + \
+ ((core_rx_queue_id) * \
+ IRO[12].m1))
+#define TSTORM_LL2_RX_PRODS_SIZE (IRO[12].size)
+/* Tstorm LiteL2 queue statistics */
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_q_id) (IRO[13].base + \
+ ((core_rx_q_id) * \
+ IRO[13].m1))
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[13].size)
+/* Ustorm LiteL2 queue statistics */
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_q_id) (IRO[14].base + \
+ ((core_rx_q_id) * \
+ IRO[14].m1))
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[14].size)
+/* Pstorm LiteL2 queue statistics */
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_txst_id) (IRO[15].base + \
+ ((core_txst_id) * \
+ IRO[15].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size)
+/* Mstorm queue statistics */
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[16].base + \
+ ((stat_counter_id) * \
+ IRO[16].m1))
+#define MSTORM_QUEUE_STAT_SIZE (IRO[16].size)
+/* Mstorm producers */
+#define MSTORM_PRODS_OFFSET(queue_id) (IRO[17].base + \
+ ((queue_id) * \
+ IRO[17].m1))
+#define MSTORM_PRODS_SIZE (IRO[17].size)
+/* TPA aggregation timeout in us resolution (on ASIC) */
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[18].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[18].size)
+/* Ustorm queue statistics */
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[19].base + \
+ ((stat_counter_id) * \
+ IRO[19].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[19].size)
+/* Ustorm queue zone */
+#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[20].base + \
+ ((queue_id) * \
+ IRO[20].m1))
+#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[20].size)
+/* Pstorm queue statistics */
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[21].base + \
+ ((stat_counter_id) * \
+ IRO[21].m1))
+#define PSTORM_QUEUE_STAT_SIZE (IRO[21].size)
+/* Tstorm last parser message */
+#define TSTORM_ETH_PRS_INPUT_OFFSET(pf_id) (IRO[22].base + \
+ ((pf_id) * \
+ IRO[22].m1))
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[22].size)
+/* Ystorm queue zone */
+#define YSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[23].base + \
+ ((queue_id) * \
+ IRO[23].m1))
+#define YSTORM_ETH_QUEUE_ZONE_SIZE (IRO[23].size)
+/* Ystorm cqe producer */
+#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[24].base + \
+ ((rss_id) * \
+ IRO[24].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE (IRO[24].size)
+/* Ustorm cqe producer */
+#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[25].base + \
+ ((rss_id) * \
+ IRO[25].m1))
+#define USTORM_TOE_CQ_PROD_SIZE (IRO[25].size)
+/* Ustorm grq producer */
+#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) (IRO[26].base + \
+ ((pf_id) * \
+ IRO[26].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE (IRO[26].size)
+/* Tstorm cmdq-cons of given command queue-id */
+#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) (IRO[27].base + \
+ ((cmdq_queue_id) * \
+ IRO[27].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[27].size)
+/* Mstorm rq-cons of given queue-id */
+#define MSTORM_SCSI_RQ_CONS_OFFSET(rq_queue_id) (IRO[28].base + \
+ ((rq_queue_id) * \
+ IRO[28].m1))
+#define MSTORM_SCSI_RQ_CONS_SIZE (IRO[28].size)
+/* Pstorm RoCE statistics */
+#define PSTORM_ROCE_STAT_OFFSET(stat_counter_id) (IRO[29].base + \
+ ((stat_counter_id) * \
+ IRO[29].m1))
+#define PSTORM_ROCE_STAT_SIZE (IRO[29].size)
+/* Tstorm RoCE statistics */
+#define TSTORM_ROCE_STAT_OFFSET(stat_counter_id) (IRO[30].base + \
+ ((stat_counter_id) * \
+ IRO[30].m1))
+#define TSTORM_ROCE_STAT_SIZE (IRO[30].size)
+
+static const struct iro iro_arr[31] = {
+ { 0x10, 0x0, 0x0, 0x0, 0x8 },
+ { 0x4448, 0x60, 0x0, 0x0, 0x60 },
+ { 0x498, 0x8, 0x0, 0x0, 0x4 },
+ { 0x494, 0x0, 0x0, 0x0, 0x4 },
+ { 0x10, 0x8, 0x0, 0x0, 0x2 },
+ { 0x90, 0x8, 0x0, 0x0, 0x2 },
+ { 0x4540, 0x0, 0x0, 0x0, 0xf8 },
+ { 0x39e0, 0x0, 0x0, 0x0, 0xf8 },
+ { 0x2598, 0x0, 0x0, 0x0, 0xf8 },
+ { 0x4350, 0x0, 0x0, 0x0, 0xf8 },
+ { 0x52d0, 0x0, 0x0, 0x0, 0xf8 },
+ { 0x7a48, 0x0, 0x0, 0x0, 0xf8 },
+ { 0x100, 0x8, 0x0, 0x0, 0x8 },
+ { 0x5808, 0x10, 0x0, 0x0, 0x10 },
+ { 0xb100, 0x30, 0x0, 0x0, 0x30 },
+ { 0x95c0, 0x30, 0x0, 0x0, 0x30 },
+ { 0x54f8, 0x40, 0x0, 0x0, 0x40 },
+ { 0x200, 0x10, 0x0, 0x0, 0x8 },
+ { 0x9e70, 0x0, 0x0, 0x0, 0x4 },
+ { 0x7ca0, 0x40, 0x0, 0x0, 0x30 },
+ { 0xd00, 0x8, 0x0, 0x0, 0x8 },
+ { 0x2790, 0x80, 0x0, 0x0, 0x38 },
+ { 0xa520, 0xf0, 0x0, 0x0, 0xf0 },
+ { 0x80, 0x8, 0x0, 0x0, 0x8 },
+ { 0xac0, 0x8, 0x0, 0x0, 0x8 },
+ { 0x2580, 0x8, 0x0, 0x0, 0x8 },
+ { 0x2500, 0x8, 0x0, 0x0, 0x8 },
+ { 0x440, 0x8, 0x0, 0x0, 0x2 },
+ { 0x1800, 0x8, 0x0, 0x0, 0x2 },
+ { 0x27c8, 0x80, 0x0, 0x0, 0x10 },
+ { 0x4710, 0x10, 0x0, 0x0, 0x10 },
+};
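+
+/* Illustrative note on the IRO table above (a reading aid, not new HSI):
+ * each *_OFFSET macro combines an iro_arr entry's base with the per-index
+ * multiplier m1. For example, with IRO[1] = { 0x4448, 0x60, ... },
+ * TSTORM_PORT_STAT_OFFSET(port_id) evaluates to 0x4448 + port_id * 0x60,
+ * and TSTORM_PORT_STAT_SIZE is IRO[1].size (0x60 bytes).
+ */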
+
+/* Runtime array offsets */
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 17
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 18
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 19
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 20
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 21
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 22
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 23
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 760
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1496
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
+#define CAU_REG_PI_MEMORY_RT_OFFSET 2232
+#define CAU_REG_PI_MEMORY_RT_SIZE 4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6648
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6649
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6650
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6651
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6652
+#define PRS_REG_SEARCH_TCP_RT_OFFSET 6653
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6654
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6655
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6656
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6657
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6658
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6659
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6660
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6661
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6662
+#define SRC_REG_FIRSTFREE_RT_OFFSET 6663
+#define SRC_REG_FIRSTFREE_RT_SIZE 2
+#define SRC_REG_LASTFREE_RT_OFFSET 6665
+#define SRC_REG_LASTFREE_RT_SIZE 2
+#define SRC_REG_COUNTFREE_RT_OFFSET 6667
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6668
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6669
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6670
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6671
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6672
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6673
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6674
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6675
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6676
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6677
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6678
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6679
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6680
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6681
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6682
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6683
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6684
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6685
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6686
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6687
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6688
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6689
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6690
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6691
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6692
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6693
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6694
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6695
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6696
+#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6697
+#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6698
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6699
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6700
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6701
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28701
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28702
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28703
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28704
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28705
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28706
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28707
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28708
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28709
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28710
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28711
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29127
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29639
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29640
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29641
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29642
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29643
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29644
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29645
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29646
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29647
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29648
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29649
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29650
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29651
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29652
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29653
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29654
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29655
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29656
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29657
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29658
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29659
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29660
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29661
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29662
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29663
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29664
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29665
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29666
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29667
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29668
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29669
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29670
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29671
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29672
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29673
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29674
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29675
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29676
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29677
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29678
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29679
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29680
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29681
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29682
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29683
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29684
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29685
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29686
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29687
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29688
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29689
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29690
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29691
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29692
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29693
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29694
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29695
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29696
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29697
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29698
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29699
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29700
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29701
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29702
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29703
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29704
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29705
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29706
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
+#define QM_REG_VOQCRDLINE_RT_OFFSET 29834
+#define QM_REG_VOQCRDLINE_RT_SIZE 20
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29854
+#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29874
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29875
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29876
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29877
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29878
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29879
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29880
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29881
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29882
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29883
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29884
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29885
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29886
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29887
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29888
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29889
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29890
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29891
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29892
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29893
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29894
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29895
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29896
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29897
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29898
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29899
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29900
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29901
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29902
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29903
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29904
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29905
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29906
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29907
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29908
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29909
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29910
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29911
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29912
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29913
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29914
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29915
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29916
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29917
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29918
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29919
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29920
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29921
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29922
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29923
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29924
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29925
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29926
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29927
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29928
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29929
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29930
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29931
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29932
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29933
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29934
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29935
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29936
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29937
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29938
+#define QM_REG_PQTX2PF_38_RT_OFFSET 29939
+#define QM_REG_PQTX2PF_39_RT_OFFSET 29940
+#define QM_REG_PQTX2PF_40_RT_OFFSET 29941
+#define QM_REG_PQTX2PF_41_RT_OFFSET 29942
+#define QM_REG_PQTX2PF_42_RT_OFFSET 29943
+#define QM_REG_PQTX2PF_43_RT_OFFSET 29944
+#define QM_REG_PQTX2PF_44_RT_OFFSET 29945
+#define QM_REG_PQTX2PF_45_RT_OFFSET 29946
+#define QM_REG_PQTX2PF_46_RT_OFFSET 29947
+#define QM_REG_PQTX2PF_47_RT_OFFSET 29948
+#define QM_REG_PQTX2PF_48_RT_OFFSET 29949
+#define QM_REG_PQTX2PF_49_RT_OFFSET 29950
+#define QM_REG_PQTX2PF_50_RT_OFFSET 29951
+#define QM_REG_PQTX2PF_51_RT_OFFSET 29952
+#define QM_REG_PQTX2PF_52_RT_OFFSET 29953
+#define QM_REG_PQTX2PF_53_RT_OFFSET 29954
+#define QM_REG_PQTX2PF_54_RT_OFFSET 29955
+#define QM_REG_PQTX2PF_55_RT_OFFSET 29956
+#define QM_REG_PQTX2PF_56_RT_OFFSET 29957
+#define QM_REG_PQTX2PF_57_RT_OFFSET 29958
+#define QM_REG_PQTX2PF_58_RT_OFFSET 29959
+#define QM_REG_PQTX2PF_59_RT_OFFSET 29960
+#define QM_REG_PQTX2PF_60_RT_OFFSET 29961
+#define QM_REG_PQTX2PF_61_RT_OFFSET 29962
+#define QM_REG_PQTX2PF_62_RT_OFFSET 29963
+#define QM_REG_PQTX2PF_63_RT_OFFSET 29964
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29965
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29966
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29967
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29968
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29969
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29970
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29971
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29972
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29973
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29974
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29975
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29976
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29977
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29978
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29979
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29980
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29981
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29982
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29983
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29984
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29985
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29986
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29987
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29988
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29989
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29990
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29991
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29992
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29993
+#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30249
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30505
+#define QM_REG_RLGLBLCRD_RT_SIZE 256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30761
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30762
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30763
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30764
+#define QM_REG_RLPFINCVAL_RT_SIZE 16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30780
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_RLPFCRD_RT_OFFSET 30796
+#define QM_REG_RLPFCRD_RT_SIZE 16
+#define QM_REG_RLPFENABLE_RT_OFFSET 30812
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30813
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30814
+#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30830
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_WFQPFCRD_RT_OFFSET 30846
+#define QM_REG_WFQPFCRD_RT_SIZE 160
+#define QM_REG_WFQPFENABLE_RT_OFFSET 31006
+#define QM_REG_WFQVPENABLE_RT_OFFSET 31007
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31008
+#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
+#define QM_REG_TXPQMAP_RT_OFFSET 31520
+#define QM_REG_TXPQMAP_RT_SIZE 512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32032
+#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
+#define QM_REG_WFQVPUPPERBOUND_RT_OFFSET 32544
+#define QM_REG_WFQVPUPPERBOUND_RT_SIZE 512
+#define QM_REG_WFQVPCRD_RT_OFFSET 33056
+#define QM_REG_WFQVPCRD_RT_SIZE 512
+#define QM_REG_WFQVPMAP_RT_OFFSET 33568
+#define QM_REG_WFQVPMAP_RT_SIZE 512
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34080
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
+#define NIG_REG_LLH_CLS_TYPE_DUALMODE_RT_OFFSET 34240
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34241
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34242
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34243
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34244
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34245
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34246
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34247
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34251
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34255
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34259
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34260
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34292
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34308
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34324
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34340
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34356
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34357
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34358
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34359
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34360
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34361
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34362
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34363
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34364
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34365
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34366
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34367
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34368
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34369
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34370
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34371
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34372
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34373
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34374
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34375
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34376
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34377
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34378
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34379
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34380
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34381
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34382
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34383
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34384
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34385
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34386
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34387
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34388
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34389
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34390
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34391
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34392
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34393
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34394
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34395
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34396
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34397
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34398
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34399
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34400
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34401
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34402
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34403
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34404
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34405
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34406
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34407
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34408
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34409
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34410
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34411
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34412
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34413
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34414
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34415
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34416
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34417
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34418
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34419
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34420
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34421
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34422
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34423
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34424
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34425
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34426
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34427
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34428
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34429
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34430
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34431
+
+#define RUNTIME_ARRAY_SIZE 34432
+
+/* The eth storm context for the Ystorm */
+struct ystorm_eth_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* The eth storm context for the Pstorm */
+struct pstorm_eth_conn_st_ctx {
+ __le32 reserved[8];
+};
+
+/* The eth storm context for the Xstorm */
+struct xstorm_eth_conn_st_ctx {
+ __le32 reserved[60];
+};
+
+struct xstorm_eth_conn_ag_ctx {
+ u8 reserved0 /* cdu_validation */;
+ u8 eth_state /* state */;
+ u8 flags0;
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1 /* bit4 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1 /* bit10 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1 /* bit11 */
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1 /* bit12 */
+#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1 /* bit13 */
+#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 /* bit14 */
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 /* bit15 */
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */
+#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */
+#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */
+#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */
+#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */
+#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 /* cf16 */
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 /* cf19 */
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1 /* cf11en */
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1 /* cf12en */
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1 /* cf13en */
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1 /* cf14en */
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1 /* cf15en */
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 /* cf16en */
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 /* cf18en */
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 /* cf19en */
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf20en */
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1 /* cf21en */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 /* cf22en */
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 /* cf23en */
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1 /* rule0en */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1 /* rule1en */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1 /* rule2en */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1 /* rule3en */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 /* rule4en */
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 /* rule8en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1 /* rule9en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1 /* rule10en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1 /* rule11en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 /* rule12en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 /* rule13en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1 /* rule14en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1 /* rule15en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1 /* rule16en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1 /* rule17en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1 /* rule18en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1 /* rule19en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 /* rule20en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 /* rule21en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 /* rule22en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 /* rule23en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 /* rule24en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 /* rule25en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 /* bit16 */
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 /* bit17 */
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 /* bit18 */
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 /* bit19 */
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 /* bit20 */
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 /* bit21 */
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 /* cf23 */
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id /* byte2 */;
+ __le16 physical_q0 /* physical_q0 */;
+ __le16 word1 /* physical_q1 */;
+ __le16 edpm_num_bds /* physical_q2 */;
+ __le16 tx_bd_cons /* word3 */;
+ __le16 tx_bd_prod /* word4 */;
+ __le16 go_to_bd_cons /* word5 */;
+ __le16 conn_dpi /* conn_dpi */;
+ u8 byte3 /* byte3 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ u8 byte6 /* byte6 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* cf_array0 */;
+ __le32 reg6 /* cf_array1 */;
+ __le16 word7 /* word7 */;
+ __le16 word8 /* word8 */;
+ __le16 word9 /* word9 */;
+ __le16 word10 /* word10 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ __le32 reg9 /* reg9 */;
+ u8 byte7 /* byte7 */;
+ u8 byte8 /* byte8 */;
+ u8 byte9 /* byte9 */;
+ u8 byte10 /* byte10 */;
+ u8 byte11 /* byte11 */;
+ u8 byte12 /* byte12 */;
+ u8 byte13 /* byte13 */;
+ u8 byte14 /* byte14 */;
+ u8 byte15 /* byte15 */;
+ u8 byte16 /* byte16 */;
+ __le16 word11 /* word11 */;
+ __le32 reg10 /* reg10 */;
+ __le32 reg11 /* reg11 */;
+ __le32 reg12 /* reg12 */;
+ __le32 reg13 /* reg13 */;
+ __le32 reg14 /* reg14 */;
+ __le32 reg15 /* reg15 */;
+ __le32 reg16 /* reg16 */;
+ __le32 reg17 /* reg17 */;
+ __le32 reg18 /* reg18 */;
+ __le32 reg19 /* reg19 */;
+ __le16 word12 /* word12 */;
+ __le16 word13 /* word13 */;
+ __le16 word14 /* word14 */;
+ __le16 word15 /* word15 */;
+};
+
+/* The eth storm context for the Tstorm */
+struct tstorm_eth_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* The eth storm context for the Mstorm */
+struct mstorm_eth_conn_st_ctx {
+ __le32 reserved[8];
+};
+
+/* The eth storm context for the Ustorm */
+struct ustorm_eth_conn_st_ctx {
+ __le32 reserved[40];
+};
+
+/* eth connection context */
+struct eth_conn_context {
+ struct ystorm_eth_conn_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2] /* padding */;
+ struct pstorm_eth_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2] /* padding */;
+ struct xstorm_eth_conn_st_ctx xstorm_st_context;
+ struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_eth_conn_st_ctx tstorm_st_context;
+ struct regpair tstorm_st_padding[2] /* padding */;
+ struct mstorm_eth_conn_st_ctx mstorm_st_context;
+ struct ustorm_eth_conn_st_ctx ustorm_st_context;
+};
+
+enum eth_filter_action {
+ ETH_FILTER_ACTION_REMOVE,
+ ETH_FILTER_ACTION_ADD,
+ ETH_FILTER_ACTION_REPLACE,
+ MAX_ETH_FILTER_ACTION
+};
+
+struct eth_filter_cmd {
+ u8 type /* Filter Type (MAC/VLAN/Pair/VNI) */;
+ u8 vport_id /* the vport id */;
+ u8 action /* filter command action: add/remove/replace */;
+ u8 reserved0;
+ __le32 vni;
+ __le16 mac_lsb;
+ __le16 mac_mid;
+ __le16 mac_msb;
+ __le16 vlan_id;
+};
+
+struct eth_filter_cmd_header {
+ u8 rx;
+ u8 tx;
+ u8 cmd_cnt;
+ u8 assert_on_error;
+ u8 reserved1[4];
+};
+
+enum eth_filter_type {
+ ETH_FILTER_TYPE_MAC,
+ ETH_FILTER_TYPE_VLAN,
+ ETH_FILTER_TYPE_PAIR,
+ ETH_FILTER_TYPE_INNER_MAC,
+ ETH_FILTER_TYPE_INNER_VLAN,
+ ETH_FILTER_TYPE_INNER_PAIR,
+ ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR,
+ ETH_FILTER_TYPE_MAC_VNI_PAIR,
+ ETH_FILTER_TYPE_VNI,
+ MAX_ETH_FILTER_TYPE
+};
+
+enum eth_ramrod_cmd_id {
+ ETH_RAMROD_UNUSED,
+ ETH_RAMROD_VPORT_START /* VPort Start Ramrod */,
+ ETH_RAMROD_VPORT_UPDATE /* VPort Update Ramrod */,
+ ETH_RAMROD_VPORT_STOP /* VPort Stop Ramrod */,
+ ETH_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */,
+ ETH_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
+ ETH_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
+ ETH_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
+ ETH_RAMROD_FILTERS_UPDATE /* Add or Remove Mac/Vlan/Pair filters */,
+ ETH_RAMROD_RX_QUEUE_UPDATE /* RX Queue Update Ramrod */,
+ ETH_RAMROD_RESERVED,
+ ETH_RAMROD_RESERVED2,
+ ETH_RAMROD_RESERVED3,
+ ETH_RAMROD_RESERVED4,
+ ETH_RAMROD_RESERVED5,
+ ETH_RAMROD_RESERVED6,
+ ETH_RAMROD_RESERVED7,
+ ETH_RAMROD_RESERVED8,
+ MAX_ETH_RAMROD_CMD_ID
+};
+
+struct eth_vport_rss_config {
+ __le16 capabilities;
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT 0
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT 1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT 2
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT 3
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT 4
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6
+#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_TCP_FRAG_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_TCP_FRAG_SHIFT 7
+#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_UDP_FRAG_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_UDP_FRAG_SHIFT 8
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x7F
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 9
+ u8 rss_id;
+ u8 rss_mode;
+ u8 update_rss_key;
+ u8 update_rss_ind_table;
+ u8 update_rss_capabilities;
+ u8 tbl_size;
+ __le32 reserved2[2];
+ __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM];
+ __le32 rss_key[ETH_RSS_KEY_SIZE_REGS];
+ __le32 reserved3[2];
+};
+
+enum eth_vport_rss_mode {
+ ETH_VPORT_RSS_MODE_DISABLED,
+ ETH_VPORT_RSS_MODE_REGULAR,
+ MAX_ETH_VPORT_RSS_MODE
+};
+
+struct eth_vport_rx_mode {
+ __le16 state;
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT 0
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT 3
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5
+#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF
+#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6
+ __le16 reserved2[3];
+};
+
+struct eth_vport_tpa_param {
+ u64 reserved[2];
+};
+
+struct eth_vport_tx_mode {
+ __le16 state;
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT 0
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT 2
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF
+#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5
+ __le16 reserved2[3];
+};
+
+struct rx_queue_start_ramrod_data {
+ __le16 rx_queue_id;
+ __le16 num_of_pbl_pages;
+ __le16 bd_max_bytes;
+ __le16 sb_id;
+ u8 sb_index;
+ u8 vport_id;
+ u8 default_rss_queue_flg;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 stats_counter_id;
+ u8 pin_context;
+ u8 pxp_tph_valid_bd;
+ u8 pxp_tph_valid_pkt;
+ u8 pxp_st_hint;
+ __le16 pxp_st_index;
+ u8 reserved[4];
+ struct regpair cqe_pbl_addr;
+ struct regpair bd_base;
+ struct regpair sge_base;
+};
+
+struct rx_queue_stop_ramrod_data {
+ __le16 rx_queue_id;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 vport_id;
+ u8 reserved[3];
+};
+
+struct rx_queue_update_ramrod_data {
+ __le16 rx_queue_id;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 init_sge_ring_flg;
+ u8 vport_id;
+ u8 pxp_tph_valid_sge;
+ u8 pxp_st_hint;
+ __le16 pxp_st_index;
+ u8 reserved[6];
+ struct regpair sge_base;
+};
+
+struct tx_queue_start_ramrod_data {
+ __le16 sb_id;
+ u8 sb_index;
+ u8 vport_id;
+ u8 tc;
+ u8 stats_counter_id;
+ __le16 qm_pq_id;
+ u8 flags;
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED0_MASK 0x1F
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED0_SHIFT 3
+ u8 pin_context;
+ u8 pxp_tph_valid_bd;
+ u8 pxp_tph_valid_pkt;
+ __le16 pxp_st_index;
+ u8 pxp_st_hint;
+ u8 reserved1[3];
+ __le16 queue_zone_id;
+ __le16 test_dup_count;
+ __le16 pbl_size;
+ struct regpair pbl_base_addr;
+};
+
+struct tx_queue_stop_ramrod_data {
+ __le16 reserved[4];
+};
+
+struct vport_filter_update_ramrod_data {
+ struct eth_filter_cmd_header filter_cmd_hdr;
+ struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT];
+};
+
+struct vport_start_ramrod_data {
+ u8 vport_id;
+ u8 sw_fid;
+ __le16 mtu;
+ u8 drop_ttl0_en;
+ u8 inner_vlan_removal_en;
+ struct eth_vport_rx_mode rx_mode;
+ struct eth_vport_tx_mode tx_mode;
+ struct eth_vport_tpa_param tpa_param;
+ __le16 sge_buff_size;
+ u8 max_sges_num;
+ u8 tx_switching_en;
+ u8 anti_spoofing_en;
+ u8 default_vlan_en;
+ u8 handle_ptp_pkts;
+ u8 silent_vlan_removal_en;
+ __le16 default_vlan;
+ u8 untagged;
+ u8 reserved[7];
+};
+
+struct vport_stop_ramrod_data {
+ u8 vport_id;
+ u8 reserved[7];
+};
+
+struct vport_update_ramrod_data_cmn {
+ u8 vport_id;
+ u8 update_rx_active_flg;
+ u8 rx_active_flg;
+ u8 update_tx_active_flg;
+ u8 tx_active_flg;
+ u8 update_rx_mode_flg;
+ u8 update_tx_mode_flg;
+ u8 update_approx_mcast_flg;
+ u8 update_rss_flg;
+ u8 update_inner_vlan_removal_en_flg;
+ u8 inner_vlan_removal_en;
+ u8 update_tpa_param_flg;
+ u8 update_tpa_en_flg;
+ u8 update_sge_param_flg;
+ __le16 sge_buff_size;
+ u8 max_sges_num;
+ u8 update_tx_switching_en_flg;
+ u8 tx_switching_en;
+ u8 update_anti_spoofing_en_flg;
+ u8 anti_spoofing_en;
+ u8 update_handle_ptp_pkts;
+ u8 handle_ptp_pkts;
+ u8 update_default_vlan_en_flg;
+ u8 default_vlan_en;
+ u8 update_default_vlan_flg;
+ __le16 default_vlan;
+ u8 update_accept_any_vlan_flg;
+ u8 accept_any_vlan;
+ u8 silent_vlan_removal_en;
+ u8 reserved;
+};
+
+struct vport_update_ramrod_mcast {
+ __le32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
+};
+
+struct vport_update_ramrod_data {
+ struct vport_update_ramrod_data_cmn common;
+ struct eth_vport_rx_mode rx_mode;
+ struct eth_vport_tx_mode tx_mode;
+ struct eth_vport_tpa_param tpa_param;
+ struct vport_update_ramrod_mcast approx_mcast;
+ struct eth_vport_rss_config rss_config;
+};
+
+struct mstorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
+#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
+#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0 /* word0 */;
+ __le16 word1 /* word1 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+};
+
+struct tstorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* reg5 */;
+ __le32 reg6 /* reg6 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 rx_bd_cons /* word0 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ __le16 rx_bd_prod /* word1 */;
+ __le16 word2 /* conn_dpi */;
+ __le16 word3 /* word3 */;
+ __le32 reg9 /* reg9 */;
+ __le32 reg10 /* reg10 */;
+};
+
+struct ustorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define USTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define USTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 /* cf4 */
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 /* cf5 */
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf6 */
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
+ u8 flags2;
+#define USTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define USTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define USTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 /* cf4en */
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 /* cf5en */
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf6en */
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* conn_dpi */;
+ __le16 tx_bd_cons /* word1 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le16 tx_drv_bd_cons /* word2 */;
+ __le16 rx_drv_cqe_cons /* word3 */;
+};
+
+struct xstorm_eth_hw_conn_ag_ctx {
+ u8 reserved0 /* cdu_validation */;
+ u8 eth_state /* state */;
+ u8 flags0;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id /* byte2 */;
+ __le16 physical_q0 /* physical_q0 */;
+ __le16 word1 /* physical_q1 */;
+ __le16 edpm_num_bds /* physical_q2 */;
+ __le16 tx_bd_cons /* word3 */;
+ __le16 tx_bd_prod /* word4 */;
+ __le16 go_to_bd_cons /* word5 */;
+ __le16 conn_dpi /* conn_dpi */;
+};
+
+#define VF_MAX_STATIC 192 /* In case of K2 */
+
+#define MCP_GLOB_PATH_MAX 2
+#define MCP_PORT_MAX 2 /* Global */
+#define MCP_GLOB_PORT_MAX 4 /* Global */
+#define MCP_GLOB_FUNC_MAX 16 /* Global */
+
+typedef u32 offsize_t; /* In DWORDS !!! */
+/* Offset from the beginning of the MCP scratchpad */
+#define OFFSIZE_OFFSET_SHIFT 0
+#define OFFSIZE_OFFSET_MASK 0x0000ffff
+/* Size of specific element (not the whole array if any) */
+#define OFFSIZE_SIZE_SHIFT 16
+#define OFFSIZE_SIZE_MASK 0xffff0000
+
+/* SECTION_OFFSET calculates the offset in bytes from an offsize value */
+#define SECTION_OFFSET(_offsize) ((((_offsize & \
+ OFFSIZE_OFFSET_MASK) >> \
+ OFFSIZE_OFFSET_SHIFT) << 2))
+
+/* QED_SECTION_SIZE calculates the size in bytes from an offsize value */
+#define QED_SECTION_SIZE(_offsize) (((_offsize & \
+ OFFSIZE_SIZE_MASK) >> \
+ OFFSIZE_SIZE_SHIFT) << 2)
+
+/* SECTION_ADDR returns the GRC addr of a section, given offsize and index
+ * within section.
+ */
+#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + \
+ SECTION_OFFSET(_offsize) + \
+ (QED_SECTION_SIZE(_offsize) * idx))
+
+/* SECTION_OFFSIZE_ADDR returns the GRC address of a section's offsize entry.
+ * Use offsetof(), since OFFSETUP collides with the firmware definition.
+ */
+#define SECTION_OFFSIZE_ADDR(_pub_base, _section) (_pub_base + \
+ offsetof(struct \
+ mcp_public_data, \
+ sections[_section]))
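+
+/* Illustrative sketch only (hypothetical helper, not part of the firmware
+ * HSI): shows how an offsize word read from the MCP scratchpad decodes into
+ * the GRC address of entry @idx of a section; this mirrors the SECTION_ADDR()
+ * macro above. MCP_REG_SCRATCH is assumed to be provided by the register
+ * definitions.
+ */
+static inline u32 qed_hypothetical_section_addr(offsize_t offsize, u32 idx)
+{
+ u32 offset_bytes = ((offsize & OFFSIZE_OFFSET_MASK) >>
+ OFFSIZE_OFFSET_SHIFT) << 2; /* dwords to bytes */
+ u32 size_bytes = ((offsize & OFFSIZE_SIZE_MASK) >>
+ OFFSIZE_SIZE_SHIFT) << 2; /* dwords to bytes */
+
+ return MCP_REG_SCRATCH + offset_bytes + size_bytes * idx;
+}
+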
+/* PHY configuration */
+struct pmm_phy_cfg {
+ u32 speed;
+#define PMM_SPEED_AUTONEG 0
+
+ u32 pause; /* bitmask */
+#define PMM_PAUSE_NONE 0x0
+#define PMM_PAUSE_AUTONEG 0x1
+#define PMM_PAUSE_RX 0x2
+#define PMM_PAUSE_TX 0x4
+
+ u32 adv_speed; /* Default should be the speed_cap_mask */
+ u32 loopback_mode;
+#define PMM_LOOPBACK_NONE 0
+#define PMM_LOOPBACK_INT_PHY 1
+#define PMM_LOOPBACK_EXT_PHY 2
+#define PMM_LOOPBACK_EXT 3
+#define PMM_LOOPBACK_MAC 4
+
+ /* features */
+ u32 feature_config_flags;
+};
+
+struct port_mf_cfg {
+ u32 dynamic_cfg; /* device control channel */
+#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff
+#define PORT_MF_CFG_OV_TAG_SHIFT 0
+#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK
+
+ u32 reserved[1];
+};
+
+/* DO NOT add new fields in the middle
+ * MUST be synced with struct pmm_stats_map
+ */
+struct pmm_stats {
+ u64 r64; /* 0x00 (Offset 0x00 ) RX 64-byte frame counter*/
+ u64 r127; /* 0x01 (Offset 0x08 ) RX 65 to 127 byte frame counter*/
+ u64 r255;
+ u64 r511;
+ u64 r1023;
+ u64 r1518;
+ u64 r1522;
+ u64 r2047;
+ u64 r4095;
+ u64 r9216;
+ u64 r16383;
+ u64 rfcs; /* 0x0F (Offset 0x58 ) RX FCS error frame counter*/
+ u64 rxcf; /* 0x10 (Offset 0x60 ) RX control frame counter*/
+ u64 rxpf; /* 0x11 (Offset 0x68 ) RX pause frame counter*/
+ u64 rxpp; /* 0x12 (Offset 0x70 ) RX PFC frame counter*/
+ u64 raln; /* 0x16 (Offset 0x78 ) RX alignment error counter*/
+ u64 rfcr; /* 0x19 (Offset 0x80 ) RX false carrier counter */
+ u64 rovr; /* 0x1A (Offset 0x88 ) RX oversized frame counter*/
+ u64 rjbr; /* 0x1B (Offset 0x90 ) RX jabber frame counter */
+ u64 rund; /* 0x34 (Offset 0x98 ) RX undersized frame counter */
+ u64 rfrg; /* 0x35 (Offset 0xa0 ) RX fragment counter */
+ u64 t64; /* 0x40 (Offset 0xa8 ) TX 64-byte frame counter */
+ u64 t127;
+ u64 t255;
+ u64 t511;
+ u64 t1023;
+ u64 t1518;
+ u64 t2047;
+ u64 t4095;
+ u64 t9216;
+ u64 t16383;
+ u64 txpf; /* 0x50 (Offset 0xf8 ) TX pause frame counter */
+ u64 txpp; /* 0x51 (Offset 0x100) TX PFC frame counter */
+ u64 tlpiec;
+ u64 tncl;
+ u64 rbyte; /* 0x3d (Offset 0x118) RX byte counter */
+ u64 rxuca; /* 0x0c (Offset 0x120) RX UC frame counter */
+ u64 rxmca; /* 0x0d (Offset 0x128) RX MC frame counter */
+ u64 rxbca; /* 0x0e (Offset 0x130) RX BC frame counter */
+ u64 rxpok;
+ u64 tbyte; /* 0x6f (Offset 0x140) TX byte counter */
+ u64 txuca; /* 0x4d (Offset 0x148) TX UC frame counter */
+ u64 txmca; /* 0x4e (Offset 0x150) TX MC frame counter */
+ u64 txbca; /* 0x4f (Offset 0x158) TX BC frame counter */
+ u64 txcf; /* 0x54 (Offset 0x160) TX control frame counter */
+};
+
+struct brb_stats {
+ u64 brb_truncate[8];
+ u64 brb_discard[8];
+};
+
+struct port_stats {
+ struct brb_stats brb;
+ struct pmm_stats pmm;
+};
+
+#define CMT_TEAM0 0
+#define CMT_TEAM1 1
+#define CMT_TEAM_MAX 2
+
+struct couple_mode_teaming {
+ u8 port_cmt[MCP_GLOB_PORT_MAX];
+#define PORT_CMT_IN_TEAM BIT(0)
+
+#define PORT_CMT_PORT_ROLE BIT(1)
+#define PORT_CMT_PORT_INACTIVE (0 << 1)
+#define PORT_CMT_PORT_ACTIVE BIT(1)
+
+#define PORT_CMT_TEAM_MASK BIT(2)
+#define PORT_CMT_TEAM0 (0 << 2)
+#define PORT_CMT_TEAM1 BIT(2)
+};
+
+/**************************************
+* LLDP and DCBX HSI structures
+**************************************/
+#define LLDP_CHASSIS_ID_STAT_LEN 4
+#define LLDP_PORT_ID_STAT_LEN 4
+#define DCBX_MAX_APP_PROTOCOL 32
+#define MAX_SYSTEM_LLDP_TLV_DATA 32
+
+enum lldp_agent_e {
+ LLDP_NEAREST_BRIDGE = 0,
+ LLDP_NEAREST_NON_TPMR_BRIDGE,
+ LLDP_NEAREST_CUSTOMER_BRIDGE,
+ LLDP_MAX_LLDP_AGENTS
+};
+
+struct lldp_config_params_s {
+ u32 config;
+#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff
+#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0
+#define LLDP_CONFIG_HOLD_MASK 0x00000f00
+#define LLDP_CONFIG_HOLD_SHIFT 8
+#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000
+#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12
+#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000
+#define LLDP_CONFIG_ENABLE_RX_SHIFT 30
+#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000
+#define LLDP_CONFIG_ENABLE_TX_SHIFT 31
+ u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+ u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
+};
+
+struct lldp_status_params_s {
+ u32 prefix_seq_num;
+ u32 status; /* TBD */
+
+ /* Holds remote Chassis ID TLV header, subtype and 9B of payload. */
+ u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+
+ /* Holds remote Port ID TLV header, subtype and 9B of payload. */
+ u32 peer_port_id[LLDP_PORT_ID_STAT_LEN];
+ u32 suffix_seq_num;
+};
+
+struct dcbx_ets_feature {
+ u32 flags;
+#define DCBX_ETS_ENABLED_MASK 0x00000001
+#define DCBX_ETS_ENABLED_SHIFT 0
+#define DCBX_ETS_WILLING_MASK 0x00000002
+#define DCBX_ETS_WILLING_SHIFT 1
+#define DCBX_ETS_ERROR_MASK 0x00000004
+#define DCBX_ETS_ERROR_SHIFT 2
+#define DCBX_ETS_CBS_MASK 0x00000008
+#define DCBX_ETS_CBS_SHIFT 3
+#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
+#define DCBX_ETS_MAX_TCS_SHIFT 4
+ u32 pri_tc_tbl[1];
+#define DCBX_ISCSI_OOO_TC 4
+#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_ISCSI_OOO_TC + 1)
+ u32 tc_bw_tbl[2];
+ u32 tc_tsa_tbl[2];
+#define DCBX_ETS_TSA_STRICT 0
+#define DCBX_ETS_TSA_CBS 1
+#define DCBX_ETS_TSA_ETS 2
+};
+
+struct dcbx_app_priority_entry {
+ u32 entry;
+#define DCBX_APP_PRI_MAP_MASK 0x000000ff
+#define DCBX_APP_PRI_MAP_SHIFT 0
+#define DCBX_APP_PRI_0 0x01
+#define DCBX_APP_PRI_1 0x02
+#define DCBX_APP_PRI_2 0x04
+#define DCBX_APP_PRI_3 0x08
+#define DCBX_APP_PRI_4 0x10
+#define DCBX_APP_PRI_5 0x20
+#define DCBX_APP_PRI_6 0x40
+#define DCBX_APP_PRI_7 0x80
+#define DCBX_APP_SF_MASK 0x00000300
+#define DCBX_APP_SF_SHIFT 8
+#define DCBX_APP_SF_ETHTYPE 0
+#define DCBX_APP_SF_PORT 1
+#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
+#define DCBX_APP_PROTOCOL_ID_SHIFT 16
+};
+
+/* FW structure in BE */
+struct dcbx_app_priority_feature {
+ u32 flags;
+#define DCBX_APP_ENABLED_MASK 0x00000001
+#define DCBX_APP_ENABLED_SHIFT 0
+#define DCBX_APP_WILLING_MASK 0x00000002
+#define DCBX_APP_WILLING_SHIFT 1
+#define DCBX_APP_ERROR_MASK 0x00000004
+#define DCBX_APP_ERROR_SHIFT 2
+/* Not in use
+ * #define DCBX_APP_DEFAULT_PRI_MASK 0x00000f00
+ * #define DCBX_APP_DEFAULT_PRI_SHIFT 8
+ */
+#define DCBX_APP_MAX_TCS_MASK 0x0000f000
+#define DCBX_APP_MAX_TCS_SHIFT 12
+#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000
+#define DCBX_APP_NUM_ENTRIES_SHIFT 16
+ struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
+};
+
+/* FW structure in BE */
+struct dcbx_features {
+ /* PG feature */
+ struct dcbx_ets_feature ets;
+
+ /* PFC feature */
+ u32 pfc;
+#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff
+#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80
+
+#define DCBX_PFC_FLAGS_MASK 0x0000ff00
+#define DCBX_PFC_FLAGS_SHIFT 8
+#define DCBX_PFC_CAPS_MASK 0x00000f00
+#define DCBX_PFC_CAPS_SHIFT 8
+#define DCBX_PFC_MBC_MASK 0x00004000
+#define DCBX_PFC_MBC_SHIFT 14
+#define DCBX_PFC_WILLING_MASK 0x00008000
+#define DCBX_PFC_WILLING_SHIFT 15
+#define DCBX_PFC_ENABLED_MASK 0x00010000
+#define DCBX_PFC_ENABLED_SHIFT 16
+#define DCBX_PFC_ERROR_MASK 0x00020000
+#define DCBX_PFC_ERROR_SHIFT 17
+
+ /* APP feature */
+ struct dcbx_app_priority_feature app;
+};
+
+struct dcbx_local_params {
+ u32 config;
+#define DCBX_CONFIG_VERSION_MASK 0x00000003
+#define DCBX_CONFIG_VERSION_SHIFT 0
+#define DCBX_CONFIG_VERSION_DISABLED 0
+#define DCBX_CONFIG_VERSION_IEEE 1
+#define DCBX_CONFIG_VERSION_CEE 2
+
+ u32 flags;
+ struct dcbx_features features;
+};
+
+struct dcbx_mib {
+ u32 prefix_seq_num;
+ u32 flags;
+ struct dcbx_features features;
+ u32 suffix_seq_num;
+};
+
+struct lldp_system_tlvs_buffer_s {
+ u16 valid;
+ u16 length;
+ u32 data[MAX_SYSTEM_LLDP_TLV_DATA];
+};
+
+/**************************************/
+/* */
+/* P U B L I C G L O B A L */
+/* */
+/**************************************/
+struct public_global {
+ u32 max_path;
+#define MAX_PATH_BIG_BEAR 2
+#define MAX_PATH_K2 1
+ u32 max_ports;
+#define MODE_1P 1
+#define MODE_2P 2
+#define MODE_3P 3
+#define MODE_4P 4
+ u32 debug_mb_offset;
+ u32 phymod_dbg_mb_offset;
+ struct couple_mode_teaming cmt;
+ s32 internal_temperature;
+ u32 mfw_ver;
+ u32 running_bundle_id;
+};
+
+/**************************************/
+/* */
+/* P U B L I C P A T H */
+/* */
+/**************************************/
+
+/****************************************************************************
+* Shared Memory 2 Region *
+****************************************************************************/
+/* The fw_flr_ack is actually built in the following way: */
+/* 8 bit: PF ack */
+/* 128 bit: VF ack */
+/* 8 bit: ios_dis_ack */
+/* In order to maintain endianness in the mailbox HSI, we want to keep */
+/* using u32. The FW must have the VF right after the PF since this is */
+/* how it accesses arrays (it always expects the VF to reside after the */
+/* PF, which makes the calculation much easier for it). */
+/* In order to meet both limitations, and keep the struct small, the */
+/* code will abuse the structure defined here to achieve the actual */
+/* partition above. */
+/****************************************************************************/
+struct fw_flr_mb {
+ u32 aggint;
+ u32 opgen_addr;
+ u32 accum_ack; /* 0..15:PF, 16..207:VF, 256..271:IOV_DIS */
+#define ACCUM_ACK_PF_BASE 0
+#define ACCUM_ACK_PF_SHIFT 0
+
+#define ACCUM_ACK_VF_BASE 8
+#define ACCUM_ACK_VF_SHIFT 3
+
+#define ACCUM_ACK_IOV_DIS_BASE 256
+#define ACCUM_ACK_IOV_DIS_SHIFT 8
+};
+
+struct public_path {
+ struct fw_flr_mb flr_mb;
+ u32 mcp_vf_disabled[VF_MAX_STATIC / 32];
+
+ u32 process_kill;
+#define PROCESS_KILL_COUNTER_MASK 0x0000ffff
+#define PROCESS_KILL_COUNTER_SHIFT 0
+#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000
+#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16
+#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id * 32 + aeu_bit)
+};
+
+/**************************************/
+/* */
+/* P U B L I C P O R T */
+/* */
+/**************************************/
+
+/****************************************************************************
+* Driver <-> FW Mailbox *
+****************************************************************************/
+
+struct public_port {
+ u32 validity_map; /* 0x0 (4*2 = 0x8) */
+
+ /* validity bits */
+#define MCP_VALIDITY_PCI_CFG 0x00100000
+#define MCP_VALIDITY_MB 0x00200000
+#define MCP_VALIDITY_DEV_INFO 0x00400000
+#define MCP_VALIDITY_RESERVED 0x00000007
+
+ /* One licensing bit should be set */
+#define MCP_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038
+#define MCP_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008
+#define MCP_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010
+#define MCP_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020
+
+ /* Active MFW */
+#define MCP_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000
+#define MCP_VALIDITY_ACTIVE_MFW_MASK 0x000001c0
+#define MCP_VALIDITY_ACTIVE_MFW_NCSI 0x00000040
+#define MCP_VALIDITY_ACTIVE_MFW_NONE 0x000001c0
+
+ u32 link_status;
+#define LINK_STATUS_LINK_UP \
+ 0x00000001
+#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD BIT(1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1)
+
+#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
+
+#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
+#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
+
+#define LINK_STATUS_PFC_ENABLED \
+ 0x00000100
+#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
+#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
+#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800
+#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000
+#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000
+#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000
+#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000
+#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000
+
+#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000
+#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18)
+#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE BIT(18)
+#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18)
+#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18)
+
+#define LINK_STATUS_SFP_TX_FAULT \
+ 0x00100000
+#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000
+#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000
+
+ u32 link_status1;
+ u32 ext_phy_fw_version;
+ u32 drv_phy_cfg_addr;
+
+ u32 port_stx;
+
+ u32 stat_nig_timer;
+
+ struct port_mf_cfg port_mf_config;
+ struct port_stats stats;
+
+ u32 media_type;
+#define MEDIA_UNSPECIFIED 0x0
+#define MEDIA_SFPP_10G_FIBER 0x1
+#define MEDIA_XFP_FIBER 0x2
+#define MEDIA_DA_TWINAX 0x3
+#define MEDIA_BASE_T 0x4
+#define MEDIA_SFP_1G_FIBER 0x5
+#define MEDIA_KR 0xf0
+#define MEDIA_NOT_PRESENT 0xff
+
+ u32 lfa_status;
+#define LFA_LINK_FLAP_REASON_OFFSET 0
+#define LFA_LINK_FLAP_REASON_MASK 0x000000ff
+#define LFA_NO_REASON (0 << 0)
+#define LFA_LINK_DOWN BIT(0)
+#define LFA_FORCE_INIT BIT(1)
+#define LFA_LOOPBACK_MISMATCH BIT(2)
+#define LFA_SPEED_MISMATCH BIT(3)
+#define LFA_FLOW_CTRL_MISMATCH BIT(4)
+#define LFA_ADV_SPEED_MISMATCH BIT(5)
+#define LINK_FLAP_AVOIDANCE_COUNT_OFFSET 8
+#define LINK_FLAP_AVOIDANCE_COUNT_MASK 0x0000ff00
+#define LINK_FLAP_COUNT_OFFSET 16
+#define LINK_FLAP_COUNT_MASK 0x00ff0000
+
+ u32 link_change_count;
+
+ /* LLDP params */
+ struct lldp_config_params_s lldp_config_params[
+ LLDP_MAX_LLDP_AGENTS];
+ struct lldp_status_params_s lldp_status_params[
+ LLDP_MAX_LLDP_AGENTS];
+ struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf;
+
+ /* DCBX related MIB */
+ struct dcbx_local_params local_admin_dcbx_mib;
+ struct dcbx_mib remote_dcbx_mib;
+ struct dcbx_mib operational_dcbx_mib;
+};
+
+/**************************************/
+/* */
+/* P U B L I C F U N C */
+/* */
+/**************************************/
+
+struct public_func {
+ u32 iscsi_boot_signature;
+ u32 iscsi_boot_block_offset;
+
+ u32 reserved[8];
+
+ u32 config;
+
+ /* E/R/I/D */
+ /* function 0 of each port cannot be hidden */
+#define FUNC_MF_CFG_FUNC_HIDE 0x00000001
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001
+
+#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0
+#define FUNC_MF_CFG_PROTOCOL_SHIFT 4
+#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000
+#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010
+#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020
+#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030
+#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030
+
+ /* MINBW, MAXBW */
+ /* value range 0..100, in increments of 1% */
+#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00
+#define FUNC_MF_CFG_MIN_BW_SHIFT 8
+#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
+#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000
+#define FUNC_MF_CFG_MAX_BW_SHIFT 16
+#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000
+
+ u32 status;
+#define FUNC_STATUS_VLINK_DOWN 0x00000001
+
+ u32 mac_upper; /* MAC */
+#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
+#define FUNC_MF_CFG_UPPERMAC_SHIFT 0
+#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
+ u32 mac_lower;
+#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
+
+ u32 fcoe_wwn_port_name_upper;
+ u32 fcoe_wwn_port_name_lower;
+
+ u32 fcoe_wwn_node_name_upper;
+ u32 fcoe_wwn_node_name_lower;
+
+ u32 ovlan_stag; /* tags */
+#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff
+#define FUNC_MF_CFG_OV_STAG_SHIFT 0
+#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK
+
+ u32 pf_allocation; /* vf per pf */
+
+ u32 preserve_data; /* Will be used by CCM */
+
+ u32 driver_last_activity_ts;
+
+ u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32]; /* 0x0044 */
+
+ u32 drv_id;
+#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
+#define DRV_ID_PDA_COMP_VER_SHIFT 0
+
+#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
+#define DRV_ID_MCP_HSI_VER_SHIFT 16
+#define DRV_ID_MCP_HSI_VER_CURRENT BIT(DRV_ID_MCP_HSI_VER_SHIFT)
+
+#define DRV_ID_DRV_TYPE_MASK 0xff000000
+#define DRV_ID_DRV_TYPE_SHIFT 24
+#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_LINUX BIT(DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_WINDOWS (2 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_DIAG (3 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_PREBOOT (4 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_SOLARIS (5 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_VMWARE (6 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_FREEBSD (7 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_AIX (8 << DRV_ID_DRV_TYPE_SHIFT)
+};
+
+/**************************************/
+/* */
+/* P U B L I C M B */
+/* */
+/**************************************/
+/* This is the only section that the driver can write to. */
+/* Basically, each driver request to set feature parameters
+ * will be done using a different command, which will be linked
+ * to a specific data structure from the union below.
+ * For huge structures, the common blank structure should be used.
+ */
+
+struct mcp_mac {
+ u32 mac_upper; /* Upper 16 bits are always zeroes */
+ u32 mac_lower;
+};
+
+struct mcp_val64 {
+ u32 lo;
+ u32 hi;
+};
+
+struct mcp_file_att {
+ u32 nvm_start_addr;
+ u32 len;
+};
+
+#define MCP_DRV_VER_STR_SIZE 16
+#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32))
+#define MCP_DRV_NVM_BUF_LEN 32
+struct drv_version_stc {
+ u32 version;
+ u8 name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+union drv_union_data {
+ u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
+ struct mcp_mac wol_mac;
+
+ struct pmm_phy_cfg drv_phy_cfg;
+
+ struct mcp_val64 val64; /* For PHY / AVS commands */
+
+ u8 raw_data[MCP_DRV_NVM_BUF_LEN];
+
+ struct mcp_file_att file_att;
+
+ u32 ack_vf_disabled[VF_MAX_STATIC / 32];
+
+ struct drv_version_stc drv_version;
+};
+
+struct public_drv_mb {
+ u32 drv_mb_header;
+#define DRV_MSG_CODE_MASK 0xffff0000
+#define DRV_MSG_CODE_LOAD_REQ 0x10000000
+#define DRV_MSG_CODE_LOAD_DONE 0x11000000
+#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000
+#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
+#define DRV_MSG_CODE_INIT_PHY 0x22000000
+ /* Params - FORCE - Reinitialize the link regardless of LFA */
+ /* - DONT_CARE - Don't flap the link if up */
+#define DRV_MSG_CODE_LINK_RESET 0x23000000
+
+#define DRV_MSG_CODE_SET_LLDP 0x24000000
+#define DRV_MSG_CODE_SET_DCBX 0x25000000
+
+#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
+
+#define DRV_MSG_CODE_INITIATE_FLR 0x02000000
+#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
+#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
+#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN 0x00010000
+#define DRV_MSG_CODE_NVM_PUT_FILE_DATA 0x00020000
+#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
+#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000
+#define DRV_MSG_CODE_NVM_WRITE_NVRAM 0x00060000
+#define DRV_MSG_CODE_NVM_DEL_FILE 0x00080000
+#define DRV_MSG_CODE_MCP_RESET 0x00090000
+#define DRV_MSG_CODE_SET_SECURE_MODE 0x000a0000
+#define DRV_MSG_CODE_PHY_RAW_READ 0x000b0000
+#define DRV_MSG_CODE_PHY_RAW_WRITE 0x000c0000
+#define DRV_MSG_CODE_PHY_CORE_READ 0x000d0000
+#define DRV_MSG_CODE_PHY_CORE_WRITE 0x000e0000
+#define DRV_MSG_CODE_SET_VERSION 0x000f0000
+
+#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
+
+ u32 drv_mb_param;
+
+ /* UNLOAD_REQ params */
+#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000
+#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001
+#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002
+#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003
+
+ /* UNLOAD_DONE params */
+#define DRV_MB_PARAM_UNLOAD_NON_D3_POWER 0x00000001
+
+ /* INIT_PHY params */
+#define DRV_MB_PARAM_INIT_PHY_FORCE 0x00000001
+#define DRV_MB_PARAM_INIT_PHY_DONT_CARE 0x00000002
+
+ /* LLDP / DCBX params*/
+#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
+#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0
+#define DRV_MB_PARAM_LLDP_AGENT_MASK 0x00000006
+#define DRV_MB_PARAM_LLDP_AGENT_SHIFT 1
+#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x00000008
+#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
+
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK 0x000000FF
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_SHIFT 0
+
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW 0x1
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE 0x2
+
+#define DRV_MB_PARAM_NVM_OFFSET_SHIFT 0
+#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF
+#define DRV_MB_PARAM_NVM_LEN_SHIFT 24
+#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000
+
+#define DRV_MB_PARAM_PHY_ADDR_SHIFT 0
+#define DRV_MB_PARAM_PHY_ADDR_MASK 0x1FF0FFFF
+#define DRV_MB_PARAM_PHY_LANE_SHIFT 16
+#define DRV_MB_PARAM_PHY_LANE_MASK 0x000F0000
+#define DRV_MB_PARAM_PHY_SELECT_PORT_SHIFT 29
+#define DRV_MB_PARAM_PHY_SELECT_PORT_MASK 0x20000000
+#define DRV_MB_PARAM_PHY_PORT_SHIFT 30
+#define DRV_MB_PARAM_PHY_PORT_MASK 0xc0000000
+
+/* configure VF MSIX params (a usage sketch follows this struct) */
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00
+
+ u32 fw_mb_header;
+#define FW_MSG_CODE_MASK 0xffff0000
+#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000
+#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
+#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10210000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000
+#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
+#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000
+#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
+#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000
+#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
+#define FW_MSG_CODE_INIT_PHY_DONE 0x21200000
+#define FW_MSG_CODE_INIT_PHY_ERR_INVALID_ARGS 0x21300000
+#define FW_MSG_CODE_LINK_RESET_DONE 0x23000000
+#define FW_MSG_CODE_SET_LLDP_DONE 0x24000000
+#define FW_MSG_CODE_SET_LLDP_UNSUPPORTED_AGENT 0x24010000
+#define FW_MSG_CODE_SET_DCBX_DONE 0x25000000
+#define FW_MSG_CODE_NIG_DRAIN_DONE 0x30000000
+#define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
+#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
+#define FW_MSG_CODE_FLR_ACK 0x02000000
+#define FW_MSG_CODE_FLR_NACK 0x02100000
+
+#define FW_MSG_CODE_NVM_OK 0x00010000
+#define FW_MSG_CODE_NVM_INVALID_MODE 0x00020000
+#define FW_MSG_CODE_NVM_PREV_CMD_WAS_NOT_FINISHED 0x00030000
+#define FW_MSG_CODE_NVM_FAILED_TO_ALLOCATE_PAGE 0x00040000
+#define FW_MSG_CODE_NVM_INVALID_DIR_FOUND 0x00050000
+#define FW_MSG_CODE_NVM_PAGE_NOT_FOUND 0x00060000
+#define FW_MSG_CODE_NVM_FAILED_PARSING_BNDLE_HEADER 0x00070000
+#define FW_MSG_CODE_NVM_FAILED_PARSING_IMAGE_HEADER 0x00080000
+#define FW_MSG_CODE_NVM_PARSING_OUT_OF_SYNC 0x00090000
+#define FW_MSG_CODE_NVM_FAILED_UPDATING_DIR 0x000a0000
+#define FW_MSG_CODE_NVM_FAILED_TO_FREE_PAGE 0x000b0000
+#define FW_MSG_CODE_NVM_FILE_NOT_FOUND 0x000c0000
+#define FW_MSG_CODE_NVM_OPERATION_FAILED 0x000d0000
+#define FW_MSG_CODE_NVM_FAILED_UNALIGNED 0x000e0000
+#define FW_MSG_CODE_NVM_BAD_OFFSET 0x000f0000
+#define FW_MSG_CODE_NVM_BAD_SIGNATURE 0x00100000
+#define FW_MSG_CODE_NVM_FILE_READ_ONLY 0x00200000
+#define FW_MSG_CODE_NVM_UNKNOWN_FILE 0x00300000
+#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK 0x00400000
+#define FW_MSG_CODE_MCP_RESET_REJECT 0x00600000
+#define FW_MSG_CODE_PHY_OK 0x00110000
+#define FW_MSG_CODE_PHY_ERROR 0x00120000
+#define FW_MSG_CODE_SET_SECURE_MODE_ERROR 0x00130000
+#define FW_MSG_CODE_SET_SECURE_MODE_OK 0x00140000
+#define FW_MSG_MODE_PHY_PRIVILEGE_ERROR 0x00150000
+
+#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
+
+ u32 fw_mb_param;
+
+ u32 drv_pulse_mb;
+#define DRV_PULSE_SEQ_MASK 0x00007fff
+#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
+#define DRV_PULSE_ALWAYS_ALIVE 0x00008000
+ u32 mcp_pulse_mb;
+#define MCP_PULSE_SEQ_MASK 0x00007fff
+#define MCP_PULSE_ALWAYS_ALIVE 0x00008000
+#define MCP_EVENT_MASK 0xffff0000
+#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
+
+ union drv_union_data union_data;
+};
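+
+/* Illustrative sketch only (hypothetical helper, not part of the HSI): a
+ * DRV_MSG_CODE_CFG_VF_MSIX request packs the VF id and the number of status
+ * blocks into drv_mb_param using the masks defined in the struct above.
+ */
+static inline u32 qed_hypothetical_cfg_vf_msix_param(u8 vf_id, u8 num_sbs)
+{
+ return (((u32)vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
+ DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK) |
+ (((u32)num_sbs << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
+ DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK);
+}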
+
+/* MFW - DRV MB */
+/**********************************************************************
+* Description
+* Incremental Aggregative
+* 8-bit MFW counter per message
+* 8-bit ack-counter per message
+* Capabilities
+* Provides up to 256 aggregative messages per type
+* Provides 4 message types per dword
+* Message type pointers to byte offset
+* Backward compatibility by using sizeof for the counters.
+* No lock required for 32-bit messages
+* Limitations:
+* In case of messages greater than 32 bits, a dedicated mechanism (e.g. a
+* lock) is required to prevent data corruption.
+**********************************************************************/
+enum MFW_DRV_MSG_TYPE {
+ MFW_DRV_MSG_LINK_CHANGE,
+ MFW_DRV_MSG_FLR_FW_ACK_FAILED,
+ MFW_DRV_MSG_VF_DISABLED,
+ MFW_DRV_MSG_LLDP_DATA_UPDATED,
+ MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED,
+ MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED,
+ MFW_DRV_MSG_ERROR_RECOVERY,
+ MFW_DRV_MSG_MAX
+};
+
+#define MFW_DRV_MSG_MAX_DWORDS(msgs) (((msgs - 1) >> 2) + 1)
+#define MFW_DRV_MSG_DWORD(msg_id) (msg_id >> 2)
+#define MFW_DRV_MSG_OFFSET(msg_id) ((msg_id & 0x3) << 3)
+#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id))
+
+struct public_mfw_mb {
+ u32 sup_msgs;
+ u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+ u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+};
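+
+/* Illustrative sketch only (hypothetical helper, not part of the HSI): a
+ * driver would typically detect a pending aggregative message by comparing
+ * the 8-bit counter in msg[] against the matching counter in ack[], using
+ * the MFW_DRV_MSG_* macros above to locate the byte for a given message id.
+ */
+static inline int qed_hypothetical_mfw_msg_pending(struct public_mfw_mb *mb,
+ enum MFW_DRV_MSG_TYPE id)
+{
+ u32 msg = mb->msg[MFW_DRV_MSG_DWORD(id)];
+ u32 ack = mb->ack[MFW_DRV_MSG_DWORD(id)];
+
+ return (msg & MFW_DRV_MSG_MASK(id)) != (ack & MFW_DRV_MSG_MASK(id));
+}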
+
+/**************************************/
+/* */
+/* P U B L I C D A T A */
+/* */
+/**************************************/
+enum public_sections {
+ PUBLIC_DRV_MB, /* Points to the first drv_mb of path0 */
+ PUBLIC_MFW_MB, /* Points to the first mfw_mb of path0 */
+ PUBLIC_GLOBAL,
+ PUBLIC_PATH,
+ PUBLIC_PORT,
+ PUBLIC_FUNC,
+ PUBLIC_MAX_SECTIONS
+};
+
+struct drv_ver_info_stc {
+ u32 ver;
+ u8 name[32];
+};
+
+struct mcp_public_data {
+ /* The sections field is an array */
+ u32 num_sections;
+ offsize_t sections[PUBLIC_MAX_SECTIONS];
+ struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX];
+ struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX];
+ struct public_global global;
+ struct public_path path[MCP_GLOB_PATH_MAX];
+ struct public_port port[MCP_GLOB_PORT_MAX];
+ struct public_func func[MCP_GLOB_FUNC_MAX];
+ struct drv_ver_info_stc drv_info;
+};
+
+struct nvm_cfg_mac_address {
+ u32 mac_addr_hi;
+#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF
+#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0
+
+ u32 mac_addr_lo;
+};
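+
+/* Illustrative sketch only (hypothetical helper): the 48-bit MAC address is
+ * split across the two words above, the upper two octets in mac_addr_hi (per
+ * NVM_CFG_MAC_ADDRESS_HI_MASK) and the lower four octets in mac_addr_lo.
+ * The exact octet ordering shown here is an assumption of this sketch.
+ */
+static inline void
+qed_hypothetical_nvm_mac_to_bytes(const struct nvm_cfg_mac_address *p,
+ u8 mac[6])
+{
+ mac[0] = (p->mac_addr_hi >> 8) & 0xff;
+ mac[1] = p->mac_addr_hi & 0xff;
+ mac[2] = (p->mac_addr_lo >> 24) & 0xff;
+ mac[3] = (p->mac_addr_lo >> 16) & 0xff;
+ mac[4] = (p->mac_addr_lo >> 8) & 0xff;
+ mac[5] = p->mac_addr_lo & 0xff;
+}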
+
+/******************************************
+* nvm_cfg1 structs
+******************************************/
+
+struct nvm_cfg1_glob {
+ u32 generic_cont0; /* 0x0 */
+#define NVM_CFG1_GLOB_BOARD_SWAP_MASK 0x0000000F
+#define NVM_CFG1_GLOB_BOARD_SWAP_OFFSET 0
+#define NVM_CFG1_GLOB_BOARD_SWAP_NONE 0x0
+#define NVM_CFG1_GLOB_BOARD_SWAP_PATH 0x1
+#define NVM_CFG1_GLOB_BOARD_SWAP_PORT 0x2
+#define NVM_CFG1_GLOB_BOARD_SWAP_BOTH 0x3
+#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000FF0
+#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4
+#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0
+#define NVM_CFG1_GLOB_MF_MODE_FORCED_SF 0x1
+#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4
+#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5
+#define NVM_CFG1_GLOB_MF_MODE_BD 0x6
+#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_MASK 0x00001000
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_OFFSET 12
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_DISABLED 0x0
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_ENABLED 0x1
+#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_MASK 0x001FE000
+#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_OFFSET 13
+#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_MASK 0x1FE00000
+#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_OFFSET 21
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_MASK 0x20000000
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_OFFSET 29
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_DISABLED 0x0
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_ENABLED 0x1
+#define NVM_CFG1_GLOB_ENABLE_ATC_MASK 0x40000000
+#define NVM_CFG1_GLOB_ENABLE_ATC_OFFSET 30
+#define NVM_CFG1_GLOB_ENABLE_ATC_DISABLED 0x0
+#define NVM_CFG1_GLOB_ENABLE_ATC_ENABLED 0x1
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_MASK 0x80000000
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_OFFSET 31
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_DISABLED 0x0
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_ENABLED 0x1
+
+ u32 engineering_change[3]; /* 0x4 */
+
+ u32 manufacturing_id; /* 0x10 */
+
+ u32 serial_number[4]; /* 0x14 */
+
+ u32 pcie_cfg; /* 0x24 */
+#define NVM_CFG1_GLOB_PCI_GEN_MASK 0x00000003
+#define NVM_CFG1_GLOB_PCI_GEN_OFFSET 0
+#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN1 0x0
+#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN2 0x1
+#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN3 0x2
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_MASK 0x00000004
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_OFFSET 2
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_DISABLED 0x0
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_ENABLED 0x1
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_MASK 0x00000018
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_OFFSET 3
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_ENABLED 0x0
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_DISABLED 0x1
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_L1_DISABLED 0x2
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_DISABLED 0x3
+#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_MASK 0x00000020
+#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_OFFSET 5
+#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_DISABLED 0x0
+#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_ENABLED 0x1
+#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_MASK 0x000003C0
+#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_OFFSET 6
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_MASK 0x00001C00
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_OFFSET 10
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_HW 0x0
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_0DB 0x1
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_3_5DB 0x2
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_6_0DB 0x3
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_MASK 0x001FE000
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_OFFSET 13
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_MASK 0x1FE00000
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_OFFSET 21
+#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_MASK 0x60000000
+#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_OFFSET 29
+
+ u32 mgmt_traffic; /* 0x28 */
+#define NVM_CFG1_GLOB_RESERVED60_MASK 0x00000001
+#define NVM_CFG1_GLOB_RESERVED60_OFFSET 0
+#define NVM_CFG1_GLOB_RESERVED60_100KHZ 0x0
+#define NVM_CFG1_GLOB_RESERVED60_400KHZ 0x1
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_MASK 0x000001FE
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_OFFSET 1
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_MASK 0x0001FE00
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_OFFSET 9
+#define NVM_CFG1_GLOB_SMBUS_ADDRESS_MASK 0x01FE0000
+#define NVM_CFG1_GLOB_SMBUS_ADDRESS_OFFSET 17
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_MASK 0x06000000
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_OFFSET 25
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_DISABLED 0x0
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_RMII 0x1
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_SGMII 0x2
+
+ u32 core_cfg; /* 0x2C */
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000FF
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G 0x0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G 0x1
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G 0x2
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F 0x3
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E 0x4
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G 0x5
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G 0xB
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G 0xC
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G 0xD
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_MASK 0x00000100
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_OFFSET 8
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_DISABLED 0x0
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_ENABLED 0x1
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_MASK 0x00000200
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_OFFSET 9
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_DISABLED 0x0
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_ENABLED 0x1
+#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_MASK 0x0003FC00
+#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_OFFSET 10
+#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_MASK 0x03FC0000
+#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_OFFSET 18
+#define NVM_CFG1_GLOB_AVS_MODE_MASK 0x1C000000
+#define NVM_CFG1_GLOB_AVS_MODE_OFFSET 26
+#define NVM_CFG1_GLOB_AVS_MODE_CLOSE_LOOP 0x0
+#define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP 0x1
+#define NVM_CFG1_GLOB_AVS_MODE_DISABLED 0x3
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_MASK 0x60000000
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_OFFSET 29
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_DISABLED 0x0
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_ENABLED 0x1
+
+ u32 e_lane_cfg1; /* 0x30 */
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28
+
+ u32 e_lane_cfg2; /* 0x34 */
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7
+#define NVM_CFG1_GLOB_SMBUS_MODE_MASK 0x00000F00
+#define NVM_CFG1_GLOB_SMBUS_MODE_OFFSET 8
+#define NVM_CFG1_GLOB_SMBUS_MODE_DISABLED 0x0
+#define NVM_CFG1_GLOB_SMBUS_MODE_100KHZ 0x1
+#define NVM_CFG1_GLOB_SMBUS_MODE_400KHZ 0x2
+#define NVM_CFG1_GLOB_NCSI_MASK 0x0000F000
+#define NVM_CFG1_GLOB_NCSI_OFFSET 12
+#define NVM_CFG1_GLOB_NCSI_DISABLED 0x0
+#define NVM_CFG1_GLOB_NCSI_ENABLED 0x1
+
+ u32 f_lane_cfg1; /* 0x38 */
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28
+
+ u32 f_lane_cfg2; /* 0x3C */
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7
+
+ u32 eagle_preemphasis; /* 0x40 */
+#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24
+
+ u32 eagle_driver_current; /* 0x44 */
+#define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24
+
+ u32 falcon_preemphasis; /* 0x48 */
+#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24
+
+ u32 falcon_driver_current; /* 0x4C */
+#define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24
+
+ u32 pci_id; /* 0x50 */
+#define NVM_CFG1_GLOB_VENDOR_ID_MASK 0x0000FFFF
+#define NVM_CFG1_GLOB_VENDOR_ID_OFFSET 0
+
+ u32 pci_subsys_id; /* 0x54 */
+#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFF
+#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_OFFSET 0
+#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_MASK 0xFFFF0000
+#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_OFFSET 16
+
+ u32 bar; /* 0x58 */
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_MASK 0x0000000F
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_OFFSET 0
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_DISABLED 0x0
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2K 0x1
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4K 0x2
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8K 0x3
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16K 0x4
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32K 0x5
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_64K 0x6
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_128K 0x7
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_256K 0x8
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_512K 0x9
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_1M 0xA
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2M 0xB
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4M 0xC
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8M 0xD
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16M 0xE
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32M 0xF
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_MASK 0x000000F0
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_OFFSET 4
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_DISABLED 0x0
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4K 0x1
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8K 0x2
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16K 0x3
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32K 0x4
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64K 0x5
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_128K 0x6
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_256K 0x7
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_512K 0x8
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_1M 0x9
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_2M 0xA
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4M 0xB
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8M 0xC
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16M 0xD
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32M 0xE
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64M 0xF
+#define NVM_CFG1_GLOB_BAR2_SIZE_MASK 0x00000F00
+#define NVM_CFG1_GLOB_BAR2_SIZE_OFFSET 8
+#define NVM_CFG1_GLOB_BAR2_SIZE_DISABLED 0x0
+#define NVM_CFG1_GLOB_BAR2_SIZE_64K 0x1
+#define NVM_CFG1_GLOB_BAR2_SIZE_128K 0x2
+#define NVM_CFG1_GLOB_BAR2_SIZE_256K 0x3
+#define NVM_CFG1_GLOB_BAR2_SIZE_512K 0x4
+#define NVM_CFG1_GLOB_BAR2_SIZE_1M 0x5
+#define NVM_CFG1_GLOB_BAR2_SIZE_2M 0x6
+#define NVM_CFG1_GLOB_BAR2_SIZE_4M 0x7
+#define NVM_CFG1_GLOB_BAR2_SIZE_8M 0x8
+#define NVM_CFG1_GLOB_BAR2_SIZE_16M 0x9
+#define NVM_CFG1_GLOB_BAR2_SIZE_32M 0xA
+#define NVM_CFG1_GLOB_BAR2_SIZE_64M 0xB
+#define NVM_CFG1_GLOB_BAR2_SIZE_128M 0xC
+#define NVM_CFG1_GLOB_BAR2_SIZE_256M 0xD
+#define NVM_CFG1_GLOB_BAR2_SIZE_512M 0xE
+#define NVM_CFG1_GLOB_BAR2_SIZE_1G 0xF
+
+ u32 eagle_txfir_main; /* 0x5C */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24
+
+ u32 eagle_txfir_post; /* 0x60 */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24
+
+ u32 falcon_txfir_main; /* 0x64 */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24
+
+ u32 falcon_txfir_post; /* 0x68 */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24
+
+ u32 manufacture_ver; /* 0x6C */
+#define NVM_CFG1_GLOB_MANUF0_VER_MASK 0x0000003F
+#define NVM_CFG1_GLOB_MANUF0_VER_OFFSET 0
+#define NVM_CFG1_GLOB_MANUF1_VER_MASK 0x00000FC0
+#define NVM_CFG1_GLOB_MANUF1_VER_OFFSET 6
+#define NVM_CFG1_GLOB_MANUF2_VER_MASK 0x0003F000
+#define NVM_CFG1_GLOB_MANUF2_VER_OFFSET 12
+#define NVM_CFG1_GLOB_MANUF3_VER_MASK 0x00FC0000
+#define NVM_CFG1_GLOB_MANUF3_VER_OFFSET 18
+#define NVM_CFG1_GLOB_MANUF4_VER_MASK 0x3F000000
+#define NVM_CFG1_GLOB_MANUF4_VER_OFFSET 24
+
+ u32 manufacture_time; /* 0x70 */
+#define NVM_CFG1_GLOB_MANUF0_TIME_MASK 0x0000003F
+#define NVM_CFG1_GLOB_MANUF0_TIME_OFFSET 0
+#define NVM_CFG1_GLOB_MANUF1_TIME_MASK 0x00000FC0
+#define NVM_CFG1_GLOB_MANUF1_TIME_OFFSET 6
+#define NVM_CFG1_GLOB_MANUF2_TIME_MASK 0x0003F000
+#define NVM_CFG1_GLOB_MANUF2_TIME_OFFSET 12
+
+ u32 led_global_settings; /* 0x74 */
+#define NVM_CFG1_GLOB_LED_SWAP_0_MASK 0x0000000F
+#define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET 0
+#define NVM_CFG1_GLOB_LED_SWAP_1_MASK 0x000000F0
+#define NVM_CFG1_GLOB_LED_SWAP_1_OFFSET 4
+#define NVM_CFG1_GLOB_LED_SWAP_2_MASK 0x00000F00
+#define NVM_CFG1_GLOB_LED_SWAP_2_OFFSET 8
+#define NVM_CFG1_GLOB_LED_SWAP_3_MASK 0x0000F000
+#define NVM_CFG1_GLOB_LED_SWAP_3_OFFSET 12
+
+ u32 generic_cont1; /* 0x78 */
+#define NVM_CFG1_GLOB_AVS_DAC_CODE_MASK 0x000003FF
+#define NVM_CFG1_GLOB_AVS_DAC_CODE_OFFSET 0
+
+ u32 mbi_version; /* 0x7C */
+#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF
+#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0
+#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8
+#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16
+
+ u32 mbi_date; /* 0x80 */
+
+ u32 misc_sig; /* 0x84 */
+
+ /* Define the GPIO mapping to switch i2c mux */
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_MASK 0x000000FF
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_OFFSET 0
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_OFFSET 8
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__NA 0x0
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO0 0x1
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO1 0x2
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO2 0x3
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO3 0x4
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO4 0x5
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO5 0x6
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO6 0x7
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO7 0x8
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO8 0x9
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO9 0xA
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO10 0xB
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO11 0xC
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO12 0xD
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO13 0xE
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO14 0xF
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO15 0x10
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO16 0x11
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO17 0x12
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO18 0x13
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO19 0x14
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO20 0x15
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO21 0x16
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO22 0x17
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO23 0x18
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO24 0x19
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO25 0x1A
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO26 0x1B
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO27 0x1C
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO28 0x1D
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29 0x1E
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30 0x1F
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31 0x20
+
+ u32 reserved[46]; /* 0x88 */
+};
+
+struct nvm_cfg1_path {
+ u32 reserved[30]; /* 0x0 */
+};
+
+struct nvm_cfg1_port {
+ u32 power_dissipated; /* 0x0 */
+#define NVM_CFG1_PORT_POWER_DIS_D0_MASK 0x000000FF
+#define NVM_CFG1_PORT_POWER_DIS_D0_OFFSET 0
+#define NVM_CFG1_PORT_POWER_DIS_D1_MASK 0x0000FF00
+#define NVM_CFG1_PORT_POWER_DIS_D1_OFFSET 8
+#define NVM_CFG1_PORT_POWER_DIS_D2_MASK 0x00FF0000
+#define NVM_CFG1_PORT_POWER_DIS_D2_OFFSET 16
+#define NVM_CFG1_PORT_POWER_DIS_D3_MASK 0xFF000000
+#define NVM_CFG1_PORT_POWER_DIS_D3_OFFSET 24
+
+ u32 power_consumed; /* 0x4 */
+#define NVM_CFG1_PORT_POWER_CONS_D0_MASK 0x000000FF
+#define NVM_CFG1_PORT_POWER_CONS_D0_OFFSET 0
+#define NVM_CFG1_PORT_POWER_CONS_D1_MASK 0x0000FF00
+#define NVM_CFG1_PORT_POWER_CONS_D1_OFFSET 8
+#define NVM_CFG1_PORT_POWER_CONS_D2_MASK 0x00FF0000
+#define NVM_CFG1_PORT_POWER_CONS_D2_OFFSET 16
+#define NVM_CFG1_PORT_POWER_CONS_D3_MASK 0xFF000000
+#define NVM_CFG1_PORT_POWER_CONS_D3_OFFSET 24
+
+ u32 generic_cont0; /* 0x8 */
+#define NVM_CFG1_PORT_LED_MODE_MASK 0x000000FF
+#define NVM_CFG1_PORT_LED_MODE_OFFSET 0
+#define NVM_CFG1_PORT_LED_MODE_MAC1 0x0
+#define NVM_CFG1_PORT_LED_MODE_PHY1 0x1
+#define NVM_CFG1_PORT_LED_MODE_PHY2 0x2
+#define NVM_CFG1_PORT_LED_MODE_PHY3 0x3
+#define NVM_CFG1_PORT_LED_MODE_MAC2 0x4
+#define NVM_CFG1_PORT_LED_MODE_PHY4 0x5
+#define NVM_CFG1_PORT_LED_MODE_PHY5 0x6
+#define NVM_CFG1_PORT_LED_MODE_PHY6 0x7
+#define NVM_CFG1_PORT_LED_MODE_MAC3 0x8
+#define NVM_CFG1_PORT_LED_MODE_PHY7 0x9
+#define NVM_CFG1_PORT_LED_MODE_PHY8 0xA
+#define NVM_CFG1_PORT_LED_MODE_PHY9 0xB
+#define NVM_CFG1_PORT_LED_MODE_MAC4 0xC
+#define NVM_CFG1_PORT_LED_MODE_PHY10 0xD
+#define NVM_CFG1_PORT_LED_MODE_PHY11 0xE
+#define NVM_CFG1_PORT_LED_MODE_PHY12 0xF
+#define NVM_CFG1_PORT_ROCE_PRIORITY_MASK 0x0000FF00
+#define NVM_CFG1_PORT_ROCE_PRIORITY_OFFSET 8
+#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000F0000
+#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16
+#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0
+#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1
+#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2
+#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3
+
+ u32 pcie_cfg; /* 0xC */
+#define NVM_CFG1_PORT_RESERVED15_MASK 0x00000007
+#define NVM_CFG1_PORT_RESERVED15_OFFSET 0
+
+ u32 features; /* 0x10 */
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_MASK 0x00000001
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_OFFSET 0
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_DISABLED 0x0
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_ENABLED 0x1
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_MASK 0x00000002
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_OFFSET 1
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_DISABLED 0x0
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_ENABLED 0x1
+
+ u32 speed_cap_mask; /* 0x14 */
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000FFFF
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G 0x40
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_MASK 0xFFFF0000
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_OFFSET 16
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_1G 0x1
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_10G 0x2
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_25G 0x8
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_40G 0x10
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_50G 0x20
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_100G 0x40
+
+ u32 link_settings; /* 0x18 */
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000F
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_100G 0x7
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_MASK 0x00000780
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_OFFSET 7
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_AUTONEG 0x0
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_1G 0x1
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_10G 0x2
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_25G 0x4
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_40G 0x5
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_50G 0x6
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_100G 0x7
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_MASK 0x00003800
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_OFFSET 11
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_AUTONEG 0x1
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_RX 0x2
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_TX 0x4
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_MASK 0x00004000
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_OFFSET 14
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_DISABLED 0x0
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_ENABLED 0x1
+
+ u32 phy_cfg; /* 0x1C */
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_MASK 0x0000FFFF
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_OFFSET 0
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_HIGIG 0x1
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_SCRAMBLER 0x2
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_FIBER 0x4
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_CL72_AN 0x8
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_FEC_AN 0x10
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_MASK 0x00FF0000
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_OFFSET 16
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_BYPASS 0x0
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR 0x2
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR2 0x3
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR4 0x4
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XFI 0x8
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SFI 0x9
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_1000X 0xB
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SGMII 0xC
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI 0xD
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI 0xE
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI 0xF
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI 0x10
+#define NVM_CFG1_PORT_AN_MODE_MASK 0xFF000000
+#define NVM_CFG1_PORT_AN_MODE_OFFSET 24
+#define NVM_CFG1_PORT_AN_MODE_NONE 0x0
+#define NVM_CFG1_PORT_AN_MODE_CL73 0x1
+#define NVM_CFG1_PORT_AN_MODE_CL37 0x2
+#define NVM_CFG1_PORT_AN_MODE_CL73_BAM 0x3
+#define NVM_CFG1_PORT_AN_MODE_CL37_BAM 0x4
+#define NVM_CFG1_PORT_AN_MODE_HPAM 0x5
+#define NVM_CFG1_PORT_AN_MODE_SGMII 0x6
+
+ u32 mgmt_traffic; /* 0x20 */
+#define NVM_CFG1_PORT_RESERVED61_MASK 0x0000000F
+#define NVM_CFG1_PORT_RESERVED61_OFFSET 0
+#define NVM_CFG1_PORT_RESERVED61_DISABLED 0x0
+#define NVM_CFG1_PORT_RESERVED61_NCSI_OVER_RMII 0x1
+#define NVM_CFG1_PORT_RESERVED61_NCSI_OVER_SMBUS 0x2
+
+ u32 ext_phy; /* 0x24 */
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK 0x000000FF
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_OFFSET 0
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_NONE 0x0
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM84844 0x1
+#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_MASK 0x0000FF00
+#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8
+
+ u32 mba_cfg1; /* 0x28 */
+#define NVM_CFG1_PORT_MBA_MASK 0x00000001
+#define NVM_CFG1_PORT_MBA_OFFSET 0
+#define NVM_CFG1_PORT_MBA_DISABLED 0x0
+#define NVM_CFG1_PORT_MBA_ENABLED 0x1
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_MASK 0x00000006
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_OFFSET 1
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_AUTO 0x0
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_BBS 0x1
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_INT18H 0x2
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_INT19H 0x3
+#define NVM_CFG1_PORT_MBA_DELAY_TIME_MASK 0x00000078
+#define NVM_CFG1_PORT_MBA_DELAY_TIME_OFFSET 3
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_MASK 0x00000080
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_OFFSET 7
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_S 0x0
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_B 0x1
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_MASK 0x00000100
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_OFFSET 8
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_DISABLED 0x0
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_ENABLED 0x1
+#define NVM_CFG1_PORT_RESERVED5_MASK 0x0001FE00
+#define NVM_CFG1_PORT_RESERVED5_OFFSET 9
+#define NVM_CFG1_PORT_RESERVED5_DISABLED 0x0
+#define NVM_CFG1_PORT_RESERVED5_2K 0x1
+#define NVM_CFG1_PORT_RESERVED5_4K 0x2
+#define NVM_CFG1_PORT_RESERVED5_8K 0x3
+#define NVM_CFG1_PORT_RESERVED5_16K 0x4
+#define NVM_CFG1_PORT_RESERVED5_32K 0x5
+#define NVM_CFG1_PORT_RESERVED5_64K 0x6
+#define NVM_CFG1_PORT_RESERVED5_128K 0x7
+#define NVM_CFG1_PORT_RESERVED5_256K 0x8
+#define NVM_CFG1_PORT_RESERVED5_512K 0x9
+#define NVM_CFG1_PORT_RESERVED5_1M 0xA
+#define NVM_CFG1_PORT_RESERVED5_2M 0xB
+#define NVM_CFG1_PORT_RESERVED5_4M 0xC
+#define NVM_CFG1_PORT_RESERVED5_8M 0xD
+#define NVM_CFG1_PORT_RESERVED5_16M 0xE
+#define NVM_CFG1_PORT_RESERVED5_32M 0xF
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_MASK 0x001E0000
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_OFFSET 17
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_AUTONEG 0x0
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_1G 0x1
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_10G 0x2
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_25G 0x4
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_40G 0x5
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_50G 0x6
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_100G 0x7
+#define NVM_CFG1_PORT_MBA_BOOT_RETRY_COUNT_MASK 0x00E00000
+#define NVM_CFG1_PORT_MBA_BOOT_RETRY_COUNT_OFFSET 21
+
+ u32 mba_cfg2; /* 0x2C */
+#define NVM_CFG1_PORT_MBA_VLAN_VALUE_MASK 0x0000FFFF
+#define NVM_CFG1_PORT_MBA_VLAN_VALUE_OFFSET 0
+#define NVM_CFG1_PORT_MBA_VLAN_MASK 0x00010000
+#define NVM_CFG1_PORT_MBA_VLAN_OFFSET 16
+
+ u32 vf_cfg; /* 0x30 */
+#define NVM_CFG1_PORT_RESERVED8_MASK 0x0000FFFF
+#define NVM_CFG1_PORT_RESERVED8_OFFSET 0
+#define NVM_CFG1_PORT_RESERVED6_MASK 0x000F0000
+#define NVM_CFG1_PORT_RESERVED6_OFFSET 16
+#define NVM_CFG1_PORT_RESERVED6_DISABLED 0x0
+#define NVM_CFG1_PORT_RESERVED6_4K 0x1
+#define NVM_CFG1_PORT_RESERVED6_8K 0x2
+#define NVM_CFG1_PORT_RESERVED6_16K 0x3
+#define NVM_CFG1_PORT_RESERVED6_32K 0x4
+#define NVM_CFG1_PORT_RESERVED6_64K 0x5
+#define NVM_CFG1_PORT_RESERVED6_128K 0x6
+#define NVM_CFG1_PORT_RESERVED6_256K 0x7
+#define NVM_CFG1_PORT_RESERVED6_512K 0x8
+#define NVM_CFG1_PORT_RESERVED6_1M 0x9
+#define NVM_CFG1_PORT_RESERVED6_2M 0xA
+#define NVM_CFG1_PORT_RESERVED6_4M 0xB
+#define NVM_CFG1_PORT_RESERVED6_8M 0xC
+#define NVM_CFG1_PORT_RESERVED6_16M 0xD
+#define NVM_CFG1_PORT_RESERVED6_32M 0xE
+#define NVM_CFG1_PORT_RESERVED6_64M 0xF
+
+ struct nvm_cfg_mac_address lldp_mac_address; /* 0x34 */
+
+ u32 led_port_settings; /* 0x3C */
+#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_MASK 0x000000FF
+#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_OFFSET 0
+#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_MASK 0x0000FF00
+#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_OFFSET 8
+#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_MASK 0x00FF0000
+#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_OFFSET 16
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_1G 0x1
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_10G 0x2
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_25G 0x8
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_40G 0x10
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_50G 0x20
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_100G 0x40
+
+ u32 transceiver_00; /* 0x40 */
+
+	/* Defines for mapping the transceiver module-absent signal */
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_MASK 0x000000FF
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_OFFSET 0
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_NA 0x0
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO0 0x1
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO1 0x2
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO2 0x3
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO3 0x4
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO4 0x5
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO5 0x6
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO6 0x7
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO7 0x8
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO8 0x9
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO9 0xA
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO10 0xB
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO11 0xC
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO12 0xD
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO13 0xE
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO14 0xF
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO15 0x10
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO16 0x11
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO17 0x12
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO18 0x13
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO19 0x14
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO20 0x15
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO21 0x16
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO22 0x17
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO23 0x18
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO24 0x19
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO25 0x1A
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO26 0x1B
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO27 0x1C
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO28 0x1D
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO29 0x1E
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO30 0x1F
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO31 0x20
+ /* Define the GPIO mux settings to switch i2c mux to this port */
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_MASK 0x00000F00
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_OFFSET 8
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_MASK 0x0000F000
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_OFFSET 12
+
+ u32 reserved[133]; /* 0x44 */
+};
+
+struct nvm_cfg1_func {
+ struct nvm_cfg_mac_address mac_address; /* 0x0 */
+
+ u32 rsrv1; /* 0x8 */
+#define NVM_CFG1_FUNC_RESERVED1_MASK 0x0000FFFF
+#define NVM_CFG1_FUNC_RESERVED1_OFFSET 0
+#define NVM_CFG1_FUNC_RESERVED2_MASK 0xFFFF0000
+#define NVM_CFG1_FUNC_RESERVED2_OFFSET 16
+
+ u32 rsrv2; /* 0xC */
+#define NVM_CFG1_FUNC_RESERVED3_MASK 0x0000FFFF
+#define NVM_CFG1_FUNC_RESERVED3_OFFSET 0
+#define NVM_CFG1_FUNC_RESERVED4_MASK 0xFFFF0000
+#define NVM_CFG1_FUNC_RESERVED4_OFFSET 16
+
+ u32 device_id; /* 0x10 */
+#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK 0x0000FFFF
+#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET 0
+#define NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK 0xFFFF0000
+#define NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET 16
+
+ u32 cmn_cfg; /* 0x14 */
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_MASK 0x00000007
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_OFFSET 0
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_PXE 0x0
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_RPL 0x1
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_BOOTP 0x2
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_ISCSI_BOOT 0x3
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_FCOE_BOOT 0x4
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_NONE 0x7
+#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_MASK 0x0007FFF8
+#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_OFFSET 3
+#define NVM_CFG1_FUNC_PERSONALITY_MASK 0x00780000
+#define NVM_CFG1_FUNC_PERSONALITY_OFFSET 19
+#define NVM_CFG1_FUNC_PERSONALITY_ETHERNET 0x0
+#define NVM_CFG1_FUNC_PERSONALITY_ISCSI 0x1
+#define NVM_CFG1_FUNC_PERSONALITY_FCOE 0x2
+#define NVM_CFG1_FUNC_PERSONALITY_ROCE 0x3
+#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_MASK 0x7F800000
+#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_OFFSET 23
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_MASK 0x80000000
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_OFFSET 31
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_DISABLED 0x0
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_ENABLED 0x1
+
+ u32 pci_cfg; /* 0x18 */
+#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_MASK 0x0000007F
+#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_OFFSET 0
+#define NVM_CFG1_FUNC_RESERVESD12_MASK 0x00003F80
+#define NVM_CFG1_FUNC_RESERVESD12_OFFSET 7
+#define NVM_CFG1_FUNC_BAR1_SIZE_MASK 0x0003C000
+#define NVM_CFG1_FUNC_BAR1_SIZE_OFFSET 14
+#define NVM_CFG1_FUNC_BAR1_SIZE_DISABLED 0x0
+#define NVM_CFG1_FUNC_BAR1_SIZE_64K 0x1
+#define NVM_CFG1_FUNC_BAR1_SIZE_128K 0x2
+#define NVM_CFG1_FUNC_BAR1_SIZE_256K 0x3
+#define NVM_CFG1_FUNC_BAR1_SIZE_512K 0x4
+#define NVM_CFG1_FUNC_BAR1_SIZE_1M 0x5
+#define NVM_CFG1_FUNC_BAR1_SIZE_2M 0x6
+#define NVM_CFG1_FUNC_BAR1_SIZE_4M 0x7
+#define NVM_CFG1_FUNC_BAR1_SIZE_8M 0x8
+#define NVM_CFG1_FUNC_BAR1_SIZE_16M 0x9
+#define NVM_CFG1_FUNC_BAR1_SIZE_32M 0xA
+#define NVM_CFG1_FUNC_BAR1_SIZE_64M 0xB
+#define NVM_CFG1_FUNC_BAR1_SIZE_128M 0xC
+#define NVM_CFG1_FUNC_BAR1_SIZE_256M 0xD
+#define NVM_CFG1_FUNC_BAR1_SIZE_512M 0xE
+#define NVM_CFG1_FUNC_BAR1_SIZE_1G 0xF
+#define NVM_CFG1_FUNC_MAX_BANDWIDTH_MASK 0x03FC0000
+#define NVM_CFG1_FUNC_MAX_BANDWIDTH_OFFSET 18
+
+ struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr; /* 0x1C */
+
+ struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr; /* 0x24 */
+
+ u32 reserved[9]; /* 0x2C */
+};
+
+struct nvm_cfg1 {
+ struct nvm_cfg1_glob glob; /* 0x0 */
+
+ struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; /* 0x140 */
+
+ struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; /* 0x230 */
+
+ struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; /* 0xB90 */
+};
+
+/******************************************
+* nvm_cfg structs
+******************************************/
+
+enum nvm_cfg_sections {
+ NVM_CFG_SECTION_NVM_CFG1,
+ NVM_CFG_SECTION_MAX
+};
+
+struct nvm_cfg {
+ u32 num_sections;
+ u32 sections_offset[NVM_CFG_SECTION_MAX];
+ struct nvm_cfg1 cfg1;
+};
+
+#define PORT_0 0
+#define PORT_1 1
+#define PORT_2 2
+#define PORT_3 3
+
+extern struct spad_layout g_spad;
+
+#define MCP_SPAD_SIZE 0x00028000 /* 160 KB */
+
+#define SPAD_OFFSET(addr) (((u32)addr - (u32)CPU_SPAD_BASE))
+
+#define TO_OFFSIZE(_offset, _size) \
+ (u32)((((u32)(_offset) >> 2) << OFFSIZE_OFFSET_SHIFT) | \
+ (((u32)(_size) >> 2) << OFFSIZE_SIZE_SHIFT))
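+
+/* Illustration only: offsize_t packs a dword-aligned offset and size into a
+ * single u32. Assuming OFFSIZE_OFFSET_SHIFT == 0 and OFFSIZE_SIZE_SHIFT == 16
+ * (defined elsewhere in this interface), TO_OFFSIZE(0x28000, 0x1000) would
+ * evaluate to (0xa000 | (0x400 << 16)) == 0x0400a000.
+ */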
+
+enum spad_sections {
+ SPAD_SECTION_TRACE,
+ SPAD_SECTION_NVM_CFG,
+ SPAD_SECTION_PUBLIC,
+ SPAD_SECTION_PRIVATE,
+ SPAD_SECTION_MAX
+};
+
+struct spad_layout {
+ struct nvm_cfg nvm_cfg;
+ struct mcp_public_data public_data;
+};
+
+#define CRC_MAGIC_VALUE 0xDEBB20E3
+#define CRC32_POLYNOMIAL 0xEDB88320
+#define NVM_CRC_SIZE (sizeof(u32))
+
+enum nvm_sw_arbitrator {
+ NVM_SW_ARB_HOST,
+ NVM_SW_ARB_MCP,
+ NVM_SW_ARB_UART,
+ NVM_SW_ARB_RESERVED
+};
+
+/****************************************************************************
+* Boot Strap Region *
+****************************************************************************/
+struct legacy_bootstrap_region {
+ u32 magic_value;
+#define NVM_MAGIC_VALUE 0x669955aa
+ u32 sram_start_addr;
+ u32 code_len; /* boot code length (in dwords) */
+ u32 code_start_addr;
+ u32 crc; /* 32-bit CRC */
+};
+
+/****************************************************************************
+* Directories Region *
+****************************************************************************/
+struct nvm_code_entry {
+ u32 image_type; /* Image type */
+ u32 nvm_start_addr; /* NVM address of the image */
+	u32 len;		/* Length, including the CRC */
+ u32 sram_start_addr;
+ u32 sram_run_addr; /* Relevant in case of MIM only */
+};
+
+enum nvm_image_type {
+ NVM_TYPE_TIM1 = 0x01,
+ NVM_TYPE_TIM2 = 0x02,
+ NVM_TYPE_MIM1 = 0x03,
+ NVM_TYPE_MIM2 = 0x04,
+ NVM_TYPE_MBA = 0x05,
+ NVM_TYPE_MODULES_PN = 0x06,
+ NVM_TYPE_VPD = 0x07,
+ NVM_TYPE_MFW_TRACE1 = 0x08,
+ NVM_TYPE_MFW_TRACE2 = 0x09,
+ NVM_TYPE_NVM_CFG1 = 0x0a,
+ NVM_TYPE_L2B = 0x0b,
+ NVM_TYPE_DIR1 = 0x0c,
+ NVM_TYPE_EAGLE_FW1 = 0x0d,
+ NVM_TYPE_FALCON_FW1 = 0x0e,
+ NVM_TYPE_PCIE_FW1 = 0x0f,
+ NVM_TYPE_HW_SET = 0x10,
+ NVM_TYPE_LIM = 0x11,
+ NVM_TYPE_AVS_FW1 = 0x12,
+ NVM_TYPE_DIR2 = 0x13,
+ NVM_TYPE_CCM = 0x14,
+ NVM_TYPE_EAGLE_FW2 = 0x15,
+ NVM_TYPE_FALCON_FW2 = 0x16,
+ NVM_TYPE_PCIE_FW2 = 0x17,
+ NVM_TYPE_AVS_FW2 = 0x18,
+
+ NVM_TYPE_MAX,
+};
+
+#define MAX_NVM_DIR_ENTRIES 200
+
+struct nvm_dir {
+ s32 seq;
+#define NVM_DIR_NEXT_MFW_MASK 0x00000001
+#define NVM_DIR_SEQ_MASK 0xfffffffe
+#define NVM_DIR_NEXT_MFW(seq) ((seq) & NVM_DIR_NEXT_MFW_MASK)
+
+#define IS_DIR_SEQ_VALID(seq) ((seq & NVM_DIR_SEQ_MASK) != NVM_DIR_SEQ_MASK)
+
+ u32 num_images;
+ u32 rsrv;
+ struct nvm_code_entry code[1]; /* Up to MAX_NVM_DIR_ENTRIES */
+};
+
+#define NVM_DIR_SIZE(_num_images) (sizeof(struct nvm_dir) + \
+ (_num_images - \
+ 1) * sizeof(struct nvm_code_entry) + \
+ NVM_CRC_SIZE)
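+
+/* Illustration only: struct nvm_dir already contains one nvm_code_entry
+ * (code[1]), so NVM_DIR_SIZE() adds only (_num_images - 1) extra entries
+ * plus the trailing CRC dword. E.g. a directory holding 3 images occupies
+ * sizeof(struct nvm_dir) + 2 * sizeof(struct nvm_code_entry) + NVM_CRC_SIZE
+ * bytes.
+ */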
+
+struct nvm_vpd_image {
+ u32 format_revision;
+#define VPD_IMAGE_VERSION 1
+
+ /* This array length depends on the number of VPD fields */
+ u8 vpd_data[1];
+};
+
+/****************************************************************************
+* NVRAM FULL MAP *
+****************************************************************************/
+#define DIR_ID_1 (0)
+#define DIR_ID_2 (1)
+#define MAX_DIR_IDS (2)
+
+#define MFW_BUNDLE_1 (0)
+#define MFW_BUNDLE_2 (1)
+#define MAX_MFW_BUNDLES (2)
+
+#define FLASH_PAGE_SIZE 0x1000
+#define NVM_DIR_MAX_SIZE (FLASH_PAGE_SIZE) /* 4KB */
+#define ASIC_MIM_MAX_SIZE (300 * FLASH_PAGE_SIZE) /* 1.2MB */
+#define FPGA_MIM_MAX_SIZE (25 * FLASH_PAGE_SIZE) /* 100KB */
+
+#define LIM_MAX_SIZE ((2 * \
+ FLASH_PAGE_SIZE) - \
+ sizeof(struct legacy_bootstrap_region) - \
+ NVM_RSV_SIZE)
+#define LIM_OFFSET (NVM_OFFSET(lim_image))
+#define NVM_RSV_SIZE (44)
+#define MIM_MAX_SIZE(is_asic) ((is_asic) ? ASIC_MIM_MAX_SIZE : \
+ FPGA_MIM_MAX_SIZE)
+#define MIM_OFFSET(idx, is_asic) (NVM_OFFSET(dir[MAX_MFW_BUNDLES]) + \
+ ((idx == \
+ NVM_TYPE_MIM2) ? MIM_MAX_SIZE(is_asic) : 0))
+#define NVM_FIXED_AREA_SIZE(is_asic) (sizeof(struct nvm_image) + \
+ MIM_MAX_SIZE(is_asic) * 2)
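+
+/* Illustration only: with the sizes above, and taking the ASIC case,
+ * MIM_OFFSET(NVM_TYPE_MIM1, true) evaluates to 0x004000 and
+ * MIM_OFFSET(NVM_TYPE_MIM2, true) to 0x130000, which matches the MIM1/MIM2
+ * placement in the NVRAM map diagram further below.
+ */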
+
+union nvm_dir_union {
+ struct nvm_dir dir;
+ u8 page[FLASH_PAGE_SIZE];
+};
+
+/* Address
+ * +-------------------+ 0x000000
+ * | Bootstrap: |
+ * | magic_number |
+ * | sram_start_addr |
+ * | code_len |
+ * | code_start_addr |
+ * | crc |
+ * +-------------------+ 0x000014
+ * | rsrv |
+ * +-------------------+ 0x000040
+ * | LIM |
+ * +-------------------+ 0x002000
+ * | Dir1 |
+ * +-------------------+ 0x003000
+ * | Dir2 |
+ * +-------------------+ 0x004000
+ * | MIM1 |
+ * +-------------------+ 0x130000
+ * | MIM2 |
+ * +-------------------+ 0x25C000
+ * | Rest Images: |
+ * | TIM1/2 |
+ * | MFW_TRACE1/2 |
+ * | Eagle/Falcon FW |
+ * | PCIE/AVS FW |
+ * | MBA/CCM/L2B |
+ * | VPD |
+ * | optic_modules |
+ * | ... |
+ * +-------------------+ 0x400000
+ */
+struct nvm_image {
+/*********** !!! FIXED SECTIONS !!! DO NOT MODIFY !!! **********************/
+ /* NVM Offset (size) */
+ struct legacy_bootstrap_region bootstrap;
+ u8 rsrv[NVM_RSV_SIZE];
+ u8 lim_image[LIM_MAX_SIZE];
+ union nvm_dir_union dir[MAX_MFW_BUNDLES];
+
+ /* MIM1_IMAGE 0x004000 (0x12c000) */
+ /* MIM2_IMAGE 0x130000 (0x12c000) */
+/*********** !!! FIXED SECTIONS !!! DO NOT MODIFY !!! **********************/
+}; /* 0x134 */
+
+#define NVM_OFFSET(f) ((u32_t)((int_ptr_t)(&(((struct nvm_image *)0)->f))))
+
+struct hw_set_info {
+ u32 reg_type;
+#define GRC_REG_TYPE 1
+#define PHY_REG_TYPE 2
+#define PCI_REG_TYPE 4
+
+ u32 bank_num;
+ u32 pf_num;
+ u32 operation;
+#define READ_OP 1
+#define WRITE_OP 2
+#define RMW_SET_OP 3
+#define RMW_CLR_OP 4
+
+ u32 reg_addr;
+ u32 reg_data;
+
+ u32 reset_type;
+#define POR_RESET_TYPE BIT(0)
+#define HARD_RESET_TYPE BIT(1)
+#define CORE_RESET_TYPE BIT(2)
+#define MCP_RESET_TYPE BIT(3)
+#define PERSET_ASSERT BIT(4)
+#define PERSET_DEASSERT BIT(5)
+};
+
+struct hw_set_image {
+ u32 format_version;
+#define HW_SET_IMAGE_VERSION 1
+ u32 no_hw_sets;
+
+ /* This array length depends on the no_hw_sets */
+ struct hw_set_info hw_sets[1];
+};
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
new file mode 100644
index 000000000000..ffa99273b353
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -0,0 +1,776 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/qed/qed_chain.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_reg_addr.h"
+
+#define QED_BAR_ACQUIRE_TIMEOUT 1000
+
+/* Invalid values */
+#define QED_BAR_INVALID_OFFSET (cpu_to_le32(-1))
+
+struct qed_ptt {
+ struct list_head list_entry;
+ unsigned int idx;
+ struct pxp_ptt_entry pxp;
+};
+
+struct qed_ptt_pool {
+ struct list_head free_list;
+ spinlock_t lock; /* ptt synchronized access */
+ struct qed_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
+};
+
+int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool),
+ GFP_ATOMIC);
+ int i;
+
+ if (!p_pool)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&p_pool->free_list);
+ for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
+ p_pool->ptts[i].idx = i;
+ p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
+ p_pool->ptts[i].pxp.pretend.control = 0;
+ if (i >= RESERVED_PTT_MAX)
+ list_add(&p_pool->ptts[i].list_entry,
+ &p_pool->free_list);
+ }
+
+ p_hwfn->p_ptt_pool = p_pool;
+ spin_lock_init(&p_pool->lock);
+
+ return 0;
+}
+
+void qed_ptt_invalidate(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ptt *p_ptt;
+ int i;
+
+ for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
+ p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
+ p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET;
+ }
+}
+
+void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
+{
+ kfree(p_hwfn->p_ptt_pool);
+ p_hwfn->p_ptt_pool = NULL;
+}
+
+struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ptt *p_ptt;
+ unsigned int i;
+
+ /* Take the free PTT from the list */
+ for (i = 0; i < QED_BAR_ACQUIRE_TIMEOUT; i++) {
+ spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
+
+ if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) {
+ p_ptt = list_first_entry(&p_hwfn->p_ptt_pool->free_list,
+ struct qed_ptt, list_entry);
+ list_del(&p_ptt->list_entry);
+
+ spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "allocated ptt %d\n", p_ptt->idx);
+ return p_ptt;
+ }
+
+ spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
+ usleep_range(1000, 2000);
+ }
+
+ DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n");
+ return NULL;
+}
+
+void qed_ptt_release(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
+ list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
+ spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
+}
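+
+/* Typical (illustrative) usage of the PTT pool from other qed code;
+ * some_grc_addr/some_bit are placeholders:
+ *
+ *	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ *
+ *	if (p_ptt) {
+ *		u32 val = qed_rd(p_hwfn, p_ptt, some_grc_addr);
+ *
+ *		qed_wr(p_hwfn, p_ptt, some_grc_addr, val | some_bit);
+ *		qed_ptt_release(p_hwfn, p_ptt);
+ *	}
+ *
+ * The points being illustrated are the acquire/release pairing and the
+ * NULL check covering the acquire timeout.
+ */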
+
+u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+	/* The HW works in dwords; translate the offset to bytes */
+ return le32_to_cpu(p_ptt->pxp.offset) << 2;
+}
+
+static u32 qed_ptt_config_addr(struct qed_ptt *p_ptt)
+{
+ return PXP_PF_WINDOW_ADMIN_PER_PF_START +
+ p_ptt->idx * sizeof(struct pxp_ptt_entry);
+}
+
+u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt)
+{
+ return PXP_EXTERNAL_BAR_PF_WINDOW_START +
+ p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
+}
+
+void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 new_hw_addr)
+{
+ u32 prev_hw_addr;
+
+ prev_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
+
+ if (new_hw_addr == prev_hw_addr)
+ return;
+
+	/* Update PTT entry in admin window */
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "Updating PTT entry %d to offset 0x%x\n",
+ p_ptt->idx, new_hw_addr);
+
+	/* The HW works in dwords while the address is in bytes */
+ p_ptt->pxp.offset = cpu_to_le32(new_hw_addr >> 2);
+
+ REG_WR(p_hwfn,
+ qed_ptt_config_addr(p_ptt) +
+ offsetof(struct pxp_ptt_entry, offset),
+ le32_to_cpu(p_ptt->pxp.offset));
+}
+
+static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr)
+{
+ u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
+ u32 offset;
+
+ offset = hw_addr - win_hw_addr;
+
+ /* Verify the address is within the window */
+ if (hw_addr < win_hw_addr ||
+ offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
+ qed_ptt_set_win(p_hwfn, p_ptt, hw_addr);
+ offset = 0;
+ }
+
+ return qed_ptt_get_bar_addr(p_ptt) + offset;
+}
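+
+/* Illustration only: if the PTT window currently maps GRC address 0x40000
+ * and an access to 0x50000 is requested, the access lands at
+ * bar_addr + 0x10000 as long as 0x10000 is still inside
+ * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE; otherwise qed_ptt_set_win()
+ * re-points the window at 0x50000 and the window base is returned.
+ * The concrete addresses are examples only.
+ */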
+
+struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
+ enum reserved_ptts ptt_idx)
+{
+ if (ptt_idx >= RESERVED_PTT_MAX) {
+ DP_NOTICE(p_hwfn,
+ "Requested PTT %d is out of range\n", ptt_idx);
+ return NULL;
+ }
+
+ return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
+}
+
+void qed_wr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr, u32 val)
+{
+ u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
+
+ REG_WR(p_hwfn, bar_addr, val);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
+ bar_addr, hw_addr, val);
+}
+
+u32 qed_rd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr)
+{
+ u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
+ u32 val = REG_RD(p_hwfn, bar_addr);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
+ bar_addr, hw_addr, val);
+
+ return val;
+}
+
+static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ void *addr,
+ u32 hw_addr,
+ size_t n,
+ bool to_device)
+{
+ u32 dw_count, *host_addr, hw_offset;
+ size_t quota, done = 0;
+ u32 __iomem *reg_addr;
+
+ while (done < n) {
+ quota = min_t(size_t, n - done,
+ PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);
+
+ qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
+ hw_offset = qed_ptt_get_bar_addr(p_ptt);
+
+ dw_count = quota / 4;
+ host_addr = (u32 *)((u8 *)addr + done);
+ reg_addr = (u32 __iomem *)REG_ADDR(p_hwfn, hw_offset);
+ if (to_device)
+ while (dw_count--)
+ DIRECT_REG_WR(reg_addr++, *host_addr++);
+ else
+ while (dw_count--)
+ *host_addr++ = DIRECT_REG_RD(reg_addr++);
+
+ done += quota;
+ }
+}
+
+void qed_memcpy_from(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ void *dest, u32 hw_addr, size_t n)
+{
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
+ hw_addr, dest, hw_addr, (unsigned long)n);
+
+ qed_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
+}
+
+void qed_memcpy_to(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr, void *src, size_t n)
+{
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
+ hw_addr, hw_addr, src, (unsigned long)n);
+
+ qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
+}
+
+void qed_fid_pretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 fid)
+{
+ u16 control = 0;
+
+ SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);
+
+	/* Every pretend undoes previous pretends, including
+	 * any previous port pretend.
+ */
+ SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+
+ if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
+ fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);
+
+ p_ptt->pxp.pretend.control = cpu_to_le16(control);
+ p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid);
+
+ REG_WR(p_hwfn,
+ qed_ptt_config_addr(p_ptt) +
+ offsetof(struct pxp_ptt_entry, pretend),
+ *(u32 *)&p_ptt->pxp.pretend);
+}
+
+void qed_port_pretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 port_id)
+{
+ u16 control = 0;
+
+ SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
+ SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+
+ p_ptt->pxp.pretend.control = cpu_to_le16(control);
+
+ REG_WR(p_hwfn,
+ qed_ptt_config_addr(p_ptt) +
+ offsetof(struct pxp_ptt_entry, pretend),
+ *(u32 *)&p_ptt->pxp.pretend);
+}
+
+void qed_port_unpretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u16 control = 0;
+
+ SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+
+ p_ptt->pxp.pretend.control = cpu_to_le16(control);
+
+ REG_WR(p_hwfn,
+ qed_ptt_config_addr(p_ptt) +
+ offsetof(struct pxp_ptt_entry, pretend),
+ *(u32 *)&p_ptt->pxp.pretend);
+}
+
+/* DMAE */
+static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
+ const u8 is_src_type_grc,
+ const u8 is_dst_type_grc,
+ struct qed_dmae_params *p_params)
+{
+ u32 opcode = 0;
+ u16 opcodeB = 0;
+
+ /* Whether the source is the PCIe or the GRC.
+ * 0- The source is the PCIe
+ * 1- The source is the GRC.
+ */
+ opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
+ : DMAE_CMD_SRC_MASK_PCIE) <<
+ DMAE_CMD_SRC_SHIFT;
+ opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
+ DMAE_CMD_SRC_PF_ID_SHIFT);
+
+ /* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
+ opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
+ : DMAE_CMD_DST_MASK_PCIE) <<
+ DMAE_CMD_DST_SHIFT;
+ opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
+ DMAE_CMD_DST_PF_ID_SHIFT);
+
+ /* Whether to write a completion word to the completion destination:
+ * 0-Do not write a completion word
+ * 1-Write the completion word
+ */
+ opcode |= (DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT);
+ opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
+ DMAE_CMD_SRC_ADDR_RESET_SHIFT);
+
+ if (p_params->flags & QED_DMAE_FLAG_COMPLETION_DST)
+ opcode |= (1 << DMAE_CMD_COMP_FUNC_SHIFT);
+
+ opcode |= (DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT);
+
+ opcode |= ((p_hwfn->port_id) << DMAE_CMD_PORT_ID_SHIFT);
+
+ /* reset source address in next go */
+ opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
+ DMAE_CMD_SRC_ADDR_RESET_SHIFT);
+
+ /* reset dest address in next go */
+ opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
+ DMAE_CMD_DST_ADDR_RESET_SHIFT);
+
+ opcodeB |= (DMAE_CMD_SRC_VF_ID_MASK <<
+ DMAE_CMD_SRC_VF_ID_SHIFT);
+
+ opcodeB |= (DMAE_CMD_DST_VF_ID_MASK <<
+ DMAE_CMD_DST_VF_ID_SHIFT);
+
+ p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
+ p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcodeB);
+}
+
+u32 qed_dmae_idx_to_go_cmd(u8 idx)
+{
+ /* All the DMAE 'go' registers form an array in internal memory */
+ return DMAE_REG_GO_C0 + (idx << 2);
+}
+
+static int
+qed_dmae_post_command(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct dmae_cmd *command = p_hwfn->dmae_info.p_dmae_cmd;
+ u8 idx_cmd = p_hwfn->dmae_info.channel, i;
+ int qed_status = 0;
+
+ /* verify address is not NULL */
+ if ((((command->dst_addr_lo == 0) && (command->dst_addr_hi == 0)) ||
+ ((command->src_addr_lo == 0) && (command->src_addr_hi == 0)))) {
+ DP_NOTICE(p_hwfn,
+ "source or destination address 0 idx_cmd=%d\n"
+ "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
+ idx_cmd,
+ le32_to_cpu(command->opcode),
+ le16_to_cpu(command->opcode_b),
+ le16_to_cpu(command->length),
+ le32_to_cpu(command->src_addr_hi),
+ le32_to_cpu(command->src_addr_lo),
+ le32_to_cpu(command->dst_addr_hi),
+ le32_to_cpu(command->dst_addr_lo));
+
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
+ idx_cmd,
+ le32_to_cpu(command->opcode),
+ le16_to_cpu(command->opcode_b),
+ le16_to_cpu(command->length),
+ le32_to_cpu(command->src_addr_hi),
+ le32_to_cpu(command->src_addr_lo),
+ le32_to_cpu(command->dst_addr_hi),
+ le32_to_cpu(command->dst_addr_lo));
+
+	/* Copy the command to DMAE - this needs to be done before every call
+	 * since the source/dest addresses are not preserved between commands.
+	 * The first 9 DWs are the command registers, the 10th DW is the
+	 * GO register, and the rest are result registers
+	 * (which are read only by the client).
+	 */
+ for (i = 0; i < DMAE_CMD_SIZE; i++) {
+ u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
+ *(((u32 *)command) + i) : 0;
+
+ qed_wr(p_hwfn, p_ptt,
+ DMAE_REG_CMD_MEM +
+ (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
+ (i * sizeof(u32)), data);
+ }
+
+ qed_wr(p_hwfn, p_ptt,
+ qed_dmae_idx_to_go_cmd(idx_cmd),
+ DMAE_GO_VALUE);
+
+ return qed_status;
+}
+
+int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn)
+{
+ dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
+ struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
+ u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
+ u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;
+
+ *p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(u32),
+ p_addr,
+ GFP_KERNEL);
+ if (!*p_comp) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `p_completion_word'\n");
+ goto err;
+ }
+
+ p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
+ *p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct dmae_cmd),
+ p_addr, GFP_KERNEL);
+ if (!*p_cmd) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct dmae_cmd'\n");
+ goto err;
+ }
+
+ p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+ *p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(u32) * DMAE_MAX_RW_SIZE,
+ p_addr, GFP_KERNEL);
+ if (!*p_buff) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `intermediate_buffer'\n");
+ goto err;
+ }
+
+ p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
+
+ return 0;
+err:
+ qed_dmae_info_free(p_hwfn);
+ return -ENOMEM;
+}
+
+void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
+{
+ dma_addr_t p_phys;
+
+ /* Just make sure no one is in the middle */
+ mutex_lock(&p_hwfn->dmae_info.mutex);
+
+ if (p_hwfn->dmae_info.p_completion_word) {
+ p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(u32),
+ p_hwfn->dmae_info.p_completion_word,
+ p_phys);
+ p_hwfn->dmae_info.p_completion_word = NULL;
+ }
+
+ if (p_hwfn->dmae_info.p_dmae_cmd) {
+ p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct dmae_cmd),
+ p_hwfn->dmae_info.p_dmae_cmd,
+ p_phys);
+ p_hwfn->dmae_info.p_dmae_cmd = NULL;
+ }
+
+ if (p_hwfn->dmae_info.p_intermediate_buffer) {
+ p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(u32) * DMAE_MAX_RW_SIZE,
+ p_hwfn->dmae_info.p_intermediate_buffer,
+ p_phys);
+ p_hwfn->dmae_info.p_intermediate_buffer = NULL;
+ }
+
+ mutex_unlock(&p_hwfn->dmae_info.mutex);
+}
+
+static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn)
+{
+ u32 wait_cnt = 0;
+ u32 wait_cnt_limit = 10000;
+
+ int qed_status = 0;
+
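+ /* Poll the completion word until the DMAE engine writes
+ * DMAE_COMPLETION_VAL; the overall timeout is
+ * wait_cnt_limit * DMAE_MIN_WAIT_TIME microseconds.
+ */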
+ barrier();
+ while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
+ udelay(DMAE_MIN_WAIT_TIME);
+ if (++wait_cnt > wait_cnt_limit) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n",
+ *p_hwfn->dmae_info.p_completion_word,
+ DMAE_COMPLETION_VAL);
+ qed_status = -EBUSY;
+ break;
+ }
+
+ /* to sync the completion_word since we are not
+ * using the volatile keyword for p_completion_word
+ */
+ barrier();
+ }
+
+ if (qed_status == 0)
+ *p_hwfn->dmae_info.p_completion_word = 0;
+
+ return qed_status;
+}
+
+static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u64 src_addr,
+ u64 dst_addr,
+ u8 src_type,
+ u8 dst_type,
+ u32 length)
+{
+ dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+ struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
+ int qed_status = 0;
+
+ switch (src_type) {
+ case QED_DMAE_ADDRESS_GRC:
+ case QED_DMAE_ADDRESS_HOST_PHYS:
+ cmd->src_addr_hi = cpu_to_le32(upper_32_bits(src_addr));
+ cmd->src_addr_lo = cpu_to_le32(lower_32_bits(src_addr));
+ break;
+ /* for virtual source addresses we use the intermediate buffer. */
+ case QED_DMAE_ADDRESS_HOST_VIRT:
+ cmd->src_addr_hi = cpu_to_le32(upper_32_bits(phys));
+ cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys));
+ memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0],
+ (void *)(uintptr_t)src_addr,
+ length * sizeof(u32));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dst_type) {
+ case QED_DMAE_ADDRESS_GRC:
+ case QED_DMAE_ADDRESS_HOST_PHYS:
+ cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(dst_addr));
+ cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(dst_addr));
+ break;
+ /* for virtual destination addresses we use the intermediate buffer. */
+ case QED_DMAE_ADDRESS_HOST_VIRT:
+ cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(phys));
+ cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(phys));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cmd->length = cpu_to_le16((u16)length);
+
+ qed_dmae_post_command(p_hwfn, p_ptt);
+
+ qed_status = qed_dmae_operation_wait(p_hwfn);
+
+ if (qed_status) {
+ DP_NOTICE(p_hwfn,
+ "qed_dmae_host2grc: Wait Failed. source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x\n",
+ src_addr,
+ dst_addr,
+ length);
+ return qed_status;
+ }
+
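+ /* For a virtual destination, copy the DMAE result out of the
+ * intermediate buffer into the caller's buffer.
+ */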
+ if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT)
+ memcpy((void *)(uintptr_t)(dst_addr),
+ &p_hwfn->dmae_info.p_intermediate_buffer[0],
+ length * sizeof(u32));
+
+ return 0;
+}
+
+static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u64 src_addr, u64 dst_addr,
+ u8 src_type, u8 dst_type,
+ u32 size_in_dwords,
+ struct qed_dmae_params *p_params)
+{
+ dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
+ u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
+ struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
+ u64 src_addr_split = 0, dst_addr_split = 0;
+ u16 length_limit = DMAE_MAX_RW_SIZE;
+ int qed_status = 0;
+ u32 offset = 0;
+
+ qed_dmae_opcode(p_hwfn,
+ (src_type == QED_DMAE_ADDRESS_GRC),
+ (dst_type == QED_DMAE_ADDRESS_GRC),
+ p_params);
+
+ cmd->comp_addr_lo = cpu_to_le32(lower_32_bits(phys));
+ cmd->comp_addr_hi = cpu_to_le32(upper_32_bits(phys));
+ cmd->comp_val = cpu_to_le32(DMAE_COMPLETION_VAL);
+
+ /* Split the transfer into chunks of at most DMAE_MAX_RW_SIZE dwords each */
+ cnt_split = size_in_dwords / length_limit;
+ length_mod = size_in_dwords % length_limit;
+
+ src_addr_split = src_addr;
+ dst_addr_split = dst_addr;
+
+ for (i = 0; i <= cnt_split; i++) {
+ offset = length_limit * i;
+
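+ /* GRC addresses are expressed in dwords, while host addresses are
+ * in bytes - hence the 'offset * 4' for host-side addresses below.
+ */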
+ if (!(p_params->flags & QED_DMAE_FLAG_RW_REPL_SRC)) {
+ if (src_type == QED_DMAE_ADDRESS_GRC)
+ src_addr_split = src_addr + offset;
+ else
+ src_addr_split = src_addr + (offset * 4);
+ }
+
+ if (dst_type == QED_DMAE_ADDRESS_GRC)
+ dst_addr_split = dst_addr + offset;
+ else
+ dst_addr_split = dst_addr + (offset * 4);
+
+ length_cur = (cnt_split == i) ? length_mod : length_limit;
+
+ /* might be zero on last iteration */
+ if (!length_cur)
+ continue;
+
+ qed_status = qed_dmae_execute_sub_operation(p_hwfn,
+ p_ptt,
+ src_addr_split,
+ dst_addr_split,
+ src_type,
+ dst_type,
+ length_cur);
+ if (qed_status) {
+ DP_NOTICE(p_hwfn,
+ "qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
+ qed_status,
+ src_addr,
+ dst_addr,
+ length_cur);
+ break;
+ }
+ }
+
+ return qed_status;
+}
+
+int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u64 source_addr,
+ u32 grc_addr,
+ u32 size_in_dwords,
+ u32 flags)
+{
+ u32 grc_addr_in_dw = grc_addr / sizeof(u32);
+ struct qed_dmae_params params;
+ int rc;
+
+ memset(&params, 0, sizeof(struct qed_dmae_params));
+ params.flags = flags;
+
+ mutex_lock(&p_hwfn->dmae_info.mutex);
+
+ rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
+ grc_addr_in_dw,
+ QED_DMAE_ADDRESS_HOST_VIRT,
+ QED_DMAE_ADDRESS_GRC,
+ size_in_dwords, &params);
+
+ mutex_unlock(&p_hwfn->dmae_info.mutex);
+
+ return rc;
+}
+
+u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
+ enum protocol_type proto,
+ union qed_qm_pq_params *p_params)
+{
+ u16 pq_id = 0;
+
+ if ((proto == PROTOCOLID_CORE || proto == PROTOCOLID_ETH) &&
+ !p_params) {
+ DP_NOTICE(p_hwfn,
+ "Protocol %d received NULL PQ params\n",
+ proto);
+ return 0;
+ }
+
+ switch (proto) {
+ case PROTOCOLID_CORE:
+ if (p_params->core.tc == LB_TC)
+ pq_id = p_hwfn->qm_info.pure_lb_pq;
+ else
+ pq_id = p_hwfn->qm_info.offload_pq;
+ break;
+ case PROTOCOLID_ETH:
+ pq_id = p_params->eth.tc;
+ break;
+ default:
+ pq_id = 0;
+ }
+
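+ /* Convert the protocol-relative PQ index into an absolute PQ id by
+ * adding the base and this PF's first allocated PQ resource.
+ */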
+ pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, QED_PQ);
+
+ return pq_id;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
new file mode 100644
index 000000000000..e56d433793be
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -0,0 +1,263 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_HW_H
+#define _QED_HW_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_dev_api.h"
+
+/* Forward declaration */
+struct qed_ptt;
+
+enum reserved_ptts {
+ RESERVED_PTT_EDIAG,
+ RESERVED_PTT_USER_SPACE,
+ RESERVED_PTT_MAIN,
+ RESERVED_PTT_DPC,
+ RESERVED_PTT_MAX
+};
+
+enum _dmae_cmd_dst_mask {
+ DMAE_CMD_DST_MASK_NONE = 0,
+ DMAE_CMD_DST_MASK_PCIE = 1,
+ DMAE_CMD_DST_MASK_GRC = 2
+};
+
+enum _dmae_cmd_src_mask {
+ DMAE_CMD_SRC_MASK_PCIE = 0,
+ DMAE_CMD_SRC_MASK_GRC = 1
+};
+
+enum _dmae_cmd_crc_mask {
+ DMAE_CMD_COMP_CRC_EN_MASK_NONE = 0,
+ DMAE_CMD_COMP_CRC_EN_MASK_SET = 1
+};
+
+/* definitions for DMA constants */
+#define DMAE_GO_VALUE 0x1
+
+#define DMAE_COMPLETION_VAL 0xD1AE
+#define DMAE_CMD_ENDIANITY 0x2
+
+#define DMAE_CMD_SIZE 14
+#define DMAE_CMD_SIZE_TO_FILL (DMAE_CMD_SIZE - 5)
+#define DMAE_MIN_WAIT_TIME 0x2
+#define DMAE_MAX_CLIENTS 32
+
+/**
+ * @brief qed_gtt_init - Initialize GTT windows
+ *
+ * @param p_hwfn
+ */
+void qed_gtt_init(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_invalidate - Forces all ptt entries to be re-configured
+ *
+ * @param p_hwfn
+ */
+void qed_ptt_invalidate(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_pool_alloc - Allocate and initialize PTT pool
+ *
+ * @param p_hwfn
+ *
+ * @return struct _qed_status - success (0), negative - error.
+ */
+int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_pool_free -
+ *
+ * @param p_hwfn
+ */
+void qed_ptt_pool_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_get_hw_addr - Get PTT's GRC/HW address
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_ptt_get_bar_addr - Get PTT's external BAR address
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_ptt_set_win - Set PTT Window's GRC BAR address
+ *
+ * @param p_hwfn
+ * @param new_hw_addr
+ * @param p_ptt
+ */
+void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 new_hw_addr);
+
+/**
+ * @brief qed_get_reserved_ptt - Get a specific reserved PTT
+ *
+ * @param p_hwfn
+ * @param ptt_idx
+ *
+ * @return struct qed_ptt *
+ */
+struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
+ enum reserved_ptts ptt_idx);
+
+/**
+ * @brief qed_wr - Write value to BAR using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ * @param val
+ */
+void qed_wr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr,
+ u32 val);
+
+/**
+ * @brief qed_rd - Read value from BAR using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ */
+u32 qed_rd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr);
+
+/**
+ * @brief qed_memcpy_from - copy n bytes from BAR using the given
+ * ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param dest
+ * @param hw_addr
+ * @param n
+ */
+void qed_memcpy_from(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ void *dest,
+ u32 hw_addr,
+ size_t n);
+
+/**
+ * @brief qed_memcpy_to - copy n bytes to BAR using the given
+ * ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ * @param src
+ * @param n
+ */
+void qed_memcpy_to(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr,
+ void *src,
+ size_t n);
+
+/**
+ * @brief qed_fid_pretend - pretend to another function when
+ * accessing the ptt window. There is no way to unpretend
+ * a function. The only way to cancel a pretend is to
+ * pretend back to the original function.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param fid - fid field of pxp_pretend structure. Can contain
+ *              either pf / vf; port/path fields are don't-care.
+ */
+void qed_fid_pretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 fid);
+
+/**
+ * @brief qed_port_pretend - pretend to another port when
+ * accessing the ptt window
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param port_id - the port to pretend to
+ */
+void qed_port_pretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 port_id);
+
+/**
+ * @brief qed_port_unpretend - cancel any previously set port
+ * pretend
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_port_unpretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_dmae_idx_to_go_cmd - map the idx to dmae cmd
+ * this is declared here since other files will require it.
+ * @param idx
+ */
+u32 qed_dmae_idx_to_go_cmd(u8 idx);
+
+/**
+ * @brief qed_dmae_info_alloc - Init the dmae_info structure
+ * which is part of p_hwfn.
+ * @param p_hwfn
+ */
+int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_dmae_info_free - Free the dmae_info structure
+ * which is part of p_hwfn
+ *
+ * @param p_hwfn
+ */
+void qed_dmae_info_free(struct qed_hwfn *p_hwfn);
+
+union qed_qm_pq_params {
+ struct {
+ u8 tc;
+ } core;
+
+ struct {
+ u8 is_vf;
+ u8 vf_id;
+ u8 tc;
+ } eth;
+};
+
+u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
+ enum protocol_type proto,
+ union qed_qm_pq_params *params);
+
+int qed_init_fw_data(struct qed_dev *cdev,
+ const u8 *fw_data);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
new file mode 100644
index 000000000000..0b21a553cc7d
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -0,0 +1,798 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_reg_addr.h"
+
+enum cminterface {
+ MCM_SEC,
+ MCM_PRI,
+ UCM_SEC,
+ UCM_PRI,
+ TCM_SEC,
+ TCM_PRI,
+ YCM_SEC,
+ YCM_PRI,
+ XCM_SEC,
+ XCM_PRI,
+ NUM_OF_CM_INTERFACES
+};
+
+/* general constants */
+#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */
+#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
+ QM_PQ_ELEMENT_SIZE, \
+ 0x1000) : 0)
+#define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, \
+ 0x100) - 1 : 0)
+#define QM_INVALID_PQ_ID 0xffff
+/* feature enable */
+#define QM_BYPASS_EN 1
+#define QM_BYTE_CRD_EN 1
+/* other PQ constants */
+#define QM_OTHER_PQS_PER_PF 4
+/* WFQ constants */
+#define QM_WFQ_UPPER_BOUND 6250000
+#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
+#define QM_WFQ_VP_PQ_PF_SHIFT 5
+#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
+#define QM_WFQ_MAX_INC_VAL 4375000
+#define QM_WFQ_INIT_CRD(inc_val) (2 * (inc_val))
+/* RL constants */
+#define QM_RL_UPPER_BOUND 6250000
+#define QM_RL_PERIOD 5 /* in us */
+#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
+#define QM_RL_INC_VAL(rate) max_t(u32, \
+ (((rate ? rate : 1000000) \
+ * QM_RL_PERIOD) / 8), 1)
+#define QM_RL_MAX_INC_VAL 4375000
+/* AFullOprtnstcCrdMask constants */
+#define QM_OPPOR_LINE_VOQ_DEF 1
+#define QM_OPPOR_FW_STOP_DEF 0
+#define QM_OPPOR_PQ_EMPTY_DEF 1
+#define EAGLE_WORKAROUND_TC 7
+/* Command Queue constants */
+#define PBF_CMDQ_PURE_LB_LINES 150
+#define PBF_CMDQ_EAGLE_WORKAROUND_LINES 8
+#define PBF_CMDQ_LINES_RT_OFFSET(voq) ( \
+ PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
+ (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
+ PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
+#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) ( \
+ PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
+ (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
+ PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
+#define QM_VOQ_LINE_CRD(pbf_cmd_lines) ((((pbf_cmd_lines) - \
+ 4) * \
+ 2) | QM_LINE_CRD_REG_SIGN_BIT)
+/* BTB: blocks constants (block size = 256B) */
+#define BTB_JUMBO_PKT_BLOCKS 38
+#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
+#define BTB_EAGLE_WORKAROUND_BLOCKS 4
+#define BTB_PURE_LB_FACTOR 10
+#define BTB_PURE_LB_RATIO 7
+/* QM stop command constants */
+#define QM_STOP_PQ_MASK_WIDTH 32
+#define QM_STOP_CMD_ADDR 0x2
+#define QM_STOP_CMD_STRUCT_SIZE 2
+#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
+#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
+#define QM_STOP_CMD_PAUSE_MASK_MASK -1
+#define QM_STOP_CMD_GROUP_ID_OFFSET 1
+#define QM_STOP_CMD_GROUP_ID_SHIFT 16
+#define QM_STOP_CMD_GROUP_ID_MASK 15
+#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
+#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
+#define QM_STOP_CMD_PQ_TYPE_MASK 1
+#define QM_STOP_CMD_MAX_POLL_COUNT 100
+#define QM_STOP_CMD_POLL_PERIOD_US 500
+/* QM command macros */
+#define QM_CMD_STRUCT_SIZE(cmd) cmd ## \
+ _STRUCT_SIZE
+#define QM_CMD_SET_FIELD(var, cmd, field, \
+ value) SET_FIELD(var[cmd ## _ ## field ## \
+ _OFFSET], \
+ cmd ## _ ## field, \
+ value)
+/* QM: VOQ macros */
+#define PHYS_VOQ(port, tc, max_phy_tcs_pr_port) ((port) * \
+ (max_phy_tcs_pr_port) \
+ + (tc))
+#define LB_VOQ(port) ( \
+ MAX_PHYS_VOQS + (port))
+#define VOQ(port, tc, max_phy_tcs_pr_port) \
+ ((tc) < \
+ LB_TC ? PHYS_VOQ(port, \
+ tc, \
+ max_phy_tcs_pr_port) \
+ : LB_VOQ(port))
+/******************** INTERNAL IMPLEMENTATION *********************/
+/* Prepare PF RL enable/disable runtime init values */
+static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn,
+ bool pf_rl_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
+ if (pf_rl_en) {
+ /* enable RLs for all VOQs */
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
+ (1 << MAX_NUM_VOQS) - 1);
+ /* write RL period */
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLPFPERIOD_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLPFPERIODTIMER_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+ /* set credit threshold for QM bypass flow */
+ if (QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
+ QM_RL_UPPER_BOUND);
+ }
+}
+
+/* Prepare PF WFQ enable/disable runtime init values */
+static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn,
+ bool pf_wfq_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
+ /* set credit threshold for QM bypass flow */
+ if (pf_wfq_en && QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
+ QM_WFQ_UPPER_BOUND);
+}
+
+/* Prepare VPORT RL enable/disable runtime init values */
+static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn,
+ bool vport_rl_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
+ vport_rl_en ? 1 : 0);
+ if (vport_rl_en) {
+ /* write RL period (use timer 0 only) */
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+ /* set credit threshold for QM bypass flow */
+ if (QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
+ QM_RL_UPPER_BOUND);
+ }
+}
+
+/* Prepare VPORT WFQ enable/disable runtime init values */
+static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn,
+ bool vport_wfq_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
+ vport_wfq_en ? 1 : 0);
+ /* set credit threshold for QM bypass flow */
+ if (vport_wfq_en && QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
+ QM_WFQ_UPPER_BOUND);
+}
+
+/* Prepare runtime init values to allocate PBF command queue lines for
+ * the specified VOQ
+ */
+static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
+ u8 voq,
+ u16 cmdq_lines)
+{
+ u32 qm_line_crd;
+
+ /* In A0, limit the size of the PBF queue so that it can hold only
+ * 511 commands with the minimum size of 4 (the FCoE minimum size).
+ */
+ bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
+
+ if (is_bb_a0)
+ cmdq_lines = min_t(u32, cmdq_lines, 1022);
+ qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
+ OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
+ (u32)cmdq_lines);
+ STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
+ STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
+ qm_line_crd);
+}
+
+/* Prepare runtime init values to allocate PBF command queue lines. */
+static void qed_cmdq_lines_rt_init(
+ struct qed_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ struct init_qm_port_params port_params[MAX_NUM_PORTS])
+{
+ u8 tc, voq, port_id;
+
+ /* clear PBF lines for all VOQs */
+ for (voq = 0; voq < MAX_NUM_VOQS; voq++)
+ STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
+ for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+ if (port_params[port_id].active) {
+ u16 phys_lines, phys_lines_per_tc;
+ u8 phys_tcs = port_params[port_id].num_active_phys_tcs;
+
+ /* find #lines to divide between the active
+ * physical TCs.
+ */
+ phys_lines = port_params[port_id].num_pbf_cmd_lines -
+ PBF_CMDQ_PURE_LB_LINES;
+ /* find #lines per active physical TC */
+ phys_lines_per_tc = phys_lines / phys_tcs;
+ /* init registers per active TC */
+ for (tc = 0; tc < phys_tcs; tc++) {
+ voq = PHYS_VOQ(port_id, tc,
+ max_phys_tcs_per_port);
+ qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
+ phys_lines_per_tc);
+ }
+ /* init registers for pure LB TC */
+ qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
+ PBF_CMDQ_PURE_LB_LINES);
+ }
+ }
+}
+
+static void qed_btb_blocks_rt_init(
+ struct qed_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ struct init_qm_port_params port_params[MAX_NUM_PORTS])
+{
+ u32 usable_blocks, pure_lb_blocks, phys_blocks;
+ u8 tc, voq, port_id;
+
+ for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+ u32 temp;
+ u8 phys_tcs;
+
+ if (!port_params[port_id].active)
+ continue;
+
+ phys_tcs = port_params[port_id].num_active_phys_tcs;
+
+ /* subtract headroom blocks */
+ usable_blocks = port_params[port_id].num_btb_blocks -
+ BTB_HEADROOM_BLOCKS;
+
+ /* find blocks per physical TC. Use a scaling factor to avoid
+ * floating-point arithmetic.
+ */
+ pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
+ (phys_tcs * BTB_PURE_LB_FACTOR +
+ BTB_PURE_LB_RATIO);
+ pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
+ pure_lb_blocks / BTB_PURE_LB_FACTOR);
+ phys_blocks = (usable_blocks - pure_lb_blocks) / phys_tcs;
+
+ /* init physical TCs */
+ for (tc = 0; tc < phys_tcs; tc++) {
+ voq = PHYS_VOQ(port_id, tc, max_phys_tcs_per_port);
+ STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
+ phys_blocks);
+ }
+
+ /* init pure LB TC */
+ temp = LB_VOQ(port_id);
+ STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp),
+ pure_lb_blocks);
+ }
+}
+
+/* Prepare Tx PQ mapping runtime init values for the specified PF */
+static void qed_tx_pq_map_rt_init(
+ struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params,
+ u32 base_mem_addr_4kb)
+{
+ struct init_qm_vport_params *vport_params = p_params->vport_params;
+ u16 num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
+ u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
+ u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
+ QM_PF_QUEUE_GROUP_SIZE;
+ bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
+ u16 i, pq_id, pq_group;
+
+ /* a bit per Tx PQ indicating if the PQ is associated with a VF */
+ u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
+ u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
+ u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
+ u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
+ u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
+ u32 mem_addr_4kb = base_mem_addr_4kb;
+
+ /* set mapping from PQ group to PF */
+ for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
+ STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
+ (u32)(p_params->pf_id));
+ /* set PQ sizes */
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
+ QM_PQ_SIZE_256B(p_params->num_pf_cids));
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
+ QM_PQ_SIZE_256B(p_params->num_vf_cids));
+
+ /* go over all Tx PQs */
+ for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
+ u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
+ p_params->max_phys_tcs_per_port);
+ bool is_vf_pq = (i >= p_params->num_pf_pqs);
+ struct qm_rf_pq_map tx_pq_map;
+
+ /* update first Tx PQ of VPORT/TC */
+ u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
+ p_params->start_vport;
+ u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0];
+ u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id];
+
+ if (first_tx_pq_id == QM_INVALID_PQ_ID) {
+ /* create new VP PQ */
+ pq_ids[p_params->pq_params[i].tc_id] = pq_id;
+ first_tx_pq_id = pq_id;
+ /* map VP PQ to VOQ and PF */
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQVPMAP_RT_OFFSET +
+ first_tx_pq_id,
+ (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
+ (p_params->pf_id <<
+ QM_WFQ_VP_PQ_PF_SHIFT));
+ }
+ /* fill PQ map entry */
+ memset(&tx_pq_map, 0, sizeof(tx_pq_map));
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
+ is_vf_pq ? 1 : 0);
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
+ is_vf_pq ? p_params->pq_params[i].vport_id : 0);
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
+ p_params->pq_params[i].wrr_group);
+ /* write PQ map entry to CAM */
+ STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
+ *((u32 *)&tx_pq_map));
+ /* set base address */
+ STORE_RT_REG(p_hwfn,
+ QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
+ mem_addr_4kb);
+ /* check if VF PQ */
+ if (is_vf_pq) {
+ /* if PQ is associated with a VF, add indication
+ * to PQ VF mask
+ */
+ tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
+ (1 << (pq_id % tx_pq_vf_mask_width));
+ mem_addr_4kb += vport_pq_mem_4kb;
+ } else {
+ mem_addr_4kb += pq_mem_4kb;
+ }
+ }
+
+ /* store Tx PQ VF mask to size select register */
+ for (i = 0; i < num_tx_pq_vf_masks; i++) {
+ if (tx_pq_vf_mask[i]) {
+ if (is_bb_a0) {
+ u32 curr_mask = 0, addr;
+
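+ /* Unless this is the first PF, read the current register value
+ * so that bits already set by other PFs are preserved.
+ */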
+ addr = QM_REG_MAXPQSIZETXSEL_0 + (i * 4);
+ if (!p_params->is_first_pf)
+ curr_mask = qed_rd(p_hwfn, p_ptt,
+ addr);
+
+ addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
+
+ STORE_RT_REG(p_hwfn, addr,
+ curr_mask | tx_pq_vf_mask[i]);
+ } else {
+ u32 addr;
+
+ addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
+ STORE_RT_REG(p_hwfn, addr,
+ tx_pq_vf_mask[i]);
+ }
+ }
+ }
+}
+
+/* Prepare Other PQ mapping runtime init values for the specified PF */
+static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
+ u8 port_id,
+ u8 pf_id,
+ u32 num_pf_cids,
+ u32 num_tids,
+ u32 base_mem_addr_4kb)
+{
+ u16 i, pq_id;
+
+ /* a single other PQ group is used in each PF,
+ * where PQ group i is used in PF i.
+ */
+ u16 pq_group = pf_id;
+ u32 pq_size = num_pf_cids + num_tids;
+ u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
+ u32 mem_addr_4kb = base_mem_addr_4kb;
+
+ /* map PQ group to PF */
+ STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
+ (u32)(pf_id));
+ /* set PQ sizes */
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
+ QM_PQ_SIZE_256B(pq_size));
+ /* set base address */
+ for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
+ i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
+ STORE_RT_REG(p_hwfn,
+ QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
+ mem_addr_4kb);
+ mem_addr_4kb += pq_mem_4kb;
+ }
+}
+
+/* Prepare PF WFQ runtime init values for the specified PF.
+ * Return -1 on error.
+ */
+static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_qm_pf_rt_init_params *p_params)
+{
+ u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
+ u32 crd_reg_offset;
+ u32 inc_val;
+ u16 i;
+
+ if (p_params->pf_id < MAX_NUM_PFS_BB)
+ crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
+ else
+ crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET +
+ (p_params->pf_id % MAX_NUM_PFS_BB);
+
+ inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
+ if (inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
+ return -1;
+ }
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
+ inc_val);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
+ QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
+
+ for (i = 0; i < num_tx_pqs; i++) {
+ u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
+ p_params->max_phys_tcs_per_port);
+
+ OVERWRITE_RT_REG(p_hwfn,
+ crd_reg_offset + voq * MAX_NUM_PFS_BB,
+ QM_WFQ_INIT_CRD(inc_val) |
+ QM_WFQ_CRD_REG_SIGN_BIT);
+ }
+
+ return 0;
+}
+
+/* Prepare PF RL runtime init values for the specified PF.
+ * Return -1 on error.
+ */
+static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn,
+ u8 pf_id,
+ u32 pf_rl)
+{
+ u32 inc_val = QM_RL_INC_VAL(pf_rl);
+
+ if (inc_val > QM_RL_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
+ return -1;
+ }
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
+ QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
+ QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
+ return 0;
+}
+
+/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
+ * Return -1 on error.
+ */
+static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
+ u8 start_vport,
+ u8 num_vports,
+ struct init_qm_vport_params *vport_params)
+{
+ u8 tc, i, vport_id;
+ u32 inc_val;
+
+ /* go over all PF VPORTs */
+ for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
+ u32 temp = QM_REG_WFQVPUPPERBOUND_RT_OFFSET;
+ u16 *pq_ids = &vport_params[i].first_tx_pq_id[0];
+
+ if (!vport_params[i].vport_wfq)
+ continue;
+
+ inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
+ if (inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn,
+ "Invalid VPORT WFQ weight configuration");
+ return -1;
+ }
+
+ /* each VPORT can have several VPORT PQ IDs for
+ * different TCs
+ */
+ for (tc = 0; tc < NUM_OF_TCS; tc++) {
+ u16 vport_pq_id = pq_ids[tc];
+
+ if (vport_pq_id != QM_INVALID_PQ_ID) {
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQVPWEIGHT_RT_OFFSET +
+ vport_pq_id, inc_val);
+ STORE_RT_REG(p_hwfn, temp + vport_pq_id,
+ QM_WFQ_UPPER_BOUND |
+ QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQVPCRD_RT_OFFSET +
+ vport_pq_id,
+ QM_WFQ_INIT_CRD(inc_val) |
+ QM_WFQ_CRD_REG_SIGN_BIT);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
+ u8 start_vport,
+ u8 num_vports,
+ struct init_qm_vport_params *vport_params)
+{
+ u8 i, vport_id;
+
+ /* go over all PF VPORTs */
+ for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
+ u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
+
+ if (inc_val > QM_RL_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn,
+ "Invalid VPORT rate-limit configuration");
+ return -1;
+ }
+
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
+ QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
+ QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
+ inc_val);
+ }
+
+ return 0;
+}
+
+static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 reg_val, i;
+
+ for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
+ i++) {
+ udelay(QM_STOP_CMD_POLL_PERIOD_US);
+ reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
+ }
+
+ /* check if timeout while waiting for SDM command ready */
+ if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "Timeout when waiting for QM SDM command ready signal\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd_addr,
+ u32 cmd_data_lsb,
+ u32 cmd_data_msb)
+{
+ if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
+ return false;
+
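+ /* Write the command address and data, pulse the GO bit, and wait
+ * for the SDM to report ready again before returning.
+ */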
+ qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
+ qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
+ qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
+ qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
+ qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
+
+ return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
+}
+
+/******************** INTERFACE IMPLEMENTATION *********************/
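+/* Compute the memory required for this PF's PQs, in units of 4KB pages:
+ * Tx PF PQs + Tx VF PQs + the PF's 'other' PQs.
+ */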
+u32 qed_qm_pf_mem_size(u8 pf_id,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs)
+{
+ return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
+ QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
+ QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
+}
+
+int qed_qm_common_rt_init(
+ struct qed_hwfn *p_hwfn,
+ struct qed_qm_common_rt_init_params *p_params)
+{
+ /* init AFullOprtnstcCrdMask */
+ u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
+ QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
+ (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
+ (p_params->pf_wfq_en <<
+ QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
+ (p_params->vport_wfq_en <<
+ QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
+ (p_params->pf_rl_en <<
+ QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
+ (p_params->vport_rl_en <<
+ QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
+ (QM_OPPOR_FW_STOP_DEF <<
+ QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
+ (QM_OPPOR_PQ_EMPTY_DEF <<
+ QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
+
+ STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
+ qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);
+ qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
+ qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
+ qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
+ qed_cmdq_lines_rt_init(p_hwfn,
+ p_params->max_ports_per_engine,
+ p_params->max_phys_tcs_per_port,
+ p_params->port_params);
+ qed_btb_blocks_rt_init(p_hwfn,
+ p_params->max_ports_per_engine,
+ p_params->max_phys_tcs_per_port,
+ p_params->port_params);
+ return 0;
+}
+
+int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params)
+{
+ struct init_qm_vport_params *vport_params = p_params->vport_params;
+ u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
+ p_params->num_tids) *
+ QM_OTHER_PQS_PER_PF;
+ u8 tc, i;
+
+ /* clear first Tx PQ ID array for each VPORT */
+ for (i = 0; i < p_params->num_vports; i++)
+ for (tc = 0; tc < NUM_OF_TCS; tc++)
+ vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
+
+ /* map Other PQs (if any) */
+ qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id,
+ p_params->num_pf_cids, p_params->num_tids, 0);
+
+ /* map Tx PQs */
+ qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);
+
+ if (p_params->pf_wfq)
+ if (qed_pf_wfq_rt_init(p_hwfn, p_params))
+ return -1;
+
+ if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
+ return -1;
+
+ if (qed_vp_wfq_rt_init(p_hwfn, p_params->start_vport,
+ p_params->num_vports, vport_params))
+ return -1;
+
+ if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
+ p_params->num_vports, vport_params))
+ return -1;
+
+ return 0;
+}
+
+int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 pf_id,
+ u32 pf_rl)
+{
+ u32 inc_val = QM_RL_INC_VAL(pf_rl);
+
+ if (inc_val > QM_RL_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
+ return -1;
+ }
+
+ qed_wr(p_hwfn, p_ptt,
+ QM_REG_RLPFCRD + pf_id * 4,
+ QM_RL_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
+
+ return 0;
+}
+
+int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 vport_id,
+ u32 vport_rl)
+{
+ u32 inc_val = QM_RL_INC_VAL(vport_rl);
+
+ if (inc_val > QM_RL_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration");
+ return -1;
+ }
+
+ qed_wr(p_hwfn, p_ptt,
+ QM_REG_RLGLBLCRD + vport_id * 4,
+ QM_RL_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
+
+ return 0;
+}
+
+bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool is_release_cmd,
+ bool is_tx_pq,
+ u16 start_pq,
+ u16 num_pqs)
+{
+ u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
+ u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
+
+ /* set command's PQ type */
+ QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
+
+ for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
+ /* set PQ bit in mask (stop command only) */
+ if (!is_release_cmd)
+ pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
+
+ /* if last PQ or end of PQ mask, write command */
+ if ((pq_id == last_pq) ||
+ (pq_id % QM_STOP_PQ_MASK_WIDTH ==
+ (QM_STOP_PQ_MASK_WIDTH - 1))) {
+ QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
+ PAUSE_MASK, pq_mask);
+ QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
+ GROUP_ID,
+ pq_id / QM_STOP_PQ_MASK_WIDTH);
+ if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
+ cmd_arr[0], cmd_arr[1]))
+ return false;
+ pq_mask = 0;
+ }
+ }
+
+ return true;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
new file mode 100644
index 000000000000..796f1390e598
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -0,0 +1,531 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_reg_addr.h"
+
+#define QED_INIT_MAX_POLL_COUNT 100
+#define QED_INIT_POLL_PERIOD_US 500
+
+static u32 pxp_global_win[] = {
+ 0,
+ 0,
+ 0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
+ 0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
+ 0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
+ 0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
+ 0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
+ 0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
+ 0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
+ 0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
+ 0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
+ 0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+};
+
+void qed_init_iro_array(struct qed_dev *cdev)
+{
+ cdev->iro_arr = iro_arr;
+}
+
+/* Runtime configuration helpers */
+void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
+{
+ int i;
+
+ for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
+ p_hwfn->rt_data[i].b_valid = false;
+}
+
+void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
+ u32 rt_offset,
+ u32 val)
+{
+ p_hwfn->rt_data[rt_offset].init_val = val;
+ p_hwfn->rt_data[rt_offset].b_valid = true;
+}
+
+void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
+ u32 rt_offset,
+ u32 *val,
+ size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size / sizeof(u32); i++) {
+ p_hwfn->rt_data[rt_offset + i].init_val = val[i];
+ p_hwfn->rt_data[rt_offset + i].b_valid = true;
+ }
+}
+
+static void qed_init_rt(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 addr,
+ u32 rt_offset,
+ u32 size)
+{
+ struct qed_rt_data *rt_data = p_hwfn->rt_data + rt_offset;
+ u32 i;
+
+ for (i = 0; i < size; i++) {
+ if (!rt_data[i].b_valid)
+ continue;
+ qed_wr(p_hwfn, p_ptt, addr + (i << 2), rt_data[i].init_val);
+ }
+}
+
+int qed_init_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rt_data *rt_data;
+
+ rt_data = kzalloc(sizeof(*rt_data) * RUNTIME_ARRAY_SIZE, GFP_ATOMIC);
+ if (!rt_data)
+ return -ENOMEM;
+
+ p_hwfn->rt_data = rt_data;
+
+ return 0;
+}
+
+void qed_init_free(struct qed_hwfn *p_hwfn)
+{
+ kfree(p_hwfn->rt_data);
+ p_hwfn->rt_data = NULL;
+}
+
+static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 addr,
+ u32 dmae_data_offset,
+ u32 size,
+ const u32 *buf,
+ bool b_must_dmae,
+ bool b_can_dmae)
+{
+ int rc = 0;
+
+ /* Perform DMAE only for lengthy enough sections or for wide-bus */
+ if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
+ const u32 *data = buf + dmae_data_offset;
+ u32 i;
+
+ for (i = 0; i < size; i++)
+ qed_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
+ } else {
+ rc = qed_dmae_host2grc(p_hwfn, p_ptt,
+ (uintptr_t)(buf + dmae_data_offset),
+ addr, size, 0);
+ }
+
+ return rc;
+}
+
+static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 addr,
+ u32 fill,
+ u32 fill_count)
+{
+ static u32 zero_buffer[DMAE_MAX_RW_SIZE];
+
+ memset(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);
+
+ /* invoke the DMAE virtual/physical buffer API with
+ * 1. the DMAE init channel
+ * 2. addr
+ * 3. the zeroed zero_buffer as the source
+ * 4. fill_count
+ */
+
+ return qed_dmae_host2grc(p_hwfn, p_ptt,
+ (uintptr_t)(&zero_buffer[0]),
+ addr, fill_count,
+ QED_DMAE_FLAG_RW_REPL_SRC);
+}
+
+static void qed_init_fill(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 addr,
+ u32 fill,
+ u32 fill_count)
+{
+ u32 i;
+
+ for (i = 0; i < fill_count; i++, addr += sizeof(u32))
+ qed_wr(p_hwfn, p_ptt, addr, fill);
+}
+
+static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct init_write_op *cmd,
+ bool b_must_dmae,
+ bool b_can_dmae)
+{
+ u32 data = le32_to_cpu(cmd->data);
+ u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+ u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
+ u32 offset, output_len, input_len, max_size;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ union init_array_hdr *hdr;
+ const u32 *array_data;
+ int rc = 0;
+ u32 size;
+
+ array_data = cdev->fw_data->arr_data;
+
+ hdr = (union init_array_hdr *)(array_data +
+ dmae_array_offset);
+ data = le32_to_cpu(hdr->raw.data);
+ switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
+ case INIT_ARR_ZIPPED:
+ offset = dmae_array_offset + 1;
+ input_len = GET_FIELD(data,
+ INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
+ max_size = MAX_ZIPPED_SIZE * 4;
+ memset(p_hwfn->unzip_buf, 0, max_size);
+
+ output_len = qed_unzip_data(p_hwfn, input_len,
+ (u8 *)&array_data[offset],
+ max_size, (u8 *)p_hwfn->unzip_buf);
+ if (output_len) {
+ rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, 0,
+ output_len,
+ p_hwfn->unzip_buf,
+ b_must_dmae, b_can_dmae);
+ } else {
+ DP_NOTICE(p_hwfn, "Failed to unzip dmae data\n");
+ rc = -EINVAL;
+ }
+ break;
+ case INIT_ARR_PATTERN:
+ {
+ u32 repeats = GET_FIELD(data,
+ INIT_ARRAY_PATTERN_HDR_REPETITIONS);
+ u32 i;
+
+ size = GET_FIELD(data, INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);
+
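+ /* Write the same 'size'-dword pattern 'repeats' times, advancing
+ * the destination address by 'size' dwords each iteration.
+ */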
+ for (i = 0; i < repeats; i++, addr += size << 2) {
+ rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
+ dmae_array_offset + 1,
+ size, array_data,
+ b_must_dmae, b_can_dmae);
+ if (rc)
+ break;
+ }
+ break;
+ }
+ case INIT_ARR_STANDARD:
+ size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
+ rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
+ dmae_array_offset + 1,
+ size, array_data,
+ b_must_dmae, b_can_dmae);
+ break;
+ }
+
+ return rc;
+}
+
+/* init_ops write command */
+static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct init_write_op *cmd,
+ bool b_can_dmae)
+{
+ u32 data = le32_to_cpu(cmd->data);
+ u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+ bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
+ union init_write_args *arg = &cmd->args;
+ int rc = 0;
+
+ /* Sanitize */
+ if (b_must_dmae && !b_can_dmae) {
+ DP_NOTICE(p_hwfn,
+ "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
+ addr);
+ return -EINVAL;
+ }
+
+ switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
+ case INIT_SRC_INLINE:
+ qed_wr(p_hwfn, p_ptt, addr,
+ le32_to_cpu(arg->inline_val));
+ break;
+ case INIT_SRC_ZEROS:
+ if (b_must_dmae ||
+ (b_can_dmae && (le32_to_cpu(arg->zeros_count) >= 64)))
+ rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0,
+ le32_to_cpu(arg->zeros_count));
+ else
+ qed_init_fill(p_hwfn, p_ptt, addr, 0,
+ le32_to_cpu(arg->zeros_count));
+ break;
+ case INIT_SRC_ARRAY:
+ rc = qed_init_cmd_array(p_hwfn, p_ptt, cmd,
+ b_must_dmae, b_can_dmae);
+ break;
+ case INIT_SRC_RUNTIME:
+ qed_init_rt(p_hwfn, p_ptt, addr,
+ le16_to_cpu(arg->runtime.offset),
+ le16_to_cpu(arg->runtime.size));
+ break;
+ }
+
+ return rc;
+}
+
+static inline bool comp_eq(u32 val, u32 expected_val)
+{
+ return val == expected_val;
+}
+
+static inline bool comp_and(u32 val, u32 expected_val)
+{
+ return (val & expected_val) == expected_val;
+}
+
+static inline bool comp_or(u32 val, u32 expected_val)
+{
+ return (val | expected_val) > 0;
+}
+
+/* init_ops read/poll commands */
+static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct init_read_op *cmd)
+{
+ u32 data = le32_to_cpu(cmd->op_data);
+ u32 addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
+
+ bool (*comp_check)(u32 val,
+ u32 expected_val);
+ u32 delay = QED_INIT_POLL_PERIOD_US, val;
+
+ val = qed_rd(p_hwfn, p_ptt, addr);
+
+ data = le32_to_cpu(cmd->op_data);
+ if (GET_FIELD(data, INIT_READ_OP_POLL)) {
+ int i;
+
+ switch (GET_FIELD(data, INIT_READ_OP_POLL_COMP)) {
+ case INIT_COMPARISON_EQ:
+ comp_check = comp_eq;
+ break;
+ case INIT_COMPARISON_OR:
+ comp_check = comp_or;
+ break;
+ case INIT_COMPARISON_AND:
+ comp_check = comp_and;
+ break;
+ default:
+ comp_check = NULL;
+ DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
+ data);
+ return;
+ }
+
+ for (i = 0;
+ i < QED_INIT_MAX_POLL_COUNT &&
+ !comp_check(val, le32_to_cpu(cmd->expected_val));
+ i++) {
+ udelay(delay);
+ val = qed_rd(p_hwfn, p_ptt, addr);
+ }
+
+ if (i == QED_INIT_MAX_POLL_COUNT)
+ DP_ERR(p_hwfn,
+ "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n",
+ addr, le32_to_cpu(cmd->expected_val),
+ val, data);
+ }
+}
+
+/* init_ops callbacks entry point */
+static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct init_callback_op *p_cmd)
+{
+ DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n");
+}
+
+static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
+ u16 *offset,
+ int modes)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ const u8 *modes_tree_buf;
+ u8 arg1, arg2, tree_val;
+
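+ /* The modes tree buffer holds a prefix-notation boolean expression:
+ * each node is either a NOT/OR/AND operator followed by its operand
+ * sub-tree(s), or a leaf selecting a bit in 'modes'. Evaluate it
+ * recursively.
+ */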
+ modes_tree_buf = cdev->fw_data->modes_tree_buf;
+ tree_val = modes_tree_buf[(*offset)++];
+ switch (tree_val) {
+ case INIT_MODE_OP_NOT:
+ return qed_init_cmd_mode_match(p_hwfn, offset, modes) ^ 1;
+ case INIT_MODE_OP_OR:
+ arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+ arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+ return arg1 | arg2;
+ case INIT_MODE_OP_AND:
+ arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+ arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+ return arg1 & arg2;
+ default:
+ tree_val -= MAX_INIT_MODE_OPS;
+ return (modes & (1 << tree_val)) ? 1 : 0;
+ }
+}
+
+static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
+ struct init_if_mode_op *p_cmd,
+ int modes)
+{
+ u16 offset = le16_to_cpu(p_cmd->modes_buf_offset);
+
+ if (qed_init_cmd_mode_match(p_hwfn, &offset, modes))
+ return 0;
+ else
+ return GET_FIELD(le32_to_cpu(p_cmd->op_data),
+ INIT_IF_MODE_OP_CMD_OFFSET);
+}
+
+static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
+ struct init_if_phase_op *p_cmd,
+ u32 phase,
+ u32 phase_id)
+{
+ u32 data = le32_to_cpu(p_cmd->phase_data);
+ u32 op_data = le32_to_cpu(p_cmd->op_data);
+
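+ /* If the op's phase/phase_id does not match the current phase, return
+ * the number of init commands to skip; otherwise return 0.
+ */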
+ if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
+ (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
+ GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
+ return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
+ else
+ return 0;
+}
+
+int qed_init_run(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ int phase,
+ int phase_id,
+ int modes)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u32 cmd_num, num_init_ops;
+ union init_op *init_ops;
+ bool b_dmae = false;
+ int rc = 0;
+
+ num_init_ops = cdev->fw_data->init_ops_size;
+ init_ops = cdev->fw_data->init_ops;
+
+ p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC);
+ if (!p_hwfn->unzip_buf) {
+ DP_NOTICE(p_hwfn, "Failed to allocate unzip buffer\n");
+ return -ENOMEM;
+ }
+
+ for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
+ union init_op *cmd = &init_ops[cmd_num];
+ u32 data = le32_to_cpu(cmd->raw.op_data);
+
+ switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
+ case INIT_OP_WRITE:
+ rc = qed_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
+ b_dmae);
+ break;
+ case INIT_OP_READ:
+ qed_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
+ break;
+ case INIT_OP_IF_MODE:
+ cmd_num += qed_init_cmd_mode(p_hwfn, &cmd->if_mode,
+ modes);
+ break;
+ case INIT_OP_IF_PHASE:
+ cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase,
+ phase, phase_id);
+ b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
+ break;
+ case INIT_OP_DELAY:
+ /* qed_init_run is always invoked from
+ * sleep-able context
+ */
+ udelay(le32_to_cpu(cmd->delay.delay));
+ break;
+
+ case INIT_OP_CALLBACK:
+ qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
+ break;
+ }
+
+ if (rc)
+ break;
+ }
+
+ kfree(p_hwfn->unzip_buf);
+ return rc;
+}
+
+void qed_gtt_init(struct qed_hwfn *p_hwfn)
+{
+ u32 gtt_base;
+ u32 i;
+
+ /* Set the global windows */
+ gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;
+
+ for (i = 0; i < ARRAY_SIZE(pxp_global_win); i++)
+ if (pxp_global_win[i])
+ REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
+ pxp_global_win[i]);
+}
+
+int qed_init_fw_data(struct qed_dev *cdev,
+ const u8 *data)
+{
+ struct qed_fw_data *fw = cdev->fw_data;
+ struct bin_buffer_hdr *buf_hdr;
+ u32 offset, len;
+
+ if (!data) {
+ DP_NOTICE(cdev, "Invalid fw data\n");
+ return -EINVAL;
+ }
+
+ buf_hdr = (struct bin_buffer_hdr *)data;
+
+ offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
+ fw->init_ops = (union init_op *)(data + offset);
+
+ offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
+ fw->arr_data = (u32 *)(data + offset);
+
+ offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
+ fw->modes_tree_buf = (u8 *)(data + offset);
+ len = buf_hdr[BIN_BUF_INIT_CMD].length;
+ fw->init_ops_size = len / sizeof(struct init_raw_op);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
new file mode 100644
index 000000000000..1e832049983d
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
@@ -0,0 +1,110 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_INIT_OPS_H
+#define _QED_INIT_OPS_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include "qed.h"
+
+/**
+ * @brief qed_init_iro_array - init iro_arr.
+ *
+ *
+ * @param cdev
+ */
+void qed_init_iro_array(struct qed_dev *cdev);
+
+/**
+ * @brief qed_init_run - Run the init-sequence.
+ *
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param phase
+ * @param phase_id
+ * @param modes
+ * @return _qed_status_t
+ */
+int qed_init_run(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ int phase,
+ int phase_id,
+ int modes);
+
+/**
+ * @brief qed_init_hwfn_allocate - Allocate RT array, Store 'values' ptrs.
+ *
+ *
+ * @param p_hwfn
+ *
+ * @return _qed_status_t
+ */
+int qed_init_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_init_hwfn_deallocate
+ *
+ *
+ * @param p_hwfn
+ */
+void qed_init_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_init_clear_rt_data - Clears the runtime init array.
+ *
+ *
+ * @param p_hwfn
+ */
+void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_init_store_rt_reg - Store a configuration value in the RT array.
+ *
+ *
+ * @param p_hwfn
+ * @param rt_offset
+ * @param val
+ */
+void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
+ u32 rt_offset,
+ u32 val);
+
+#define STORE_RT_REG(hwfn, offset, val) \
+ qed_init_store_rt_reg(hwfn, offset, val)
+
+#define OVERWRITE_RT_REG(hwfn, offset, val) \
+ qed_init_store_rt_reg(hwfn, offset, val)
+
+/**
+ * @brief
+ *
+ *
+ * @param p_hwfn
+ * @param rt_offset
+ * @param val
+ * @param size
+ */
+void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
+ u32 rt_offset,
+ u32 *val,
+ size_t size);
+
+#define STORE_RT_REG_AGG(hwfn, offset, val) \
+ qed_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val))
+
+/**
+ * @brief
+ * Initialize GTT global windows and set admin window
+ * related params of GTT/PTT to default values.
+ *
+ * @param p_hwfn
+ */
+void qed_gtt_init(struct qed_hwfn *p_hwfn);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
new file mode 100644
index 000000000000..2e399b6137a2
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -0,0 +1,1134 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+struct qed_pi_info {
+ qed_int_comp_cb_t comp_cb;
+ void *cookie;
+};
+
+struct qed_sb_sp_info {
+ struct qed_sb_info sb_info;
+
+ /* per protocol index data */
+ struct qed_pi_info pi_info_arr[PIS_PER_SB];
+};
+
+#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
+
+#define ATTN_STATE_BITS (0xfff)
+#define ATTN_BITS_MASKABLE (0x3ff)
+struct qed_sb_attn_info {
+ /* Virtual & Physical address of the SB */
+ struct atten_status_block *sb_attn;
+ dma_addr_t sb_phys;
+
+ /* Last seen running index */
+ u16 index;
+
+ /* Previously asserted attentions, which are still unasserted */
+ u16 known_attn;
+
+ /* Cleanup address for the link's general hw attention */
+ u32 mfw_attn_addr;
+};
+
+static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
+ struct qed_sb_attn_info *p_sb_desc)
+{
+ u16 rc = 0;
+ u16 index;
+
+ /* Make certain the HW write took effect */
+ mmiowb();
+
+ index = le16_to_cpu(p_sb_desc->sb_attn->sb_index);
+ if (p_sb_desc->index != index) {
+ p_sb_desc->index = index;
+ rc = QED_SB_ATT_IDX;
+ }
+
+ /* Make certain we got a consistent view with HW */
+ mmiowb();
+
+ return rc;
+}
+
+/**
+ * @brief qed_int_assertion - handles asserted attention bits
+ *
+ * @param p_hwfn
+ * @param asserted_bits newly asserted bits
+ * @return int
+ */
+static int qed_int_assertion(struct qed_hwfn *p_hwfn,
+ u16 asserted_bits)
+{
+ struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
+ u32 igu_mask;
+
+ /* Mask the source of the attention in the IGU */
+ igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ IGU_REG_ATTENTION_ENABLE);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
+ igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
+ igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "inner known ATTN state: 0x%04x --> 0x%04x\n",
+ sb_attn_sw->known_attn,
+ sb_attn_sw->known_attn | asserted_bits);
+ sb_attn_sw->known_attn |= asserted_bits;
+
+ /* Handle MCP events */
+ if (asserted_bits & 0x100) {
+ qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
+ /* Clean the MCP attention */
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
+ sb_attn_sw->mfw_attn_addr, 0);
+ }
+
+ DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ ((IGU_CMD_ATTN_BIT_SET_UPPER -
+ IGU_CMD_INT_ACK_BASE) << 3),
+ (u32)asserted_bits);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n",
+ asserted_bits);
+
+ return 0;
+}
+
+/**
+ * @brief - handles deassertion of previously asserted attentions.
+ *
+ * @param p_hwfn
+ * @param deasserted_bits - newly deasserted bits
+ * @return int
+ *
+ */
+static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
+ u16 deasserted_bits)
+{
+ struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
+ u32 aeu_mask;
+
+ if (deasserted_bits != 0x100)
+ DP_ERR(p_hwfn, "Unexpected - non-link deassertion\n");
+
+ /* Clear IGU indication for the deasserted bits */
+ DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ ((IGU_CMD_ATTN_BIT_CLR_UPPER -
+ IGU_CMD_INT_ACK_BASE) << 3),
+ ~((u32)deasserted_bits));
+
+ /* Unmask deasserted attentions in IGU */
+ aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ IGU_REG_ATTENTION_ENABLE);
+ aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
+
+ /* Clear deassertion from inner state */
+ sb_attn_sw->known_attn &= ~deasserted_bits;
+
+ return 0;
+}
+
+static int qed_int_attentions(struct qed_hwfn *p_hwfn)
+{
+ struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
+ struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
+ u32 attn_bits = 0, attn_acks = 0;
+ u16 asserted_bits, deasserted_bits;
+ __le16 index;
+ int rc = 0;
+
+ /* Read current attention bits/acks - safeguard against attentions
+ * by guaranteeing work on a synchronized timeframe
+ */
+ do {
+ index = p_sb_attn->sb_index;
+ attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
+ attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
+ } while (index != p_sb_attn->sb_index);
+ p_sb_attn->sb_index = index;
+
+ /* Assertion / deassertion are meaningful (and in a correct state)
+ * only when they differ and are consistent with the known state -
+ * deassertion when a previous attention meets a current ack, and
+ * assertion when a current attention has no previous attention
+ */
+ asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
+ ~p_sb_attn_sw->known_attn;
+ deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
+ p_sb_attn_sw->known_attn;
+
+ if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) {
+ DP_INFO(p_hwfn,
+ "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
+ index, attn_bits, attn_acks, asserted_bits,
+ deasserted_bits, p_sb_attn_sw->known_attn);
+ } else if (asserted_bits == 0x100) {
+ DP_INFO(p_hwfn,
+ "MFW indication via attention\n");
+ } else {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "MFW indication [deassertion]\n");
+ }
+
+ if (asserted_bits) {
+ rc = qed_int_assertion(p_hwfn, asserted_bits);
+ if (rc)
+ return rc;
+ }
+
+ if (deasserted_bits) {
+ rc = qed_int_deassertion(p_hwfn, deasserted_bits);
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
+
+static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
+ void __iomem *igu_addr,
+ u32 ack_cons)
+{
+ struct igu_prod_cons_update igu_ack = { 0 };
+
+ igu_ack.sb_id_and_flags =
+ ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
+ (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
+ (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
+ (IGU_SEG_ACCESS_ATTN <<
+ IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
+
+ DIRECT_REG_WR(igu_addr, igu_ack.sb_id_and_flags);
+
+ /* Both segments (interrupts & acks) are written to same place address;
+ * Need to guarantee all commands will be received (in-order) by HW.
+ */
+ mmiowb();
+ barrier();
+}
+
+void qed_int_sp_dpc(unsigned long hwfn_cookie)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie;
+ struct qed_pi_info *pi_info = NULL;
+ struct qed_sb_attn_info *sb_attn;
+ struct qed_sb_info *sb_info;
+ int arr_size;
+ u16 rc = 0;
+
+ if (!p_hwfn) {
+ /* Can't use DP_ERR here - p_hwfn is NULL */
+ pr_err("DPC called - no hwfn!\n");
+ return;
+ }
+
+ if (!p_hwfn->p_sp_sb) {
+ DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n");
+ return;
+ }
+
+ sb_info = &p_hwfn->p_sp_sb->sb_info;
+ arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
+ if (!sb_info) {
+ DP_ERR(p_hwfn->cdev,
+ "Status block is NULL - cannot ack interrupts\n");
+ return;
+ }
+
+ if (!p_hwfn->p_sb_attn) {
+ DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn");
+ return;
+ }
+ sb_attn = p_hwfn->p_sb_attn;
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
+ p_hwfn, p_hwfn->my_id);
+
+ /* Disable ack for the default status block. Required for both MSI-X
+ * and INTA in non-mask mode; in INTA it does no harm.
+ */
+ qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);
+
+ /* Gather Interrupts/Attentions information */
+ if (!sb_info->sb_virt) {
+ DP_ERR(p_hwfn->cdev,
+ "Interrupt Status block is NULL - cannot check for new interrupts!\n");
+ } else {
+ u32 tmp_index = sb_info->sb_ack;
+
+ rc = qed_sb_update_sb_idx(sb_info);
+ DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
+ "Interrupt indices: 0x%08x --> 0x%08x\n",
+ tmp_index, sb_info->sb_ack);
+ }
+
+ if (!sb_attn || !sb_attn->sb_attn) {
+ DP_ERR(p_hwfn->cdev,
+ "Attentions Status block is NULL - cannot check for new attentions!\n");
+ } else {
+ u16 tmp_index = sb_attn->index;
+
+ rc |= qed_attn_update_idx(p_hwfn, sb_attn);
+ DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
+ "Attention indices: 0x%08x --> 0x%08x\n",
+ tmp_index, sb_attn->index);
+ }
+
+ /* Check if we expect interrupts at this time. If not, just ack them */
+ if (!(rc & QED_SB_EVENT_MASK)) {
+ qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+ return;
+ }
+
+ /* Check the validity of the DPC ptt. If not valid, ack interrupts and bail */
+ if (!p_hwfn->p_dpc_ptt) {
+ DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n");
+ qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+ return;
+ }
+
+ if (rc & QED_SB_ATT_IDX)
+ qed_int_attentions(p_hwfn);
+
+ if (rc & QED_SB_IDX) {
+ int pi;
+
+ /* Invoke the registered completion callbacks */
+ for (pi = 0; pi < arr_size; pi++) {
+ pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
+ if (pi_info->comp_cb)
+ pi_info->comp_cb(p_hwfn, pi_info->cookie);
+ }
+ }
+
+ if (sb_attn && (rc & QED_SB_ATT_IDX))
+ /* This should be done before the interrupts are enabled,
+ * since otherwise a new attention will be generated.
+ */
+ qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
+
+ qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+}
+
+static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
+
+ if (p_sb) {
+ if (p_sb->sb_attn)
+ dma_free_coherent(&cdev->pdev->dev,
+ SB_ATTN_ALIGNED_SIZE(p_hwfn),
+ p_sb->sb_attn,
+ p_sb->sb_phys);
+ kfree(p_sb);
+ }
+}
+
+static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
+
+ memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));
+
+ sb_info->index = 0;
+ sb_info->known_attn = 0;
+
+ /* Configure Attention Status Block in IGU */
+ qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
+ lower_32_bits(p_hwfn->p_sb_attn->sb_phys));
+ qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
+ upper_32_bits(p_hwfn->p_sb_attn->sb_phys));
+}
+
+static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr)
+{
+ struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
+
+ sb_info->sb_attn = sb_virt_addr;
+ sb_info->sb_phys = sb_phy_addr;
+
+ /* Set the cleanup address for the MCP attention */
+ sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
+ MISC_REG_AEU_GENERAL_ATTN_0;
+
+ qed_int_sb_attn_setup(p_hwfn, p_ptt);
+}
+
+static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ struct qed_sb_attn_info *p_sb;
+ void *p_virt;
+ dma_addr_t p_phys = 0;
+
+ /* SB struct */
+ p_sb = kmalloc(sizeof(*p_sb), GFP_ATOMIC);
+ if (!p_sb) {
+ DP_NOTICE(cdev, "Failed to allocate `struct qed_sb_attn_info'\n");
+ return -ENOMEM;
+ }
+
+ /* SB ring */
+ p_virt = dma_alloc_coherent(&cdev->pdev->dev,
+ SB_ATTN_ALIGNED_SIZE(p_hwfn),
+ &p_phys, GFP_KERNEL);
+
+ if (!p_virt) {
+ DP_NOTICE(cdev, "Failed to allocate status block (attentions)\n");
+ kfree(p_sb);
+ return -ENOMEM;
+ }
+
+ /* Attention setup */
+ p_hwfn->p_sb_attn = p_sb;
+ qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);
+
+ return 0;
+}
+
+/* coalescing timeout = timeset << (timer_res + 1) */
+#define QED_CAU_DEF_RX_USECS 24
+#define QED_CAU_DEF_TX_USECS 48
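+/* Worked example with the defaults above: the timer resolution is fixed at
+ * 0 (QED_CAU_DEF_RX_TIMER_RES), so for rx_coalesce_usecs = 24 the driver
+ * programs timeset = 24 >> (0 + 1) = 12, giving a hardware coalescing
+ * timeout of 12 << (0 + 1) = 24 usec.
+ */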
+
+void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
+ struct cau_sb_entry *p_sb_entry,
+ u8 pf_id,
+ u16 vf_number,
+ u8 vf_valid)
+{
+ u32 cau_state;
+
+ memset(p_sb_entry, 0, sizeof(*p_sb_entry));
+
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
+
+ /* setting the timer resolution to a fixed value */
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0,
+ QED_CAU_DEF_RX_TIMER_RES);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1,
+ QED_CAU_DEF_TX_TIMER_RES);
+
+ cau_state = CAU_HC_DISABLE_STATE;
+
+ if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
+ cau_state = CAU_HC_ENABLE_STATE;
+ if (!p_hwfn->cdev->rx_coalesce_usecs)
+ p_hwfn->cdev->rx_coalesce_usecs =
+ QED_CAU_DEF_RX_USECS;
+ if (!p_hwfn->cdev->tx_coalesce_usecs)
+ p_hwfn->cdev->tx_coalesce_usecs =
+ QED_CAU_DEF_TX_USECS;
+ }
+
+ SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
+ SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
+}
+
+void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ dma_addr_t sb_phys,
+ u16 igu_sb_id,
+ u16 vf_number,
+ u8 vf_valid)
+{
+ struct cau_sb_entry sb_entry;
+ u32 val;
+
+ qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
+ vf_number, vf_valid);
+
+ if (p_hwfn->hw_init_done) {
+ val = CAU_REG_SB_ADDR_MEMORY + igu_sb_id * sizeof(u64);
+ qed_wr(p_hwfn, p_ptt, val, lower_32_bits(sb_phys));
+ qed_wr(p_hwfn, p_ptt, val + sizeof(u32),
+ upper_32_bits(sb_phys));
+
+ val = CAU_REG_SB_VAR_MEMORY + igu_sb_id * sizeof(u64);
+ qed_wr(p_hwfn, p_ptt, val, sb_entry.data);
+ qed_wr(p_hwfn, p_ptt, val + sizeof(u32), sb_entry.params);
+ } else {
+ /* Initialize Status Block Address */
+ STORE_RT_REG_AGG(p_hwfn,
+ CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
+ igu_sb_id * 2,
+ sb_phys);
+
+ STORE_RT_REG_AGG(p_hwfn,
+ CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
+ igu_sb_id * 2,
+ sb_entry);
+ }
+
+ /* Configure pi coalescing if set */
+ if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
+ u8 timeset = p_hwfn->cdev->rx_coalesce_usecs >>
+ (QED_CAU_DEF_RX_TIMER_RES + 1);
+ u8 num_tc = 1, i;
+
+ qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
+ QED_COAL_RX_STATE_MACHINE,
+ timeset);
+
+ timeset = p_hwfn->cdev->tx_coalesce_usecs >>
+ (QED_CAU_DEF_TX_TIMER_RES + 1);
+
+ for (i = 0; i < num_tc; i++) {
+ qed_int_cau_conf_pi(p_hwfn, p_ptt,
+ igu_sb_id, TX_PI(i),
+ QED_COAL_TX_STATE_MACHINE,
+ timeset);
+ }
+ }
+}
+
+void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 igu_sb_id,
+ u32 pi_index,
+ enum qed_coalescing_fsm coalescing_fsm,
+ u8 timeset)
+{
+ struct cau_pi_entry pi_entry;
+ u32 sb_offset;
+ u32 pi_offset;
+
+ sb_offset = igu_sb_id * PIS_PER_SB;
+ memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
+
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
+ if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
+ else
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
+
+ pi_offset = sb_offset + pi_index;
+ if (p_hwfn->hw_init_done) {
+ qed_wr(p_hwfn, p_ptt,
+ CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
+ *((u32 *)&(pi_entry)));
+ } else {
+ STORE_RT_REG(p_hwfn,
+ CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
+ *((u32 *)&(pi_entry)));
+ }
+}
+
+void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_sb_info *sb_info)
+{
+ /* zero status block and ack counter */
+ sb_info->sb_ack = 0;
+ memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
+
+ qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
+ sb_info->igu_sb_id, 0, 0);
+}
+
+/**
+ * @brief qed_get_igu_sb_id - given a sw sb_id return the
+ * igu_sb_id
+ *
+ * @param p_hwfn
+ * @param sb_id
+ *
+ * @return u16
+ */
+static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
+ u16 sb_id)
+{
+ u16 igu_sb_id;
+
+ /* Assuming continuous set of IGU SBs dedicated for given PF */
+ if (sb_id == QED_SP_SB_ID)
+ igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
+ else
+ igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n",
+ (sb_id == QED_SP_SB_ID) ? "DSB" : "non-DSB", igu_sb_id);
+
+ return igu_sb_id;
+}
+
+int qed_int_sb_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_sb_info *sb_info,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr,
+ u16 sb_id)
+{
+ sb_info->sb_virt = sb_virt_addr;
+ sb_info->sb_phys = sb_phy_addr;
+
+ sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
+
+ if (sb_id != QED_SP_SB_ID) {
+ p_hwfn->sbs_info[sb_id] = sb_info;
+ p_hwfn->num_sbs++;
+ }
+
+ sb_info->cdev = p_hwfn->cdev;
+
+ /* The igu address will hold the absolute address that needs to be
+ * written to for a specific status block
+ */
+ sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ (sb_info->igu_sb_id << 3);
+
+ sb_info->flags |= QED_SB_INFO_INIT;
+
+ qed_int_sb_setup(p_hwfn, p_ptt, sb_info);
+
+ return 0;
+}
+
+int qed_int_sb_release(struct qed_hwfn *p_hwfn,
+ struct qed_sb_info *sb_info,
+ u16 sb_id)
+{
+ if (sb_id == QED_SP_SB_ID) {
+ DP_ERR(p_hwfn, "Do not free the SP SB using this function\n");
+ return -EINVAL;
+ }
+
+ /* zero status block and ack counter */
+ sb_info->sb_ack = 0;
+ memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
+
+ p_hwfn->sbs_info[sb_id] = NULL;
+ p_hwfn->num_sbs--;
+
+ return 0;
+}
+
+static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
+
+ if (p_sb) {
+ if (p_sb->sb_info.sb_virt)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ SB_ALIGNED_SIZE(p_hwfn),
+ p_sb->sb_info.sb_virt,
+ p_sb->sb_info.sb_phys);
+ kfree(p_sb);
+ }
+}
+
+static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_sb_sp_info *p_sb;
+ dma_addr_t p_phys = 0;
+ void *p_virt;
+
+ /* SB struct */
+ p_sb = kmalloc(sizeof(*p_sb), GFP_ATOMIC);
+ if (!p_sb) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sb_info'\n");
+ return -ENOMEM;
+ }
+
+ /* SB ring */
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ SB_ALIGNED_SIZE(p_hwfn),
+ &p_phys, GFP_KERNEL);
+ if (!p_virt) {
+ DP_NOTICE(p_hwfn, "Failed to allocate status block\n");
+ kfree(p_sb);
+ return -ENOMEM;
+ }
+
+ /* Status Block setup */
+ p_hwfn->p_sp_sb = p_sb;
+ qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt,
+ p_phys, QED_SP_SB_ID);
+
+ memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
+
+ return 0;
+}
+
+static void qed_int_sp_sb_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ if (!p_hwfn)
+ return;
+
+ if (p_hwfn->p_sp_sb)
+ qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
+ else
+ DP_NOTICE(p_hwfn->cdev,
+ "Failed to setup Slow path status block - NULL pointer\n");
+
+ if (p_hwfn->p_sb_attn)
+ qed_int_sb_attn_setup(p_hwfn, p_ptt);
+ else
+ DP_NOTICE(p_hwfn->cdev,
+ "Failed to setup attentions status block - NULL pointer\n");
+}
+
+int qed_int_register_cb(struct qed_hwfn *p_hwfn,
+ qed_int_comp_cb_t comp_cb,
+ void *cookie,
+ u8 *sb_idx,
+ __le16 **p_fw_cons)
+{
+ struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
+ int qed_status = -ENOMEM;
+ u8 pi;
+
+ /* Look for a free index */
+ for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
+ if (!p_sp_sb->pi_info_arr[pi].comp_cb) {
+ p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
+ p_sp_sb->pi_info_arr[pi].cookie = cookie;
+ *sb_idx = pi;
+ *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
+ qed_status = 0;
+ break;
+ }
+ }
+
+ return qed_status;
+}
+
+int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi)
+{
+ struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
+ int qed_status = -ENOMEM;
+
+ if (p_sp_sb->pi_info_arr[pi].comp_cb) {
+ p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
+ p_sp_sb->pi_info_arr[pi].cookie = NULL;
+ qed_status = 0;
+ }
+
+ return qed_status;
+}
+
+u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
+{
+ return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
+}
+
+void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_int_mode int_mode)
+{
+ u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
+
+ p_hwfn->cdev->int_mode = int_mode;
+ switch (p_hwfn->cdev->int_mode) {
+ case QED_INT_MODE_INTA:
+ igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
+ igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+ break;
+
+ case QED_INT_MODE_MSI:
+ igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
+ igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+ break;
+
+ case QED_INT_MODE_MSIX:
+ igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
+ break;
+ case QED_INT_MODE_POLL:
+ break;
+ }
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
+}
+
+void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_int_mode int_mode)
+{
+ int i;
+
+ p_hwfn->b_int_enabled = 1;
+
+ /* Mask non-link attentions */
+ for (i = 0; i < 9; i++)
+ qed_wr(p_hwfn, p_ptt,
+ MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i << 2), 0);
+
+ /* Enable interrupt Generation */
+ qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
+
+ /* Configure AEU signal change to produce attentions for link */
+ qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
+ qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
+
+ /* Flush the writes to IGU */
+ mmiowb();
+
+ /* Unmask AEU signals toward IGU */
+ qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
+}
+
+void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ p_hwfn->b_int_enabled = 0;
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
+}
+
+#define IGU_CLEANUP_SLEEP_LENGTH (1000)
+void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 sb_id,
+ bool cleanup_set,
+ u16 opaque_fid
+ )
+{
+ u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
+ u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
+ u32 data = 0;
+ u32 cmd_ctrl = 0;
+ u32 val = 0;
+ u32 sb_bit = 0;
+ u32 sb_bit_addr = 0;
+
+ /* Set the data field */
+ SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
+ SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0);
+ SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
+
+ /* Set the control register */
+ SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
+ SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
+ SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);
+
+ barrier();
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
+
+ /* Flush the write to IGU */
+ mmiowb();
+
+ /* calculate where to read the status bit from */
+ sb_bit = 1 << (sb_id % 32);
+ sb_bit_addr = sb_id / 32 * sizeof(u32);
+
+ sb_bit_addr += IGU_REG_CLEANUP_STATUS_0;
+
+ /* Now wait for the command to complete */
+ do {
+ val = qed_rd(p_hwfn, p_ptt, sb_bit_addr);
+
+ if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
+ break;
+
+ usleep_range(5000, 10000);
+ } while (--sleep_cnt);
+
+ if (!sleep_cnt)
+ DP_NOTICE(p_hwfn,
+ "Timeout waiting for clear status 0x%08x [for sb %d]\n",
+ val, sb_id);
+}
+
+void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 sb_id,
+ u16 opaque,
+ bool b_set)
+{
+ int pi;
+
+ /* Set */
+ if (b_set)
+ qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque);
+
+ /* Clear */
+ qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);
+
+ /* Clear the CAU for the SB */
+ for (pi = 0; pi < 12; pi++)
+ qed_wr(p_hwfn, p_ptt,
+ CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0);
+}
+
+void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool b_set,
+ bool b_slowpath)
+{
+ u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
+ u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
+ u32 sb_id = 0;
+ u32 val = 0;
+
+ val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
+ val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
+ val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
+ qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "IGU cleaning SBs [%d,...,%d]\n",
+ igu_base_sb, igu_base_sb + igu_sb_cnt - 1);
+
+ for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++)
+ qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+ p_hwfn->hw_info.opaque_fid,
+ b_set);
+
+ if (b_slowpath) {
+ sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "IGU cleaning slowpath SB [%d]\n", sb_id);
+ qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+ p_hwfn->hw_info.opaque_fid,
+ b_set);
+ }
+}
+
+int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_igu_info *p_igu_info;
+ struct qed_igu_block *blk;
+ u32 val;
+ u16 sb_id;
+ u16 prev_sb_id = 0xFF;
+
+ p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_ATOMIC);
+
+ if (!p_hwfn->hw_info.p_igu_info)
+ return -ENOMEM;
+
+ p_igu_info = p_hwfn->hw_info.p_igu_info;
+
+ /* Initialize base sb / sb cnt for PFs */
+ p_igu_info->igu_base_sb = 0xffff;
+ p_igu_info->igu_sb_cnt = 0;
+ p_igu_info->igu_dsb_id = 0xffff;
+ p_igu_info->igu_base_sb_iov = 0xffff;
+
+ for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
+ sb_id++) {
+ blk = &p_igu_info->igu_map.igu_blocks[sb_id];
+
+ val = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
+
+ /* stop scanning when hitting the first invalid PF entry */
+ if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
+ GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
+ break;
+
+ blk->status = QED_IGU_STATUS_VALID;
+ blk->function_id = GET_FIELD(val,
+ IGU_MAPPING_LINE_FUNCTION_NUMBER);
+ blk->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
+ blk->vector_number = GET_FIELD(val,
+ IGU_MAPPING_LINE_VECTOR_NUMBER);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "IGU_BLOCK[sb_id]:%x:func_id = %d is_pf = %d vector_num = 0x%x\n",
+ val, blk->function_id, blk->is_pf,
+ blk->vector_number);
+
+ if (blk->is_pf) {
+ if (blk->function_id == p_hwfn->rel_pf_id) {
+ blk->status |= QED_IGU_STATUS_PF;
+
+ if (blk->vector_number == 0) {
+ if (p_igu_info->igu_dsb_id == 0xffff)
+ p_igu_info->igu_dsb_id = sb_id;
+ } else {
+ if (p_igu_info->igu_base_sb ==
+ 0xffff) {
+ p_igu_info->igu_base_sb = sb_id;
+ } else if (prev_sb_id != sb_id - 1) {
+ DP_NOTICE(p_hwfn->cdev,
+ "consecutive igu vectors for HWFN %x broken",
+ p_hwfn->rel_pf_id);
+ break;
+ }
+ prev_sb_id = sb_id;
+ /* we don't count the default */
+ (p_igu_info->igu_sb_cnt)++;
+ }
+ }
+ }
+ }
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "IGU igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n",
+ p_igu_info->igu_base_sb,
+ p_igu_info->igu_sb_cnt,
+ p_igu_info->igu_dsb_id);
+
+ if (p_igu_info->igu_base_sb == 0xffff ||
+ p_igu_info->igu_dsb_id == 0xffff ||
+ p_igu_info->igu_sb_cnt == 0) {
+ DP_NOTICE(p_hwfn,
+ "IGU CAM returned invalid values igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n",
+ p_igu_info->igu_base_sb,
+ p_igu_info->igu_sb_cnt,
+ p_igu_info->igu_dsb_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * @brief Initialize igu runtime registers
+ *
+ * @param p_hwfn
+ */
+void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
+{
+ u32 igu_pf_conf = 0;
+
+ igu_pf_conf |= IGU_PF_CONF_FUNC_EN;
+
+ STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
+}
+
+u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
+{
+ u64 intr_status = 0;
+ u32 intr_status_lo = 0;
+ u32 intr_status_hi = 0;
+ u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
+ IGU_CMD_INT_ACK_BASE;
+ u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
+ IGU_CMD_INT_ACK_BASE;
+
+ intr_status_lo = REG_RD(p_hwfn,
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ lsb_igu_cmd_addr * 8);
+ intr_status_hi = REG_RD(p_hwfn,
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ msb_igu_cmd_addr * 8);
+ intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
+
+ return intr_status;
+}
+
+static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
+{
+ tasklet_init(p_hwfn->sp_dpc,
+ qed_int_sp_dpc, (unsigned long)p_hwfn);
+ p_hwfn->b_sp_dpc_enabled = true;
+}
+
+static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn)
+{
+ p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_ATOMIC);
+ if (!p_hwfn->sp_dpc)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
+{
+ kfree(p_hwfn->sp_dpc);
+}
+
+int qed_int_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ int rc = 0;
+
+ rc = qed_int_sp_dpc_alloc(p_hwfn);
+ if (rc) {
+ DP_ERR(p_hwfn->cdev, "Failed to allocate sp dpc mem\n");
+ return rc;
+ }
+ rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
+ if (rc) {
+ DP_ERR(p_hwfn->cdev, "Failed to allocate sp sb mem\n");
+ return rc;
+ }
+ rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt);
+ if (rc) {
+ DP_ERR(p_hwfn->cdev, "Failed to allocate sb attn mem\n");
+ return rc;
+ }
+ return rc;
+}
+
+void qed_int_free(struct qed_hwfn *p_hwfn)
+{
+ qed_int_sp_sb_free(p_hwfn);
+ qed_int_sb_attn_free(p_hwfn);
+ qed_int_sp_dpc_free(p_hwfn);
+}
+
+void qed_int_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ qed_int_sp_sb_setup(p_hwfn, p_ptt);
+ qed_int_sp_dpc_setup(p_hwfn);
+}
+
+int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
+ int *p_iov_blks)
+{
+ struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info;
+
+ if (!info)
+ return 0;
+
+ if (p_iov_blks)
+ *p_iov_blks = info->free_blks;
+
+ return info->igu_sb_cnt;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
new file mode 100644
index 000000000000..16b57518e706
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -0,0 +1,391 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_INT_H
+#define _QED_INT_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include "qed.h"
+
+/* Fields of IGU PF CONFIGURATION REGISTER */
+#define IGU_PF_CONF_FUNC_EN (0x1 << 0) /* function enable */
+#define IGU_PF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */
+#define IGU_PF_CONF_INT_LINE_EN (0x1 << 2) /* INT enable */
+#define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3) /* attention enable */
+#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
+#define IGU_PF_CONF_SIMD_MODE (0x1 << 5) /* simd all ones mode */
+
+/* Igu control commands
+ */
+enum igu_ctrl_cmd {
+ IGU_CTRL_CMD_TYPE_RD,
+ IGU_CTRL_CMD_TYPE_WR,
+ MAX_IGU_CTRL_CMD
+};
+
+/* Control register for the IGU command register
+ */
+struct igu_ctrl_reg {
+ u32 ctrl_data;
+#define IGU_CTRL_REG_FID_MASK 0xFFFF /* Opaque_FID */
+#define IGU_CTRL_REG_FID_SHIFT 0
+#define IGU_CTRL_REG_PXP_ADDR_MASK 0xFFF /* Command address */
+#define IGU_CTRL_REG_PXP_ADDR_SHIFT 16
+#define IGU_CTRL_REG_RESERVED_MASK 0x1
+#define IGU_CTRL_REG_RESERVED_SHIFT 28
+#define IGU_CTRL_REG_TYPE_MASK 0x1 /* use enum igu_ctrl_cmd */
+#define IGU_CTRL_REG_TYPE_SHIFT 31
+};
+
+enum qed_coalescing_fsm {
+ QED_COAL_RX_STATE_MACHINE,
+ QED_COAL_TX_STATE_MACHINE
+};
+
+/**
+ * @brief qed_int_cau_conf_pi - configure cau for a given
+ * status block
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param igu_sb_id
+ * @param pi_index
+ * @param coalescing_fsm
+ * @param timeset
+ */
+void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 igu_sb_id,
+ u32 pi_index,
+ enum qed_coalescing_fsm coalescing_fsm,
+ u8 timeset);
+
+/**
+ * @brief qed_int_igu_enable_int - enable device interrupts
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param int_mode - interrupt mode to use
+ */
+void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_int_mode int_mode);
+
+/**
+ * @brief qed_int_igu_disable_int - disable device interrupts
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_int_igu_read_sisr_reg - Reads the single isr multiple dpc
+ * register from igu.
+ *
+ * @param p_hwfn
+ *
+ * @return u64
+ */
+u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn);
+
+#define QED_SP_SB_ID 0xffff
+/**
+ * @brief qed_int_sb_init - Initializes the sb_info structure.
+ *
+ * Once the structure is initialized, it can be passed to SB-related functions.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_info points to an uninitialized (but
+ * allocated) sb_info structure
+ * @param sb_virt_addr
+ * @param sb_phy_addr
+ * @param sb_id the sb_id to be used (zero based in driver)
+ * should use QED_SP_SB_ID for SP Status block
+ *
+ * @return int
+ */
+int qed_int_sb_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_sb_info *sb_info,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr,
+ u16 sb_id);
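+/* A minimal usage sketch (illustrative; sb_virt/sb_phys are assumed to be a
+ * DMA-coherent buffer of SB_ALIGNED_SIZE(p_hwfn) allocated by the caller):
+ *
+ * sb_info = kzalloc(sizeof(*sb_info), GFP_KERNEL);
+ * if (!sb_info)
+ * return -ENOMEM;
+ * rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt, sb_phys, sb_id);
+ */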
+/**
+ * @brief qed_int_sb_setup - Setup the sb.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_info initialized sb_info structure
+ */
+void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_sb_info *sb_info);
+
+/**
+ * @brief qed_int_sb_release - releases the sb_info structure.
+ *
+ * Once the structure is released, its memory can be freed.
+ *
+ * @param p_hwfn
+ * @param sb_info points to an allocated sb_info structure
+ * @param sb_id the sb_id to be used (zero based in driver)
+ * should never be equal to QED_SP_SB_ID
+ * (SP Status block)
+ *
+ * @return int
+ */
+int qed_int_sb_release(struct qed_hwfn *p_hwfn,
+ struct qed_sb_info *sb_info,
+ u16 sb_id);
+
+/**
+ * @brief qed_int_sp_dpc - To be called when an interrupt is received on the
+ * default status block.
+ *
+ * @param p_hwfn - pointer to hwfn
+ *
+ */
+void qed_int_sp_dpc(unsigned long hwfn_cookie);
+
+/**
+ * @brief qed_int_get_num_sbs - get the number of status
+ * blocks configured for this function in the igu.
+ *
+ * @param p_hwfn
+ * @param p_iov_blks - configured free blks for vfs
+ *
+ * @return int - number of status blocks configured
+ */
+int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
+ int *p_iov_blks);
+
+/**
+ * @file
+ *
+ * @brief Interrupt handler
+ */
+
+#define QED_CAU_DEF_RX_TIMER_RES 0
+#define QED_CAU_DEF_TX_TIMER_RES 0
+
+#define QED_SB_ATT_IDX 0x0001
+#define QED_SB_EVENT_MASK 0x0003
+
+#define SB_ALIGNED_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
+
+struct qed_igu_block {
+ u8 status;
+#define QED_IGU_STATUS_FREE 0x01
+#define QED_IGU_STATUS_VALID 0x02
+#define QED_IGU_STATUS_PF 0x04
+
+ u8 vector_number;
+ u8 function_id;
+ u8 is_pf;
+};
+
+struct qed_igu_map {
+ struct qed_igu_block igu_blocks[MAX_TOT_SB_PER_PATH];
+};
+
+struct qed_igu_info {
+ struct qed_igu_map igu_map;
+ u16 igu_dsb_id;
+ u16 igu_base_sb;
+ u16 igu_base_sb_iov;
+ u16 igu_sb_cnt;
+ u16 igu_sb_cnt_iov;
+ u16 free_blks;
+};
+
+/* TODO Names of function may change... */
+void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool b_set,
+ bool b_slowpath);
+
+void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_int_igu_read_cam - Reads the IGU CAM.
+ * This function needs to be called during hardware
+ * prepare. It reads the info from igu cam to know which
+ * status block is the default / base status block etc.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int
+ */
+int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+typedef int (*qed_int_comp_cb_t)(struct qed_hwfn *p_hwfn,
+ void *cookie);
+/**
+ * @brief qed_int_register_cb - Register callback func for
+ * slowhwfn status block.
+ *
+ * Every protocol that uses the slowhwfn status block
+ * should register a callback function that will be called
+ * once there is an update of the sp status block.
+ *
+ * @param p_hwfn
+ * @param comp_cb - function to be called when there is an
+ * interrupt on the sp sb
+ *
+ * @param cookie - passed to the callback function
+ * @param sb_idx - OUT parameter which gives the chosen index
+ * for this protocol.
+ * @param p_fw_cons - pointer to the actual address of the
+ * consumer for this protocol.
+ *
+ * @return int
+ */
+int qed_int_register_cb(struct qed_hwfn *p_hwfn,
+ qed_int_comp_cb_t comp_cb,
+ void *cookie,
+ u8 *sb_idx,
+ __le16 **p_fw_cons);
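+/* Illustrative registration sketch - my_proto_cb() and p_my_proto are
+ * hypothetical placeholders for a protocol's completion handler and its
+ * private data:
+ *
+ * u8 sb_idx;
+ * __le16 *p_fw_cons;
+ *
+ * rc = qed_int_register_cb(p_hwfn, my_proto_cb, p_my_proto,
+ * &sb_idx, &p_fw_cons);
+ * ...
+ * qed_int_unregister_cb(p_hwfn, sb_idx);
+ */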
+
+/**
+ * @brief qed_int_unregister_cb - Unregisters callback
+ * function from sp sb.
+ * Partner of qed_int_register_cb -> should be called
+ * when no longer required.
+ *
+ * @param p_hwfn
+ * @param pi
+ *
+ * @return int
+ */
+int qed_int_unregister_cb(struct qed_hwfn *p_hwfn,
+ u8 pi);
+
+/**
+ * @brief qed_int_get_sp_sb_id - Get the slowhwfn sb id.
+ *
+ * @param p_hwfn
+ *
+ * @return u16
+ */
+u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief Status block cleanup. Should be called for each status
+ * block that will be used -> both PF / VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_id - igu status block id
+ * @param cleanup_set - set(1) / clear(0)
+ * @param opaque_fid - the function for which to perform
+ * cleanup, for example a PF on behalf of
+ * its VFs.
+ */
+void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 sb_id,
+ bool cleanup_set,
+ u16 opaque_fid);
+
+/**
+ * @brief Status block cleanup. Should be called for each status
+ * block that will be used -> both PF / VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_id - igu status block id
+ * @param opaque - opaque fid of the sb owner.
+ * @param cleanup_set - set(1) / clear(0)
+ */
+void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 sb_id,
+ u16 opaque,
+ bool b_set);
+
+/**
+ * @brief qed_int_cau_conf_sb - configure cau for a given status
+ * block
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_phys
+ * @param igu_sb_id
+ * @param vf_number
+ * @param vf_valid
+ */
+void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ dma_addr_t sb_phys,
+ u16 igu_sb_id,
+ u16 vf_number,
+ u8 vf_valid);
+
+/**
+ * @brief qed_int_alloc
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int
+ */
+int qed_int_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_int_free
+ *
+ * @param p_hwfn
+ */
+void qed_int_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_int_setup
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_int_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief - Enable Interrupt & Attention for hw function
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param int_mode
+ */
+void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_int_mode int_mode);
+
+/**
+ * @brief - Initialize CAU status block entry
+ *
+ * @param p_hwfn
+ * @param p_sb_entry
+ * @param pf_id
+ * @param vf_number
+ * @param vf_valid
+ */
+void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
+ struct cau_sb_entry *p_sb_entry,
+ u8 pf_id,
+ u16 vf_number,
+ u8 vf_valid);
+
+#define QED_MAPPING_MEMORY_SIZE(dev) (NUM_OF_SBS(dev))
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
new file mode 100644
index 000000000000..f72036a2ef5b
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -0,0 +1,1704 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/param.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include "qed.h"
+#include <linux/qed/qed_chain.h>
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include <linux/qed/qed_eth_if.h>
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+enum qed_rss_caps {
+ QED_RSS_IPV4 = 0x1,
+ QED_RSS_IPV6 = 0x2,
+ QED_RSS_IPV4_TCP = 0x4,
+ QED_RSS_IPV6_TCP = 0x8,
+ QED_RSS_IPV4_UDP = 0x10,
+ QED_RSS_IPV6_UDP = 0x20,
+};
+
+/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
+#define QED_RSS_IND_TABLE_SIZE 128
+#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */
+
+struct qed_rss_params {
+ u8 update_rss_config;
+ u8 rss_enable;
+ u8 rss_eng_id;
+ u8 update_rss_capabilities;
+ u8 update_rss_ind_table;
+ u8 update_rss_key;
+ u8 rss_caps;
+ u8 rss_table_size_log;
+ u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE];
+ u32 rss_key[QED_RSS_KEY_SIZE];
+};
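+/* For instance, a hypothetical caller enabling 4-tuple RSS for TCP over
+ * IPv4/IPv6 with a full 128-entry indirection table might set:
+ *
+ * rss->update_rss_config = 1;
+ * rss->rss_enable = 1;
+ * rss->rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 |
+ * QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
+ * rss->rss_table_size_log = 7; (2 ^ 7 == QED_RSS_IND_TABLE_SIZE)
+ */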
+
+enum qed_filter_opcode {
+ QED_FILTER_ADD,
+ QED_FILTER_REMOVE,
+ QED_FILTER_MOVE,
+ QED_FILTER_REPLACE, /* Delete all MACs and add a new one instead */
+ QED_FILTER_FLUSH, /* Removes all filters */
+};
+
+enum qed_filter_ucast_type {
+ QED_FILTER_MAC,
+ QED_FILTER_VLAN,
+ QED_FILTER_MAC_VLAN,
+ QED_FILTER_INNER_MAC,
+ QED_FILTER_INNER_VLAN,
+ QED_FILTER_INNER_PAIR,
+ QED_FILTER_INNER_MAC_VNI_PAIR,
+ QED_FILTER_MAC_VNI_PAIR,
+ QED_FILTER_VNI,
+};
+
+struct qed_filter_ucast {
+ enum qed_filter_opcode opcode;
+ enum qed_filter_ucast_type type;
+ u8 is_rx_filter;
+ u8 is_tx_filter;
+ u8 vport_to_add_to;
+ u8 vport_to_remove_from;
+ unsigned char mac[ETH_ALEN];
+ u8 assert_on_error;
+ u16 vlan;
+ u32 vni;
+};
+
+struct qed_filter_mcast {
+ /* MOVE is not supported for multicast */
+ enum qed_filter_opcode opcode;
+ u8 vport_to_add_to;
+ u8 vport_to_remove_from;
+ u8 num_mc_addrs;
+#define QED_MAX_MC_ADDRS 64
+ unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
+};
+
+struct qed_filter_accept_flags {
+ u8 update_rx_mode_config;
+ u8 update_tx_mode_config;
+ u8 rx_accept_filter;
+ u8 tx_accept_filter;
+#define QED_ACCEPT_NONE 0x01
+#define QED_ACCEPT_UCAST_MATCHED 0x02
+#define QED_ACCEPT_UCAST_UNMATCHED 0x04
+#define QED_ACCEPT_MCAST_MATCHED 0x08
+#define QED_ACCEPT_MCAST_UNMATCHED 0x10
+#define QED_ACCEPT_BCAST 0x20
+};
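+/* For example, a hypothetical caller enabling plain unicast, multicast and
+ * broadcast reception on Rx would set:
+ *
+ * flags.update_rx_mode_config = 1;
+ * flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
+ * QED_ACCEPT_MCAST_MATCHED |
+ * QED_ACCEPT_BCAST;
+ */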
+
+struct qed_sp_vport_update_params {
+ u16 opaque_fid;
+ u8 vport_id;
+ u8 update_vport_active_rx_flg;
+ u8 vport_active_rx_flg;
+ u8 update_vport_active_tx_flg;
+ u8 vport_active_tx_flg;
+ u8 update_approx_mcast_flg;
+ unsigned long bins[8];
+ struct qed_rss_params *rss_params;
+ struct qed_filter_accept_flags accept_flags;
+};
+
+#define QED_MAX_SGES_NUM 16
+#define CRC32_POLY 0x1edc6f41
+
+static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
+ u32 concrete_fid,
+ u16 opaque_fid,
+ u8 vport_id,
+ u16 mtu,
+ u8 drop_ttl0_flg,
+ u8 inner_vlan_removal_en_flg)
+{
+ struct qed_sp_init_request_params params;
+ struct vport_start_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ int rc = -EINVAL;
+ u16 rx_mode = 0;
+ u8 abs_vport_id = 0;
+
+ rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ if (rc != 0)
+ return rc;
+
+ memset(&params, 0, sizeof(params));
+ params.ramrod_data_size = sizeof(*p_ramrod);
+ params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ qed_spq_get_cid(p_hwfn),
+ opaque_fid,
+ ETH_RAMROD_VPORT_START,
+ PROTOCOLID_ETH,
+ &params);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vport_start;
+ p_ramrod->vport_id = abs_vport_id;
+
+ p_ramrod->mtu = cpu_to_le16(mtu);
+ p_ramrod->inner_vlan_removal_en = inner_vlan_removal_en_flg;
+ p_ramrod->drop_ttl0_en = drop_ttl0_flg;
+
+ SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
+ SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
+
+ p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);
+
+ /* TPA related fields */
+ memset(&p_ramrod->tpa_param, 0,
+ sizeof(struct eth_vport_tpa_param));
+
+ /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
+ p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
+ concrete_fid);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
+ struct vport_update_ramrod_data *p_ramrod,
+ struct qed_rss_params *p_params)
+{
+ struct eth_vport_rss_config *rss = &p_ramrod->rss_config;
+ u16 abs_l2_queue = 0, capabilities = 0;
+ int rc = 0, i;
+
+ if (!p_params) {
+ p_ramrod->common.update_rss_flg = 0;
+ return rc;
+ }
+
+ BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE !=
+ ETH_RSS_IND_TABLE_ENTRIES_NUM);
+
+ rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id);
+ if (rc)
+ return rc;
+
+ p_ramrod->common.update_rss_flg = p_params->update_rss_config;
+ rss->update_rss_capabilities = p_params->update_rss_capabilities;
+ rss->update_rss_ind_table = p_params->update_rss_ind_table;
+ rss->update_rss_key = p_params->update_rss_key;
+
+ rss->rss_mode = p_params->rss_enable ?
+ ETH_VPORT_RSS_MODE_REGULAR :
+ ETH_VPORT_RSS_MODE_DISABLED;
+
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
+ !!(p_params->rss_caps & QED_RSS_IPV4));
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
+ !!(p_params->rss_caps & QED_RSS_IPV6));
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
+ !!(p_params->rss_caps & QED_RSS_IPV4_TCP));
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
+ !!(p_params->rss_caps & QED_RSS_IPV6_TCP));
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
+ !!(p_params->rss_caps & QED_RSS_IPV4_UDP));
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
+ !!(p_params->rss_caps & QED_RSS_IPV6_UDP));
+ rss->tbl_size = p_params->rss_table_size_log;
+
+ rss->capabilities = cpu_to_le16(capabilities);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+ "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
+ p_ramrod->common.update_rss_flg,
+ rss->rss_mode, rss->update_rss_capabilities,
+ capabilities, rss->update_rss_ind_table,
+ rss->update_rss_key);
+
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+ rc = qed_fw_l2_queue(p_hwfn,
+ (u8)p_params->rss_ind_table[i],
+ &abs_l2_queue);
+ if (rc)
+ return rc;
+
+ rss->indirection_table[i] = cpu_to_le16(abs_l2_queue);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n",
+ i, rss->indirection_table[i]);
+ }
+
+ for (i = 0; i < 10; i++)
+ rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]);
+
+ return rc;
+}
+
+static void
+qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
+ struct vport_update_ramrod_data *p_ramrod,
+ struct qed_filter_accept_flags accept_flags)
+{
+ p_ramrod->common.update_rx_mode_flg =
+ accept_flags.update_rx_mode_config;
+
+ p_ramrod->common.update_tx_mode_flg =
+ accept_flags.update_tx_mode_config;
+
+ /* Set Rx mode accept flags */
+ if (p_ramrod->common.update_rx_mode_flg) {
+ u8 accept_filter = accept_flags.rx_accept_filter;
+ u16 state = 0;
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
+ !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
+ !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
+ !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
+ !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
+ !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
+ (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
+ !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
+ !!(accept_filter & QED_ACCEPT_BCAST));
+
+ p_ramrod->rx_mode.state = cpu_to_le16(state);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "p_ramrod->rx_mode.state = 0x%x\n", state);
+ }
+
+ /* Set Tx mode accept flags */
+ if (p_ramrod->common.update_tx_mode_flg) {
+ u8 accept_filter = accept_flags.tx_accept_filter;
+ u16 state = 0;
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
+ !!(accept_filter & QED_ACCEPT_NONE));
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
+ (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
+ !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
+ !!(accept_filter & QED_ACCEPT_NONE));
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
+ (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
+ !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
+ !!(accept_filter & QED_ACCEPT_BCAST));
+
+ p_ramrod->tx_mode.state = cpu_to_le16(state);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "p_ramrod->tx_mode.state = 0x%x\n", state);
+ }
+}
+
+static void
+qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
+ struct vport_update_ramrod_data *p_ramrod,
+ struct qed_sp_vport_update_params *p_params)
+{
+ int i;
+
+ memset(&p_ramrod->approx_mcast.bins, 0,
+ sizeof(p_ramrod->approx_mcast.bins));
+
+ if (p_params->update_approx_mcast_flg) {
+ p_ramrod->common.update_approx_mcast_flg = 1;
+ for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+ u32 *p_bins = (u32 *)p_params->bins;
+ __le32 val = cpu_to_le32(p_bins[i]);
+
+ p_ramrod->approx_mcast.bins[i] = val;
+ }
+ }
+}
+
+static int
+qed_sp_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_params,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ struct qed_rss_params *p_rss_params = p_params->rss_params;
+ struct vport_update_ramrod_data_cmn *p_cmn;
+ struct qed_sp_init_request_params sp_params;
+ struct vport_update_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ u8 abs_vport_id = 0;
+ int rc = -EINVAL;
+
+ rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+ if (rc != 0)
+ return rc;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.ramrod_data_size = sizeof(*p_ramrod);
+ sp_params.comp_mode = comp_mode;
+ sp_params.p_comp_data = p_comp_data;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ qed_spq_get_cid(p_hwfn),
+ p_params->opaque_fid,
+ ETH_RAMROD_VPORT_UPDATE,
+ PROTOCOLID_ETH,
+ &sp_params);
+ if (rc)
+ return rc;
+
+ /* Copy input params to ramrod according to FW struct */
+ p_ramrod = &p_ent->ramrod.vport_update;
+ p_cmn = &p_ramrod->common;
+
+ p_cmn->vport_id = abs_vport_id;
+ p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
+ p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
+ p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
+ p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
+
+ rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
+ if (rc) {
+ /* Return the SPQ entry taken in qed_sp_init_request() */
+ qed_spq_return_entry(p_hwfn, p_ent);
+ return rc;
+ }
+
+ /* Update mcast bins for VFs; the PF doesn't use this functionality */
+ qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
+
+ qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u8 vport_id)
+{
+ struct qed_sp_init_request_params sp_params;
+ struct vport_stop_ramrod_data *p_ramrod;
+ struct qed_spq_entry *p_ent;
+ u8 abs_vport_id = 0;
+ int rc;
+
+ rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ if (rc != 0)
+ return rc;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.ramrod_data_size = sizeof(*p_ramrod);
+ sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ qed_spq_get_cid(p_hwfn),
+ opaque_fid,
+ ETH_RAMROD_VPORT_STOP,
+ PROTOCOLID_ETH,
+ &sp_params);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vport_stop;
+ p_ramrod->vport_id = abs_vport_id;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_filter_accept_cmd(struct qed_dev *cdev,
+ u8 vport,
+ struct qed_filter_accept_flags accept_flags,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ struct qed_sp_vport_update_params vport_update_params;
+ int i, rc;
+
+ /* Prepare and send the vport rx_mode change */
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.vport_id = vport;
+ vport_update_params.accept_flags = accept_flags;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
+ comp_mode, p_comp_data);
+ if (rc != 0) {
+ DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
+ return rc;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
+ accept_flags.rx_accept_filter,
+ accept_flags.tx_accept_filter);
+ }
+
+ return 0;
+}
+
+static int qed_sp_release_queue_cid(
+ struct qed_hwfn *p_hwfn,
+ struct qed_hw_cid_data *p_cid_data)
+{
+ if (!p_cid_data->b_cid_allocated)
+ return 0;
+
+ qed_cxt_release_cid(p_hwfn, p_cid_data->cid);
+
+ p_cid_data->b_cid_allocated = false;
+
+ return 0;
+}
+
+static int
+qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u32 cid,
+ struct qed_queue_start_common_params *params,
+ u8 stats_id,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size)
+{
+ struct rx_queue_start_ramrod_data *p_ramrod = NULL;
+ struct qed_sp_init_request_params sp_params;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_hw_cid_data *p_rx_cid;
+ u16 abs_rx_q_id = 0;
+ u8 abs_vport_id = 0;
+ int rc = -EINVAL;
+
+ /* Store information for the stop */
+ p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
+ p_rx_cid->cid = cid;
+ p_rx_cid->opaque_fid = opaque_fid;
+ p_rx_cid->vport_id = params->vport_id;
+
+ rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_vport_id);
+ if (rc != 0)
+ return rc;
+
+ rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_rx_q_id);
+ if (rc != 0)
+ return rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
+ opaque_fid, cid, params->queue_id, params->vport_id,
+ params->sb);
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+ sp_params.ramrod_data_size = sizeof(*p_ramrod);
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ cid, opaque_fid,
+ ETH_RAMROD_RX_QUEUE_START,
+ PROTOCOLID_ETH,
+ &sp_params);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_queue_start;
+
+ p_ramrod->sb_id = cpu_to_le16(params->sb);
+ p_ramrod->sb_index = params->sb_idx;
+ p_ramrod->vport_id = abs_vport_id;
+ p_ramrod->stats_counter_id = stats_id;
+ p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+ p_ramrod->complete_cqe_flg = 0;
+ p_ramrod->complete_event_flg = 1;
+
+ p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
+ p_ramrod->bd_base.hi = DMA_HI_LE(bd_chain_phys_addr);
+ p_ramrod->bd_base.lo = DMA_LO_LE(bd_chain_phys_addr);
+
+ p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
+ p_ramrod->cqe_pbl_addr.hi = DMA_HI_LE(cqe_pbl_addr);
+ p_ramrod->cqe_pbl_addr.lo = DMA_LO_LE(cqe_pbl_addr);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ return rc;
+}
+
+static int
+qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_queue_start_common_params *params,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ void __iomem **pp_prod)
+{
+ struct qed_hw_cid_data *p_rx_cid;
+ u64 init_prod_val = 0;
+ u16 abs_l2_queue = 0;
+ u8 abs_stats_id = 0;
+ int rc;
+
+ rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
+ if (rc != 0)
+ return rc;
+
+ rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_stats_id);
+ if (rc != 0)
+ return rc;
+
+ *pp_prod = (u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_PRODS_OFFSET(abs_l2_queue);
+
+ /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
+ (u32 *)(&init_prod_val));
+
+ /* Allocate a CID for the queue */
+ p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
+ &p_rx_cid->cid);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
+ return rc;
+ }
+ p_rx_cid->b_cid_allocated = true;
+
+ rc = qed_sp_eth_rxq_start_ramrod(p_hwfn,
+ opaque_fid,
+ p_rx_cid->cid,
+ params,
+ abs_stats_id,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr,
+ cqe_pbl_size);
+
+ if (rc != 0)
+ qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
+
+ return rc;
+}
+
+static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
+ u16 rx_queue_id,
+ bool eq_completion_only,
+ bool cqe_completion)
+{
+ struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
+ struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
+ struct qed_sp_init_request_params sp_params;
+ struct qed_spq_entry *p_ent = NULL;
+ u16 abs_rx_q_id = 0;
+ int rc = -EINVAL;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.ramrod_data_size = sizeof(*p_ramrod);
+ sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ p_rx_cid->cid,
+ p_rx_cid->opaque_fid,
+ ETH_RAMROD_RX_QUEUE_STOP,
+ PROTOCOLID_ETH,
+ &sp_params);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_queue_stop;
+
+ qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
+ qed_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
+ p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+
+ /* Cleaning the queue requires the completion to arrive there.
+ * In addition, VFs require the answer to arrive at the PF as an EQE.
+ */
+ p_ramrod->complete_cqe_flg =
+ (!!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) &&
+ !eq_completion_only) || cqe_completion;
+ p_ramrod->complete_event_flg =
+ !(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) ||
+ eq_completion_only;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ return rc;
+
+ return qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
+}
+
+static int
+qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u32 cid,
+ struct qed_queue_start_common_params *p_params,
+ u8 stats_id,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ union qed_qm_pq_params *p_pq_params)
+{
+ struct tx_queue_start_ramrod_data *p_ramrod = NULL;
+ struct qed_sp_init_request_params sp_params;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_hw_cid_data *p_tx_cid;
+ u8 abs_vport_id;
+ int rc = -EINVAL;
+ u16 pq_id;
+
+ /* Store information for the stop */
+ p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
+ p_tx_cid->cid = cid;
+ p_tx_cid->opaque_fid = opaque_fid;
+
+ rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+ if (rc)
+ return rc;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.ramrod_data_size = sizeof(*p_ramrod);
+ sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, cid,
+ opaque_fid,
+ ETH_RAMROD_TX_QUEUE_START,
+ PROTOCOLID_ETH,
+ &sp_params);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.tx_queue_start;
+ p_ramrod->vport_id = abs_vport_id;
+
+ p_ramrod->sb_id = cpu_to_le16(p_params->sb);
+ p_ramrod->sb_index = p_params->sb_idx;
+ p_ramrod->stats_counter_id = stats_id;
+ p_ramrod->tc = p_pq_params->eth.tc;
+
+ p_ramrod->pbl_size = cpu_to_le16(pbl_size);
+ p_ramrod->pbl_base_addr.hi = DMA_HI_LE(pbl_addr);
+ p_ramrod->pbl_base_addr.lo = DMA_LO_LE(pbl_addr);
+
+ pq_id = qed_get_qm_pq(p_hwfn,
+ PROTOCOLID_ETH,
+ p_pq_params);
+ p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_queue_start_common_params *p_params,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ void __iomem **pp_doorbell)
+{
+ struct qed_hw_cid_data *p_tx_cid;
+ union qed_qm_pq_params pq_params;
+ u8 abs_stats_id = 0;
+ int rc;
+
+ rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
+ if (rc)
+ return rc;
+
+ p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
+ memset(p_tx_cid, 0, sizeof(*p_tx_cid));
+ memset(&pq_params, 0, sizeof(pq_params));
+
+ /* Allocate a CID for the queue */
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
+ &p_tx_cid->cid);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
+ return rc;
+ }
+ p_tx_cid->b_cid_allocated = true;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
+ opaque_fid, p_tx_cid->cid,
+ p_params->queue_id, p_params->vport_id, p_params->sb);
+
+ rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
+ opaque_fid,
+ p_tx_cid->cid,
+ p_params,
+ abs_stats_id,
+ pbl_addr,
+ pbl_size,
+ &pq_params);
+
+ *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
+ qed_db_addr(p_tx_cid->cid, DQ_DEMS_LEGACY);
+
+ if (rc)
+ qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
+
+ return rc;
+}
+
+static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn,
+ u16 tx_queue_id)
+{
+ struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
+ struct qed_sp_init_request_params sp_params;
+ struct qed_spq_entry *p_ent = NULL;
+ int rc = -EINVAL;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.ramrod_data_size = sizeof(struct tx_queue_stop_ramrod_data);
+ sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ p_tx_cid->cid,
+ p_tx_cid->opaque_fid,
+ ETH_RAMROD_TX_QUEUE_STOP,
+ PROTOCOLID_ETH,
+ &sp_params);
+ if (rc)
+ return rc;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ return rc;
+
+ return qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
+}
+
+static enum eth_filter_action
+qed_filter_action(enum qed_filter_opcode opcode)
+{
+ enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
+
+ switch (opcode) {
+ case QED_FILTER_ADD:
+ action = ETH_FILTER_ACTION_ADD;
+ break;
+ case QED_FILTER_REMOVE:
+ action = ETH_FILTER_ACTION_REMOVE;
+ break;
+ case QED_FILTER_REPLACE:
+ case QED_FILTER_FLUSH:
+ action = ETH_FILTER_ACTION_REPLACE;
+ break;
+ default:
+ action = MAX_ETH_FILTER_ACTION;
+ }
+
+ return action;
+}
+
+static void qed_set_fw_mac_addr(__le16 *fw_msb,
+ __le16 *fw_mid,
+ __le16 *fw_lsb,
+ u8 *mac)
+{
+ ((u8 *)fw_msb)[0] = mac[1];
+ ((u8 *)fw_msb)[1] = mac[0];
+ ((u8 *)fw_mid)[0] = mac[3];
+ ((u8 *)fw_mid)[1] = mac[2];
+ ((u8 *)fw_lsb)[0] = mac[5];
+ ((u8 *)fw_lsb)[1] = mac[4];
+}
+
+static int
+qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_filter_ucast *p_filter_cmd,
+ struct vport_filter_update_ramrod_data **pp_ramrod,
+ struct qed_spq_entry **pp_ent,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ u8 vport_to_add_to = 0, vport_to_remove_from = 0;
+ struct vport_filter_update_ramrod_data *p_ramrod;
+ struct qed_sp_init_request_params sp_params;
+ struct eth_filter_cmd *p_first_filter;
+ struct eth_filter_cmd *p_second_filter;
+ enum eth_filter_action action;
+ int rc;
+
+ rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
+ &vport_to_remove_from);
+ if (rc)
+ return rc;
+
+ rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
+ &vport_to_add_to);
+ if (rc)
+ return rc;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.ramrod_data_size = sizeof(**pp_ramrod);
+ sp_params.comp_mode = comp_mode;
+ sp_params.p_comp_data = p_comp_data;
+
+ rc = qed_sp_init_request(p_hwfn, pp_ent,
+ qed_spq_get_cid(p_hwfn),
+ opaque_fid,
+ ETH_RAMROD_FILTERS_UPDATE,
+ PROTOCOLID_ETH,
+ &sp_params);
+ if (rc)
+ return rc;
+
+ *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
+ p_ramrod = *pp_ramrod;
+ p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
+ p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
+
+ switch (p_filter_cmd->opcode) {
+ case QED_FILTER_FLUSH:
+ p_ramrod->filter_cmd_hdr.cmd_cnt = 0; break;
+ case QED_FILTER_MOVE:
+ p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
+ default:
+ p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
+ }
+
+ p_first_filter = &p_ramrod->filter_cmds[0];
+ p_second_filter = &p_ramrod->filter_cmds[1];
+
+ switch (p_filter_cmd->type) {
+ case QED_FILTER_MAC:
+ p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
+ case QED_FILTER_VLAN:
+ p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
+ case QED_FILTER_MAC_VLAN:
+ p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
+ case QED_FILTER_INNER_MAC:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
+ case QED_FILTER_INNER_VLAN:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
+ case QED_FILTER_INNER_PAIR:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
+ case QED_FILTER_INNER_MAC_VNI_PAIR:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
+ break;
+ case QED_FILTER_MAC_VNI_PAIR:
+ p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
+ case QED_FILTER_VNI:
+ p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
+ }
+
+ if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
+ qed_set_fw_mac_addr(&p_first_filter->mac_msb,
+ &p_first_filter->mac_mid,
+ &p_first_filter->mac_lsb,
+ (u8 *)p_filter_cmd->mac);
+ }
+
+ if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
+ p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);
+
+ if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_VNI))
+ p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);
+
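+ /* A MOVE is encoded as two filter commands: a REMOVE on the source
+ * vport followed by an ADD on the destination vport.
+ */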
+ if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
+ p_second_filter->type = p_first_filter->type;
+ p_second_filter->mac_msb = p_first_filter->mac_msb;
+ p_second_filter->mac_mid = p_first_filter->mac_mid;
+ p_second_filter->mac_lsb = p_first_filter->mac_lsb;
+ p_second_filter->vlan_id = p_first_filter->vlan_id;
+ p_second_filter->vni = p_first_filter->vni;
+
+ p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
+
+ p_first_filter->vport_id = vport_to_remove_from;
+
+ p_second_filter->action = ETH_FILTER_ACTION_ADD;
+ p_second_filter->vport_id = vport_to_add_to;
+ } else {
+ action = qed_filter_action(p_filter_cmd->opcode);
+
+ if (action == MAX_ETH_FILTER_ACTION) {
+ DP_NOTICE(p_hwfn,
+ "%d is not supported yet\n",
+ p_filter_cmd->opcode);
+ return -EINVAL;
+ }
+
+ p_first_filter->action = action;
+ p_first_filter->vport_id = (p_filter_cmd->opcode ==
+ QED_FILTER_REMOVE) ?
+ vport_to_remove_from :
+ vport_to_add_to;
+ }
+
+ return 0;
+}
+
+static int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ struct vport_filter_update_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct eth_filter_cmd_header *p_header;
+ int rc;
+
+ rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
+ &p_ramrod, &p_ent,
+ comp_mode, p_comp_data);
+ if (rc != 0) {
+ DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
+ return rc;
+ }
+ p_header = &p_ramrod->filter_cmd_hdr;
+ p_header->assert_on_error = p_filter_cmd->assert_on_error;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc != 0) {
+ DP_ERR(p_hwfn,
+ "Unicast filter ADD command failed %d\n",
+ rc);
+ return rc;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
+ (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
+ ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
+ "REMOVE" :
+ ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
+ "MOVE" : "REPLACE")),
+ (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
+ ((p_filter_cmd->type == QED_FILTER_VLAN) ?
+ "VLAN" : "MAC & VLAN"),
+ p_ramrod->filter_cmd_hdr.cmd_cnt,
+ p_filter_cmd->is_rx_filter,
+ p_filter_cmd->is_tx_filter);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
+ p_filter_cmd->vport_to_add_to,
+ p_filter_cmd->vport_to_remove_from,
+ p_filter_cmd->mac[0],
+ p_filter_cmd->mac[1],
+ p_filter_cmd->mac[2],
+ p_filter_cmd->mac[3],
+ p_filter_cmd->mac[4],
+ p_filter_cmd->mac[5],
+ p_filter_cmd->vlan);
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Description:
+ * Calculates CRC32 over a buffer
+ * Note: crc32_length MUST be a multiple of 8
+ * Return:
+ * The CRC32 result, or the seed unchanged if the input is invalid
+ ******************************************************************************/
+static u32 qed_calc_crc32c(u8 *crc32_packet,
+ u32 crc32_length,
+ u32 crc32_seed,
+ u8 complement)
+{
+ u32 byte = 0;
+ u32 bit = 0;
+ u8 msb = 0;
+ u8 current_byte = 0;
+ u32 crc32_result = crc32_seed;
+
+ if ((!crc32_packet) ||
+ (crc32_length == 0) ||
+ ((crc32_length % 8) != 0))
+ return crc32_result;
+ for (byte = 0; byte < crc32_length; byte++) {
+ current_byte = crc32_packet[byte];
+ for (bit = 0; bit < 8; bit++) {
+ msb = (u8)(crc32_result >> 31);
+ crc32_result = crc32_result << 1;
+ if (msb != (0x1 & (current_byte >> bit))) {
+ crc32_result = crc32_result ^ CRC32_POLY;
+ crc32_result |= 1; /*crc32_result[0] = 1;*/
+ }
+ }
+ }
+ return crc32_result;
+}
+
+static inline u32 qed_crc32c_le(u32 seed,
+ u8 *mac,
+ u32 len)
+{
+ u32 packet_buf[2] = { 0 };
+
+ memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
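+ /* The 6-byte MAC is zero-padded to 8 bytes so the length satisfies
+ * the multiple-of-8 requirement of qed_calc_crc32c().
+ */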
+ return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
+}
+
+static u8 qed_mcast_bin_from_mac(u8 *mac)
+{
+ u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
+ mac, ETH_ALEN);
+
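+ /* The low 8 bits select a bin in the vport's approximate multicast
+ * filter; several MACs may hash to the same bin, so matching is
+ * best-effort rather than exact.
+ */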
+ return crc & 0xff;
+}
+
+static int
+qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_filter_mcast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
+ struct vport_update_ramrod_data *p_ramrod = NULL;
+ struct qed_sp_init_request_params sp_params;
+ struct qed_spq_entry *p_ent = NULL;
+ u8 abs_vport_id = 0;
+ int rc, i;
+
+ if (p_filter_cmd->opcode == QED_FILTER_ADD) {
+ rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
+ &abs_vport_id);
+ if (rc)
+ return rc;
+ } else {
+ rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
+ &abs_vport_id);
+ if (rc)
+ return rc;
+ }
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.ramrod_data_size = sizeof(*p_ramrod);
+ sp_params.comp_mode = comp_mode;
+ sp_params.p_comp_data = p_comp_data;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ qed_spq_get_cid(p_hwfn),
+ p_hwfn->hw_info.opaque_fid,
+ ETH_RAMROD_VPORT_UPDATE,
+ PROTOCOLID_ETH,
+ &sp_params);
+
+ if (rc) {
+ DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
+ return rc;
+ }
+
+ p_ramrod = &p_ent->ramrod.vport_update;
+ p_ramrod->common.update_approx_mcast_flg = 1;
+
+ /* explicitly clear out the entire vector */
+ memset(&p_ramrod->approx_mcast.bins, 0,
+ sizeof(p_ramrod->approx_mcast.bins));
+ memset(bins, 0, sizeof(unsigned long) *
+ ETH_MULTICAST_MAC_BINS_IN_REGS);
+ /* filter ADD op is explicit set op and it removes
+ * any existing filters for the vport
+ */
+ if (p_filter_cmd->opcode == QED_FILTER_ADD) {
+ for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
+ u32 bit;
+
+ bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
+ __set_bit(bit, bins);
+ }
+
+ /* Convert to the correct endianness */
+ for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+ u32 *p_bins = (u32 *)bins;
+ struct vport_update_ramrod_mcast *approx_mcast;
+
+ approx_mcast = &p_ramrod->approx_mcast;
+ approx_mcast->bins[i] = cpu_to_le32(p_bins[i]);
+ }
+ }
+
+ p_ramrod->common.vport_id = abs_vport_id;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_filter_mcast_cmd(struct qed_dev *cdev,
+ struct qed_filter_mcast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ int rc = 0;
+ int i;
+
+ /* only ADD and REMOVE operations are supported for multicast */
+ if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
+ (p_filter_cmd->opcode != QED_FILTER_REMOVE)) ||
+ (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
+ return -EINVAL;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ u16 opaque_fid;
+
+ if (rc != 0)
+ break;
+
+ opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ rc = qed_sp_eth_filter_mcast(p_hwfn,
+ opaque_fid,
+ p_filter_cmd,
+ comp_mode,
+ p_comp_data);
+ }
+ return rc;
+}
+
+static int qed_filter_ucast_cmd(struct qed_dev *cdev,
+ struct qed_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ int rc = 0;
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ u16 opaque_fid;
+
+ if (rc != 0)
+ break;
+
+ opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ rc = qed_sp_eth_filter_ucast(p_hwfn,
+ opaque_fid,
+ p_filter_cmd,
+ comp_mode,
+ p_comp_data);
+ }
+
+ return rc;
+}
+
+static int qed_fill_eth_dev_info(struct qed_dev *cdev,
+ struct qed_dev_eth_info *info)
+{
+ int i;
+
+ memset(info, 0, sizeof(*info));
+
+ info->num_tc = 1;
+
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ for_each_hwfn(cdev, i)
+ info->num_queues += FEAT_NUM(&cdev->hwfns[i],
+ QED_PF_L2_QUE);
+ if (cdev->int_params.fp_msix_cnt)
+ info->num_queues = min_t(u8, info->num_queues,
+ cdev->int_params.fp_msix_cnt);
+ } else {
+ info->num_queues = cdev->num_hwfns;
+ }
+
+ info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
+ ether_addr_copy(info->port_mac,
+ cdev->hwfns[0].hw_info.hw_mac_addr);
+
+ qed_fill_dev_info(cdev, &info->common);
+
+ return 0;
+}
+
+static void qed_register_eth_ops(struct qed_dev *cdev,
+ struct qed_eth_cb_ops *ops,
+ void *cookie)
+{
+ cdev->protocol_ops.eth = ops;
+ cdev->ops_cookie = cookie;
+}
+
+static int qed_start_vport(struct qed_dev *cdev,
+ u8 vport_id,
+ u16 mtu,
+ u8 drop_ttl0_flg,
+ u8 inner_vlan_removal_en_flg)
+{
+ int rc, i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ rc = qed_sp_vport_start(p_hwfn,
+ p_hwfn->hw_info.concrete_fid,
+ p_hwfn->hw_info.opaque_fid,
+ vport_id,
+ mtu,
+ drop_ttl0_flg,
+ inner_vlan_removal_en_flg);
+
+ if (rc) {
+ DP_ERR(cdev, "Failed to start VPORT\n");
+ return rc;
+ }
+
+ qed_hw_start_fastpath(p_hwfn);
+
+ DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+ "Started V-PORT %d with MTU %d\n",
+ vport_id, mtu);
+ }
+
+ qed_reset_vport_stats(cdev);
+
+ return 0;
+}
+
+static int qed_stop_vport(struct qed_dev *cdev,
+ u8 vport_id)
+{
+ int rc, i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ rc = qed_sp_vport_stop(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ vport_id);
+
+ if (rc) {
+ DP_ERR(cdev, "Failed to stop VPORT\n");
+ return rc;
+ }
+ }
+ return 0;
+}
+
+static int qed_update_vport(struct qed_dev *cdev,
+ struct qed_update_vport_params *params)
+{
+ struct qed_sp_vport_update_params sp_params;
+ struct qed_rss_params sp_rss_params;
+ int rc, i;
+
+ if (!cdev)
+ return -ENODEV;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ memset(&sp_rss_params, 0, sizeof(sp_rss_params));
+
+ /* Translate protocol params into sp params */
+ sp_params.vport_id = params->vport_id;
+ sp_params.update_vport_active_rx_flg =
+ params->update_vport_active_flg;
+ sp_params.update_vport_active_tx_flg =
+ params->update_vport_active_flg;
+ sp_params.vport_active_rx_flg = params->vport_active_flg;
+ sp_params.vport_active_tx_flg = params->vport_active_flg;
+
+ /* RSS is a bit tricky, since the upper layer isn't familiar with hwfns.
+ * We need to re-fix the RSS values per engine for CMT.
+ */
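+ /* e.g. with two hwfns and an indirection table referencing queues
+ * 0..7, each entry is reduced modulo 4 so that the entries stay
+ * within each engine's relative queue range.
+ */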
+ if (cdev->num_hwfns > 1 && params->update_rss_flg) {
+ struct qed_update_vport_rss_params *rss =
+ &params->rss_params;
+ int k, max = 0;
+
+ /* Find largest entry, since it's possible RSS needs to
+ * be disabled [in case only 1 queue per-hwfn]
+ */
+ for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
+ max = (max > rss->rss_ind_table[k]) ?
+ max : rss->rss_ind_table[k];
+
+ /* Either fix RSS values or disable RSS */
+ if (cdev->num_hwfns < max + 1) {
+ int divisor = (max + cdev->num_hwfns - 1) /
+ cdev->num_hwfns;
+
+ DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+ "CMT - fixing RSS values (modulo %02x)\n",
+ divisor);
+
+ for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
+ rss->rss_ind_table[k] =
+ rss->rss_ind_table[k] % divisor;
+ } else {
+ DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+ "CMT - 1 queue per-hwfn; Disabling RSS\n");
+ params->update_rss_flg = 0;
+ }
+ }
+
+ /* Now, update the RSS configuration for actual configuration */
+ if (params->update_rss_flg) {
+ sp_rss_params.update_rss_config = 1;
+ sp_rss_params.rss_enable = 1;
+ sp_rss_params.update_rss_capabilities = 1;
+ sp_rss_params.update_rss_ind_table = 1;
+ sp_rss_params.update_rss_key = 1;
+ sp_rss_params.rss_caps = QED_RSS_IPV4 |
+ QED_RSS_IPV6 |
+ QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
+ sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
+ memcpy(sp_rss_params.rss_ind_table,
+ params->rss_params.rss_ind_table,
+ QED_RSS_IND_TABLE_SIZE * sizeof(u16));
+ memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
+ QED_RSS_KEY_SIZE * sizeof(u32));
+ }
+ sp_params.rss_params = &sp_rss_params;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = qed_sp_vport_update(p_hwfn, &sp_params,
+ QED_SPQ_MODE_EBLOCK,
+ NULL);
+ if (rc) {
+ DP_ERR(cdev, "Failed to update VPORT\n");
+ return rc;
+ }
+
+ DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+ "Updated V-PORT %d: active_flag %d [update %d]\n",
+ params->vport_id, params->vport_active_flg,
+ params->update_vport_active_flg);
+ }
+
+ return 0;
+}
+
+static int qed_start_rxq(struct qed_dev *cdev,
+ struct qed_queue_start_common_params *params,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ void __iomem **pp_prod)
+{
+ int rc, hwfn_index;
+ struct qed_hwfn *p_hwfn;
+
+ hwfn_index = params->rss_id % cdev->num_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
+
+ /* Fix queue ID in 100g mode */
+ params->queue_id /= cdev->num_hwfns;
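+ /* In CMT the queues are interleaved across the hwfns, so the global
+ * queue ID is translated into an engine-relative one here.
+ */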
+
+ rc = qed_sp_eth_rx_queue_start(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ params,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr,
+ cqe_pbl_size,
+ pp_prod);
+
+ if (rc) {
+ DP_ERR(cdev, "Failed to start RXQ#%d\n", params->queue_id);
+ return rc;
+ }
+
+ DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+ "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n",
+ params->queue_id, params->rss_id, params->vport_id,
+ params->sb);
+
+ return 0;
+}
+
+static int qed_stop_rxq(struct qed_dev *cdev,
+ struct qed_stop_rxq_params *params)
+{
+ int rc, hwfn_index;
+ struct qed_hwfn *p_hwfn;
+
+ hwfn_index = params->rss_id % cdev->num_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
+
+ rc = qed_sp_eth_rx_queue_stop(p_hwfn,
+ params->rx_queue_id / cdev->num_hwfns,
+ params->eq_completion_only,
+ false);
+ if (rc) {
+ DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int qed_start_txq(struct qed_dev *cdev,
+ struct qed_queue_start_common_params *p_params,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ void __iomem **pp_doorbell)
+{
+ struct qed_hwfn *p_hwfn;
+ int rc, hwfn_index;
+
+ hwfn_index = p_params->rss_id % cdev->num_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
+
+ /* Fix queue ID in 100g mode */
+ p_params->queue_id /= cdev->num_hwfns;
+
+ rc = qed_sp_eth_tx_queue_start(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ p_params,
+ pbl_addr,
+ pbl_size,
+ pp_doorbell);
+
+ if (rc) {
+ DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
+ return rc;
+ }
+
+ DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+ "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n",
+ p_params->queue_id, p_params->rss_id, p_params->vport_id,
+ p_params->sb);
+
+ return 0;
+}
+
+#define QED_HW_STOP_RETRY_LIMIT (10)
+static int qed_fastpath_stop(struct qed_dev *cdev)
+{
+ qed_hw_stop_fastpath(cdev);
+
+ return 0;
+}
+
+static int qed_stop_txq(struct qed_dev *cdev,
+ struct qed_stop_txq_params *params)
+{
+ struct qed_hwfn *p_hwfn;
+ int rc, hwfn_index;
+
+ hwfn_index = params->rss_id % cdev->num_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
+
+ rc = qed_sp_eth_tx_queue_stop(p_hwfn,
+ params->tx_queue_id / cdev->num_hwfns);
+ if (rc) {
+ DP_ERR(cdev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
+ enum qed_filter_rx_mode_type type)
+{
+ struct qed_filter_accept_flags accept_flags;
+
+ memset(&accept_flags, 0, sizeof(accept_flags));
+
+ accept_flags.update_rx_mode_config = 1;
+ accept_flags.update_tx_mode_config = 1;
+ accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
+ QED_ACCEPT_MCAST_MATCHED |
+ QED_ACCEPT_BCAST;
+ accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
+ QED_ACCEPT_MCAST_MATCHED |
+ QED_ACCEPT_BCAST;
+
+ if (type == QED_FILTER_RX_MODE_TYPE_PROMISC)
+ accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
+ QED_ACCEPT_MCAST_UNMATCHED;
+ else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC)
+ accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
+
+ return qed_filter_accept_cmd(cdev, 0, accept_flags,
+ QED_SPQ_MODE_CB, NULL);
+}
+
+static int qed_configure_filter_ucast(struct qed_dev *cdev,
+ struct qed_filter_ucast_params *params)
+{
+ struct qed_filter_ucast ucast;
+
+ if (!params->vlan_valid && !params->mac_valid) {
+ DP_NOTICE(
+ cdev,
+ "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
+ return -EINVAL;
+ }
+
+ memset(&ucast, 0, sizeof(ucast));
+ switch (params->type) {
+ case QED_FILTER_XCAST_TYPE_ADD:
+ ucast.opcode = QED_FILTER_ADD;
+ break;
+ case QED_FILTER_XCAST_TYPE_DEL:
+ ucast.opcode = QED_FILTER_REMOVE;
+ break;
+ case QED_FILTER_XCAST_TYPE_REPLACE:
+ ucast.opcode = QED_FILTER_REPLACE;
+ break;
+ default:
+ DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
+ params->type);
+ }
+
+ if (params->vlan_valid && params->mac_valid) {
+ ucast.type = QED_FILTER_MAC_VLAN;
+ ether_addr_copy(ucast.mac, params->mac);
+ ucast.vlan = params->vlan;
+ } else if (params->mac_valid) {
+ ucast.type = QED_FILTER_MAC;
+ ether_addr_copy(ucast.mac, params->mac);
+ } else {
+ ucast.type = QED_FILTER_VLAN;
+ ucast.vlan = params->vlan;
+ }
+
+ ucast.is_rx_filter = true;
+ ucast.is_tx_filter = true;
+
+ return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
+}
+
+static int qed_configure_filter_mcast(struct qed_dev *cdev,
+ struct qed_filter_mcast_params *params)
+{
+ struct qed_filter_mcast mcast;
+ int i;
+
+ memset(&mcast, 0, sizeof(mcast));
+ switch (params->type) {
+ case QED_FILTER_XCAST_TYPE_ADD:
+ mcast.opcode = QED_FILTER_ADD;
+ break;
+ case QED_FILTER_XCAST_TYPE_DEL:
+ mcast.opcode = QED_FILTER_REMOVE;
+ break;
+ default:
+ DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
+ params->type);
+ }
+
+ mcast.num_mc_addrs = params->num;
+ for (i = 0; i < mcast.num_mc_addrs; i++)
+ ether_addr_copy(mcast.mac[i], params->mac[i]);
+
+ return qed_filter_mcast_cmd(cdev, &mcast,
+ QED_SPQ_MODE_CB, NULL);
+}
+
+static int qed_configure_filter(struct qed_dev *cdev,
+ struct qed_filter_params *params)
+{
+ enum qed_filter_rx_mode_type accept_flags;
+
+ switch (params->type) {
+ case QED_FILTER_TYPE_UCAST:
+ return qed_configure_filter_ucast(cdev, &params->filter.ucast);
+ case QED_FILTER_TYPE_MCAST:
+ return qed_configure_filter_mcast(cdev, &params->filter.mcast);
+ case QED_FILTER_TYPE_RX_MODE:
+ accept_flags = params->filter.accept_flags;
+ return qed_configure_filter_rx_mode(cdev, accept_flags);
+ default:
+ DP_NOTICE(cdev, "Unknown filter type %d\n",
+ (int)params->type);
+ return -EINVAL;
+ }
+}
+
+static int qed_fp_cqe_completion(struct qed_dev *dev,
+ u8 rss_id,
+ struct eth_slow_path_rx_cqe *cqe)
+{
+ return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
+ cqe);
+}
+
+static const struct qed_eth_ops qed_eth_ops_pass = {
+ .common = &qed_common_ops_pass,
+ .fill_dev_info = &qed_fill_eth_dev_info,
+ .register_ops = &qed_register_eth_ops,
+ .vport_start = &qed_start_vport,
+ .vport_stop = &qed_stop_vport,
+ .vport_update = &qed_update_vport,
+ .q_rx_start = &qed_start_rxq,
+ .q_rx_stop = &qed_stop_rxq,
+ .q_tx_start = &qed_start_txq,
+ .q_tx_stop = &qed_stop_txq,
+ .filter_config = &qed_configure_filter,
+ .fastpath_stop = &qed_fastpath_stop,
+ .eth_cqe_completion = &qed_fp_cqe_completion,
+ .get_vport_stats = &qed_get_vport_stats,
+};
+
+const struct qed_eth_ops *qed_get_eth_ops(u32 version)
+{
+ if (version != QED_ETH_INTERFACE_VERSION) {
+ pr_notice("Cannot supply ethtool operations [%08x != %08x]\n",
+ version, QED_ETH_INTERFACE_VERSION);
+ return NULL;
+ }
+
+ return &qed_eth_ops_pass;
+}
+EXPORT_SYMBOL(qed_get_eth_ops);
+
+void qed_put_eth_ops(void)
+{
+ /* TODO - reference count for module? */
+}
+EXPORT_SYMBOL(qed_put_eth_ops);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
new file mode 100644
index 000000000000..947c7af72b25
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -0,0 +1,1169 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/stddef.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <linux/dma-mapping.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/qed/qed_if.h>
+
+#include "qed.h"
+#include "qed_sp.h"
+#include "qed_dev_api.h"
+#include "qed_mcp.h"
+#include "qed_hw.h"
+
+static const char version[] =
+ "QLogic QL4xxx 40G/100G Ethernet Driver qed " DRV_MODULE_VERSION "\n";
+
+MODULE_DESCRIPTION("QLogic 25G/40G/50G/100G Core Module");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#define FW_FILE_VERSION \
+ __stringify(FW_MAJOR_VERSION) "." \
+ __stringify(FW_MINOR_VERSION) "." \
+ __stringify(FW_REVISION_VERSION) "." \
+ __stringify(FW_ENGINEERING_VERSION)
+
+#define QED_FW_FILE_NAME \
+ "qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"
+
+static int __init qed_init(void)
+{
+ pr_notice("qed_init called\n");
+
+ pr_info("%s", version);
+
+ return 0;
+}
+
+static void __exit qed_cleanup(void)
+{
+ pr_notice("qed_cleanup called\n");
+}
+
+module_init(qed_init);
+module_exit(qed_cleanup);
+
+/* Check if the DMA controller on the machine can properly handle the DMA
+ * addressing required by the device.
+ */
+static int qed_set_coherency_mask(struct qed_dev *cdev)
+{
+ struct device *dev = &cdev->pdev->dev;
+
+ if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
+ if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
+ DP_NOTICE(cdev,
+ "Can't request 64-bit consistent allocations\n");
+ return -EIO;
+ }
+ } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
+ DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void qed_free_pci(struct qed_dev *cdev)
+{
+ struct pci_dev *pdev = cdev->pdev;
+
+ if (cdev->doorbells)
+ iounmap(cdev->doorbells);
+ if (cdev->regview)
+ iounmap(cdev->regview);
+ if (atomic_read(&pdev->enable_cnt) == 1)
+ pci_release_regions(pdev);
+
+ pci_disable_device(pdev);
+}
+
+/* Performs PCI initializations as well as initializing PCI-related parameters
+ * in the device structure. Returns 0 in case of success.
+ */
+static int qed_init_pci(struct qed_dev *cdev,
+ struct pci_dev *pdev)
+{
+ int rc;
+
+ cdev->pdev = pdev;
+
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ DP_NOTICE(cdev, "Cannot enable PCI device\n");
+ goto err0;
+ }
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ DP_NOTICE(cdev, "No memory region found in bar #0\n");
+ rc = -EIO;
+ goto err1;
+ }
+
+ if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+ DP_NOTICE(cdev, "No memory region found in bar #2\n");
+ rc = -EIO;
+ goto err1;
+ }
+
+ if (atomic_read(&pdev->enable_cnt) == 1) {
+ rc = pci_request_regions(pdev, "qed");
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Failed to request PCI memory resources\n");
+ goto err1;
+ }
+ pci_set_master(pdev);
+ pci_save_state(pdev);
+ }
+
+ if (!pci_is_pcie(pdev)) {
+ DP_NOTICE(cdev, "The bus is not PCI Express\n");
+ rc = -EIO;
+ goto err2;
+ }
+
+ cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+ if (cdev->pci_params.pm_cap == 0)
+ DP_NOTICE(cdev, "Cannot find power management capability\n");
+
+ rc = qed_set_coherency_mask(cdev);
+ if (rc)
+ goto err2;
+
+ cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
+ cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
+ cdev->pci_params.irq = pdev->irq;
+
+ cdev->regview = pci_ioremap_bar(pdev, 0);
+ if (!cdev->regview) {
+ DP_NOTICE(cdev, "Cannot map register space, aborting\n");
+ rc = -ENOMEM;
+ goto err2;
+ }
+
+ cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
+ cdev->db_size = pci_resource_len(cdev->pdev, 2);
+ cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
+ if (!cdev->doorbells) {
+ DP_NOTICE(cdev, "Cannot map doorbell space\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+
+err2:
+ pci_release_regions(pdev);
+err1:
+ pci_disable_device(pdev);
+err0:
+ return rc;
+}
+
+int qed_fill_dev_info(struct qed_dev *cdev,
+ struct qed_dev_info *dev_info)
+{
+ struct qed_ptt *ptt;
+
+ memset(dev_info, 0, sizeof(struct qed_dev_info));
+
+ dev_info->num_hwfns = cdev->num_hwfns;
+ dev_info->pci_mem_start = cdev->pci_params.mem_start;
+ dev_info->pci_mem_end = cdev->pci_params.mem_end;
+ dev_info->pci_irq = cdev->pci_params.irq;
+ dev_info->is_mf = IS_MF(&cdev->hwfns[0]);
+ ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
+
+ dev_info->fw_major = FW_MAJOR_VERSION;
+ dev_info->fw_minor = FW_MINOR_VERSION;
+ dev_info->fw_rev = FW_REVISION_VERSION;
+ dev_info->fw_eng = FW_ENGINEERING_VERSION;
+ dev_info->mf_mode = cdev->mf_mode;
+
+ qed_mcp_get_mfw_ver(cdev, &dev_info->mfw_rev);
+
+ ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (ptt) {
+ qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
+ &dev_info->flash_size);
+
+ qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
+ }
+
+ return 0;
+}
+
+static void qed_free_cdev(struct qed_dev *cdev)
+{
+ kfree((void *)cdev);
+}
+
+static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
+{
+ struct qed_dev *cdev;
+
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return cdev;
+
+ qed_init_struct(cdev);
+
+ return cdev;
+}
+
+/* Sets the requested power state */
+static int qed_set_power_state(struct qed_dev *cdev,
+ pci_power_t state)
+{
+ if (!cdev)
+ return -ENODEV;
+
+ DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
+ return 0;
+}
+
+/* probing */
+static struct qed_dev *qed_probe(struct pci_dev *pdev,
+ enum qed_protocol protocol,
+ u32 dp_module,
+ u8 dp_level)
+{
+ struct qed_dev *cdev;
+ int rc;
+
+ cdev = qed_alloc_cdev(pdev);
+ if (!cdev)
+ goto err0;
+
+ cdev->protocol = protocol;
+
+ qed_init_dp(cdev, dp_module, dp_level);
+
+ rc = qed_init_pci(cdev, pdev);
+ if (rc) {
+ DP_ERR(cdev, "init pci failed\n");
+ goto err1;
+ }
+ DP_INFO(cdev, "PCI init completed successfully\n");
+
+ rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
+ if (rc) {
+ DP_ERR(cdev, "hw prepare failed\n");
+ goto err2;
+ }
+
+ DP_INFO(cdev, "qed_probe completed successffuly\n");
+
+ return cdev;
+
+err2:
+ qed_free_pci(cdev);
+err1:
+ qed_free_cdev(cdev);
+err0:
+ return NULL;
+}
+
+static void qed_remove(struct qed_dev *cdev)
+{
+ if (!cdev)
+ return;
+
+ qed_hw_remove(cdev);
+
+ qed_free_pci(cdev);
+
+ qed_set_power_state(cdev, PCI_D3hot);
+
+ qed_free_cdev(cdev);
+}
+
+static void qed_disable_msix(struct qed_dev *cdev)
+{
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ pci_disable_msix(cdev->pdev);
+ kfree(cdev->int_params.msix_table);
+ } else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
+ pci_disable_msi(cdev->pdev);
+ }
+
+ memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
+}
+
+static int qed_enable_msix(struct qed_dev *cdev,
+ struct qed_int_params *int_params)
+{
+ int i, rc, cnt;
+
+ cnt = int_params->in.num_vectors;
+
+ for (i = 0; i < cnt; i++)
+ int_params->msix_table[i].entry = i;
+
+ rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
+ int_params->in.min_msix_cnt, cnt);
+ if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
+ (rc % cdev->num_hwfns)) {
+ pci_disable_msix(cdev->pdev);
+
+ /* If fastpath is initialized, we need at least one interrupt
+ * per hwfn [and the slow path interrupts]. New requested number
+ * should be a multiple of the number of hwfns.
+ */
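+ /* e.g. if 11 vectors were granted on a 2-hwfn device, retry with 10
+ * so each hwfn gets an equal share.
+ */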
+ cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
+ DP_NOTICE(cdev,
+ "Trying to enable MSI-X with less vectors (%d out of %d)\n",
+ cnt, int_params->in.num_vectors);
+ rc = pci_enable_msix_exact(cdev->pdev,
+ int_params->msix_table, cnt);
+ if (!rc)
+ rc = cnt;
+ }
+
+ if (rc > 0) {
+ /* MSI-X configuration succeeded */
+ int_params->out.int_mode = QED_INT_MODE_MSIX;
+ int_params->out.num_vectors = rc;
+ rc = 0;
+ } else {
+ DP_NOTICE(cdev,
+ "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
+ cnt, rc);
+ }
+
+ return rc;
+}
+
+/* This function outputs the interrupt mode and the number of enabled MSI-X vectors */
+static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
+{
+ struct qed_int_params *int_params = &cdev->int_params;
+ struct msix_entry *tbl;
+ int rc = 0, cnt;
+
+ switch (int_params->in.int_mode) {
+ case QED_INT_MODE_MSIX:
+ /* Allocate MSIX table */
+ cnt = int_params->in.num_vectors;
+ int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
+ if (!int_params->msix_table) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Enable MSIX */
+ rc = qed_enable_msix(cdev, int_params);
+ if (!rc)
+ goto out;
+
+ DP_NOTICE(cdev, "Failed to enable MSI-X\n");
+ kfree(int_params->msix_table);
+ if (force_mode)
+ goto out;
+ /* Fallthrough */
+
+ case QED_INT_MODE_MSI:
+ rc = pci_enable_msi(cdev->pdev);
+ if (!rc) {
+ int_params->out.int_mode = QED_INT_MODE_MSI;
+ goto out;
+ }
+
+ DP_NOTICE(cdev, "Failed to enable MSI\n");
+ if (force_mode)
+ goto out;
+ /* Fallthrough */
+
+ case QED_INT_MODE_INTA:
+ int_params->out.int_mode = QED_INT_MODE_INTA;
+ rc = 0;
+ goto out;
+ default:
+ DP_NOTICE(cdev, "Unknown int_mode value %d\n",
+ int_params->in.int_mode);
+ rc = -EINVAL;
+ }
+
+out:
+ cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;
+
+ return rc;
+}
+
+static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
+ int index, void(*handler)(void *))
+{
+ struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
+ int relative_idx = index / cdev->num_hwfns;
+
+ hwfn->simd_proto_handler[relative_idx].func = handler;
+ hwfn->simd_proto_handler[relative_idx].token = token;
+}
+
+static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
+{
+ struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
+ int relative_idx = index / cdev->num_hwfns;
+
+ memset(&hwfn->simd_proto_handler[relative_idx], 0,
+ sizeof(struct qed_simd_fp_handler));
+}
+
+static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
+{
+ tasklet_schedule((struct tasklet_struct *)tasklet);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qed_single_int(int irq, void *dev_instance)
+{
+ struct qed_dev *cdev = (struct qed_dev *)dev_instance;
+ struct qed_hwfn *hwfn;
+ irqreturn_t rc = IRQ_NONE;
+ u64 status;
+ int i, j;
+
+ for (i = 0; i < cdev->num_hwfns; i++) {
+ status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);
+
+ if (!status)
+ continue;
+
+ hwfn = &cdev->hwfns[i];
+
+ /* Slowpath interrupt */
+ if (unlikely(status & 0x1)) {
+ tasklet_schedule(hwfn->sp_dpc);
+ status &= ~0x1;
+ rc = IRQ_HANDLED;
+ }
+
+ /* Fastpath interrupts */
+ for (j = 0; j < 64; j++) {
+ if ((0x2ULL << j) & status) {
+ hwfn->simd_proto_handler[j].func(
+ hwfn->simd_proto_handler[j].token);
+ status &= ~(0x2ULL << j);
+ rc = IRQ_HANDLED;
+ }
+ }
+
+ if (unlikely(status))
+ DP_VERBOSE(hwfn, NETIF_MSG_INTR,
+ "got an unknown interrupt status 0x%llx\n",
+ status);
+ }
+
+ return rc;
+}
+
+static int qed_slowpath_irq_req(struct qed_dev *cdev)
+{
+ int i = 0, rc = 0;
+
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ /* Request all the slowpath MSI-X vectors */
+ for (i = 0; i < cdev->num_hwfns; i++) {
+ snprintf(cdev->hwfns[i].name, NAME_SIZE,
+ "sp-%d-%02x:%02x.%02x",
+ i, cdev->pdev->bus->number,
+ PCI_SLOT(cdev->pdev->devfn),
+ cdev->hwfns[i].abs_pf_id);
+
+ rc = request_irq(cdev->int_params.msix_table[i].vector,
+ qed_msix_sp_int, 0,
+ cdev->hwfns[i].name,
+ cdev->hwfns[i].sp_dpc);
+ if (rc)
+ break;
+
+ DP_VERBOSE(&cdev->hwfns[i],
+ (NETIF_MSG_INTR | QED_MSG_SP),
+ "Requested slowpath MSI-X\n");
+ }
+
+ if (i != cdev->num_hwfns) {
+ /* Free the already-requested MSI-X vectors */
+ for (i--; i >= 0; i--) {
+ unsigned int vec =
+ cdev->int_params.msix_table[i].vector;
+ synchronize_irq(vec);
+ free_irq(cdev->int_params.msix_table[i].vector,
+ cdev->hwfns[i].sp_dpc);
+ }
+ }
+ } else {
+ unsigned long flags = 0;
+
+ snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
+ cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
+ PCI_FUNC(cdev->pdev->devfn));
+
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
+ flags |= IRQF_SHARED;
+
+ rc = request_irq(cdev->pdev->irq, qed_single_int,
+ flags, cdev->name, cdev);
+ }
+
+ return rc;
+}
+
+static void qed_slowpath_irq_free(struct qed_dev *cdev)
+{
+ int i;
+
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ for_each_hwfn(cdev, i) {
+ synchronize_irq(cdev->int_params.msix_table[i].vector);
+ free_irq(cdev->int_params.msix_table[i].vector,
+ cdev->hwfns[i].sp_dpc);
+ }
+ } else {
+ free_irq(cdev->pdev->irq, cdev);
+ }
+}
+
+static int qed_nic_stop(struct qed_dev *cdev)
+{
+ int i, rc;
+
+ rc = qed_hw_stop(cdev);
+
+ for (i = 0; i < cdev->num_hwfns; i++) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ if (p_hwfn->b_sp_dpc_enabled) {
+ tasklet_disable(p_hwfn->sp_dpc);
+ p_hwfn->b_sp_dpc_enabled = false;
+ DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
+ "Disabled sp taskelt [hwfn %d] at %p\n",
+ i, p_hwfn->sp_dpc);
+ }
+ }
+
+ return rc;
+}
+
+static int qed_nic_reset(struct qed_dev *cdev)
+{
+ int rc;
+
+ rc = qed_hw_reset(cdev);
+ if (rc)
+ return rc;
+
+ qed_resc_free(cdev);
+
+ return 0;
+}
+
+static int qed_nic_setup(struct qed_dev *cdev)
+{
+ int rc;
+
+ rc = qed_resc_alloc(cdev);
+ if (rc)
+ return rc;
+
+ DP_INFO(cdev, "Allocated qed resources\n");
+
+ qed_resc_setup(cdev);
+
+ return rc;
+}
+
+static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
+{
+ int limit = 0;
+
+ /* Mark the fastpath as free/used */
+ cdev->int_params.fp_initialized = cnt ? true : false;
+
+ if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
+ limit = cdev->num_hwfns * 63;
+ else if (cdev->int_params.fp_msix_cnt)
+ limit = cdev->int_params.fp_msix_cnt;
+
+ if (!limit)
+ return -ENOMEM;
+
+ return min_t(int, cnt, limit);
+}
+
+static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
+{
+ memset(info, 0, sizeof(struct qed_int_info));
+
+ if (!cdev->int_params.fp_initialized) {
+ DP_INFO(cdev,
+ "Protocol driver requested interrupt information, but its support is not yet configured\n");
+ return -EINVAL;
+ }
+
+ /* Need to expose only MSI-X information; Single IRQ is handled solely
+ * by qed.
+ */
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ int msix_base = cdev->int_params.fp_msix_base;
+
+ info->msix_cnt = cdev->int_params.fp_msix_cnt;
+ info->msix = &cdev->int_params.msix_table[msix_base];
+ }
+
+ return 0;
+}
+
+static int qed_slowpath_setup_int(struct qed_dev *cdev,
+ enum qed_int_mode int_mode)
+{
+ int rc, i;
+ u8 num_vectors = 0;
+
+ memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
+
+ cdev->int_params.in.int_mode = int_mode;
+ for_each_hwfn(cdev, i)
+ num_vectors += qed_int_get_num_sbs(&cdev->hwfns[i], NULL) + 1;
+ cdev->int_params.in.num_vectors = num_vectors;
+
+ /* We want a minimum of one slowpath and one fastpath vector per hwfn */
+ cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
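+ /* e.g. a 2-hwfn device needs at least 4 MSI-X vectors before
+ * falling back to MSI/INTA.
+ */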
+
+ rc = qed_set_int_mode(cdev, false);
+ if (rc) {
+ DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
+ return rc;
+ }
+
+ cdev->int_params.fp_msix_base = cdev->num_hwfns;
+ cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
+ cdev->num_hwfns;
+
+ return 0;
+}
+
+u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
+ u8 *input_buf, u32 max_size, u8 *unzip_buf)
+{
+ int rc;
+
+ p_hwfn->stream->next_in = input_buf;
+ p_hwfn->stream->avail_in = input_len;
+ p_hwfn->stream->next_out = unzip_buf;
+ p_hwfn->stream->avail_out = max_size;
+
+ rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);
+
+ if (rc != Z_OK) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
+ rc);
+ return 0;
+ }
+
+ rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
+ zlib_inflateEnd(p_hwfn->stream);
+
+ if (rc != Z_OK && rc != Z_STREAM_END) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
+ p_hwfn->stream->msg, rc);
+ return 0;
+ }
+
+ return p_hwfn->stream->total_out / 4;
+}
+
+static int qed_alloc_stream_mem(struct qed_dev *cdev)
+{
+ int i;
+ void *workspace;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
+ if (!p_hwfn->stream)
+ return -ENOMEM;
+
+ workspace = vzalloc(zlib_inflate_workspacesize());
+ if (!workspace)
+ return -ENOMEM;
+ p_hwfn->stream->workspace = workspace;
+ }
+
+ return 0;
+}
+
+static void qed_free_stream_mem(struct qed_dev *cdev)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ if (!p_hwfn->stream)
+ return;
+
+ vfree(p_hwfn->stream->workspace);
+ kfree(p_hwfn->stream);
+ }
+}
+
+static void qed_update_pf_params(struct qed_dev *cdev,
+ struct qed_pf_params *params)
+{
+ int i;
+
+ for (i = 0; i < cdev->num_hwfns; i++) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ p_hwfn->pf_params = *params;
+ }
+}
+
+static int qed_slowpath_start(struct qed_dev *cdev,
+ struct qed_slowpath_params *params)
+{
+ struct qed_mcp_drv_version drv_version;
+ const u8 *data = NULL;
+ struct qed_hwfn *hwfn;
+ int rc;
+
+ rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
+ &cdev->pdev->dev);
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Failed to find fw file - /lib/firmware/%s\n",
+ QED_FW_FILE_NAME);
+ goto err;
+ }
+
+ rc = qed_nic_setup(cdev);
+ if (rc)
+ goto err;
+
+ rc = qed_slowpath_setup_int(cdev, params->int_mode);
+ if (rc)
+ goto err1;
+
+ /* Request the slowpath IRQ */
+ rc = qed_slowpath_irq_req(cdev);
+ if (rc)
+ goto err2;
+
+ /* Allocate stream for unzipping */
+ rc = qed_alloc_stream_mem(cdev);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to allocate stream memory\n");
+ goto err3;
+ }
+
+ /* Start the slowpath */
+ data = cdev->firmware->data;
+
+ rc = qed_hw_init(cdev, true, cdev->int_params.out.int_mode,
+ true, data);
+ if (rc)
+ goto err3;
+
+ DP_INFO(cdev,
+ "HW initialization and function start completed successfully\n");
+
+ hwfn = QED_LEADING_HWFN(cdev);
+ drv_version.version = (params->drv_major << 24) |
+ (params->drv_minor << 16) |
+ (params->drv_rev << 8) |
+ (params->drv_eng);
+ strlcpy(drv_version.name, params->name,
+ MCP_DRV_VER_STR_SIZE - 4);
+ rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
+ &drv_version);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed sending drv version command\n");
+ return rc;
+ }
+
+ return 0;
+
+err3:
+ qed_free_stream_mem(cdev);
+ qed_slowpath_irq_free(cdev);
+err2:
+ qed_disable_msix(cdev);
+err1:
+ qed_resc_free(cdev);
+err:
+ release_firmware(cdev->firmware);
+
+ return rc;
+}
+
+static int qed_slowpath_stop(struct qed_dev *cdev)
+{
+ if (!cdev)
+ return -ENODEV;
+
+ qed_free_stream_mem(cdev);
+
+ qed_nic_stop(cdev);
+ qed_slowpath_irq_free(cdev);
+
+ qed_disable_msix(cdev);
+ qed_nic_reset(cdev);
+
+ release_firmware(cdev->firmware);
+
+ return 0;
+}
+
+static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE],
+ char ver_str[VER_SIZE])
+{
+ int i;
+
+ memcpy(cdev->name, name, NAME_SIZE);
+ for_each_hwfn(cdev, i)
+ snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
+
+ memcpy(cdev->ver_str, ver_str, VER_SIZE);
+ cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
+}
+
+static u32 qed_sb_init(struct qed_dev *cdev,
+ struct qed_sb_info *sb_info,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr, u16 sb_id,
+ enum qed_sb_type type)
+{
+ struct qed_hwfn *p_hwfn;
+ int hwfn_index;
+ u16 rel_sb_id;
+ u8 n_hwfns;
+ u32 rc;
+
+ /* RoCE uses a single engine, while CMT uses two. When both engines
+ * are available, only L2 queues are spread across them; RoCE and
+ * storage are forced onto engine 0.
+ */
+ if (type == QED_SB_TYPE_L2_QUEUE)
+ n_hwfns = cdev->num_hwfns;
+ else
+ n_hwfns = 1;
+
+ hwfn_index = sb_id % n_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
+ rel_sb_id = sb_id / n_hwfns;
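+ /* e.g. on a 2-hwfn CMT device, L2 status block 5 lands on hwfn 1 as
+ * relative SB 2, while non-L2 status blocks always land on hwfn 0.
+ */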
+
+ DP_VERBOSE(cdev, NETIF_MSG_INTR,
+ "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
+ hwfn_index, rel_sb_id, sb_id);
+
+ rc = qed_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
+ sb_virt_addr, sb_phy_addr, rel_sb_id);
+
+ return rc;
+}
+
+static u32 qed_sb_release(struct qed_dev *cdev,
+ struct qed_sb_info *sb_info,
+ u16 sb_id)
+{
+ struct qed_hwfn *p_hwfn;
+ int hwfn_index;
+ u16 rel_sb_id;
+ u32 rc;
+
+ hwfn_index = sb_id % cdev->num_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
+ rel_sb_id = sb_id / cdev->num_hwfns;
+
+ DP_VERBOSE(cdev, NETIF_MSG_INTR,
+ "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
+ hwfn_index, rel_sb_id, sb_id);
+
+ rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);
+
+ return rc;
+}
+
+static int qed_set_link(struct qed_dev *cdev,
+ struct qed_link_params *params)
+{
+ struct qed_hwfn *hwfn;
+ struct qed_mcp_link_params *link_params;
+ struct qed_ptt *ptt;
+ int rc;
+
+ if (!cdev)
+ return -ENODEV;
+
+ /* The link should be set only once per PF */
+ hwfn = &cdev->hwfns[0];
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EBUSY;
+
+ link_params = qed_mcp_get_link_params(hwfn);
+ if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
+ link_params->speed.autoneg = params->autoneg;
+ if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
+ link_params->speed.advertised_speeds = 0;
+ if ((params->adv_speeds & SUPPORTED_1000baseT_Half) ||
+ (params->adv_speeds & SUPPORTED_1000baseT_Full))
+ link_params->speed.advertised_speeds |=
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ if (params->adv_speeds & SUPPORTED_10000baseKR_Full)
+ link_params->speed.advertised_speeds |=
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ if (params->adv_speeds & SUPPORTED_40000baseLR4_Full)
+ link_params->speed.advertised_speeds |=
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
+ if (params->adv_speeds & 0)
+ link_params->speed.advertised_speeds |=
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
+ if (params->adv_speeds & 0)
+ link_params->speed.advertised_speeds |=
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G;
+ }
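+ /* The (adv_speeds & 0) tests above never match; 50G/100G
+ * advertisement bits presumably have no SUPPORTED_* equivalents yet
+ * and are left as placeholders.
+ */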
+ if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
+ link_params->speed.forced_speed = params->forced_speed;
+
+ rc = qed_mcp_set_link(hwfn, ptt, params->link_up);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+static int qed_get_port_type(u32 media_type)
+{
+ int port_type;
+
+ switch (media_type) {
+ case MEDIA_SFPP_10G_FIBER:
+ case MEDIA_SFP_1G_FIBER:
+ case MEDIA_XFP_FIBER:
+ case MEDIA_KR:
+ port_type = PORT_FIBRE;
+ break;
+ case MEDIA_DA_TWINAX:
+ port_type = PORT_DA;
+ break;
+ case MEDIA_BASE_T:
+ port_type = PORT_TP;
+ break;
+ case MEDIA_NOT_PRESENT:
+ port_type = PORT_NONE;
+ break;
+ case MEDIA_UNSPECIFIED:
+ default:
+ port_type = PORT_OTHER;
+ break;
+ }
+ return port_type;
+}
+
+static void qed_fill_link(struct qed_hwfn *hwfn,
+ struct qed_link_output *if_link)
+{
+ struct qed_mcp_link_params params;
+ struct qed_mcp_link_state link;
+ struct qed_mcp_link_capabilities link_caps;
+ u32 media_type;
+
+ memset(if_link, 0, sizeof(*if_link));
+
+ /* Prepare source inputs */
+ memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
+ memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
+ memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
+ sizeof(link_caps));
+
+ /* Set the link parameters to pass to protocol driver */
+ if (link.link_up)
+ if_link->link_up = true;
+
+ /* TODO - at the moment assume supported and advertised speeds are equal */
+ if_link->supported_caps = SUPPORTED_FIBRE;
+ if (params.speed.autoneg)
+ if_link->supported_caps |= SUPPORTED_Autoneg;
+ if (params.pause.autoneg ||
+ (params.pause.forced_rx && params.pause.forced_tx))
+ if_link->supported_caps |= SUPPORTED_Asym_Pause;
+ if (params.pause.autoneg || params.pause.forced_rx ||
+ params.pause.forced_tx)
+ if_link->supported_caps |= SUPPORTED_Pause;
+
+ if_link->advertised_caps = if_link->supported_caps;
+ if (params.speed.advertised_speeds &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+ if_link->advertised_caps |= SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full;
+ if (params.speed.advertised_speeds &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+ if_link->advertised_caps |= SUPPORTED_10000baseKR_Full;
+ if (params.speed.advertised_speeds &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+ if_link->advertised_caps |= SUPPORTED_40000baseLR4_Full;
+ if (params.speed.advertised_speeds &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+ if_link->advertised_caps |= 0;
+ if (params.speed.advertised_speeds &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G)
+ if_link->advertised_caps |= 0;
+
+ if (link_caps.speed_capabilities &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+ if_link->supported_caps |= SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full;
+ if (link_caps.speed_capabilities &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+ if_link->supported_caps |= SUPPORTED_10000baseKR_Full;
+ if (link_caps.speed_capabilities &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+ if_link->supported_caps |= SUPPORTED_40000baseLR4_Full;
+ if (link_caps.speed_capabilities &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+ if_link->supported_caps |= 0;
+ if (link_caps.speed_capabilities &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G)
+ if_link->supported_caps |= 0;
+
+ if (link.link_up)
+ if_link->speed = link.speed;
+
+ /* TODO - fill duplex properly */
+ if_link->duplex = DUPLEX_FULL;
+ qed_mcp_get_media_type(hwfn->cdev, &media_type);
+ if_link->port = qed_get_port_type(media_type);
+
+ if_link->autoneg = params.speed.autoneg;
+
+ if (params.pause.autoneg)
+ if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
+ if (params.pause.forced_rx)
+ if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
+ if (params.pause.forced_tx)
+ if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
+
+ /* Link partner capabilities */
+ if (link.partner_adv_speed &
+ QED_LINK_PARTNER_SPEED_1G_HD)
+ if_link->lp_caps |= SUPPORTED_1000baseT_Half;
+ if (link.partner_adv_speed &
+ QED_LINK_PARTNER_SPEED_1G_FD)
+ if_link->lp_caps |= SUPPORTED_1000baseT_Full;
+ if (link.partner_adv_speed &
+ QED_LINK_PARTNER_SPEED_10G)
+ if_link->lp_caps |= SUPPORTED_10000baseKR_Full;
+ if (link.partner_adv_speed &
+ QED_LINK_PARTNER_SPEED_40G)
+ if_link->lp_caps |= SUPPORTED_40000baseLR4_Full;
+ if (link.partner_adv_speed &
+ QED_LINK_PARTNER_SPEED_50G)
+ if_link->lp_caps |= 0;
+ if (link.partner_adv_speed &
+ QED_LINK_PARTNER_SPEED_100G)
+ if_link->lp_caps |= 0;
+
+ if (link.an_complete)
+ if_link->lp_caps |= SUPPORTED_Autoneg;
+
+ if (link.partner_adv_pause)
+ if_link->lp_caps |= SUPPORTED_Pause;
+ if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
+ link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
+ if_link->lp_caps |= SUPPORTED_Asym_Pause;
+}
+
+static void qed_get_current_link(struct qed_dev *cdev,
+ struct qed_link_output *if_link)
+{
+ qed_fill_link(&cdev->hwfns[0], if_link);
+}
+
+void qed_link_update(struct qed_hwfn *hwfn)
+{
+ void *cookie = hwfn->cdev->ops_cookie;
+ struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
+ struct qed_link_output if_link;
+
+ qed_fill_link(hwfn, &if_link);
+
+ if (IS_LEAD_HWFN(hwfn) && cookie)
+ op->link_update(cookie, &if_link);
+}
+
+static int qed_drain(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn;
+ struct qed_ptt *ptt;
+ int i, rc;
+
+ for_each_hwfn(cdev, i) {
+ hwfn = &cdev->hwfns[i];
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
+ return -EBUSY;
+ }
+ rc = qed_mcp_drain(hwfn, ptt);
+ if (rc)
+ return rc;
+ qed_ptt_release(hwfn, ptt);
+ }
+
+ return 0;
+}
+
+const struct qed_common_ops qed_common_ops_pass = {
+ .probe = &qed_probe,
+ .remove = &qed_remove,
+ .set_power_state = &qed_set_power_state,
+ .set_id = &qed_set_id,
+ .update_pf_params = &qed_update_pf_params,
+ .slowpath_start = &qed_slowpath_start,
+ .slowpath_stop = &qed_slowpath_stop,
+ .set_fp_int = &qed_set_int_fp,
+ .get_fp_int = &qed_get_int_fp,
+ .sb_init = &qed_sb_init,
+ .sb_release = &qed_sb_release,
+ .simd_handler_config = &qed_simd_handler_config,
+ .simd_handler_clean = &qed_simd_handler_clean,
+ .set_link = &qed_set_link,
+ .get_link = &qed_get_current_link,
+ .drain = &qed_drain,
+ .update_msglvl = &qed_init_dp,
+ .chain_alloc = &qed_chain_alloc,
+ .chain_free = &qed_chain_free,
+};
+
+u32 qed_get_protocol_version(enum qed_protocol protocol)
+{
+ switch (protocol) {
+ case QED_PROTOCOL_ETH:
+ return QED_ETH_INTERFACE_VERSION;
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL(qed_get_protocol_version);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
new file mode 100644
index 000000000000..20d048cdcb88
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -0,0 +1,860 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#define CHIP_MCP_RESP_ITER_US 10
+
+#define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
+#define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
+
+#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
+ qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
+ _val)
+
+#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
+ qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
+
+#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
+ DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
+ offsetof(struct public_drv_mb, _field), _val)
+
+#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
+ DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
+ offsetof(struct public_drv_mb, _field))
+
+#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
+ DRV_ID_PDA_COMP_VER_SHIFT)
+
+#define MCP_BYTES_PER_MBIT_SHIFT 17
+
+bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
+ return false;
+ return true;
+}
+
+void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_PORT);
+ u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);
+
+ p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
+ MFW_PORT(p_hwfn));
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "port_addr = 0x%x, port_id 0x%02x\n",
+ p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
+}
+
+void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
+ u32 tmp, i;
+
+ if (!p_hwfn->mcp_info->public_base)
+ return;
+
+ for (i = 0; i < length; i++) {
+ tmp = qed_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->mfw_mb_addr +
+ (i << 2) + sizeof(u32));
+
+ /* The MB data is actually BE; Need to force it to cpu */
+ ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
+ be32_to_cpu((__force __be32)tmp);
+ }
+}
+
+int qed_mcp_free(struct qed_hwfn *p_hwfn)
+{
+ if (p_hwfn->mcp_info) {
+ kfree(p_hwfn->mcp_info->mfw_mb_cur);
+ kfree(p_hwfn->mcp_info->mfw_mb_shadow);
+ }
+ kfree(p_hwfn->mcp_info);
+
+ return 0;
+}
+
+static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_info *p_info = p_hwfn->mcp_info;
+ u32 drv_mb_offsize, mfw_mb_offsize;
+ u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
+
+ p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
+ if (!p_info->public_base)
+ return 0;
+
+ p_info->public_base |= GRCBASE_MCP;
+
+ /* Calculate the driver and MFW mailbox address */
+ drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(p_info->public_base,
+ PUBLIC_DRV_MB));
+ p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
+ drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
+
+ /* Set the MFW MB address */
+ mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(p_info->public_base,
+ PUBLIC_MFW_MB));
+ p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
+ p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);
+
+ /* Get the current driver mailbox sequence before sending
+ * the first command
+ */
+ p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK;
+
+ /* Get current FW pulse sequence */
+ p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
+ DRV_PULSE_SEQ_MASK;
+
+ p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+
+ return 0;
+}
+
+int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_info *p_info;
+ u32 size;
+
+ /* Allocate mcp_info structure */
+ p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_ATOMIC);
+ if (!p_hwfn->mcp_info)
+ goto err;
+ p_info = p_hwfn->mcp_info;
+
+ if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
+ DP_NOTICE(p_hwfn, "MCP is not initialized\n");
+ /* Do not free mcp_info here, since public_base indicates that
+ * the MCP is not initialized
+ */
+ return 0;
+ }
+
+ size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
+ p_info->mfw_mb_cur = kzalloc(size, GFP_ATOMIC);
+ p_info->mfw_mb_shadow =
+ kzalloc(sizeof(u32) * MFW_DRV_MSG_MAX_DWORDS(
+ p_info->mfw_mb_length), GFP_ATOMIC);
+ if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
+ goto err;
+
+ /* Initialize the MFW mutex */
+ mutex_init(&p_info->mutex);
+
+ return 0;
+
+err:
+ DP_NOTICE(p_hwfn, "Failed to allocate mcp memory\n");
+ qed_mcp_free(p_hwfn);
+ return -ENOMEM;
+}
+
+int qed_mcp_reset(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
+ u8 delay = CHIP_MCP_RESP_ITER_US;
+ u32 org_mcp_reset_seq, cnt = 0;
+ int rc = 0;
+
+ /* Set drv command along with the updated sequence */
+ org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
+ (DRV_MSG_CODE_MCP_RESET | seq));
+
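+ /* The MFW changes MISCS_REG_GENERIC_POR_0 once the reset completes,
+ * so poll until it differs from the value sampled above.
+ */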
+ do {
+ /* Wait for MFW response */
+ udelay(delay);
+ /* Give the FW up to 500 msec (50*1000*10usec) */
+ } while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
+ MISCS_REG_GENERIC_POR_0)) &&
+ (cnt++ < QED_MCP_RESET_RETRIES));
+
+ if (org_mcp_reset_seq !=
+ qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "MCP was reset after %d usec\n", cnt * delay);
+ } else {
+ DP_ERR(p_hwfn, "Failed to reset MCP\n");
+ rc = -EAGAIN;
+ }
+
+ return rc;
+}
+
+static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param)
+{
+ u8 delay = CHIP_MCP_RESP_ITER_US;
+ u32 seq, cnt = 1, actual_mb_seq;
+ int rc = 0;
+
+ /* Get actual driver mailbox sequence */
+ actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK;
+
+ /* Use MCP history register to check if MCP reset occurred between
+ * init time and now.
+ */
+ if (p_hwfn->mcp_info->mcp_hist !=
+ qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n");
+ qed_load_mcp_offsets(p_hwfn, p_ptt);
+ qed_mcp_cmd_port_init(p_hwfn, p_ptt);
+ }
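+
+ /* Each command carries an incrementing sequence number; the MFW
+ * echoes it back in the response so the reply can be matched below.
+ */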
+ seq = ++p_hwfn->mcp_info->drv_mb_seq;
+
+ /* Set drv param */
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
+
+ /* Set drv command along with the updated sequence */
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "wrote command (%x) to MFW MB param 0x%08x\n",
+ (cmd | seq), param);
+
+ do {
+ /* Wait for MFW response */
+ udelay(delay);
+ *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+
+ /* Give the FW up to 5 seconds (500*10ms) */
+ } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
+ (cnt++ < QED_DRV_MB_MAX_RETRIES));
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "[after %d ms] read (%x) seq is (%x) from FW MB\n",
+ cnt * delay, *o_mcp_resp, seq);
+
+ /* Is this a reply to our command? */
+ if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
+ *o_mcp_resp &= FW_MSG_CODE_MASK;
+ /* Get the MCP param */
+ *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
+ } else {
+ /* FW BUG! */
+ DP_ERR(p_hwfn, "MFW failed to respond!\n");
+ *o_mcp_resp = 0;
+ rc = -EAGAIN;
+ }
+ return rc;
+}
+
+int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param)
+{
+ int rc = 0;
+
+ /* MCP not initialized */
+ if (!qed_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+ return -EBUSY;
+ }
+
+ /* Lock the mutex to ensure only a single thread is
+ * accessing the MCP at any one time
+ */
+ mutex_lock(&p_hwfn->mcp_info->mutex);
+ rc = qed_do_mcp_cmd(p_hwfn, p_ptt, cmd, param,
+ o_mcp_resp, o_mcp_param);
+ /* Release Mutex */
+ mutex_unlock(&p_hwfn->mcp_info->mutex);
+
+ return rc;
+}
+
+static void qed_mcp_set_drv_ver(struct qed_dev *cdev,
+ struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 i;
+
+ /* Copy version string to MCP */
+ for (i = 0; i < MCP_DRV_VER_STR_SIZE_DWORD; i++)
+ DRV_MB_WR(p_hwfn, p_ptt, union_data.ver_str[i],
+ *(u32 *)&cdev->ver_str[i * sizeof(u32)]);
+}
+
+int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_load_code)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u32 param;
+ int rc;
+
+ if (!qed_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+ return -EBUSY;
+ }
+
+ /* Save driver's version to shmem */
+ qed_mcp_set_drv_ver(cdev, p_hwfn, p_ptt);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n",
+ p_hwfn->mcp_info->drv_mb_seq,
+ p_hwfn->mcp_info->drv_pulse_seq);
+
+ /* Load Request */
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_REQ,
+ (PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
+ cdev->drv_type),
+ p_load_code, &param);
+
+ /* if mcp fails to respond we must abort */
+ if (rc) {
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ return rc;
+ }
+
+ /* If the MFW refused the load request we must abort. This can
+ * happen in the following cases:
+ * - The other port is in diagnostic mode.
+ * - A previously loaded function on the engine is not compliant
+ * with the requester.
+ * - The MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
+ */
+ if (!(*p_load_code) ||
+ ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
+ ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
+ ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
+ DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool b_reset)
+{
+ struct qed_mcp_link_state *p_link;
+ u32 status = 0;
+
+ p_link = &p_hwfn->mcp_info->link_output;
+ memset(p_link, 0, sizeof(*p_link));
+ if (!b_reset) {
+ status = qed_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, link_status));
+ DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
+ "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
+ status,
+ (u32)(p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port,
+ link_status)));
+ } else {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Resetting link indications\n");
+ return;
+ }
+
+ p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
+
+ p_link->full_duplex = true;
+ switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
+ case LINK_STATUS_SPEED_AND_DUPLEX_100G:
+ p_link->speed = 100000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_50G:
+ p_link->speed = 50000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_40G:
+ p_link->speed = 40000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_25G:
+ p_link->speed = 25000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_20G:
+ p_link->speed = 20000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_10G:
+ p_link->speed = 10000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
+ p_link->full_duplex = false;
+ /* Fall-through */
+ case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
+ p_link->speed = 1000;
+ break;
+ default:
+ p_link->speed = 0;
+ }
+
+ /* Correct speed according to bandwidth allocation */
+ if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) {
+ p_link->speed = p_link->speed *
+ p_hwfn->mcp_info->func_info.bandwidth_max /
+ 100;
+ qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ p_link->speed);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Configured MAX bandwidth to be %08x Mb/sec\n",
+ p_link->speed);
+ }
+
+ p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
+ p_link->an_complete = !!(status &
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
+ p_link->parallel_detection = !!(status &
+ LINK_STATUS_PARALLEL_DETECTION_USED);
+ p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
+
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_1G_FD : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_1G_HD : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_10G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_20G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_40G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_50G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_100G : 0;
+
+ p_link->partner_tx_flow_ctrl_en =
+ !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
+ p_link->partner_rx_flow_ctrl_en =
+ !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
+
+ switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
+ case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
+ p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
+ break;
+ case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
+ p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
+ break;
+ case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
+ p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
+ break;
+ default:
+ p_link->partner_adv_pause = 0;
+ }
+
+ p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
+
+ qed_link_update(p_hwfn);
+}
+
+int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool b_up)
+{
+ struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
+ u32 param = 0, reply = 0, cmd;
+ struct pmm_phy_cfg phy_cfg;
+ int rc = 0;
+ u32 i;
+
+ if (!qed_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+ return -EBUSY;
+ }
+
+ /* Set the shmem configuration according to params */
+ memset(&phy_cfg, 0, sizeof(phy_cfg));
+ cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
+ if (!params->speed.autoneg)
+ phy_cfg.speed = params->speed.forced_speed;
+ phy_cfg.pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0;
+ phy_cfg.pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0;
+ phy_cfg.pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0;
+ phy_cfg.adv_speed = params->speed.advertised_speeds;
+ phy_cfg.loopback_mode = params->loopback_mode;
+
+ /* Write the requested configuration to shmem */
+ for (i = 0; i < sizeof(phy_cfg); i += 4)
+ qed_wr(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->drv_mb_addr +
+ offsetof(struct public_drv_mb, union_data) + i,
+ ((u32 *)&phy_cfg)[i >> 2]);
+
+ if (b_up) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
+ phy_cfg.speed,
+ phy_cfg.pause,
+ phy_cfg.adv_speed,
+ phy_cfg.loopback_mode,
+ phy_cfg.feature_config_flags);
+ } else {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Resetting link\n");
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n",
+ p_hwfn->mcp_info->drv_mb_seq,
+ p_hwfn->mcp_info->drv_pulse_seq);
+
+ /* Send the link command (INIT_PHY or LINK_RESET) to the MFW */
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, cmd, 0, &reply, &param);
+
+ /* if mcp fails to respond we must abort */
+ if (rc) {
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ return rc;
+ }
+
+ /* Reset the link status if needed */
+ if (!b_up)
+ qed_mcp_handle_link_change(p_hwfn, p_ptt, true);
+
+ return 0;
+}
+
+int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_info *info = p_hwfn->mcp_info;
+ int rc = 0;
+ bool found = false;
+ u16 i;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");
+
+ /* Read Messages from MFW */
+ qed_mcp_read_mb(p_hwfn, p_ptt);
+
+ /* Compare current messages to old ones */
+ for (i = 0; i < info->mfw_mb_length; i++) {
+ if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
+ continue;
+
+ found = true;
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
+ i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
+
+ switch (i) {
+ case MFW_DRV_MSG_LINK_CHANGE:
+ qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
+ rc = -EINVAL;
+ }
+ }
+
+ /* ACK everything */
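+ /* The ack area follows the length dword and the current-message
+ * dwords within the MFW mailbox, hence the offset arithmetic below.
+ */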
+ for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
+ __be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);
+
+ /* The MFW expects the answer in BE, so force the write into that format */
+ qed_wr(p_hwfn, p_ptt,
+ info->mfw_mb_addr + sizeof(u32) +
+ MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
+ sizeof(u32) + i * sizeof(u32),
+ (__force u32)val);
+ }
+
+ if (!found) {
+ DP_NOTICE(p_hwfn,
+ "Received an MFW message indication but no new message!\n");
+ rc = -EINVAL;
+ }
+
+ /* Copy the new mfw messages into the shadow */
+ memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
+
+ return rc;
+}
+
+int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
+ u32 *p_mfw_ver)
+{
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
+ struct qed_ptt *p_ptt;
+ u32 global_offsize;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ global_offsize = qed_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
+ public_base,
+ PUBLIC_GLOBAL));
+ *p_mfw_ver = qed_rd(p_hwfn, p_ptt,
+ SECTION_ADDR(global_offsize, 0) +
+ offsetof(struct public_global, mfw_ver));
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return 0;
+}
+
+int qed_mcp_get_media_type(struct qed_dev *cdev,
+ u32 *p_media_type)
+{
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
+ struct qed_ptt *p_ptt;
+
+ if (!qed_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+ return -EBUSY;
+ }
+
+ *p_media_type = MEDIA_UNSPECIFIED;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ *p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, media_type));
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return 0;
+}
+
+static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct public_func *p_data,
+ int pfid)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_FUNC);
+ u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
+ u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+ u32 i, size;
+
+ memset(p_data, 0, sizeof(*p_data));
+
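+ /* Read no more than the smaller of our structure and the section
+ * size exposed by the MFW, so a size mismatch cannot cause an overrun.
+ */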
+ size = min_t(u32, sizeof(*p_data),
+ QED_SECTION_SIZE(mfw_path_offsize));
+ for (i = 0; i < size / sizeof(u32); i++)
+ ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
+ func_addr + (i << 2));
+
+ return size;
+}
+
+static int
+qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
+ struct public_func *p_info,
+ enum qed_pci_personality *p_proto)
+{
+ int rc = 0;
+
+ switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
+ case FUNC_MF_CFG_PROTOCOL_ETHERNET:
+ *p_proto = QED_PCI_ETH;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_function_info *info;
+ struct public_func shmem_info;
+
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID(p_hwfn));
+ info = &p_hwfn->mcp_info->func_info;
+
+ info->pause_on_host = (shmem_info.config &
+ FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
+
+ if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info,
+ &info->protocol)) {
+ DP_ERR(p_hwfn, "Unknown personality %08x\n",
+ (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
+ return -EINVAL;
+ }
+
+ if (p_hwfn->cdev->mf_mode != SF) {
+ info->bandwidth_min = (shmem_info.config &
+ FUNC_MF_CFG_MIN_BW_MASK) >>
+ FUNC_MF_CFG_MIN_BW_SHIFT;
+ if (info->bandwidth_min < 1 || info->bandwidth_min > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth minimum out of bounds [%02x]. Set to 1\n",
+ info->bandwidth_min);
+ info->bandwidth_min = 1;
+ }
+
+ info->bandwidth_max = (shmem_info.config &
+ FUNC_MF_CFG_MAX_BW_MASK) >>
+ FUNC_MF_CFG_MAX_BW_SHIFT;
+ if (info->bandwidth_max < 1 || info->bandwidth_max > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth maximum out of bounds [%02x]. Set to 100\n",
+ info->bandwidth_max);
+ info->bandwidth_max = 100;
+ }
+ }
+
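+ /* The MAC is split in shmem - the two high bytes sit in mac_upper
+ * and the four low bytes in mac_lower.
+ */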
+ if (shmem_info.mac_upper || shmem_info.mac_lower) {
+ info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
+ info->mac[1] = (u8)(shmem_info.mac_upper);
+ info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
+ info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
+ info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
+ info->mac[5] = (u8)(shmem_info.mac_lower);
+ } else {
+ DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
+ }
+
+ info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
+ (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
+ info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
+ (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
+
+ info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
+
+ DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
+ "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x\n",
+ info->pause_on_host, info->protocol,
+ info->bandwidth_min, info->bandwidth_max,
+ info->mac[0], info->mac[1], info->mac[2],
+ info->mac[3], info->mac[4], info->mac[5],
+ info->wwn_port, info->wwn_node, info->ovlan);
+
+ return 0;
+}
+
+struct qed_mcp_link_params
+*qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn || !p_hwfn->mcp_info)
+ return NULL;
+ return &p_hwfn->mcp_info->link_input;
+}
+
+struct qed_mcp_link_state
+*qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn || !p_hwfn->mcp_info)
+ return NULL;
+ return &p_hwfn->mcp_info->link_output;
+}
+
+struct qed_mcp_link_capabilities
+*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn || !p_hwfn->mcp_info)
+ return NULL;
+ return &p_hwfn->mcp_info->link_capabilities;
+}
+
+int qed_mcp_drain(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_NIG_DRAIN, 100,
+ &resp, &param);
+
+ /* Wait for the drain to complete before returning */
+ msleep(120);
+
+ return rc;
+}
+
+int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_flash_size)
+{
+ u32 flash_size;
+
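+ /* The FLASH_SIZE field is a power-of-two exponent; the flash size in
+ * bytes is 1 << (field + MCP_BYTES_PER_MBIT_SHIFT).
+ */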
+ flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
+ flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
+ MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
+ flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
+
+ *p_flash_size = flash_size;
+
+ return 0;
+}
+
+int
+qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_drv_version *p_ver)
+{
+ int rc = 0;
+ u32 param = 0, reply = 0, i;
+
+ if (!qed_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+ return -EBUSY;
+ }
+
+ DRV_MB_WR(p_hwfn, p_ptt, union_data.drv_version.version,
+ p_ver->version);
+ /* Copy version string to shmem */
+ for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / 4; i++) {
+ DRV_MB_WR(p_hwfn, p_ptt,
+ union_data.drv_version.name[i * sizeof(u32)],
+ *(u32 *)&p_ver->name[i * sizeof(u32)]);
+ }
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_VERSION, 0, &reply,
+ &param);
+ if (rc) {
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ return rc;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
new file mode 100644
index 000000000000..dbaae586b4a7
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -0,0 +1,369 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_MCP_H
+#define _QED_MCP_H
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include "qed_hsi.h"
+
+struct qed_mcp_link_speed_params {
+ bool autoneg;
+ u32 advertised_speeds; /* bitmask of DRV_SPEED_CAPABILITY */
+ u32 forced_speed; /* In Mb/s */
+};
+
+struct qed_mcp_link_pause_params {
+ bool autoneg;
+ bool forced_rx;
+ bool forced_tx;
+};
+
+struct qed_mcp_link_params {
+ struct qed_mcp_link_speed_params speed;
+ struct qed_mcp_link_pause_params pause;
+ u32 loopback_mode;
+};
+
+struct qed_mcp_link_capabilities {
+ u32 speed_capabilities;
+};
+
+struct qed_mcp_link_state {
+ bool link_up;
+
+ u32 speed; /* In Mb/s */
+ bool full_duplex;
+
+ bool an;
+ bool an_complete;
+ bool parallel_detection;
+ bool pfc_enabled;
+
+#define QED_LINK_PARTNER_SPEED_1G_HD BIT(0)
+#define QED_LINK_PARTNER_SPEED_1G_FD BIT(1)
+#define QED_LINK_PARTNER_SPEED_10G BIT(2)
+#define QED_LINK_PARTNER_SPEED_20G BIT(3)
+#define QED_LINK_PARTNER_SPEED_40G BIT(4)
+#define QED_LINK_PARTNER_SPEED_50G BIT(5)
+#define QED_LINK_PARTNER_SPEED_100G BIT(6)
+ u32 partner_adv_speed;
+
+ bool partner_tx_flow_ctrl_en;
+ bool partner_rx_flow_ctrl_en;
+
+#define QED_LINK_PARTNER_SYMMETRIC_PAUSE (1)
+#define QED_LINK_PARTNER_ASYMMETRIC_PAUSE (2)
+#define QED_LINK_PARTNER_BOTH_PAUSE (3)
+ u8 partner_adv_pause;
+
+ bool sfp_tx_fault;
+};
+
+struct qed_mcp_function_info {
+ u8 pause_on_host;
+
+ enum qed_pci_personality protocol;
+
+ u8 bandwidth_min;
+ u8 bandwidth_max;
+
+ u8 mac[ETH_ALEN];
+
+ u64 wwn_port;
+ u64 wwn_node;
+
+#define QED_MCP_VLAN_UNSET (0xffff)
+ u16 ovlan;
+};
+
+struct qed_mcp_nvm_common {
+ u32 offset;
+ u32 param;
+ u32 resp;
+ u32 cmd;
+};
+
+struct qed_mcp_drv_version {
+ u32 version;
+ u8 name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+/**
+ * @brief - returns the link params of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link params
+ */
+struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *);
+
+/**
+ * @brief - return the link state of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link state
+ */
+struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *);
+
+/**
+ * @brief - return the link capabilities of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link capabilities
+ */
+struct qed_mcp_link_capabilities
+ *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief Request the MFW to set the link according to 'link_input'.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param b_up - raise link if `true'. Reset link if `false'.
+ *
+ * @return int
+ */
+int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool b_up);
+
+/**
+ * @brief Get the management firmware version value
+ *
+ * @param cdev - qed dev pointer
+ * @param mfw_ver - mfw version value
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
+ u32 *mfw_ver);
+
+/**
+ * @brief Get media type value of the port.
+ *
+ * @param cdev - qed dev pointer
+ * @param media_type - media type value
+ *
+ * @return int -
+ *     0 - Operation was successful.
+ * -EBUSY - Operation failed
+ */
+int qed_mcp_get_media_type(struct qed_dev *cdev,
+ u32 *media_type);
+
+/**
+ * @brief General function for sending commands to the MCP
+ * mailbox. It acquires a mutex for the entire operation,
+ * from sending the request until the MCP response is
+ * received. The response is polled every 5 ms, for up to
+ * 5 seconds.
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param cmd - command to be sent to the MCP.
+ * @param param - Optional param
+ * @param o_mcp_resp - The MCP response code (exclude sequence).
+ * @param o_mcp_param - Optional parameter provided by the MCP
+ * response
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param);
+
+/**
+ * @brief - drains the nig, allowing completion to pass in case of pauses.
+ * (Should be called only from sleepable context)
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+int qed_mcp_drain(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief Get the flash size value
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_flash_size - flash size in bytes to be filled.
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_flash_size);
+
+/**
+ * @brief Send driver version to MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_ver - contains the version value and the protocol driver name
+ *
+ * @return int - 0 - operation was successful.
+ */
+int
+qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_drv_version *p_ver);
+
+/* Using hwfn number (and not pf_num) is required since in CMT mode,
+ * the same pf_num may be used by two different hwfns.
+ * TODO - this shouldn't really be in .h file, but until all fields
+ * required during hw-init will be placed in their correct place in shmem
+ * we need it in qed_dev.c [for reading the nvram reflection in shmem].
+ */
+#define MCP_PF_ID_BY_REL(p_hwfn, rel_pfid) (QED_IS_BB((p_hwfn)->cdev) ? \
+ ((rel_pfid) | \
+ ((p_hwfn)->abs_pf_id & 1) << 3) : \
+ rel_pfid)
+#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
+
+/* TODO - this is only correct as long as only BB is supported, and
+ * no port-swapping is implemented; Afterwards we'll need to fix it.
+ */
+#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
+ ((_p_hwfn)->cdev->num_ports_in_engines * 2))
+struct qed_mcp_info {
+ struct mutex mutex; /* MCP access lock */
+ u32 public_base;
+ u32 drv_mb_addr;
+ u32 mfw_mb_addr;
+ u32 port_addr;
+ u16 drv_mb_seq;
+ u16 drv_pulse_seq;
+ struct qed_mcp_link_params link_input;
+ struct qed_mcp_link_state link_output;
+ struct qed_mcp_link_capabilities link_capabilities;
+ struct qed_mcp_function_info func_info;
+ u8 *mfw_mb_cur;
+ u8 *mfw_mb_shadow;
+ u16 mfw_mb_length;
+ u16 mcp_hist;
+};
+
+/**
+ * @brief Initialize the interface with the MCP
+ *
+ * @param p_hwfn - HW func
+ * @param p_ptt - PTT required for register access
+ *
+ * @return int
+ */
+int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief Initialize the port interface with the MCP
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * Can only be called after `num_ports_in_engines' is set
+ */
+void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief Releases resources allocated during the init process.
+ *
+ * @param p_hwfn - HW func
+ *
+ * @return int
+ */
+int qed_mcp_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief This function is called from the DPC context. After
+ * pointing PTT to the mfw mb, check for events sent by the MCP
+ * to the driver and ack them. In case a critical event is
+ * detected, it will be handled here, otherwise the work will be
+ * queued to a sleepable work-queue.
+ *
+ * @param p_hwfn - HW function
+ * @param p_ptt - PTT required for register access
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief Sends a LOAD_REQ to the MFW, and in case operation
+ * succeed, returns whether this PF is the first on the
+ * chip/engine/port or function. This function should be
+ * called when driver is ready to accept MFW events after
+ * Storms initializations are done.
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param p_load_code - The MCP response param containing one
+ * of the following:
+ * FW_MSG_CODE_DRV_LOAD_ENGINE
+ * FW_MSG_CODE_DRV_LOAD_PORT
+ * FW_MSG_CODE_DRV_LOAD_FUNCTION
+ * @return int -
+ *         0 - Operation was successful.
+ * -EBUSY - Operation failed
+ */
+int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_load_code);
+
+/**
+ * @brief Read the MFW mailbox into Current buffer.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief - calls during init to read shmem of all function-related info.
+ *
+ * @param p_hwfn
+ *
+ * @return 0 upon success.
+ */
+int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief - Reset the MCP using mailbox command.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return 0 upon success.
+ */
+int qed_mcp_reset(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief indicates whether the MFW objects [under mcp_info] are accessible
+ *
+ * @param p_hwfn
+ *
+ * @return true iff MFW is running and mcp_info is initialized
+ */
+bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
new file mode 100644
index 000000000000..7a5ce5914ace
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -0,0 +1,366 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef REG_ADDR_H
+#define REG_ADDR_H
+
+#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT \
+ 0
+
+#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE ( \
+ 0xfff << 0)
+
+#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT \
+ 12
+
+#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE ( \
+ 0xfff << 12)
+
+#define CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT \
+ 24
+
+#define CDU_REG_CID_ADDR_PARAMS_NCIB ( \
+ 0xff << 24)
+
+#define XSDM_REG_OPERATION_GEN \
+ 0xf80408UL
+#define NIG_REG_RX_BRB_OUT_EN \
+ 0x500e18UL
+#define NIG_REG_STORM_OUT_EN \
+ 0x500e08UL
+#define PSWRQ2_REG_L2P_VALIDATE_VFID \
+ 0x240c50UL
+#define PGLUE_B_REG_USE_CLIENTID_IN_TAG \
+ 0x2aae04UL
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER \
+ 0x2aa16cUL
+#define BAR0_MAP_REG_MSDM_RAM \
+ 0x1d00000UL
+#define BAR0_MAP_REG_USDM_RAM \
+ 0x1d80000UL
+#define BAR0_MAP_REG_PSDM_RAM \
+ 0x1f00000UL
+#define BAR0_MAP_REG_TSDM_RAM \
+ 0x1c80000UL
+#define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \
+ 0x5011f4UL
+#define PRS_REG_SEARCH_TCP \
+ 0x1f0400UL
+#define PRS_REG_SEARCH_UDP \
+ 0x1f0404UL
+#define PRS_REG_SEARCH_FCOE \
+ 0x1f0408UL
+#define PRS_REG_SEARCH_ROCE \
+ 0x1f040cUL
+#define PRS_REG_SEARCH_OPENFLOW \
+ 0x1f0434UL
+#define TM_REG_PF_ENABLE_CONN \
+ 0x2c043cUL
+#define TM_REG_PF_ENABLE_TASK \
+ 0x2c0444UL
+#define TM_REG_PF_SCAN_ACTIVE_CONN \
+ 0x2c04fcUL
+#define TM_REG_PF_SCAN_ACTIVE_TASK \
+ 0x2c0500UL
+#define IGU_REG_LEADING_EDGE_LATCH \
+ 0x18082cUL
+#define IGU_REG_TRAILING_EDGE_LATCH \
+ 0x180830UL
+#define QM_REG_USG_CNT_PF_TX \
+ 0x2f2eacUL
+#define QM_REG_USG_CNT_PF_OTHER \
+ 0x2f2eb0UL
+#define DORQ_REG_PF_DB_ENABLE \
+ 0x100508UL
+#define QM_REG_PF_EN \
+ 0x2f2ea4UL
+#define TCFC_REG_STRONG_ENABLE_PF \
+ 0x2d0708UL
+#define CCFC_REG_STRONG_ENABLE_PF \
+ 0x2e0708UL
+#define PGLUE_B_REG_PGL_ADDR_88_F0 \
+ 0x2aa404UL
+#define PGLUE_B_REG_PGL_ADDR_8C_F0 \
+ 0x2aa408UL
+#define PGLUE_B_REG_PGL_ADDR_90_F0 \
+ 0x2aa40cUL
+#define PGLUE_B_REG_PGL_ADDR_94_F0 \
+ 0x2aa410UL
+#define PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR \
+ 0x2aa138UL
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ \
+ 0x2aa174UL
+#define MISC_REG_GEN_PURP_CR0 \
+ 0x008c80UL
+#define MCP_REG_SCRATCH \
+ 0xe20000UL
+#define CNIG_REG_NW_PORT_MODE_BB_B0 \
+ 0x218200UL
+#define MISCS_REG_CHIP_NUM \
+ 0x00976cUL
+#define MISCS_REG_CHIP_REV \
+ 0x009770UL
+#define MISCS_REG_CMT_ENABLED_FOR_PAIR \
+ 0x00971cUL
+#define MISCS_REG_CHIP_TEST_REG \
+ 0x009778UL
+#define MISCS_REG_CHIP_METAL \
+ 0x009774UL
+#define BRB_REG_HEADER_SIZE \
+ 0x340804UL
+#define BTB_REG_HEADER_SIZE \
+ 0xdb0804UL
+#define CAU_REG_LONG_TIMEOUT_THRESHOLD \
+ 0x1c0708UL
+#define CCFC_REG_ACTIVITY_COUNTER \
+ 0x2e8800UL
+#define CDU_REG_CID_ADDR_PARAMS \
+ 0x580900UL
+#define DBG_REG_CLIENT_ENABLE \
+ 0x010004UL
+#define DMAE_REG_INIT \
+ 0x00c000UL
+#define DORQ_REG_IFEN \
+ 0x100040UL
+#define GRC_REG_TIMEOUT_EN \
+ 0x050404UL
+#define IGU_REG_BLOCK_CONFIGURATION \
+ 0x180040UL
+#define MCM_REG_INIT \
+ 0x1200000UL
+#define MCP2_REG_DBG_DWORD_ENABLE \
+ 0x052404UL
+#define MISC_REG_PORT_MODE \
+ 0x008c00UL
+#define MISCS_REG_CLK_100G_MODE \
+ 0x009070UL
+#define MSDM_REG_ENABLE_IN1 \
+ 0xfc0004UL
+#define MSEM_REG_ENABLE_IN \
+ 0x1800004UL
+#define NIG_REG_CM_HDR \
+ 0x500840UL
+#define NCSI_REG_CONFIG \
+ 0x040200UL
+#define PBF_REG_INIT \
+ 0xd80000UL
+#define PTU_REG_ATC_INIT_ARRAY \
+ 0x560000UL
+#define PCM_REG_INIT \
+ 0x1100000UL
+#define PGLUE_B_REG_ADMIN_PER_PF_REGION \
+ 0x2a9000UL
+#define PRM_REG_DISABLE_PRM \
+ 0x230000UL
+#define PRS_REG_SOFT_RST \
+ 0x1f0000UL
+#define PSDM_REG_ENABLE_IN1 \
+ 0xfa0004UL
+#define PSEM_REG_ENABLE_IN \
+ 0x1600004UL
+#define PSWRQ_REG_DBG_SELECT \
+ 0x280020UL
+#define PSWRQ2_REG_CDUT_P_SIZE \
+ 0x24000cUL
+#define PSWHST_REG_DISCARD_INTERNAL_WRITES \
+ 0x2a0040UL
+#define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \
+ 0x29e050UL
+#define PSWRD_REG_DBG_SELECT \
+ 0x29c040UL
+#define PSWRD2_REG_CONF11 \
+ 0x29d064UL
+#define PSWWR_REG_USDM_FULL_TH \
+ 0x29a040UL
+#define PSWWR2_REG_CDU_FULL_TH2 \
+ 0x29b040UL
+#define QM_REG_MAXPQSIZE_0 \
+ 0x2f0434UL
+#define RSS_REG_RSS_INIT_EN \
+ 0x238804UL
+#define RDIF_REG_STOP_ON_ERROR \
+ 0x300040UL
+#define SRC_REG_SOFT_RST \
+ 0x23874cUL
+#define TCFC_REG_ACTIVITY_COUNTER \
+ 0x2d8800UL
+#define TCM_REG_INIT \
+ 0x1180000UL
+#define TM_REG_PXP_READ_DATA_FIFO_INIT \
+ 0x2c0014UL
+#define TSDM_REG_ENABLE_IN1 \
+ 0xfb0004UL
+#define TSEM_REG_ENABLE_IN \
+ 0x1700004UL
+#define TDIF_REG_STOP_ON_ERROR \
+ 0x310040UL
+#define UCM_REG_INIT \
+ 0x1280000UL
+#define UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \
+ 0x051004UL
+#define USDM_REG_ENABLE_IN1 \
+ 0xfd0004UL
+#define USEM_REG_ENABLE_IN \
+ 0x1900004UL
+#define XCM_REG_INIT \
+ 0x1000000UL
+#define XSDM_REG_ENABLE_IN1 \
+ 0xf80004UL
+#define XSEM_REG_ENABLE_IN \
+ 0x1400004UL
+#define YCM_REG_INIT \
+ 0x1080000UL
+#define YSDM_REG_ENABLE_IN1 \
+ 0xf90004UL
+#define YSEM_REG_ENABLE_IN \
+ 0x1500004UL
+#define XYLD_REG_SCBD_STRICT_PRIO \
+ 0x4c0000UL
+#define TMLD_REG_SCBD_STRICT_PRIO \
+ 0x4d0000UL
+#define MULD_REG_SCBD_STRICT_PRIO \
+ 0x4e0000UL
+#define YULD_REG_SCBD_STRICT_PRIO \
+ 0x4c8000UL
+#define MISC_REG_SHARED_MEM_ADDR \
+ 0x008c20UL
+#define DMAE_REG_GO_C0 \
+ 0x00c048UL
+#define DMAE_REG_GO_C1 \
+ 0x00c04cUL
+#define DMAE_REG_GO_C2 \
+ 0x00c050UL
+#define DMAE_REG_GO_C3 \
+ 0x00c054UL
+#define DMAE_REG_GO_C4 \
+ 0x00c058UL
+#define DMAE_REG_GO_C5 \
+ 0x00c05cUL
+#define DMAE_REG_GO_C6 \
+ 0x00c060UL
+#define DMAE_REG_GO_C7 \
+ 0x00c064UL
+#define DMAE_REG_GO_C8 \
+ 0x00c068UL
+#define DMAE_REG_GO_C9 \
+ 0x00c06cUL
+#define DMAE_REG_GO_C10 \
+ 0x00c070UL
+#define DMAE_REG_GO_C11 \
+ 0x00c074UL
+#define DMAE_REG_GO_C12 \
+ 0x00c078UL
+#define DMAE_REG_GO_C13 \
+ 0x00c07cUL
+#define DMAE_REG_GO_C14 \
+ 0x00c080UL
+#define DMAE_REG_GO_C15 \
+ 0x00c084UL
+#define DMAE_REG_GO_C16 \
+ 0x00c088UL
+#define DMAE_REG_GO_C17 \
+ 0x00c08cUL
+#define DMAE_REG_GO_C18 \
+ 0x00c090UL
+#define DMAE_REG_GO_C19 \
+ 0x00c094UL
+#define DMAE_REG_GO_C20 \
+ 0x00c098UL
+#define DMAE_REG_GO_C21 \
+ 0x00c09cUL
+#define DMAE_REG_GO_C22 \
+ 0x00c0a0UL
+#define DMAE_REG_GO_C23 \
+ 0x00c0a4UL
+#define DMAE_REG_GO_C24 \
+ 0x00c0a8UL
+#define DMAE_REG_GO_C25 \
+ 0x00c0acUL
+#define DMAE_REG_GO_C26 \
+ 0x00c0b0UL
+#define DMAE_REG_GO_C27 \
+ 0x00c0b4UL
+#define DMAE_REG_GO_C28 \
+ 0x00c0b8UL
+#define DMAE_REG_GO_C29 \
+ 0x00c0bcUL
+#define DMAE_REG_GO_C30 \
+ 0x00c0c0UL
+#define DMAE_REG_GO_C31 \
+ 0x00c0c4UL
+#define DMAE_REG_CMD_MEM \
+ 0x00c800UL
+#define QM_REG_MAXPQSIZETXSEL_0 \
+ 0x2f0440UL
+#define QM_REG_SDMCMDREADY \
+ 0x2f1e10UL
+#define QM_REG_SDMCMDADDR \
+ 0x2f1e04UL
+#define QM_REG_SDMCMDDATALSB \
+ 0x2f1e08UL
+#define QM_REG_SDMCMDDATAMSB \
+ 0x2f1e0cUL
+#define QM_REG_SDMCMDGO \
+ 0x2f1e14UL
+#define QM_REG_RLPFCRD \
+ 0x2f4d80UL
+#define QM_REG_RLPFINCVAL \
+ 0x2f4c80UL
+#define QM_REG_RLGLBLCRD \
+ 0x2f4400UL
+#define QM_REG_RLGLBLINCVAL \
+ 0x2f3400UL
+#define IGU_REG_ATTENTION_ENABLE \
+ 0x18083cUL
+#define IGU_REG_ATTN_MSG_ADDR_L \
+ 0x180820UL
+#define IGU_REG_ATTN_MSG_ADDR_H \
+ 0x180824UL
+#define MISC_REG_AEU_GENERAL_ATTN_0 \
+ 0x008400UL
+#define CAU_REG_SB_ADDR_MEMORY \
+ 0x1c8000UL
+#define CAU_REG_SB_VAR_MEMORY \
+ 0x1c6000UL
+#define CAU_REG_PI_MEMORY \
+ 0x1d0000UL
+#define IGU_REG_PF_CONFIGURATION \
+ 0x180800UL
+#define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \
+ 0x00849cUL
+#define MISC_REG_AEU_MASK_ATTN_IGU \
+ 0x008494UL
+#define IGU_REG_CLEANUP_STATUS_0 \
+ 0x180980UL
+#define IGU_REG_CLEANUP_STATUS_1 \
+ 0x180a00UL
+#define IGU_REG_CLEANUP_STATUS_2 \
+ 0x180a80UL
+#define IGU_REG_CLEANUP_STATUS_3 \
+ 0x180b00UL
+#define IGU_REG_CLEANUP_STATUS_4 \
+ 0x180b80UL
+#define IGU_REG_COMMAND_REG_32LSB_DATA \
+ 0x180840UL
+#define IGU_REG_COMMAND_REG_CTRL \
+ 0x180848UL
+#define IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN ( \
+ 0x1 << 1)
+#define IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN ( \
+ 0x1 << 0)
+#define IGU_REG_MAPPING_MEMORY \
+ 0x184000UL
+#define MISCS_REG_GENERIC_POR_0 \
+ 0x0096d4UL
+#define MCP_REG_NVM_CFG4 \
+ 0xe0642cUL
+#define MCP_REG_NVM_CFG4_FLASH_SIZE ( \
+ 0x7 << 0)
+#define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \
+ 0
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
new file mode 100644
index 000000000000..31a1f1eb4f56
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -0,0 +1,360 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_SP_H
+#define _QED_SP_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/qed_chain.h>
+#include "qed.h"
+#include "qed_hsi.h"
+
+enum spq_mode {
+ QED_SPQ_MODE_BLOCK, /* Client will poll a designated mem. address */
+ QED_SPQ_MODE_CB, /* Client supplies a callback */
+ QED_SPQ_MODE_EBLOCK, /* QED should block until completion */
+};
+
+struct qed_spq_comp_cb {
+ void (*function)(struct qed_hwfn *,
+ void *,
+ union event_ring_data *,
+ u8 fw_return_code);
+ void *cookie;
+};
+
+/**
+ * @brief qed_eth_cqe_completion - handles the completion of a
+ * ramrod on the cqe ring
+ *
+ * @param p_hwfn
+ * @param cqe
+ *
+ * @return int
+ */
+int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
+ struct eth_slow_path_rx_cqe *cqe);
+
+/**
+ * @file
+ *
+ * QED Slow-hwfn queue interface
+ */
+
+union ramrod_data {
+ struct pf_start_ramrod_data pf_start;
+ struct rx_queue_start_ramrod_data rx_queue_start;
+ struct rx_queue_update_ramrod_data rx_queue_update;
+ struct rx_queue_stop_ramrod_data rx_queue_stop;
+ struct tx_queue_start_ramrod_data tx_queue_start;
+ struct tx_queue_stop_ramrod_data tx_queue_stop;
+ struct vport_start_ramrod_data vport_start;
+ struct vport_stop_ramrod_data vport_stop;
+ struct vport_update_ramrod_data vport_update;
+ struct vport_filter_update_ramrod_data vport_filter_update;
+};
+
+#define EQ_MAX_CREDIT 0xffffffff
+
+enum spq_priority {
+ QED_SPQ_PRIORITY_NORMAL,
+ QED_SPQ_PRIORITY_HIGH,
+};
+
+union qed_spq_req_comp {
+ struct qed_spq_comp_cb cb;
+ u64 *done_addr;
+};
+
+struct qed_spq_comp_done {
+ u64 done;
+ u8 fw_return_code;
+};
+
+struct qed_spq_entry {
+ struct list_head list;
+
+ u8 flags;
+
+ /* HSI slow path element */
+ struct slow_path_element elem;
+
+ union ramrod_data ramrod;
+
+ enum spq_priority priority;
+
+ /* pending queue for this entry */
+ struct list_head *queue;
+
+ enum spq_mode comp_mode;
+ struct qed_spq_comp_cb comp_cb;
+ struct qed_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */
+};
+
+struct qed_eq {
+ struct qed_chain chain;
+ u8 eq_sb_index; /* index within the SB */
+ __le16 *p_fw_cons; /* ptr to index value */
+};
+
+struct qed_consq {
+ struct qed_chain chain;
+};
+
+struct qed_spq {
+ spinlock_t lock; /* SPQ lock */
+
+ struct list_head unlimited_pending;
+ struct list_head pending;
+ struct list_head completion_pending;
+ struct list_head free_pool;
+
+ struct qed_chain chain;
+
+ /* allocated dma-able memory for spq entries (+ramrod data) */
+ dma_addr_t p_phys;
+ struct qed_spq_entry *p_virt;
+
+ /* Used as index for completions (returns on EQ by FW) */
+ u16 echo_idx;
+
+ /* Statistics */
+ u32 unlimited_pending_count;
+ u32 normal_count;
+ u32 high_count;
+ u32 comp_sent_count;
+ u32 comp_count;
+
+ u32 cid;
+};
+
+/**
+ * @brief qed_spq_post - Posts a Slow hwfn request to FW, or queues it
+ * on the pending list if it cannot be posted immediately.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ * @param fw_return_code
+ *
+ * @return int
+ */
+int qed_spq_post(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent,
+ u8 *fw_return_code);
+
+/**
+ * @brief qed_spq_alloc - Allocates & initializes the SPQ and EQ.
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_spq_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_spq_setup - Reset the SPQ to its start state.
+ *
+ * @param p_hwfn
+ */
+void qed_spq_setup(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_spq_free - Deallocates the given SPQ struct.
+ *
+ * @param p_hwfn
+ */
+void qed_spq_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_spq_get_entry - Obtain an entry from the spq
+ * free pool list.
+ *
+ * @param p_hwfn
+ * @param pp_ent
+ *
+ * @return int
+ */
+int
+qed_spq_get_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry **pp_ent);
+
+/**
+ * @brief qed_spq_return_entry - Return an entry to spq free
+ * pool list
+ *
+ * @param p_hwfn
+ * @param p_ent
+ */
+void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent);
+
+/**
+ * @brief qed_eq_alloc - Allocates & initializes an EQ struct
+ *
+ * @param p_hwfn
+ * @param num_elem number of elements in the eq
+ *
+ * @return struct qed_eq* - a newly allocated structure; NULL upon error.
+ */
+struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
+ u16 num_elem);
+
+/**
+ * @brief qed_eq_setup - Reset the EQ to its start state.
+ *
+ * @param p_hwfn
+ * @param p_eq
+ */
+void qed_eq_setup(struct qed_hwfn *p_hwfn,
+ struct qed_eq *p_eq);
+
+/**
+ * @brief qed_eq_free - Deallocates the given EQ struct.
+ *
+ * @param p_hwfn
+ * @param p_eq
+ */
+void qed_eq_free(struct qed_hwfn *p_hwfn,
+ struct qed_eq *p_eq);
+
+/**
+ * @brief qed_eq_prod_update - update the FW with default EQ producer
+ *
+ * @param p_hwfn
+ * @param prod
+ */
+void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
+ u16 prod);
+
+/**
+ * @brief qed_eq_completion - Completes currently pending EQ elements
+ *
+ * @param p_hwfn
+ * @param cookie
+ *
+ * @return int
+ */
+int qed_eq_completion(struct qed_hwfn *p_hwfn,
+ void *cookie);
+
+/**
+ * @brief qed_spq_completion - Completes a single event
+ *
+ * @param p_hwfn
+ * @param echo - echo value from cookie (used for determining completion)
+ * @param p_data - data from cookie (used in callback function if applicable)
+ *
+ * @return int
+ */
+int qed_spq_completion(struct qed_hwfn *p_hwfn,
+ __le16 echo,
+ u8 fw_return_code,
+ union event_ring_data *p_data);
+
+/**
+ * @brief qed_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ
+ *
+ * @param p_hwfn
+ *
+ * @return u32 - SPQ CID
+ */
+u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_consq_alloc - Allocates & initializes a ConsQ
+ * struct
+ *
+ * @param p_hwfn
+ *
+ * @return struct qed_consq* - a newly allocated structure; NULL upon error.
+ */
+struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_consq_setup - Reset the ConsQ to its start
+ * state.
+ *
+ * @param p_hwfn
+ * @param p_consq
+ */
+void qed_consq_setup(struct qed_hwfn *p_hwfn,
+ struct qed_consq *p_consq);
+
+/**
+ * @brief qed_consq_free - deallocates the given ConsQ struct.
+ *
+ * @param p_hwfn
+ * @param p_consq
+ */
+void qed_consq_free(struct qed_hwfn *p_hwfn,
+ struct qed_consq *p_consq);
+
+/**
+ * @file
+ *
+ * @brief Slow-hwfn low-level commands (Ramrods) function definitions.
+ */
+
+#define QED_SP_EQ_COMPLETION 0x01
+#define QED_SP_CQE_COMPLETION 0x02
+
+struct qed_sp_init_request_params {
+ size_t ramrod_data_size;
+ enum spq_mode comp_mode;
+ struct qed_spq_comp_cb *p_comp_data;
+};
+
+int qed_sp_init_request(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry **pp_ent,
+ u32 cid,
+ u16 opaque_fid,
+ u8 cmd,
+ u8 protocol,
+ struct qed_sp_init_request_params *p_params);
+
+/**
+ * @brief qed_sp_pf_start - PF Function Start Ramrod
+ *
+ * This ramrod is sent to initialize a physical function (PF). It will
+ * configure the function related parameters and write its completion to the
+ * event ring specified in the parameters.
+ *
+ * Ramrods complete on the common event ring for the PF. This ring is
+ * allocated by the driver on host memory and its parameters are written
+ * to the internal RAM of the UStorm by the Function Start Ramrod.
+ *
+ * @param p_hwfn
+ * @param mode
+ *
+ * @return int
+ */
+
+int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+ enum mf_mode mode);
+
+/**
+ * @brief qed_sp_pf_stop - PF Function Stop Ramrod
+ *
+ * This ramrod is sent to close a Physical Function (PF). It is the last ramrod
+ * sent and the last completion written to the PFs Event Ring. This ramrod also
+ * deletes the context for the Slowhwfn connection on this PF.
+ *
+ * @note Not required for first packet.
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+
+int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
new file mode 100644
index 000000000000..6f7879136633
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -0,0 +1,170 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include "qed.h"
+#include <linux/qed/qed_chain.h>
+#include "qed_cxt.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+int qed_sp_init_request(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry **pp_ent,
+ u32 cid,
+ u16 opaque_fid,
+ u8 cmd,
+ u8 protocol,
+ struct qed_sp_init_request_params *p_params)
+{
+ int rc = -EINVAL;
+ struct qed_spq_entry *p_ent = NULL;
+ u32 opaque_cid = opaque_fid << 16 | cid;
+
+ if (!pp_ent)
+ return -ENOMEM;
+
+ rc = qed_spq_get_entry(p_hwfn, pp_ent);
+
+ if (rc != 0)
+ return rc;
+
+ p_ent = *pp_ent;
+
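+ /* The ramrod header CID carries the opaque FID in its upper 16 bits
+ * alongside the connection ID.
+ */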
+ p_ent->elem.hdr.cid = cpu_to_le32(opaque_cid);
+ p_ent->elem.hdr.cmd_id = cmd;
+ p_ent->elem.hdr.protocol_id = protocol;
+
+ p_ent->priority = QED_SPQ_PRIORITY_NORMAL;
+ p_ent->comp_mode = p_params->comp_mode;
+ p_ent->comp_done.done = 0;
+
+ switch (p_ent->comp_mode) {
+ case QED_SPQ_MODE_EBLOCK:
+ p_ent->comp_cb.cookie = &p_ent->comp_done;
+ break;
+
+ case QED_SPQ_MODE_BLOCK:
+ if (!p_params->p_comp_data)
+ return -EINVAL;
+
+ p_ent->comp_cb.cookie = p_params->p_comp_data->cookie;
+ break;
+
+ case QED_SPQ_MODE_CB:
+ if (!p_params->p_comp_data)
+ p_ent->comp_cb.function = NULL;
+ else
+ p_ent->comp_cb = *p_params->p_comp_data;
+ break;
+
+ default:
+ DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
+ p_ent->comp_mode);
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
+ opaque_cid, cmd, protocol,
+ (unsigned long)&p_ent->ramrod,
+ D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
+ QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
+ "MODE_CB"));
+ if (p_params->ramrod_data_size)
+ memset(&p_ent->ramrod, 0, p_params->ramrod_data_size);
+
+ return 0;
+}
+
+int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+ enum mf_mode mode)
+{
+ struct qed_sp_init_request_params params;
+ struct pf_start_ramrod_data *p_ramrod = NULL;
+ u16 sb = qed_int_get_sp_sb_id(p_hwfn);
+ u8 sb_index = p_hwfn->p_eq->eq_sb_index;
+ struct qed_spq_entry *p_ent = NULL;
+ int rc = -EINVAL;
+
+ /* update initial eq producer */
+ qed_eq_prod_update(p_hwfn,
+ qed_chain_get_prod_idx(&p_hwfn->p_eq->chain));
+
+ memset(&params, 0, sizeof(params));
+ params.ramrod_data_size = sizeof(*p_ramrod);
+ params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn,
+ &p_ent,
+ qed_spq_get_cid(p_hwfn),
+ p_hwfn->hw_info.opaque_fid,
+ COMMON_RAMROD_PF_START,
+ PROTOCOLID_COMMON,
+ &params);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.pf_start;
+
+ p_ramrod->event_ring_sb_id = cpu_to_le16(sb);
+ p_ramrod->event_ring_sb_index = sb_index;
+ p_ramrod->path_id = QED_PATH_ID(p_hwfn);
+ p_ramrod->dont_log_ramrods = 0;
+ p_ramrod->log_type_mask = cpu_to_le16(0xf);
+ p_ramrod->mf_mode = mode;
+ p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
+
+ /* Place EQ address in RAMROD */
+ p_ramrod->event_ring_pbl_addr.hi =
+ DMA_HI_LE(p_hwfn->p_eq->chain.pbl.p_phys_table);
+ p_ramrod->event_ring_pbl_addr.lo =
+ DMA_LO_LE(p_hwfn->p_eq->chain.pbl.p_phys_table);
+ p_ramrod->event_ring_num_pages = (u8)p_hwfn->p_eq->chain.page_cnt;
+
+ p_ramrod->consolid_q_pbl_addr.hi =
+ DMA_HI_LE(p_hwfn->p_consq->chain.pbl.p_phys_table);
+ p_ramrod->consolid_q_pbl_addr.lo =
+ DMA_LO_LE(p_hwfn->p_consq->chain.pbl.p_phys_table);
+
+ p_hwfn->hw_info.personality = PERSONALITY_ETH;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "Setting event_ring_sb [id %04x index %02x], mf [%s] outer_tag [%d]\n",
+ sb, sb_index,
+ (p_ramrod->mf_mode == SF) ? "SF" : "Multi-Pf",
+ p_ramrod->outer_tag);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
+{
+ struct qed_sp_init_request_params params;
+ struct qed_spq_entry *p_ent = NULL;
+ int rc = -EINVAL;
+
+ memset(&params, 0, sizeof(params));
+ params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, qed_spq_get_cid(p_hwfn),
+ p_hwfn->hw_info.opaque_fid,
+ COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
+ &params);
+ if (rc)
+ return rc;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
new file mode 100644
index 000000000000..7c0b8459666e
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -0,0 +1,860 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+/***************************************************************************
+* Structures & Definitions
+***************************************************************************/
+
+#define SPQ_HIGH_PRI_RESERVE_DEFAULT (1)
+#define SPQ_BLOCK_SLEEP_LENGTH (1000)
+
+/***************************************************************************
+* Blocking Imp. (BLOCK/EBLOCK mode)
+***************************************************************************/
+static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
+ void *cookie,
+ union event_ring_data *data,
+ u8 fw_return_code)
+{
+ struct qed_spq_comp_done *comp_done;
+
+ comp_done = (struct qed_spq_comp_done *)cookie;
+
+ comp_done->done = 0x1;
+ comp_done->fw_return_code = fw_return_code;
+
+ /* make update visible to waiting thread */
+ smp_wmb();
+}
+
+static int qed_spq_block(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent,
+ u8 *p_fw_ret)
+{
+ int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
+ struct qed_spq_comp_done *comp_done;
+ int rc;
+
+ comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
+ while (sleep_count) {
+ /* validate we receive completion update */
+ smp_rmb();
+ if (comp_done->done == 1) {
+ if (p_fw_ret)
+ *p_fw_ret = comp_done->fw_return_code;
+ return 0;
+ }
+ usleep_range(5000, 10000);
+ sleep_count--;
+ }
+
+ DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
+ rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc != 0)
+ DP_NOTICE(p_hwfn, "MCP drain failed\n");
+
+ /* Retry after drain */
+ sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
+ while (sleep_count) {
+ /* validate we receive completion update */
+ smp_rmb();
+ if (comp_done->done == 1) {
+ if (p_fw_ret)
+ *p_fw_ret = comp_done->fw_return_code;
+ return 0;
+ }
+ usleep_range(5000, 10000);
+ sleep_count--;
+ }
+
+ if (comp_done->done == 1) {
+ if (p_fw_ret)
+ *p_fw_ret = comp_done->fw_return_code;
+ return 0;
+ }
+
+ DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");
+
+ return -EBUSY;
+}
+
+/***************************************************************************
+* SPQ entries inner API
+***************************************************************************/
+static int
+qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent)
+{
+ p_ent->elem.hdr.echo = 0;
+ p_hwfn->p_spq->echo_idx++;
+ p_ent->flags = 0;
+
+ switch (p_ent->comp_mode) {
+ case QED_SPQ_MODE_EBLOCK:
+ case QED_SPQ_MODE_BLOCK:
+ p_ent->comp_cb.function = qed_spq_blocking_cb;
+ break;
+ case QED_SPQ_MODE_CB:
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
+ p_ent->comp_mode);
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
+ p_ent->elem.hdr.cid,
+ p_ent->elem.hdr.cmd_id,
+ p_ent->elem.hdr.protocol_id,
+ p_ent->elem.data_ptr.hi,
+ p_ent->elem.data_ptr.lo,
+ D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
+ QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
+ "MODE_CB"));
+
+ return 0;
+}
+
+/***************************************************************************
+* HSI access
+***************************************************************************/
+static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
+ struct qed_spq *p_spq)
+{
+ u16 pq;
+ struct qed_cxt_info cxt_info;
+ struct core_conn_context *p_cxt;
+ union qed_qm_pq_params pq_params;
+ int rc;
+
+ cxt_info.iid = p_spq->cid;
+
+ rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
+
+ if (rc < 0) {
+ DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
+ p_spq->cid);
+ return;
+ }
+
+ p_cxt = cxt_info.p_cxt;
+
+ SET_FIELD(p_cxt->xstorm_ag_context.flags10,
+ XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+ SET_FIELD(p_cxt->xstorm_ag_context.flags1,
+ XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+ SET_FIELD(p_cxt->xstorm_ag_context.flags9,
+ XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+
+ /* QM physical queue */
+ memset(&pq_params, 0, sizeof(pq_params));
+ pq_params.core.tc = LB_TC;
+ pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
+ p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);
+
+ p_cxt->xstorm_st_context.spq_base_lo =
+ DMA_LO_LE(p_spq->chain.p_phys_addr);
+ p_cxt->xstorm_st_context.spq_base_hi =
+ DMA_HI_LE(p_spq->chain.p_phys_addr);
+
+ p_cxt->xstorm_st_context.consolid_base_addr.lo =
+ DMA_LO_LE(p_hwfn->p_consq->chain.p_phys_addr);
+ p_cxt->xstorm_st_context.consolid_base_addr.hi =
+ DMA_HI_LE(p_hwfn->p_consq->chain.p_phys_addr);
+}
+
+static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
+ struct qed_spq *p_spq,
+ struct qed_spq_entry *p_ent)
+{
+ struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
+ struct slow_path_element *elem;
+ struct core_db_data db;
+
+ elem = qed_chain_produce(p_chain);
+ if (!elem) {
+ DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
+ return -EINVAL;
+ }
+
+ *elem = p_ent->elem; /* struct assignment */
+
+ /* send a doorbell on the slow hwfn session */
+ memset(&db, 0, sizeof(db));
+ SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
+ SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
+ SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_CORE_SPQ_PROD_CMD);
+ db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
+
+ /* validate producer is up to date */
+ rmb();
+
+ db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
+
+ /* do not reorder */
+ barrier();
+
+ DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
+
+ /* make sure the doorbell is rung */
+ mmiowb();
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
+ qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
+ p_spq->cid, db.params, db.agg_flags,
+ qed_chain_get_prod_idx(p_chain));
+
+ return 0;
+}
+
+/***************************************************************************
+* Asynchronous events
+***************************************************************************/
+static int
+qed_async_event_completion(struct qed_hwfn *p_hwfn,
+ struct event_ring_entry *p_eqe)
+{
+ DP_NOTICE(p_hwfn,
+ "Unknown Async completion for protocol: %d\n",
+ p_eqe->protocol_id);
+ return -EINVAL;
+}
+
+/***************************************************************************
+* EQ API
+***************************************************************************/
+void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
+ u16 prod)
+{
+ u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
+
+ REG_WR16(p_hwfn, addr, prod);
+
+ /* keep prod updates ordered */
+ mmiowb();
+}
+
+int qed_eq_completion(struct qed_hwfn *p_hwfn,
+ void *cookie)
+
+{
+ struct qed_eq *p_eq = cookie;
+ struct qed_chain *p_chain = &p_eq->chain;
+ int rc = 0;
+
+ /* take a snapshot of the FW consumer */
+ u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
+
+ /* Guarantee that the fw_cons index we use points to a usable
+ * element (i.e. skip the chain's per-page reserved elements),
+ * so the consumer macros stay consistent.
+ */
+ if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
+ qed_chain_get_usable_per_page(p_chain))
+ fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);
+
+ /* Complete current segment of eq entries */
+ while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
+ struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);
+
+ if (!p_eqe) {
+ rc = -EINVAL;
+ break;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
+ p_eqe->opcode,
+ p_eqe->protocol_id,
+ p_eqe->reserved0,
+ le16_to_cpu(p_eqe->echo),
+ p_eqe->fw_return_code,
+ p_eqe->flags);
+
+ if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
+ if (qed_async_event_completion(p_hwfn, p_eqe))
+ rc = -EINVAL;
+ } else if (qed_spq_completion(p_hwfn,
+ p_eqe->echo,
+ p_eqe->fw_return_code,
+ &p_eqe->data)) {
+ rc = -EINVAL;
+ }
+
+ qed_chain_recycle_consumed(p_chain);
+ }
+
+ qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
+
+ return rc;
+}
+
+struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
+ u16 num_elem)
+{
+ struct qed_eq *p_eq;
+
+ /* Allocate EQ struct */
+ p_eq = kzalloc(sizeof(*p_eq), GFP_ATOMIC);
+ if (!p_eq) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
+ return NULL;
+ }
+
+ /* Allocate and initialize EQ chain */
+ if (qed_chain_alloc(p_hwfn->cdev,
+ QED_CHAIN_USE_TO_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ num_elem,
+ sizeof(union event_ring_element),
+ &p_eq->chain)) {
+ DP_NOTICE(p_hwfn, "Failed to allocate eq chain\n");
+ goto eq_allocate_fail;
+ }
+
+ /* register EQ completion on the SP SB */
+ qed_int_register_cb(p_hwfn,
+ qed_eq_completion,
+ p_eq,
+ &p_eq->eq_sb_index,
+ &p_eq->p_fw_cons);
+
+ return p_eq;
+
+eq_allocate_fail:
+ qed_eq_free(p_hwfn, p_eq);
+ return NULL;
+}
+
+void qed_eq_setup(struct qed_hwfn *p_hwfn,
+ struct qed_eq *p_eq)
+{
+ qed_chain_reset(&p_eq->chain);
+}
+
+void qed_eq_free(struct qed_hwfn *p_hwfn,
+ struct qed_eq *p_eq)
+{
+ if (!p_eq)
+ return;
+ qed_chain_free(p_hwfn->cdev, &p_eq->chain);
+ kfree(p_eq);
+}
+
+/***************************************************************************
+* CQE API - manipulate EQ functionality
+***************************************************************************/
+static int qed_cqe_completion(
+ struct qed_hwfn *p_hwfn,
+ struct eth_slow_path_rx_cqe *cqe,
+ enum protocol_type protocol)
+{
+ /* @@@tmp - it's possible we'll eventually want to handle some
+ * actual commands that can arrive here, but for now this is only
+ * used to complete the ramrod using the echo value on the cqe
+ */
+ return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
+}
+
+int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
+ struct eth_slow_path_rx_cqe *cqe)
+{
+ int rc;
+
+ rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
+ if (rc)
+ DP_NOTICE(p_hwfn,
+ "Failed to handle RXQ CQE [cmd 0x%02x]\n",
+ cqe->ramrod_cmd_id);
+
+ return rc;
+}
+
+/***************************************************************************
+* Slow hwfn Queue (spq)
+***************************************************************************/
+void qed_spq_setup(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+ struct qed_spq_entry *p_virt = NULL;
+ dma_addr_t p_phys = 0;
+ unsigned int i = 0;
+
+ INIT_LIST_HEAD(&p_spq->pending);
+ INIT_LIST_HEAD(&p_spq->completion_pending);
+ INIT_LIST_HEAD(&p_spq->free_pool);
+ INIT_LIST_HEAD(&p_spq->unlimited_pending);
+ spin_lock_init(&p_spq->lock);
+
+ /* SPQ empty pool */
+ p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
+ p_virt = p_spq->p_virt;
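+ /* Each entry's data pointer refers to its own 'ramrod' member, so the
+ * firmware can fetch the ramrod data directly from the entry.
+ */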
+
+ for (i = 0; i < p_spq->chain.capacity; i++) {
+ p_virt->elem.data_ptr.hi = DMA_HI_LE(p_phys);
+ p_virt->elem.data_ptr.lo = DMA_LO_LE(p_phys);
+
+ list_add_tail(&p_virt->list, &p_spq->free_pool);
+
+ p_virt++;
+ p_phys += sizeof(struct qed_spq_entry);
+ }
+
+ /* Statistics */
+ p_spq->normal_count = 0;
+ p_spq->comp_count = 0;
+ p_spq->comp_sent_count = 0;
+ p_spq->unlimited_pending_count = 0;
+ p_spq->echo_idx = 0;
+
+ /* SPQ cid, cannot fail */
+ qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
+ qed_spq_hw_initialize(p_hwfn, p_spq);
+
+ /* reset the chain itself */
+ qed_chain_reset(&p_spq->chain);
+}
+
+int qed_spq_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq *p_spq = NULL;
+ dma_addr_t p_phys = 0;
+ struct qed_spq_entry *p_virt = NULL;
+
+ /* SPQ struct */
+ p_spq =
+ kzalloc(sizeof(struct qed_spq), GFP_ATOMIC);
+ if (!p_spq) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
+ return -ENOMEM;
+ }
+
+ /* SPQ ring */
+ if (qed_chain_alloc(p_hwfn->cdev,
+ QED_CHAIN_USE_TO_PRODUCE,
+ QED_CHAIN_MODE_SINGLE,
+ 0, /* N/A when the mode is SINGLE */
+ sizeof(struct slow_path_element),
+ &p_spq->chain)) {
+ DP_NOTICE(p_hwfn, "Failed to allocate spq chain\n");
+ goto spq_allocate_fail;
+ }
+
+ /* allocate and fill the SPQ elements (incl. ramrod data list) */
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_spq->chain.capacity *
+ sizeof(struct qed_spq_entry),
+ &p_phys,
+ GFP_KERNEL);
+
+ if (!p_virt)
+ goto spq_allocate_fail;
+
+ p_spq->p_virt = p_virt;
+ p_spq->p_phys = p_phys;
+ p_hwfn->p_spq = p_spq;
+
+ return 0;
+
+spq_allocate_fail:
+ qed_chain_free(p_hwfn->cdev, &p_spq->chain);
+ kfree(p_spq);
+ return -ENOMEM;
+}
+
+void qed_spq_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+
+ if (!p_spq)
+ return;
+
+ if (p_spq->p_virt)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_spq->chain.capacity *
+ sizeof(struct qed_spq_entry),
+ p_spq->p_virt,
+ p_spq->p_phys);
+
+ qed_chain_free(p_hwfn->cdev, &p_spq->chain);
+ kfree(p_spq);
+}
+
+int
+qed_spq_get_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry **pp_ent)
+{
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+ struct qed_spq_entry *p_ent = NULL;
+ int rc = 0;
+
+ spin_lock_bh(&p_spq->lock);
+
+ if (list_empty(&p_spq->free_pool)) {
+ p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
+ if (!p_ent) {
+ rc = -ENOMEM;
+ goto out_unlock;
+ }
+ p_ent->queue = &p_spq->unlimited_pending;
+ } else {
+ p_ent = list_first_entry(&p_spq->free_pool,
+ struct qed_spq_entry,
+ list);
+ list_del(&p_ent->list);
+ p_ent->queue = &p_spq->pending;
+ }
+
+ *pp_ent = p_ent;
+
+out_unlock:
+ spin_unlock_bh(&p_spq->lock);
+ return rc;
+}
+
+/* Locked variant; Should be called while the SPQ lock is taken */
+static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent)
+{
+ list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
+}
+
+void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent)
+{
+ spin_lock_bh(&p_hwfn->p_spq->lock);
+ __qed_spq_return_entry(p_hwfn, p_ent);
+ spin_unlock_bh(&p_hwfn->p_spq->lock);
+}
+
+/**
+ * @brief qed_spq_add_entry - adds a new entry to the pending
+ * list. Should be called while the SPQ lock is held.
+ *
+ * Adds an entry to the pending list if there is room (an empty
+ * element is available in the free_pool), or else places the
+ * entry in the unlimited_pending pool.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ * @param priority
+ *
+ * @return int
+ */
+static int
+qed_spq_add_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent,
+ enum spq_priority priority)
+{
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+
+ if (p_ent->queue == &p_spq->unlimited_pending) {
+ struct qed_spq_entry *p_en2;
+
+ if (list_empty(&p_spq->free_pool)) {
+ list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
+ p_spq->unlimited_pending_count++;
+
+ return 0;
+ }
+
+ p_en2 = list_first_entry(&p_spq->free_pool,
+ struct qed_spq_entry,
+ list);
+ list_del(&p_en2->list);
+
+ /* Struct assignment */
+ *p_en2 = *p_ent;
+
+ kfree(p_ent);
+
+ p_ent = p_en2;
+ }
+
+ /* entry is to be placed in 'pending' queue */
+ switch (priority) {
+ case QED_SPQ_PRIORITY_NORMAL:
+ list_add_tail(&p_ent->list, &p_spq->pending);
+ p_spq->normal_count++;
+ break;
+ case QED_SPQ_PRIORITY_HIGH:
+ list_add(&p_ent->list, &p_spq->pending);
+ p_spq->high_count++;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/***************************************************************************
+* Accessor
+***************************************************************************/
+u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn->p_spq)
+ return 0xffffffff; /* illegal */
+ return p_hwfn->p_spq->cid;
+}
+
+/***************************************************************************
+* Posting new Ramrods
+***************************************************************************/
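+/* Post entries from the given list into the chain, keeping 'keep_reserve'
+ * chain elements free (e.g. reserved for high-priority ramrods).
+ */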
+static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
+ struct list_head *head,
+ u32 keep_reserve)
+{
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+ int rc;
+
+ while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
+ !list_empty(head)) {
+ struct qed_spq_entry *p_ent =
+ list_first_entry(head, struct qed_spq_entry, list);
+ list_del(&p_ent->list);
+ list_add_tail(&p_ent->list, &p_spq->completion_pending);
+ p_spq->comp_sent_count++;
+
+ rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
+ if (rc) {
+ list_del(&p_ent->list);
+ __qed_spq_return_entry(p_hwfn, p_ent);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+ struct qed_spq_entry *p_ent = NULL;
+
+ while (!list_empty(&p_spq->free_pool)) {
+ if (list_empty(&p_spq->unlimited_pending))
+ break;
+
+ p_ent = list_first_entry(&p_spq->unlimited_pending,
+ struct qed_spq_entry,
+ list);
+ if (!p_ent)
+ return -EINVAL;
+
+ list_del(&p_ent->list);
+
+ qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
+ }
+
+ return qed_spq_post_list(p_hwfn, &p_spq->pending,
+ SPQ_HIGH_PRI_RESERVE_DEFAULT);
+}
+
+int qed_spq_post(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent,
+ u8 *fw_return_code)
+{
+ int rc = 0;
+ struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
+ bool b_ret_ent = true;
+
+ if (!p_hwfn)
+ return -EINVAL;
+
+ if (!p_ent) {
+ DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
+ return -EINVAL;
+ }
+
+ /* Complete the entry */
+ rc = qed_spq_fill_entry(p_hwfn, p_ent);
+
+ spin_lock_bh(&p_spq->lock);
+
+ /* Check return value after LOCK is taken for cleaner error flow */
+ if (rc)
+ goto spq_post_fail;
+
+ /* Add the request to the pending queue */
+ rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
+ if (rc)
+ goto spq_post_fail;
+
+ rc = qed_spq_pend_post(p_hwfn);
+ if (rc) {
+ /* Since it's possible that pending failed for a different
+ * entry [although unlikely], the failed entry was already
+ * dealt with; No need to return it here.
+ */
+ b_ret_ent = false;
+ goto spq_post_fail;
+ }
+
+ spin_unlock_bh(&p_spq->lock);
+
+ if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
+ /* For entries in QED BLOCK mode, the completion code cannot
+ * perform the necessary cleanup - if it did, we couldn't
+ * access p_ent here to see whether it's successful or not.
+ * Thus, after gaining the answer perform the cleanup here.
+ */
+ rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
+ if (rc)
+ goto spq_post_fail2;
+
+ /* return to pool */
+ qed_spq_return_entry(p_hwfn, p_ent);
+ }
+ return rc;
+
+spq_post_fail2:
+ spin_lock_bh(&p_spq->lock);
+ list_del(&p_ent->list);
+ qed_chain_return_produced(&p_spq->chain);
+
+spq_post_fail:
+ /* return to the free pool */
+ if (b_ret_ent)
+ __qed_spq_return_entry(p_hwfn, p_ent);
+ spin_unlock_bh(&p_spq->lock);
+
+ return rc;
+}
+
+int qed_spq_completion(struct qed_hwfn *p_hwfn,
+ __le16 echo,
+ u8 fw_return_code,
+ union event_ring_data *p_data)
+{
+ struct qed_spq *p_spq;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_spq_entry *tmp;
+ struct qed_spq_entry *found = NULL;
+ int rc;
+
+ if (!p_hwfn)
+ return -EINVAL;
+
+ p_spq = p_hwfn->p_spq;
+ if (!p_spq)
+ return -EINVAL;
+
+ spin_lock_bh(&p_spq->lock);
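+ /* The EQE 'echo' value identifies which posted ramrod this
+ * completion belongs to, so match it against completion_pending.
+ */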
+ list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
+ list) {
+ if (p_ent->elem.hdr.echo == echo) {
+ list_del(&p_ent->list);
+
+ qed_chain_return_produced(&p_spq->chain);
+ p_spq->comp_count++;
+ found = p_ent;
+ break;
+ }
+ }
+
+ /* Release lock before callback, as callback may post
+ * an additional ramrod.
+ */
+ spin_unlock_bh(&p_spq->lock);
+
+ if (!found) {
+ DP_NOTICE(p_hwfn,
+ "Failed to find an entry this EQE completes\n");
+ return -EEXIST;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p\n",
+ p_ent->comp_cb.function, p_ent->comp_cb.cookie);
+ if (found->comp_cb.function)
+ found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
+ fw_return_code);
+
+ if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
+ /* EBLOCK is responsible for freeing its own entry */
+ qed_spq_return_entry(p_hwfn, found);
+
+ /* Attempt to post pending requests */
+ spin_lock_bh(&p_spq->lock);
+ rc = qed_spq_pend_post(p_hwfn);
+ spin_unlock_bh(&p_spq->lock);
+
+ return rc;
+}
+
+struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_consq *p_consq;
+
+ /* Allocate ConsQ struct */
+ p_consq = kzalloc(sizeof(*p_consq), GFP_ATOMIC);
+ if (!p_consq) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
+ return NULL;
+ }
+
+ /* Allocate and initialize ConsQ chain */
+ if (qed_chain_alloc(p_hwfn->cdev,
+ QED_CHAIN_USE_TO_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_PAGE_SIZE / 0x80,
+ 0x80,
+ &p_consq->chain)) {
+ DP_NOTICE(p_hwfn, "Failed to allocate consq chain");
+ goto consq_allocate_fail;
+ }
+
+ return p_consq;
+
+consq_allocate_fail:
+ qed_consq_free(p_hwfn, p_consq);
+ return NULL;
+}
+
+void qed_consq_setup(struct qed_hwfn *p_hwfn,
+ struct qed_consq *p_consq)
+{
+ qed_chain_reset(&p_consq->chain);
+}
+
+void qed_consq_free(struct qed_hwfn *p_hwfn,
+ struct qed_consq *p_consq)
+{
+ if (!p_consq)
+ return;
+ qed_chain_free(p_hwfn->cdev, &p_consq->chain);
+ kfree(p_consq);
+}
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
new file mode 100644
index 000000000000..06ff90d87572
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_QEDE) := qede.o
+
+qede-y := qede_main.o qede_ethtool.o
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
new file mode 100644
index 000000000000..ea00d5f3bab4
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -0,0 +1,285 @@
+/* QLogic qede NIC Driver
+* Copyright (c) 2015 QLogic Corporation
+*
+* This software is available under the terms of the GNU General Public License
+* (GPL) Version 2, available from the file COPYING in the main directory of
+* this source tree.
+*/
+
+#ifndef _QEDE_H_
+#define _QEDE_H_
+#include <linux/compiler.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/bitmap.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/eth_common.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_eth_if.h>
+
+#define QEDE_MAJOR_VERSION 8
+#define QEDE_MINOR_VERSION 4
+#define QEDE_REVISION_VERSION 0
+#define QEDE_ENGINEERING_VERSION 0
+#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
+ __stringify(QEDE_MINOR_VERSION) "." \
+ __stringify(QEDE_REVISION_VERSION) "." \
+ __stringify(QEDE_ENGINEERING_VERSION)
+
+#define QEDE_ETH_INTERFACE_VERSION 300
+
+#define DRV_MODULE_SYM qede
+
+struct qede_stats {
+ u64 no_buff_discards;
+ u64 rx_ucast_bytes;
+ u64 rx_mcast_bytes;
+ u64 rx_bcast_bytes;
+ u64 rx_ucast_pkts;
+ u64 rx_mcast_pkts;
+ u64 rx_bcast_pkts;
+ u64 mftag_filter_discards;
+ u64 mac_filter_discards;
+ u64 tx_ucast_bytes;
+ u64 tx_mcast_bytes;
+ u64 tx_bcast_bytes;
+ u64 tx_ucast_pkts;
+ u64 tx_mcast_pkts;
+ u64 tx_bcast_pkts;
+ u64 tx_err_drop_pkts;
+ u64 coalesced_pkts;
+ u64 coalesced_events;
+ u64 coalesced_aborts_num;
+ u64 non_coalesced_pkts;
+ u64 coalesced_bytes;
+
+ /* port */
+ u64 rx_64_byte_packets;
+ u64 rx_127_byte_packets;
+ u64 rx_255_byte_packets;
+ u64 rx_511_byte_packets;
+ u64 rx_1023_byte_packets;
+ u64 rx_1518_byte_packets;
+ u64 rx_1522_byte_packets;
+ u64 rx_2047_byte_packets;
+ u64 rx_4095_byte_packets;
+ u64 rx_9216_byte_packets;
+ u64 rx_16383_byte_packets;
+ u64 rx_crc_errors;
+ u64 rx_mac_crtl_frames;
+ u64 rx_pause_frames;
+ u64 rx_pfc_frames;
+ u64 rx_align_errors;
+ u64 rx_carrier_errors;
+ u64 rx_oversize_packets;
+ u64 rx_jabbers;
+ u64 rx_undersize_packets;
+ u64 rx_fragments;
+ u64 tx_64_byte_packets;
+ u64 tx_65_to_127_byte_packets;
+ u64 tx_128_to_255_byte_packets;
+ u64 tx_256_to_511_byte_packets;
+ u64 tx_512_to_1023_byte_packets;
+ u64 tx_1024_to_1518_byte_packets;
+ u64 tx_1519_to_2047_byte_packets;
+ u64 tx_2048_to_4095_byte_packets;
+ u64 tx_4096_to_9216_byte_packets;
+ u64 tx_9217_to_16383_byte_packets;
+ u64 tx_pause_frames;
+ u64 tx_pfc_frames;
+ u64 tx_lpi_entry_count;
+ u64 tx_total_collisions;
+ u64 brb_truncates;
+ u64 brb_discards;
+ u64 tx_mac_ctrl_frames;
+};
+
+struct qede_dev {
+ struct qed_dev *cdev;
+ struct net_device *ndev;
+ struct pci_dev *pdev;
+
+ u32 dp_module;
+ u8 dp_level;
+
+ const struct qed_eth_ops *ops;
+
+ struct qed_dev_eth_info dev_info;
+#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
+#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues * \
+ (edev)->dev_info.num_tc)
+
+ struct qede_fastpath *fp_array;
+ u16 num_rss;
+ u8 num_tc;
+#define QEDE_RSS_CNT(edev) ((edev)->num_rss)
+#define QEDE_TSS_CNT(edev) ((edev)->num_rss * \
+ (edev)->num_tc)
+#define QEDE_TSS_IDX(edev, txqidx) ((txqidx) % (edev)->num_rss)
+#define QEDE_TC_IDX(edev, txqidx) ((txqidx) / (edev)->num_rss)
+#define QEDE_TX_QUEUE(edev, txqidx) \
+ (&(edev)->fp_array[QEDE_TSS_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX( \
+ (edev), (txqidx))])
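+/* A Tx queue index selects both a fastpath (QEDE_TSS_IDX) and a traffic
+ * class within it (QEDE_TC_IDX); QEDE_TX_QUEUE resolves it to the actual
+ * qede_tx_queue.
+ */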
+
+ struct qed_int_info int_info;
+ unsigned char primary_mac[ETH_ALEN];
+
+ /* Smaller private variant of the RTNL lock */
+ struct mutex qede_lock;
+ u32 state; /* Protected by qede_lock */
+ u16 rx_buf_size;
+ /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ETH_OVERHEAD (ETH_HLEN + 8 + 8)
+ /* Max supported alignment is 256 (8 shift)
+ * minimal alignment shift 6 is optimal for 57xxx HW performance
+ */
+#define QEDE_RX_ALIGN_SHIFT max(6, min(8, L1_CACHE_SHIFT))
+ /* We assume build_skb() uses sizeof(struct skb_shared_info) bytes
+ * at the end of skb->data, to avoid wasting a full cache line.
+ * This reduces memory use (skb->truesize).
+ */
+#define QEDE_FW_RX_ALIGN_END \
+ max_t(u64, 1UL << QEDE_RX_ALIGN_SHIFT, \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+ struct qede_stats stats;
+ struct qed_update_vport_rss_params rss_params;
+ u16 q_num_rx_buffers; /* Must be a power of two */
+ u16 q_num_tx_buffers; /* Must be a power of two */
+
+ struct delayed_work sp_task;
+ unsigned long sp_flags;
+};
+
+enum QEDE_STATE {
+ QEDE_STATE_CLOSED,
+ QEDE_STATE_OPEN,
+};
+
+#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
+
+#define MAX_NUM_TC 8
+#define MAX_NUM_PRI 8
+
+/* The driver supports the new build_skb() API:
+ * RX ring buffer contains pointer to kmalloc() data only,
+ * skb are built only after the frame was DMA-ed.
+ */
+struct sw_rx_data {
+ u8 *data;
+
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+struct qede_rx_queue {
+ __le16 *hw_cons_ptr;
+ struct sw_rx_data *sw_rx_ring;
+ u16 sw_rx_cons;
+ u16 sw_rx_prod;
+ struct qed_chain rx_bd_ring;
+ struct qed_chain rx_comp_ring;
+ void __iomem *hw_rxq_prod_addr;
+
+ int rx_buf_size;
+
+ u16 num_rx_buffers;
+ u16 rxq_id;
+
+ u64 rx_hw_errors;
+ u64 rx_alloc_errors;
+};
+
+union db_prod {
+ struct eth_db_data data;
+ u32 raw;
+};
+
+struct sw_tx_bd {
+ struct sk_buff *skb;
+ u8 flags;
+/* Set on the first BD descriptor when there is a split BD */
+#define QEDE_TSO_SPLIT_BD BIT(0)
+};
+
+struct qede_tx_queue {
+ int index; /* Queue index */
+ __le16 *hw_cons_ptr;
+ struct sw_tx_bd *sw_tx_ring;
+ u16 sw_tx_cons;
+ u16 sw_tx_prod;
+ struct qed_chain tx_pbl;
+ void __iomem *doorbell_addr;
+ union db_prod tx_db;
+
+ u16 num_tx_buffers;
+};
+
+#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr.hi), \
+ le32_to_cpu((bd)->addr.lo))
+#define BD_SET_UNMAP_ADDR_LEN(bd, maddr, len) \
+ do { \
+ (bd)->addr.hi = cpu_to_le32(upper_32_bits(maddr)); \
+ (bd)->addr.lo = cpu_to_le32(lower_32_bits(maddr)); \
+ (bd)->nbytes = cpu_to_le16(len); \
+ } while (0)
+#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
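+/* The BD_* helpers convert between CPU dma_addr_t/length values and the
+ * little-endian hi/lo address and nbytes fields of a Tx BD.
+ */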
+
+struct qede_fastpath {
+ struct qede_dev *edev;
+ u8 rss_id;
+ struct napi_struct napi;
+ struct qed_sb_info *sb_info;
+ struct qede_rx_queue *rxq;
+ struct qede_tx_queue *txqs;
+
+#define VEC_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
+ char name[VEC_NAME_SIZE];
+};
+
+/* Debug print definitions */
+#define DP_NAME(edev) ((edev)->ndev->name)
+
+#define XMIT_PLAIN 0
+#define XMIT_L4_CSUM BIT(0)
+#define XMIT_LSO BIT(1)
+#define XMIT_ENC BIT(2)
+
+#define QEDE_CSUM_ERROR BIT(0)
+#define QEDE_CSUM_UNNECESSARY BIT(1)
+
+#define QEDE_SP_RX_MODE 1
+
+union qede_reload_args {
+ u16 mtu;
+};
+
+void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
+void qede_set_ethtool_ops(struct net_device *netdev);
+void qede_reload(struct qede_dev *edev,
+ void (*func)(struct qede_dev *edev,
+ union qede_reload_args *args),
+ union qede_reload_args *args);
+int qede_change_mtu(struct net_device *dev, int new_mtu);
+void qede_fill_by_demand_stats(struct qede_dev *edev);
+
+#define RX_RING_SIZE_POW 13
+#define RX_RING_SIZE BIT(RX_RING_SIZE_POW)
+#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1)
+#define NUM_RX_BDS_MIN 128
+#define NUM_RX_BDS_DEF NUM_RX_BDS_MAX
+
+#define TX_RING_SIZE_POW 13
+#define TX_RING_SIZE BIT(TX_RING_SIZE_POW)
+#define NUM_TX_BDS_MAX (TX_RING_SIZE - 1)
+#define NUM_TX_BDS_MIN 128
+#define NUM_TX_BDS_DEF NUM_TX_BDS_MAX
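+/* Ring sizes are powers of two, so software producer/consumer indices can
+ * be wrapped with a simple '& NUM_*_BDS_MAX' mask.
+ */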
+
+#define for_each_rss(i) for (i = 0; i < edev->num_rss; i++)
+
+#endif /* _QEDE_H_ */
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
new file mode 100644
index 000000000000..3a362476a22c
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -0,0 +1,385 @@
+/* QLogic qede NIC Driver
+* Copyright (c) 2015 QLogic Corporation
+*
+* This software is available under the terms of the GNU General Public License
+* (GPL) Version 2, available from the file COPYING in the main directory of
+* this source tree.
+*/
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/capability.h>
+#include "qede.h"
+
+#define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name))
+#define QEDE_STAT_STRING(stat_name) (#stat_name)
+#define _QEDE_STAT(stat_name, pf_only) \
+ {QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only}
+#define QEDE_PF_STAT(stat_name) _QEDE_STAT(stat_name, true)
+#define QEDE_STAT(stat_name) _QEDE_STAT(stat_name, false)
+
+#define QEDE_RQSTAT_OFFSET(stat_name) \
+ (offsetof(struct qede_rx_queue, stat_name))
+#define QEDE_RQSTAT_STRING(stat_name) (#stat_name)
+#define QEDE_RQSTAT(stat_name) \
+ {QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)}
+static const struct {
+ u64 offset;
+ char string[ETH_GSTRING_LEN];
+} qede_rqstats_arr[] = {
+ QEDE_RQSTAT(rx_hw_errors),
+ QEDE_RQSTAT(rx_alloc_errors),
+};
+
+#define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr)
+#define QEDE_RQSTATS_DATA(dev, sindex, rqindex) \
+ (*((u64 *)(((char *)(dev->fp_array[(rqindex)].rxq)) +\
+ qede_rqstats_arr[(sindex)].offset)))
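+/* Per-queue stats are read directly out of each rxq structure using the
+ * offsets recorded in qede_rqstats_arr.
+ */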
+static const struct {
+ u64 offset;
+ char string[ETH_GSTRING_LEN];
+ bool pf_only;
+} qede_stats_arr[] = {
+ QEDE_STAT(rx_ucast_bytes),
+ QEDE_STAT(rx_mcast_bytes),
+ QEDE_STAT(rx_bcast_bytes),
+ QEDE_STAT(rx_ucast_pkts),
+ QEDE_STAT(rx_mcast_pkts),
+ QEDE_STAT(rx_bcast_pkts),
+
+ QEDE_STAT(tx_ucast_bytes),
+ QEDE_STAT(tx_mcast_bytes),
+ QEDE_STAT(tx_bcast_bytes),
+ QEDE_STAT(tx_ucast_pkts),
+ QEDE_STAT(tx_mcast_pkts),
+ QEDE_STAT(tx_bcast_pkts),
+
+ QEDE_PF_STAT(rx_64_byte_packets),
+ QEDE_PF_STAT(rx_127_byte_packets),
+ QEDE_PF_STAT(rx_255_byte_packets),
+ QEDE_PF_STAT(rx_511_byte_packets),
+ QEDE_PF_STAT(rx_1023_byte_packets),
+ QEDE_PF_STAT(rx_1518_byte_packets),
+ QEDE_PF_STAT(rx_1522_byte_packets),
+ QEDE_PF_STAT(rx_2047_byte_packets),
+ QEDE_PF_STAT(rx_4095_byte_packets),
+ QEDE_PF_STAT(rx_9216_byte_packets),
+ QEDE_PF_STAT(rx_16383_byte_packets),
+ QEDE_PF_STAT(tx_64_byte_packets),
+ QEDE_PF_STAT(tx_65_to_127_byte_packets),
+ QEDE_PF_STAT(tx_128_to_255_byte_packets),
+ QEDE_PF_STAT(tx_256_to_511_byte_packets),
+ QEDE_PF_STAT(tx_512_to_1023_byte_packets),
+ QEDE_PF_STAT(tx_1024_to_1518_byte_packets),
+ QEDE_PF_STAT(tx_1519_to_2047_byte_packets),
+ QEDE_PF_STAT(tx_2048_to_4095_byte_packets),
+ QEDE_PF_STAT(tx_4096_to_9216_byte_packets),
+ QEDE_PF_STAT(tx_9217_to_16383_byte_packets),
+
+ QEDE_PF_STAT(rx_mac_crtl_frames),
+ QEDE_PF_STAT(tx_mac_ctrl_frames),
+ QEDE_PF_STAT(rx_pause_frames),
+ QEDE_PF_STAT(tx_pause_frames),
+ QEDE_PF_STAT(rx_pfc_frames),
+ QEDE_PF_STAT(tx_pfc_frames),
+
+ QEDE_PF_STAT(rx_crc_errors),
+ QEDE_PF_STAT(rx_align_errors),
+ QEDE_PF_STAT(rx_carrier_errors),
+ QEDE_PF_STAT(rx_oversize_packets),
+ QEDE_PF_STAT(rx_jabbers),
+ QEDE_PF_STAT(rx_undersize_packets),
+ QEDE_PF_STAT(rx_fragments),
+ QEDE_PF_STAT(tx_lpi_entry_count),
+ QEDE_PF_STAT(tx_total_collisions),
+ QEDE_PF_STAT(brb_truncates),
+ QEDE_PF_STAT(brb_discards),
+ QEDE_STAT(no_buff_discards),
+ QEDE_PF_STAT(mftag_filter_discards),
+ QEDE_PF_STAT(mac_filter_discards),
+ QEDE_STAT(tx_err_drop_pkts),
+
+ QEDE_STAT(coalesced_pkts),
+ QEDE_STAT(coalesced_events),
+ QEDE_STAT(coalesced_aborts_num),
+ QEDE_STAT(non_coalesced_pkts),
+ QEDE_STAT(coalesced_bytes),
+};
+
+#define QEDE_STATS_DATA(dev, index) \
+ (*((u64 *)(((char *)(dev)) + offsetof(struct qede_dev, stats) \
+ + qede_stats_arr[(index)].offset)))
+
+#define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)
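+/* Device-wide stats live in edev->stats and are refreshed on demand via
+ * qede_fill_by_demand_stats() before being copied out to ethtool.
+ */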
+
+static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
+{
+ int i, j, k;
+
+ for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) {
+ strcpy(buf + j * ETH_GSTRING_LEN,
+ qede_stats_arr[i].string);
+ j++;
+ }
+
+ for (k = 0; k < QEDE_NUM_RQSTATS; k++, j++)
+ strcpy(buf + j * ETH_GSTRING_LEN,
+ qede_rqstats_arr[k].string);
+}
+
+static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ qede_get_strings_stats(edev, buf);
+ break;
+ default:
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Unsupported stringset 0x%08x\n", stringset);
+ }
+}
+
+static void qede_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *buf)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int sidx, cnt = 0;
+ int qid;
+
+ qede_fill_by_demand_stats(edev);
+
+ mutex_lock(&edev->qede_lock);
+
+ for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++)
+ buf[cnt++] = QEDE_STATS_DATA(edev, sidx);
+
+ for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) {
+ buf[cnt] = 0;
+ for (qid = 0; qid < edev->num_rss; qid++)
+ buf[cnt] += QEDE_RQSTATS_DATA(edev, sidx, qid);
+ cnt++;
+ }
+
+ mutex_unlock(&edev->qede_lock);
+}
+
+static int qede_get_sset_count(struct net_device *dev, int stringset)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int num_stats = QEDE_NUM_STATS;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ return num_stats + QEDE_NUM_RQSTATS;
+
+ default:
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Unsupported stringset 0x%08x\n", stringset);
+ return -EINVAL;
+ }
+}
+
+static int qede_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qed_link_output current_link;
+
+ memset(&current_link, 0, sizeof(current_link));
+ edev->ops->common->get_link(edev->cdev, &current_link);
+
+ cmd->supported = current_link.supported_caps;
+ cmd->advertising = current_link.advertised_caps;
+ if ((edev->state == QEDE_STATE_OPEN) && (current_link.link_up)) {
+ ethtool_cmd_speed_set(cmd, current_link.speed);
+ cmd->duplex = current_link.duplex;
+ } else {
+ cmd->duplex = DUPLEX_UNKNOWN;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ }
+ cmd->port = current_link.port;
+ cmd->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE :
+ AUTONEG_DISABLE;
+ cmd->lp_advertising = current_link.lp_caps;
+
+ return 0;
+}
+
+static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qed_link_output current_link;
+ struct qed_link_params params;
+ u32 speed;
+
+ if (edev->dev_info.common.is_mf) {
+ DP_INFO(edev,
+ "Link parameters can not be changed in MF mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ memset(&current_link, 0, sizeof(current_link));
+ memset(&params, 0, sizeof(params));
+ edev->ops->common->get_link(edev->cdev, &current_link);
+
+ speed = ethtool_cmd_speed(cmd);
+ params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS;
+ params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG;
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ params.autoneg = true;
+ params.forced_speed = 0;
+ params.adv_speeds = cmd->advertising;
+ } else { /* forced speed */
+ params.override_flags |= QED_LINK_OVERRIDE_SPEED_FORCED_SPEED;
+ params.autoneg = false;
+ params.forced_speed = speed;
+ switch (speed) {
+ case SPEED_10000:
+ if (!(current_link.supported_caps &
+ SUPPORTED_10000baseKR_Full)) {
+ DP_INFO(edev, "10G speed not supported\n");
+ return -EINVAL;
+ }
+ params.adv_speeds = SUPPORTED_10000baseKR_Full;
+ break;
+ case SPEED_40000:
+ if (!(current_link.supported_caps &
+ SUPPORTED_40000baseLR4_Full)) {
+ DP_INFO(edev, "40G speed not supported\n");
+ return -EINVAL;
+ }
+ params.adv_speeds = SUPPORTED_40000baseLR4_Full;
+ break;
+ default:
+ DP_INFO(edev, "Unsupported speed %u\n", speed);
+ return -EINVAL;
+ }
+ }
+
+ params.link_up = true;
+ edev->ops->common->set_link(edev->cdev, &params);
+
+ return 0;
+}
+
+static void qede_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ char mfw[ETHTOOL_FWVERS_LEN], storm[ETHTOOL_FWVERS_LEN];
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ strlcpy(info->driver, "qede", sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+
+ snprintf(storm, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
+ edev->dev_info.common.fw_major,
+ edev->dev_info.common.fw_minor,
+ edev->dev_info.common.fw_rev,
+ edev->dev_info.common.fw_eng);
+
+ snprintf(mfw, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
+ (edev->dev_info.common.mfw_rev >> 24) & 0xFF,
+ (edev->dev_info.common.mfw_rev >> 16) & 0xFF,
+ (edev->dev_info.common.mfw_rev >> 8) & 0xFF,
+ edev->dev_info.common.mfw_rev & 0xFF);
+
+ if ((strlen(storm) + strlen(mfw) + strlen("mfw storm ")) <
+ sizeof(info->fw_version)) {
+ snprintf(info->fw_version, sizeof(info->fw_version),
+ "mfw %s storm %s", mfw, storm);
+ } else {
+ snprintf(info->fw_version, sizeof(info->fw_version),
+ "%s %s", mfw, storm);
+ }
+
+ strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
+}
+
+static u32 qede_get_msglevel(struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) |
+ edev->dp_module;
+}
+
+static void qede_set_msglevel(struct net_device *ndev, u32 level)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ u32 dp_module = 0;
+ u8 dp_level = 0;
+
+ qede_config_debug(level, &dp_module, &dp_level);
+
+ edev->dp_level = dp_level;
+ edev->dp_module = dp_module;
+ edev->ops->common->update_msglvl(edev->cdev,
+ dp_module, dp_level);
+}
+
+static u32 qede_get_link(struct net_device *dev)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qed_link_output current_link;
+
+ memset(&current_link, 0, sizeof(current_link));
+ edev->ops->common->get_link(edev->cdev, &current_link);
+
+ return current_link.link_up;
+}
+
+static void qede_update_mtu(struct qede_dev *edev, union qede_reload_args *args)
+{
+ edev->ndev->mtu = args->mtu;
+}
+
+/* Netdevice NDOs */
+#define ETH_MAX_JUMBO_PACKET_SIZE 9600
+#define ETH_MIN_PACKET_SIZE 60
+int qede_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ union qede_reload_args args;
+
+ if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
+ ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
+ DP_ERR(edev, "Can't support requested MTU size\n");
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+ "Configuring MTU size of %d\n", new_mtu);
+
+ /* Set the MTU field and restart the interface if needed */
+ args.mtu = new_mtu;
+
+ if (netif_running(edev->ndev))
+ qede_reload(edev, &qede_update_mtu, &args);
+
+ qede_update_mtu(edev, &args);
+
+ return 0;
+}
+
+static const struct ethtool_ops qede_ethtool_ops = {
+ .get_settings = qede_get_settings,
+ .set_settings = qede_set_settings,
+ .get_drvinfo = qede_get_drvinfo,
+ .get_msglevel = qede_get_msglevel,
+ .set_msglevel = qede_set_msglevel,
+ .get_link = qede_get_link,
+ .get_strings = qede_get_strings,
+ .get_ethtool_stats = qede_get_ethtool_stats,
+ .get_sset_count = qede_get_sset_count,
+
+};
+
+void qede_set_ethtool_ops(struct net_device *dev)
+{
+ dev->ethtool_ops = &qede_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
new file mode 100644
index 000000000000..f4657a2e730a
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -0,0 +1,2584 @@
+/* QLogic qede NIC Driver
+* Copyright (c) 2015 QLogic Corporation
+*
+* This software is available under the terms of the GNU General Public License
+* (GPL) Version 2, available from the file COPYING in the main directory of
+* this source tree.
+*/
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/version.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <asm/byteorder.h>
+#include <asm/param.h>
+#include <linux/io.h>
+#include <linux/netdev_features.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <net/vxlan.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/pkt_sched.h>
+#include <linux/ethtool.h>
+#include <linux/in.h>
+#include <linux/random.h>
+#include <net/ip6_checksum.h>
+#include <linux/bitops.h>
+
+#include "qede.h"
+
+static const char version[] = "QLogic QL4xxx 40G/100G Ethernet Driver qede "
+ DRV_MODULE_VERSION "\n";
+
+MODULE_DESCRIPTION("QLogic 40G/100G Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+static uint debug;
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, " Default debug msglevel");
+
+static const struct qed_eth_ops *qed_ops;
+
+#define CHIP_NUM_57980S_40 0x1634
+#define CHIP_NUM_57980S_10 0x1635
+#define CHIP_NUM_57980S_MF 0x1636
+#define CHIP_NUM_57980S_100 0x1644
+#define CHIP_NUM_57980S_50 0x1654
+#define CHIP_NUM_57980S_25 0x1656
+
+#ifndef PCI_DEVICE_ID_NX2_57980E
+#define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40
+#define PCI_DEVICE_ID_57980S_10 CHIP_NUM_57980S_10
+#define PCI_DEVICE_ID_57980S_MF CHIP_NUM_57980S_MF
+#define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100
+#define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50
+#define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25
+#endif
+
+static const struct pci_device_id qede_pci_tbl[] = {
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), 0 },
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), 0 },
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), 0 },
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), 0 },
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), 0 },
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), 0 },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
+
+static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+
+#define TX_TIMEOUT (5 * HZ)
+
+static void qede_remove(struct pci_dev *pdev);
+static int qede_alloc_rx_buffer(struct qede_dev *edev,
+ struct qede_rx_queue *rxq);
+static void qede_link_update(void *dev, struct qed_link_output *link);
+
+static struct pci_driver qede_pci_driver = {
+ .name = "qede",
+ .id_table = qede_pci_tbl,
+ .probe = qede_probe,
+ .remove = qede_remove,
+};
+
+static struct qed_eth_cb_ops qede_ll_ops = {
+ {
+ .link_update = qede_link_update,
+ },
+};
+
+static int qede_netdev_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct ethtool_drvinfo drvinfo;
+ struct qede_dev *edev;
+
+ /* Currently only support name change */
+ if (event != NETDEV_CHANGENAME)
+ goto done;
+
+ /* Check whether this is a qede device */
+ if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
+ goto done;
+
+ memset(&drvinfo, 0, sizeof(drvinfo));
+ ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
+ if (strcmp(drvinfo.driver, "qede"))
+ goto done;
+ edev = netdev_priv(ndev);
+
+ /* Notify qed of the name change */
+ if (!edev->ops || !edev->ops->common)
+ goto done;
+ edev->ops->common->set_id(edev->cdev, edev->ndev->name,
+ "qede");
+
+done:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block qede_netdev_notifier = {
+ .notifier_call = qede_netdev_event,
+};
+
+static
+int __init qede_init(void)
+{
+ int ret;
+ u32 qed_ver;
+
+ pr_notice("qede_init: %s\n", version);
+
+ qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);
+ if (qed_ver != QEDE_ETH_INTERFACE_VERSION) {
+ pr_notice("Version mismatch [%08x != %08x]\n",
+ qed_ver,
+ QEDE_ETH_INTERFACE_VERSION);
+ return -EINVAL;
+ }
+
+ qed_ops = qed_get_eth_ops(QEDE_ETH_INTERFACE_VERSION);
+ if (!qed_ops) {
+ pr_notice("Failed to get qed ethtool operations\n");
+ return -EINVAL;
+ }
+
+ /* Must register notifier before pci ops, since we might miss
+ * interface rename after pci probe and netdev registration.
+ */
+ ret = register_netdevice_notifier(&qede_netdev_notifier);
+ if (ret) {
+ pr_notice("Failed to register netdevice_notifier\n");
+ qed_put_eth_ops();
+ return -EINVAL;
+ }
+
+ ret = pci_register_driver(&qede_pci_driver);
+ if (ret) {
+ pr_notice("Failed to register driver\n");
+ unregister_netdevice_notifier(&qede_netdev_notifier);
+ qed_put_eth_ops();
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void __exit qede_cleanup(void)
+{
+ pr_notice("qede_cleanup called\n");
+
+ unregister_netdevice_notifier(&qede_netdev_notifier);
+ pci_unregister_driver(&qede_pci_driver);
+ qed_put_eth_ops();
+}
+
+module_init(qede_init);
+module_exit(qede_cleanup);
+
+/* -------------------------------------------------------------------------
+ * START OF FAST-PATH
+ * -------------------------------------------------------------------------
+ */
+
+/* Unmap the data and free skb */
+static int qede_free_tx_pkt(struct qede_dev *edev,
+ struct qede_tx_queue *txq,
+ int *len)
+{
+ u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
+ struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
+ struct eth_tx_1st_bd *first_bd;
+ struct eth_tx_bd *tx_data_bd;
+ int bds_consumed = 0;
+ int nbds;
+ bool data_split = txq->sw_tx_ring[idx].flags & QEDE_TSO_SPLIT_BD;
+ int i, split_bd_len = 0;
+
+ if (unlikely(!skb)) {
+ DP_ERR(edev,
+ "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
+ idx, txq->sw_tx_cons, txq->sw_tx_prod);
+ return -1;
+ }
+
+ *len = skb->len;
+
+ first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+
+ bds_consumed++;
+
+ nbds = first_bd->data.nbds;
+
+ if (data_split) {
+ struct eth_tx_bd *split = (struct eth_tx_bd *)
+ qed_chain_consume(&txq->tx_pbl);
+ split_bd_len = BD_UNMAP_LEN(split);
+ bds_consumed++;
+ }
+ dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+
+ /* Unmap the data of the skb frags */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
+ tx_data_bd = (struct eth_tx_bd *)
+ qed_chain_consume(&txq->tx_pbl);
+ dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
+ BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
+ }
+
+ while (bds_consumed++ < nbds)
+ qed_chain_consume(&txq->tx_pbl);
+
+ /* Free skb */
+ dev_kfree_skb_any(skb);
+ txq->sw_tx_ring[idx].skb = NULL;
+ txq->sw_tx_ring[idx].flags = 0;
+
+ return 0;
+}
+
+/* Unmap the data and free skb when mapping failed during start_xmit */
+static void qede_free_failed_tx_pkt(struct qede_dev *edev,
+ struct qede_tx_queue *txq,
+ struct eth_tx_1st_bd *first_bd,
+ int nbd,
+ bool data_split)
+{
+ u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+ struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
+ struct eth_tx_bd *tx_data_bd;
+ int i, split_bd_len = 0;
+
+ /* Return prod to its position before this skb was handled */
+ qed_chain_set_prod(&txq->tx_pbl,
+ le16_to_cpu(txq->tx_db.data.bd_prod),
+ first_bd);
+
+ first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
+
+ if (data_split) {
+ struct eth_tx_bd *split = (struct eth_tx_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+ split_bd_len = BD_UNMAP_LEN(split);
+ nbd--;
+ }
+
+ dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+
+ /* Unmap the data of the skb frags */
+ for (i = 0; i < nbd; i++) {
+ tx_data_bd = (struct eth_tx_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+ if (tx_data_bd->nbytes)
+ dma_unmap_page(&edev->pdev->dev,
+ BD_UNMAP_ADDR(tx_data_bd),
+ BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
+ }
+
+ /* Return again prod to its position before this skb was handled */
+ qed_chain_set_prod(&txq->tx_pbl,
+ le16_to_cpu(txq->tx_db.data.bd_prod),
+ first_bd);
+
+ /* Free skb */
+ dev_kfree_skb_any(skb);
+ txq->sw_tx_ring[idx].skb = NULL;
+ txq->sw_tx_ring[idx].flags = 0;
+}
+
+static u32 qede_xmit_type(struct qede_dev *edev,
+ struct sk_buff *skb,
+ int *ipv6_ext)
+{
+ u32 rc = XMIT_L4_CSUM;
+ __be16 l3_proto;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return XMIT_PLAIN;
+
+ l3_proto = vlan_get_protocol(skb);
+ if (l3_proto == htons(ETH_P_IPV6) &&
+ (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
+ *ipv6_ext = 1;
+
+ if (skb_is_gso(skb))
+ rc |= XMIT_LSO;
+
+ return rc;
+}
+
+static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
+ struct eth_tx_2nd_bd *second_bd,
+ struct eth_tx_3rd_bd *third_bd)
+{
+ u8 l4_proto;
+ u16 bd2_bits = 0, bd2_bits2 = 0;
+
+ bd2_bits2 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
+
+ bd2_bits |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
+ ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
+ << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
+
+ bd2_bits2 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
+ ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);
+
+ if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
+ l4_proto = ipv6_hdr(skb)->nexthdr;
+ else
+ l4_proto = ip_hdr(skb)->protocol;
+
+ if (l4_proto == IPPROTO_UDP)
+ bd2_bits2 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
+
+ if (third_bd) {
+ third_bd->data.bitfields |=
+ ((tcp_hdrlen(skb) / 4) &
+ ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
+ ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT;
+ }
+
+ second_bd->data.bitfields = cpu_to_le16(bd2_bits);
+ second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
+}
+
+static int map_frag_to_bd(struct qede_dev *edev,
+ skb_frag_t *frag,
+ struct eth_tx_bd *bd)
+{
+ dma_addr_t mapping;
+
+ /* Map skb non-linear frag data for DMA */
+ mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+ DP_NOTICE(edev, "Unable to map frag - dropping packet\n");
+ return -ENOMEM;
+ }
+
+ /* Setup the data pointer of the frag data */
+ BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
+
+ return 0;
+}
+
+/* Main transmit function */
+static
+netdev_tx_t qede_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ struct netdev_queue *netdev_txq;
+ struct qede_tx_queue *txq;
+ struct eth_tx_1st_bd *first_bd;
+ struct eth_tx_2nd_bd *second_bd = NULL;
+ struct eth_tx_3rd_bd *third_bd = NULL;
+ struct eth_tx_bd *tx_data_bd = NULL;
+ u16 txq_index;
+ u8 nbd = 0;
+ dma_addr_t mapping;
+ int rc, frag_idx = 0, ipv6_ext = 0;
+ u8 xmit_type;
+ u16 idx;
+ u16 hlen;
+ bool data_split;
+
+ /* Get tx-queue context and netdev index */
+ txq_index = skb_get_queue_mapping(skb);
+ WARN_ON(txq_index >= QEDE_TSS_CNT(edev));
+ txq = QEDE_TX_QUEUE(edev, txq_index);
+ netdev_txq = netdev_get_tx_queue(ndev, txq_index);
+
+ /* Current code doesn't need SKB linearization, since the maximum
+ * number of skb frags fits within what the FW HSI allows per packet.
+ */
+ BUILD_BUG_ON(MAX_SKB_FRAGS > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET);
+
+ WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) <
+ (MAX_SKB_FRAGS + 1));
+
+ xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);
+
+ /* Fill the entry in the SW ring and the BDs in the FW ring */
+ idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+ txq->sw_tx_ring[idx].skb = skb;
+ first_bd = (struct eth_tx_1st_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+ memset(first_bd, 0, sizeof(*first_bd));
+ first_bd->data.bd_flags.bitfields =
+ 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
+
+ /* Map skb linear data for DMA and set in the first BD */
+ mapping = dma_map_single(&edev->pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+ DP_NOTICE(edev, "SKB mapping failed\n");
+ qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
+ return NETDEV_TX_OK;
+ }
+ nbd++;
+ BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
+
+ /* In case there is IPv6 with extension headers or LSO we need 2nd and
+ * 3rd BDs.
+ */
+ if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
+ second_bd = (struct eth_tx_2nd_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+ memset(second_bd, 0, sizeof(*second_bd));
+
+ nbd++;
+ third_bd = (struct eth_tx_3rd_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+ memset(third_bd, 0, sizeof(*third_bd));
+
+ nbd++;
+ /* We need to fill in additional data in second_bd... */
+ tx_data_bd = (struct eth_tx_bd *)second_bd;
+ }
+
+ if (skb_vlan_tag_present(skb)) {
+ first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
+ }
+
+ /* Fill the parsing flags & params according to the requested offload */
+ if (xmit_type & XMIT_L4_CSUM) {
+ /* We don't re-calculate IP checksum as it is already done by
+ * the upper stack
+ */
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+
+ /* If the packet is IPv6 with extension header, indicate that
+ * to FW and pass few params, since the device cracker doesn't
+ * support parsing IPv6 with extension header/s.
+ */
+ if (unlikely(ipv6_ext))
+ qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
+ }
+
+ if (xmit_type & XMIT_LSO) {
+ first_bd->data.bd_flags.bitfields |=
+ (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
+ third_bd->data.lso_mss =
+ cpu_to_le16(skb_shinfo(skb)->gso_size);
+
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+ hlen = skb_transport_header(skb) +
+ tcp_hdrlen(skb) - skb->data;
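+ /* hlen now covers the full headers - Ethernet, IP and TCP */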
+
+ /* @@@TBD - check whether this can be removed */
+ third_bd->data.bitfields |=
+ (1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
+
+ /* Make life easier for FW guys who can't deal with header and
+ * data on same BD. If we need to split, use the second bd...
+ */
+ if (unlikely(skb_headlen(skb) > hlen)) {
+ DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+ "TSO split header size is %d (%x:%x)\n",
+ first_bd->nbytes, first_bd->addr.hi,
+ first_bd->addr.lo);
+
+ mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
+ le32_to_cpu(first_bd->addr.lo)) +
+ hlen;
+
+ BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
+ le16_to_cpu(first_bd->nbytes) -
+ hlen);
+
+ /* this marks the BD as one that has no
+ * individual mapping
+ */
+ txq->sw_tx_ring[idx].flags |= QEDE_TSO_SPLIT_BD;
+
+ first_bd->nbytes = cpu_to_le16(hlen);
+
+ tx_data_bd = (struct eth_tx_bd *)third_bd;
+ data_split = true;
+ }
+ }
+
+ /* Handle fragmented skb */
+ /* special handle for frags inside 2nd and 3rd bds.. */
+ while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
+ rc = map_frag_to_bd(edev,
+ &skb_shinfo(skb)->frags[frag_idx],
+ tx_data_bd);
+ if (rc) {
+ qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
+ data_split);
+ return NETDEV_TX_OK;
+ }
+
+ if (tx_data_bd == (struct eth_tx_bd *)second_bd)
+ tx_data_bd = (struct eth_tx_bd *)third_bd;
+ else
+ tx_data_bd = NULL;
+
+ frag_idx++;
+ }
+
+ /* map last frags into 4th, 5th .... */
+ for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
+ tx_data_bd = (struct eth_tx_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+
+ memset(tx_data_bd, 0, sizeof(*tx_data_bd));
+
+ rc = map_frag_to_bd(edev,
+ &skb_shinfo(skb)->frags[frag_idx],
+ tx_data_bd);
+ if (rc) {
+ qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
+ data_split);
+ return NETDEV_TX_OK;
+ }
+ }
+
+ /* update the first BD with the actual num BDs */
+ first_bd->data.nbds = nbd;
+
+ netdev_tx_sent_queue(netdev_txq, skb->len);
+
+ skb_tx_timestamp(skb);
+
+ /* Advance the packet producer only now, once all mappings have
+ * succeeded, since mapping of pages may fail.
+ */
+ txq->sw_tx_prod++;
+
+ /* 'next page' entries are counted in the producer value */
+ txq->tx_db.data.bd_prod =
+ cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
+
+ /* wmb makes sure that the BDs data is updated before updating the
+ * producer, otherwise FW may read old data from the BDs.
+ */
+ wmb();
+ barrier();
+ writel(txq->tx_db.raw, txq->doorbell_addr);
+
+ /* mmiowb is needed to synchronize doorbell writes from more than one
+ * processor. It guarantees that the write arrives to the device before
+ * the queue lock is released and another start_xmit is called (possibly
+ * on another CPU). Without this barrier, the next doorbell can bypass
+ * this doorbell. This is applicable to IA64/Altix systems.
+ */
+ mmiowb();
+
+ if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
+ < (MAX_SKB_FRAGS + 1))) {
+ netif_tx_stop_queue(netdev_txq);
+ DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+ "Stop queue was called\n");
+ /* paired memory barrier is in qede_tx_int(), we have to keep
+ * ordering of set_bit() in netif_tx_stop_queue() and read of
+ * fp->bd_tx_cons
+ */
+ smp_mb();
+
+ if (qed_chain_get_elem_left(&txq->tx_pbl)
+ >= (MAX_SKB_FRAGS + 1) &&
+ (edev->state == QEDE_STATE_OPEN)) {
+ netif_tx_wake_queue(netdev_txq);
+ DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+ "Wake queue was called\n");
+ }
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static int qede_txq_has_work(struct qede_tx_queue *txq)
+{
+ u16 hw_bd_cons;
+
+ /* Tell compiler that consumer and producer can change */
+ barrier();
+ hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
+ if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
+ return 0;
+
+ return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
+}
+
+static int qede_tx_int(struct qede_dev *edev,
+ struct qede_tx_queue *txq)
+{
+ struct netdev_queue *netdev_txq;
+ u16 hw_bd_cons;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+ int rc;
+
+ netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
+
+ hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
+ barrier();
+
+ while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
+ int len = 0;
+
+ rc = qede_free_tx_pkt(edev, txq, &len);
+ if (rc) {
+ DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
+ hw_bd_cons,
+ qed_chain_get_cons_idx(&txq->tx_pbl));
+ break;
+ }
+
+ bytes_compl += len;
+ pkts_compl++;
+ txq->sw_tx_cons++;
+ }
+
+ netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
+
+ /* Need to make the tx_bd_cons update visible to start_xmit()
+ * before checking for netif_tx_queue_stopped(). Without the
+ * memory barrier, there is a small possibility that
+ * start_xmit() will miss it and cause the queue to be stopped
+ * forever.
+ * On the other hand we need an rmb() here to ensure the proper
+ * ordering of bit testing in the following
+ * netif_tx_queue_stopped(txq) call.
+ */
+ smp_mb();
+
+ if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
+ /* Taking tx_lock is needed to prevent reenabling the queue
+ * while it's empty. This could have happened if rx_action() gets
+ * suspended in qede_tx_int() after the condition before
+ * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
+ *
+ * stops the queue->sees fresh tx_bd_cons->releases the queue->
+ * sends some packets consuming the whole queue again->
+ * stops the queue
+ */
+
+ __netif_tx_lock(netdev_txq, smp_processor_id());
+
+ if ((netif_tx_queue_stopped(netdev_txq)) &&
+ (edev->state == QEDE_STATE_OPEN) &&
+ (qed_chain_get_elem_left(&txq->tx_pbl)
+ >= (MAX_SKB_FRAGS + 1))) {
+ netif_tx_wake_queue(netdev_txq);
+ DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
+ "Wake queue was called\n");
+ }
+
+ __netif_tx_unlock(netdev_txq);
+ }
+
+ return 0;
+}
+
+static bool qede_has_rx_work(struct qede_rx_queue *rxq)
+{
+ u16 hw_comp_cons, sw_comp_cons;
+
+ /* Tell compiler that status block fields can change */
+ barrier();
+
+ hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+ return hw_comp_cons != sw_comp_cons;
+}
+
+static bool qede_has_tx_work(struct qede_fastpath *fp)
+{
+ u8 tc;
+
+ for (tc = 0; tc < fp->edev->num_tc; tc++)
+ if (qede_txq_has_work(&fp->txqs[tc]))
+ return true;
+ return false;
+}
+
+/* This function copies the Rx buffer from the CONS position to the PROD
+ * position, since we failed to allocate a new Rx buffer.
+ */
+static void qede_reuse_rx_data(struct qede_rx_queue *rxq)
+{
+ struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
+ struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
+ struct sw_rx_data *sw_rx_data_cons =
+ &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+ struct sw_rx_data *sw_rx_data_prod =
+ &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+
+ dma_unmap_addr_set(sw_rx_data_prod, mapping,
+ dma_unmap_addr(sw_rx_data_cons, mapping));
+
+ sw_rx_data_prod->data = sw_rx_data_cons->data;
+ memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));
+
+ rxq->sw_rx_cons++;
+ rxq->sw_rx_prod++;
+}
+
+static inline void qede_update_rx_prod(struct qede_dev *edev,
+ struct qede_rx_queue *rxq)
+{
+ u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
+ u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
+ struct eth_rx_prod_data rx_prods = {0};
+
+ /* Update producers */
+ rx_prods.bd_prod = cpu_to_le16(bd_prod);
+ rx_prods.cqe_prod = cpu_to_le16(cqe_prod);
+
+ /* Make sure that the BD and SGE data is updated before updating the
+ * producers since FW might read the BD/SGE right after the producer
+ * is updated.
+ */
+ wmb();
+
+ internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
+ (u32 *)&rx_prods);
+
+ /* mmiowb is needed to synchronize doorbell writes from more than one
+ * processor. It guarantees that the write arrives to the device before
+ * the napi lock is released and another qede_poll is called (possibly
+ * on another CPU). Without this barrier, the next doorbell can bypass
+ * this doorbell. This is applicable to IA64/Altix systems.
+ */
+ mmiowb();
+}
+
+static u32 qede_get_rxhash(struct qede_dev *edev,
+ u8 bitfields,
+ __le32 rss_hash,
+ enum pkt_hash_types *rxhash_type)
+{
+ enum rss_hash_type htype;
+
+ htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
+
+ if ((edev->ndev->features & NETIF_F_RXHASH) && htype) {
+ *rxhash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
+ (htype == RSS_HASH_TYPE_IPV6)) ?
+ PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
+ return le32_to_cpu(rss_hash);
+ }
+ *rxhash_type = PKT_HASH_TYPE_NONE;
+ return 0;
+}
+
+static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
+{
+ skb_checksum_none_assert(skb);
+
+ if (csum_flag & QEDE_CSUM_UNNECESSARY)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+static inline void qede_skb_receive(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct sk_buff *skb,
+ u16 vlan_tag)
+{
+ if (vlan_tag)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ vlan_tag);
+
+ napi_gro_receive(&fp->napi, skb);
+}
+
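+/* Translate the CQE parsing/error flags into the driver's QEDE_CSUM_*
+ * values: checksum validated by HW, checksum error, or no checksum info.
+ */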
+static u8 qede_check_csum(u16 flag)
+{
+ u16 csum_flag = 0;
+ u8 csum = 0;
+
+ if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
+ csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+ csum = QEDE_CSUM_UNNECESSARY;
+ }
+
+ csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+ if (csum_flag & flag)
+ return QEDE_CSUM_ERROR;
+
+ return csum;
+}
+
+static int qede_rx_int(struct qede_fastpath *fp, int budget)
+{
+ struct qede_dev *edev = fp->edev;
+ struct qede_rx_queue *rxq = fp->rxq;
+
+ u16 hw_comp_cons, sw_comp_cons, sw_rx_index, parse_flag;
+ int rx_pkt = 0;
+ u8 csum_flag;
+
+ hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+	/* Memory barrier to prevent the CPU from doing speculative reads of CQE
+	 * / BD in the while-loop before reading hw_comp_cons. Without it, the
+	 * CPU could read a stale CQE, then observe the hw_comp_cons value the
+	 * FW wrote after updating both the CQE and the SB, and end up
+	 * processing the old CQE contents.
+	 */
+ rmb();
+
+ /* Loop to complete all indicated BDs */
+ while (sw_comp_cons != hw_comp_cons) {
+ struct eth_fast_path_rx_reg_cqe *fp_cqe;
+ enum pkt_hash_types rxhash_type;
+ enum eth_rx_cqe_type cqe_type;
+ struct sw_rx_data *sw_rx_data;
+ union eth_rx_cqe *cqe;
+ struct sk_buff *skb;
+ u16 len, pad;
+ u32 rx_hash;
+ u8 *data;
+
+ /* Get the CQE from the completion ring */
+ cqe = (union eth_rx_cqe *)
+ qed_chain_consume(&rxq->rx_comp_ring);
+ cqe_type = cqe->fast_path_regular.type;
+
+ if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
+ edev->ops->eth_cqe_completion(
+ edev->cdev, fp->rss_id,
+ (struct eth_slow_path_rx_cqe *)cqe);
+ goto next_cqe;
+ }
+
+ /* Get the data from the SW ring */
+ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+ sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
+ data = sw_rx_data->data;
+
+ fp_cqe = &cqe->fast_path_regular;
+ len = le16_to_cpu(fp_cqe->pkt_len);
+ pad = fp_cqe->placement_offset;
+
+		/* For every Rx BD consumed, we allocate a new buffer so the BD
+		 * ring always keeps a fixed size. If allocation fails, the
+		 * consumed BD is returned to the ring in the PROD position and
+		 * the packet that was received on it is dropped (not passed to
+		 * the upper stack).
+		 */
+ if (likely(qede_alloc_rx_buffer(edev, rxq) == 0)) {
+ dma_unmap_single(&edev->pdev->dev,
+ dma_unmap_addr(sw_rx_data, mapping),
+ rxq->rx_buf_size, DMA_FROM_DEVICE);
+
+ /* If this is an error packet then drop it */
+ parse_flag =
+ le16_to_cpu(cqe->fast_path_regular.pars_flags.flags);
+ csum_flag = qede_check_csum(parse_flag);
+ if (csum_flag == QEDE_CSUM_ERROR) {
+ DP_NOTICE(edev,
+ "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
+ sw_comp_cons, parse_flag);
+ rxq->rx_hw_errors++;
+ kfree(data);
+ goto next_rx;
+ }
+
+ skb = build_skb(data, 0);
+
+ if (unlikely(!skb)) {
+ DP_NOTICE(edev,
+ "Build_skb failed, dropping incoming packet\n");
+ kfree(data);
+ rxq->rx_alloc_errors++;
+ goto next_rx;
+ }
+
+ skb_reserve(skb, pad);
+
+ } else {
+ DP_NOTICE(edev,
+ "New buffer allocation failed, dropping incoming packet and reusing its buffer\n");
+ qede_reuse_rx_data(rxq);
+ rxq->rx_alloc_errors++;
+ goto next_cqe;
+ }
+
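+		/* The skb built from 'data' now owns the buffer; clear the SW
+		 * ring reference since ownership has passed to the stack.
+		 */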
+ sw_rx_data->data = NULL;
+
+ skb_put(skb, len);
+
+ skb->protocol = eth_type_trans(skb, edev->ndev);
+
+ rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields,
+ fp_cqe->rss_hash,
+ &rxhash_type);
+
+ skb_set_hash(skb, rx_hash, rxhash_type);
+
+ qede_set_skb_csum(skb, csum_flag);
+
+ skb_record_rx_queue(skb, fp->rss_id);
+
+ qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
+
+ qed_chain_consume(&rxq->rx_bd_ring);
+
+next_rx:
+ rxq->sw_rx_cons++;
+ rx_pkt++;
+
+next_cqe: /* don't consume bd rx buffer */
+ qed_chain_recycle_consumed(&rxq->rx_comp_ring);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+ /* CR TPA - revisit how to handle budget in TPA perhaps
+ * increase on "end"
+ */
+ if (rx_pkt == budget)
+ break;
+ } /* repeat while sw_comp_cons != hw_comp_cons... */
+
+ /* Update producers */
+ qede_update_rx_prod(edev, rxq);
+
+ return rx_pkt;
+}
+
+static int qede_poll(struct napi_struct *napi, int budget)
+{
+ int work_done = 0;
+ struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
+ napi);
+ struct qede_dev *edev = fp->edev;
+
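+	/* Service Tx completions first, then Rx within the remaining budget;
+	 * exit only once the status block shows no further Rx or Tx work.
+	 */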
+ while (1) {
+ u8 tc;
+
+ for (tc = 0; tc < edev->num_tc; tc++)
+ if (qede_txq_has_work(&fp->txqs[tc]))
+ qede_tx_int(edev, &fp->txqs[tc]);
+
+ if (qede_has_rx_work(fp->rxq)) {
+ work_done += qede_rx_int(fp, budget - work_done);
+
+ /* must not complete if we consumed full budget */
+ if (work_done >= budget)
+ break;
+ }
+
+ /* Fall out from the NAPI loop if needed */
+ if (!(qede_has_rx_work(fp->rxq) || qede_has_tx_work(fp))) {
+ qed_sb_update_sb_idx(fp->sb_info);
+ /* *_has_*_work() reads the status block,
+ * thus we need to ensure that status block indices
+ * have been actually read (qed_sb_update_sb_idx)
+ * prior to this check (*_has_*_work) so that
+ * we won't write the "newer" value of the status block
+ * to HW (if there was a DMA right after
+ * qede_has_rx_work and if there is no rmb, the memory
+ * reading (qed_sb_update_sb_idx) may be postponed
+ * to right before *_ack_sb). In this case there
+ * will never be another interrupt until there is
+ * another update of the status block, while there
+ * is still unhandled work.
+ */
+ rmb();
+
+ if (!(qede_has_rx_work(fp->rxq) ||
+ qede_has_tx_work(fp))) {
+ napi_complete(napi);
+ /* Update and reenable interrupts */
+ qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
+ 1 /*update*/);
+ break;
+ }
+ }
+ }
+
+ return work_done;
+}
+
+static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
+{
+ struct qede_fastpath *fp = fp_cookie;
+
+ qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
+
+ napi_schedule_irqoff(&fp->napi);
+ return IRQ_HANDLED;
+}
+
+/* -------------------------------------------------------------------------
+ * END OF FAST-PATH
+ * -------------------------------------------------------------------------
+ */
+
+static int qede_open(struct net_device *ndev);
+static int qede_close(struct net_device *ndev);
+static int qede_set_mac_addr(struct net_device *ndev, void *p);
+static void qede_set_rx_mode(struct net_device *ndev);
+static void qede_config_rx_mode(struct net_device *ndev);
+
+static int qede_set_ucast_rx_mac(struct qede_dev *edev,
+ enum qed_filter_xcast_params_type opcode,
+ unsigned char mac[ETH_ALEN])
+{
+ struct qed_filter_params filter_cmd;
+
+ memset(&filter_cmd, 0, sizeof(filter_cmd));
+ filter_cmd.type = QED_FILTER_TYPE_UCAST;
+ filter_cmd.filter.ucast.type = opcode;
+ filter_cmd.filter.ucast.mac_valid = 1;
+ ether_addr_copy(filter_cmd.filter.ucast.mac, mac);
+
+ return edev->ops->filter_config(edev->cdev, &filter_cmd);
+}
+
+void qede_fill_by_demand_stats(struct qede_dev *edev)
+{
+ struct qed_eth_stats stats;
+
+ edev->ops->get_vport_stats(edev->cdev, &stats);
+ edev->stats.no_buff_discards = stats.no_buff_discards;
+ edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes;
+ edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
+ edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
+ edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts;
+ edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts;
+ edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts;
+ edev->stats.mftag_filter_discards = stats.mftag_filter_discards;
+ edev->stats.mac_filter_discards = stats.mac_filter_discards;
+
+ edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes;
+ edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes;
+ edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes;
+ edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts;
+ edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts;
+ edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts;
+ edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts;
+ edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts;
+ edev->stats.coalesced_events = stats.tpa_coalesced_events;
+ edev->stats.coalesced_aborts_num = stats.tpa_aborts_num;
+ edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts;
+ edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
+
+ edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
+ edev->stats.rx_127_byte_packets = stats.rx_127_byte_packets;
+ edev->stats.rx_255_byte_packets = stats.rx_255_byte_packets;
+ edev->stats.rx_511_byte_packets = stats.rx_511_byte_packets;
+ edev->stats.rx_1023_byte_packets = stats.rx_1023_byte_packets;
+ edev->stats.rx_1518_byte_packets = stats.rx_1518_byte_packets;
+ edev->stats.rx_1522_byte_packets = stats.rx_1522_byte_packets;
+ edev->stats.rx_2047_byte_packets = stats.rx_2047_byte_packets;
+ edev->stats.rx_4095_byte_packets = stats.rx_4095_byte_packets;
+ edev->stats.rx_9216_byte_packets = stats.rx_9216_byte_packets;
+ edev->stats.rx_16383_byte_packets = stats.rx_16383_byte_packets;
+ edev->stats.rx_crc_errors = stats.rx_crc_errors;
+ edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
+ edev->stats.rx_pause_frames = stats.rx_pause_frames;
+ edev->stats.rx_pfc_frames = stats.rx_pfc_frames;
+ edev->stats.rx_align_errors = stats.rx_align_errors;
+ edev->stats.rx_carrier_errors = stats.rx_carrier_errors;
+ edev->stats.rx_oversize_packets = stats.rx_oversize_packets;
+ edev->stats.rx_jabbers = stats.rx_jabbers;
+ edev->stats.rx_undersize_packets = stats.rx_undersize_packets;
+ edev->stats.rx_fragments = stats.rx_fragments;
+ edev->stats.tx_64_byte_packets = stats.tx_64_byte_packets;
+ edev->stats.tx_65_to_127_byte_packets = stats.tx_65_to_127_byte_packets;
+ edev->stats.tx_128_to_255_byte_packets =
+ stats.tx_128_to_255_byte_packets;
+ edev->stats.tx_256_to_511_byte_packets =
+ stats.tx_256_to_511_byte_packets;
+ edev->stats.tx_512_to_1023_byte_packets =
+ stats.tx_512_to_1023_byte_packets;
+ edev->stats.tx_1024_to_1518_byte_packets =
+ stats.tx_1024_to_1518_byte_packets;
+ edev->stats.tx_1519_to_2047_byte_packets =
+ stats.tx_1519_to_2047_byte_packets;
+ edev->stats.tx_2048_to_4095_byte_packets =
+ stats.tx_2048_to_4095_byte_packets;
+ edev->stats.tx_4096_to_9216_byte_packets =
+ stats.tx_4096_to_9216_byte_packets;
+ edev->stats.tx_9217_to_16383_byte_packets =
+ stats.tx_9217_to_16383_byte_packets;
+ edev->stats.tx_pause_frames = stats.tx_pause_frames;
+ edev->stats.tx_pfc_frames = stats.tx_pfc_frames;
+ edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count;
+ edev->stats.tx_total_collisions = stats.tx_total_collisions;
+ edev->stats.brb_truncates = stats.brb_truncates;
+ edev->stats.brb_discards = stats.brb_discards;
+ edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
+}
+
+static struct rtnl_link_stats64 *qede_get_stats64(
+ struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ qede_fill_by_demand_stats(edev);
+
+ stats->rx_packets = edev->stats.rx_ucast_pkts +
+ edev->stats.rx_mcast_pkts +
+ edev->stats.rx_bcast_pkts;
+ stats->tx_packets = edev->stats.tx_ucast_pkts +
+ edev->stats.tx_mcast_pkts +
+ edev->stats.tx_bcast_pkts;
+
+ stats->rx_bytes = edev->stats.rx_ucast_bytes +
+ edev->stats.rx_mcast_bytes +
+ edev->stats.rx_bcast_bytes;
+
+ stats->tx_bytes = edev->stats.tx_ucast_bytes +
+ edev->stats.tx_mcast_bytes +
+ edev->stats.tx_bcast_bytes;
+
+ stats->tx_errors = edev->stats.tx_err_drop_pkts;
+ stats->multicast = edev->stats.rx_mcast_pkts +
+ edev->stats.rx_bcast_pkts;
+
+ stats->rx_fifo_errors = edev->stats.no_buff_discards;
+
+ stats->collisions = edev->stats.tx_total_collisions;
+ stats->rx_crc_errors = edev->stats.rx_crc_errors;
+ stats->rx_frame_errors = edev->stats.rx_align_errors;
+
+ return stats;
+}
+
+static const struct net_device_ops qede_netdev_ops = {
+ .ndo_open = qede_open,
+ .ndo_stop = qede_close,
+ .ndo_start_xmit = qede_start_xmit,
+ .ndo_set_rx_mode = qede_set_rx_mode,
+ .ndo_set_mac_address = qede_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = qede_change_mtu,
+ .ndo_get_stats64 = qede_get_stats64,
+};
+
+/* -------------------------------------------------------------------------
+ * START OF PROBE / REMOVE
+ * -------------------------------------------------------------------------
+ */
+
+static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
+ struct pci_dev *pdev,
+ struct qed_dev_eth_info *info,
+ u32 dp_module,
+ u8 dp_level)
+{
+ struct net_device *ndev;
+ struct qede_dev *edev;
+
+ ndev = alloc_etherdev_mqs(sizeof(*edev),
+ info->num_queues,
+ info->num_queues);
+ if (!ndev) {
+ pr_err("etherdev allocation failed\n");
+ return NULL;
+ }
+
+ edev = netdev_priv(ndev);
+ edev->ndev = ndev;
+ edev->cdev = cdev;
+ edev->pdev = pdev;
+ edev->dp_module = dp_module;
+ edev->dp_level = dp_level;
+ edev->ops = qed_ops;
+ edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
+ edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
+
+	DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
+		info->num_queues, info->num_queues);
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ memset(&edev->stats, 0, sizeof(edev->stats));
+ memcpy(&edev->dev_info, info, sizeof(*info));
+
+ edev->num_tc = edev->dev_info.num_tc;
+
+ return edev;
+}
+
+static void qede_init_ndev(struct qede_dev *edev)
+{
+ struct net_device *ndev = edev->ndev;
+ struct pci_dev *pdev = edev->pdev;
+ u32 hw_features;
+
+ pci_set_drvdata(pdev, ndev);
+
+ ndev->mem_start = edev->dev_info.common.pci_mem_start;
+ ndev->base_addr = ndev->mem_start;
+ ndev->mem_end = edev->dev_info.common.pci_mem_end;
+ ndev->irq = edev->dev_info.common.pci_irq;
+
+ ndev->watchdog_timeo = TX_TIMEOUT;
+
+ ndev->netdev_ops = &qede_netdev_ops;
+
+ qede_set_ethtool_ops(ndev);
+
+	/* user-changeable features */
+ hw_features = NETIF_F_GRO | NETIF_F_SG |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6;
+
+ ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
+ NETIF_F_HIGHDMA;
+ ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
+ NETIF_F_HW_VLAN_CTAG_TX;
+
+ ndev->hw_features = hw_features;
+
+ /* Set network device HW mac */
+ ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
+}
+
+/* This function converts from 32b param to two params of level and module
+ * Input 32b decoding:
+ * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
+ * 'happy' flow, e.g. memory allocation failed.
+ * b30 - enable all INFO prints. INFO prints are for major steps in the flow
+ * and provide important parameters.
+ * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
+ * module. VERBOSE prints are for tracking the specific flow in low level.
+ *
+ * Notice that the level should be that of the lowest required logs.
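+ *
+ * For example, debug=0x40000000 selects the INFO level with no per-module
+ * VERBOSE prints, while debug=0x3 selects the VERBOSE level for modules
+ * 0 and 1.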
+ */
+void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
+{
+ *p_dp_level = QED_LEVEL_NOTICE;
+ *p_dp_module = 0;
+
+ if (debug & QED_LOG_VERBOSE_MASK) {
+ *p_dp_level = QED_LEVEL_VERBOSE;
+ *p_dp_module = (debug & 0x3FFFFFFF);
+ } else if (debug & QED_LOG_INFO_MASK) {
+ *p_dp_level = QED_LEVEL_INFO;
+ } else if (debug & QED_LOG_NOTICE_MASK) {
+ *p_dp_level = QED_LEVEL_NOTICE;
+ }
+}
+
+static void qede_free_fp_array(struct qede_dev *edev)
+{
+ if (edev->fp_array) {
+ struct qede_fastpath *fp;
+ int i;
+
+ for_each_rss(i) {
+ fp = &edev->fp_array[i];
+
+ kfree(fp->sb_info);
+ kfree(fp->rxq);
+ kfree(fp->txqs);
+ }
+ kfree(edev->fp_array);
+ }
+ edev->num_rss = 0;
+}
+
+static int qede_alloc_fp_array(struct qede_dev *edev)
+{
+ struct qede_fastpath *fp;
+ int i;
+
+ edev->fp_array = kcalloc(QEDE_RSS_CNT(edev),
+ sizeof(*edev->fp_array), GFP_KERNEL);
+ if (!edev->fp_array) {
+ DP_NOTICE(edev, "fp array allocation failed\n");
+ goto err;
+ }
+
+ for_each_rss(i) {
+ fp = &edev->fp_array[i];
+
+ fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
+ if (!fp->sb_info) {
+ DP_NOTICE(edev, "sb info struct allocation failed\n");
+ goto err;
+ }
+
+ fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
+ if (!fp->rxq) {
+ DP_NOTICE(edev, "RXQ struct allocation failed\n");
+ goto err;
+ }
+
+ fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), GFP_KERNEL);
+ if (!fp->txqs) {
+ DP_NOTICE(edev, "TXQ array allocation failed\n");
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ qede_free_fp_array(edev);
+ return -ENOMEM;
+}
+
+static void qede_sp_task(struct work_struct *work)
+{
+ struct qede_dev *edev = container_of(work, struct qede_dev,
+ sp_task.work);
+ mutex_lock(&edev->qede_lock);
+
+ if (edev->state == QEDE_STATE_OPEN) {
+ if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
+ qede_config_rx_mode(edev->ndev);
+ }
+
+ mutex_unlock(&edev->qede_lock);
+}
+
+static void qede_update_pf_params(struct qed_dev *cdev)
+{
+ struct qed_pf_params pf_params;
+
+ /* 16 rx + 16 tx */
+ memset(&pf_params, 0, sizeof(struct qed_pf_params));
+ pf_params.eth_pf_params.num_cons = 32;
+ qed_ops->common->update_pf_params(cdev, &pf_params);
+}
+
+enum qede_probe_mode {
+ QEDE_PROBE_NORMAL,
+};
+
+static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
+ enum qede_probe_mode mode)
+{
+ struct qed_slowpath_params params;
+ struct qed_dev_eth_info dev_info;
+ struct qede_dev *edev;
+ struct qed_dev *cdev;
+ int rc;
+
+ if (unlikely(dp_level & QED_LEVEL_INFO))
+ pr_notice("Starting qede probe\n");
+
+ cdev = qed_ops->common->probe(pdev, QED_PROTOCOL_ETH,
+ dp_module, dp_level);
+ if (!cdev) {
+ rc = -ENODEV;
+ goto err0;
+ }
+
+ qede_update_pf_params(cdev);
+
+ /* Start the Slowpath-process */
+ memset(&params, 0, sizeof(struct qed_slowpath_params));
+ params.int_mode = QED_INT_MODE_MSIX;
+ params.drv_major = QEDE_MAJOR_VERSION;
+ params.drv_minor = QEDE_MINOR_VERSION;
+ params.drv_rev = QEDE_REVISION_VERSION;
+ params.drv_eng = QEDE_ENGINEERING_VERSION;
+ strlcpy(params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
+ rc = qed_ops->common->slowpath_start(cdev, &params);
+ if (rc) {
+ pr_notice("Cannot start slowpath\n");
+ goto err1;
+ }
+
+ /* Learn information crucial for qede to progress */
+ rc = qed_ops->fill_dev_info(cdev, &dev_info);
+ if (rc)
+ goto err2;
+
+ edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
+ dp_level);
+ if (!edev) {
+ rc = -ENOMEM;
+ goto err2;
+ }
+
+ qede_init_ndev(edev);
+
+ rc = register_netdev(edev->ndev);
+ if (rc) {
+ DP_NOTICE(edev, "Cannot register net-device\n");
+ goto err3;
+ }
+
+ edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
+
+ edev->ops->register_ops(cdev, &qede_ll_ops, edev);
+
+ INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
+ mutex_init(&edev->qede_lock);
+
+	DP_INFO(edev, "qede probe completed successfully\n");
+
+ return 0;
+
+err3:
+ free_netdev(edev->ndev);
+err2:
+ qed_ops->common->slowpath_stop(cdev);
+err1:
+ qed_ops->common->remove(cdev);
+err0:
+ return rc;
+}
+
+static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ u32 dp_module = 0;
+ u8 dp_level = 0;
+
+ qede_config_debug(debug, &dp_module, &dp_level);
+
+ return __qede_probe(pdev, dp_module, dp_level,
+ QEDE_PROBE_NORMAL);
+}
+
+enum qede_remove_mode {
+ QEDE_REMOVE_NORMAL,
+};
+
+static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct qede_dev *edev = netdev_priv(ndev);
+ struct qed_dev *cdev = edev->cdev;
+
+ DP_INFO(edev, "Starting qede_remove\n");
+
+ cancel_delayed_work_sync(&edev->sp_task);
+ unregister_netdev(ndev);
+
+ edev->ops->common->set_power_state(cdev, PCI_D0);
+
+ pci_set_drvdata(pdev, NULL);
+
+ free_netdev(ndev);
+
+ /* Use global ops since we've freed edev */
+ qed_ops->common->slowpath_stop(cdev);
+ qed_ops->common->remove(cdev);
+
+ pr_notice("Ending successfully qede_remove\n");
+}
+
+static void qede_remove(struct pci_dev *pdev)
+{
+ __qede_remove(pdev, QEDE_REMOVE_NORMAL);
+}
+
+/* -------------------------------------------------------------------------
+ * START OF LOAD / UNLOAD
+ * -------------------------------------------------------------------------
+ */
+
+static int qede_set_num_queues(struct qede_dev *edev)
+{
+ int rc;
+ u16 rss_num;
+
+	/* Set up queues according to possible resources */
+ rss_num = netif_get_num_default_rss_queues() *
+ edev->dev_info.common.num_hwfns;
+
+ rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);
+
+ rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
+ if (rc > 0) {
+ /* Managed to request interrupts for our queues */
+ edev->num_rss = rc;
+ DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
+ QEDE_RSS_CNT(edev), rss_num);
+ rc = 0;
+ }
+ return rc;
+}
+
+static void qede_free_mem_sb(struct qede_dev *edev,
+ struct qed_sb_info *sb_info)
+{
+ if (sb_info->sb_virt)
+ dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
+ (void *)sb_info->sb_virt, sb_info->sb_phys);
+}
+
+/* This function allocates fast-path status block memory */
+static int qede_alloc_mem_sb(struct qede_dev *edev,
+ struct qed_sb_info *sb_info,
+ u16 sb_id)
+{
+ struct status_block *sb_virt;
+ dma_addr_t sb_phys;
+ int rc;
+
+ sb_virt = dma_alloc_coherent(&edev->pdev->dev,
+ sizeof(*sb_virt),
+ &sb_phys, GFP_KERNEL);
+ if (!sb_virt) {
+ DP_ERR(edev, "Status block allocation failed\n");
+ return -ENOMEM;
+ }
+
+ rc = edev->ops->common->sb_init(edev->cdev, sb_info,
+ sb_virt, sb_phys, sb_id,
+ QED_SB_TYPE_L2_QUEUE);
+ if (rc) {
+ DP_ERR(edev, "Status block initialization failed\n");
+ dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
+ sb_virt, sb_phys);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void qede_free_rx_buffers(struct qede_dev *edev,
+ struct qede_rx_queue *rxq)
+{
+ u16 i;
+
+ for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
+ struct sw_rx_data *rx_buf;
+ u8 *data;
+
+ rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
+ data = rx_buf->data;
+
+ dma_unmap_single(&edev->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ rxq->rx_buf_size, DMA_FROM_DEVICE);
+
+ rx_buf->data = NULL;
+ kfree(data);
+ }
+}
+
+static void qede_free_mem_rxq(struct qede_dev *edev,
+ struct qede_rx_queue *rxq)
+{
+ /* Free rx buffers */
+ qede_free_rx_buffers(edev, rxq);
+
+ /* Free the parallel SW ring */
+ kfree(rxq->sw_rx_ring);
+
+ /* Free the real RQ ring used by FW */
+ edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
+ edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
+}
+
+static int qede_alloc_rx_buffer(struct qede_dev *edev,
+ struct qede_rx_queue *rxq)
+{
+ struct sw_rx_data *sw_rx_data;
+ struct eth_rx_bd *rx_bd;
+ dma_addr_t mapping;
+ u16 rx_buf_size;
+ u8 *data;
+
+ rx_buf_size = rxq->rx_buf_size;
+
+ data = kmalloc(rx_buf_size, GFP_ATOMIC);
+ if (unlikely(!data)) {
+ DP_NOTICE(edev, "Failed to allocate Rx data\n");
+ return -ENOMEM;
+ }
+
+ mapping = dma_map_single(&edev->pdev->dev, data,
+ rx_buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+ kfree(data);
+ DP_NOTICE(edev, "Failed to map Rx buffer\n");
+ return -ENOMEM;
+ }
+
+ sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+ sw_rx_data->data = data;
+
+ dma_unmap_addr_set(sw_rx_data, mapping, mapping);
+
+ /* Advance PROD and get BD pointer */
+ rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
+ WARN_ON(!rx_bd);
+ rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
+ rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
+
+ rxq->sw_rx_prod++;
+
+ return 0;
+}
+
+/* This function allocates all memory needed per Rx queue */
+static int qede_alloc_mem_rxq(struct qede_dev *edev,
+ struct qede_rx_queue *rxq)
+{
+ int i, rc, size, num_allocated;
+
+ rxq->num_rx_buffers = edev->q_num_rx_buffers;
+
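+	/* Each buffer must hold an MTU-sized frame plus the Ethernet overhead,
+	 * NET_IP_ALIGN headroom and the FW end-of-buffer alignment.
+	 */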
+ rxq->rx_buf_size = NET_IP_ALIGN +
+ ETH_OVERHEAD +
+ edev->ndev->mtu +
+ QEDE_FW_RX_ALIGN_END;
+
+ /* Allocate the parallel driver ring for Rx buffers */
+ size = sizeof(*rxq->sw_rx_ring) * NUM_RX_BDS_MAX;
+ rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
+ if (!rxq->sw_rx_ring) {
+ DP_ERR(edev, "Rx buffers ring allocation failed\n");
+ goto err;
+ }
+
+ /* Allocate FW Rx ring */
+ rc = edev->ops->common->chain_alloc(edev->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_NEXT_PTR,
+ NUM_RX_BDS_MAX,
+ sizeof(struct eth_rx_bd),
+ &rxq->rx_bd_ring);
+
+ if (rc)
+ goto err;
+
+ /* Allocate FW completion ring */
+ rc = edev->ops->common->chain_alloc(edev->cdev,
+ QED_CHAIN_USE_TO_CONSUME,
+ QED_CHAIN_MODE_PBL,
+ NUM_RX_BDS_MAX,
+ sizeof(union eth_rx_cqe),
+ &rxq->rx_comp_ring);
+ if (rc)
+ goto err;
+
+ /* Allocate buffers for the Rx ring */
+ for (i = 0; i < rxq->num_rx_buffers; i++) {
+ rc = qede_alloc_rx_buffer(edev, rxq);
+ if (rc)
+ break;
+ }
+ num_allocated = i;
+ if (!num_allocated) {
+ DP_ERR(edev, "Rx buffers allocation failed\n");
+ goto err;
+ } else if (num_allocated < rxq->num_rx_buffers) {
+		DP_NOTICE(edev,
+			  "Allocated fewer buffers than desired (%d allocated)\n",
+			  num_allocated);
+ }
+
+ return 0;
+
+err:
+ qede_free_mem_rxq(edev, rxq);
+ return -ENOMEM;
+}
+
+static void qede_free_mem_txq(struct qede_dev *edev,
+ struct qede_tx_queue *txq)
+{
+ /* Free the parallel SW ring */
+ kfree(txq->sw_tx_ring);
+
+	/* Free the real Tx ring (PBL) used by FW */
+ edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
+}
+
+/* This function allocates all memory needed per Tx queue */
+static int qede_alloc_mem_txq(struct qede_dev *edev,
+ struct qede_tx_queue *txq)
+{
+ int size, rc;
+ union eth_tx_bd_types *p_virt;
+
+ txq->num_tx_buffers = edev->q_num_tx_buffers;
+
+ /* Allocate the parallel driver ring for Tx buffers */
+ size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX;
+ txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
+ if (!txq->sw_tx_ring) {
+ DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
+ goto err;
+ }
+
+ rc = edev->ops->common->chain_alloc(edev->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ NUM_TX_BDS_MAX,
+ sizeof(*p_virt),
+ &txq->tx_pbl);
+ if (rc)
+ goto err;
+
+ return 0;
+
+err:
+ qede_free_mem_txq(edev, txq);
+ return -ENOMEM;
+}
+
+/* This function frees all memory of a single fp */
+static void qede_free_mem_fp(struct qede_dev *edev,
+ struct qede_fastpath *fp)
+{
+ int tc;
+
+ qede_free_mem_sb(edev, fp->sb_info);
+
+ qede_free_mem_rxq(edev, fp->rxq);
+
+ for (tc = 0; tc < edev->num_tc; tc++)
+ qede_free_mem_txq(edev, &fp->txqs[tc]);
+}
+
+/* This function allocates all memory needed for a single fp (i.e. an entity
+ * which contains a status block, one Rx queue and multiple per-TC Tx queues).
+ */
+static int qede_alloc_mem_fp(struct qede_dev *edev,
+ struct qede_fastpath *fp)
+{
+ int rc, tc;
+
+ rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->rss_id);
+ if (rc)
+ goto err;
+
+ rc = qede_alloc_mem_rxq(edev, fp->rxq);
+ if (rc)
+ goto err;
+
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
+ if (rc)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ qede_free_mem_fp(edev, fp);
+ return -ENOMEM;
+}
+
+static void qede_free_mem_load(struct qede_dev *edev)
+{
+ int i;
+
+ for_each_rss(i) {
+ struct qede_fastpath *fp = &edev->fp_array[i];
+
+ qede_free_mem_fp(edev, fp);
+ }
+}
+
+/* This function allocates all qede memory at NIC load. */
+static int qede_alloc_mem_load(struct qede_dev *edev)
+{
+ int rc = 0, rss_id;
+
+ for (rss_id = 0; rss_id < QEDE_RSS_CNT(edev); rss_id++) {
+ struct qede_fastpath *fp = &edev->fp_array[rss_id];
+
+ rc = qede_alloc_mem_fp(edev, fp);
+ if (rc)
+ break;
+ }
+
+	if (rss_id != QEDE_RSS_CNT(edev)) {
+		/* Failed allocating memory for all the queues */
+		if (!rss_id) {
+			DP_ERR(edev,
+			       "Failed to allocate memory for the leading queue\n");
+			rc = -ENOMEM;
+		} else {
+			DP_NOTICE(edev,
+				  "Failed to allocate memory for all of the RSS queues; desired %d, allocated %d\n",
+				  QEDE_RSS_CNT(edev), rss_id);
+			/* Continue with however many queues were allocated */
+			rc = 0;
+		}
+		edev->num_rss = rss_id;
+	}
+
+	return rc;
+}
+
+/* This function inits fp content and resets the SB, RXQ and TXQ structures */
+static void qede_init_fp(struct qede_dev *edev)
+{
+ int rss_id, txq_index, tc;
+ struct qede_fastpath *fp;
+
+ for_each_rss(rss_id) {
+ fp = &edev->fp_array[rss_id];
+
+ fp->edev = edev;
+ fp->rss_id = rss_id;
+
+ memset((void *)&fp->napi, 0, sizeof(fp->napi));
+
+ memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info));
+
+ memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
+ fp->rxq->rxq_id = rss_id;
+
+ memset((void *)fp->txqs, 0, (edev->num_tc * sizeof(*fp->txqs)));
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ txq_index = tc * QEDE_RSS_CNT(edev) + rss_id;
+ fp->txqs[tc].index = txq_index;
+ }
+
+ snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
+ edev->ndev->name, rss_id);
+ }
+}
+
+static int qede_set_real_num_queues(struct qede_dev *edev)
+{
+ int rc = 0;
+
+ rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_CNT(edev));
+ if (rc) {
+ DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
+ return rc;
+ }
+ rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_CNT(edev));
+ if (rc) {
+ DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static void qede_napi_disable_remove(struct qede_dev *edev)
+{
+ int i;
+
+ for_each_rss(i) {
+ napi_disable(&edev->fp_array[i].napi);
+
+ netif_napi_del(&edev->fp_array[i].napi);
+ }
+}
+
+static void qede_napi_add_enable(struct qede_dev *edev)
+{
+ int i;
+
+ /* Add NAPI objects */
+ for_each_rss(i) {
+ netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
+ qede_poll, NAPI_POLL_WEIGHT);
+ napi_enable(&edev->fp_array[i].napi);
+ }
+}
+
+static void qede_sync_free_irqs(struct qede_dev *edev)
+{
+ int i;
+
+ for (i = 0; i < edev->int_info.used_cnt; i++) {
+ if (edev->int_info.msix_cnt) {
+ synchronize_irq(edev->int_info.msix[i].vector);
+ free_irq(edev->int_info.msix[i].vector,
+ &edev->fp_array[i]);
+ } else {
+ edev->ops->common->simd_handler_clean(edev->cdev, i);
+ }
+ }
+
+ edev->int_info.used_cnt = 0;
+}
+
+static int qede_req_msix_irqs(struct qede_dev *edev)
+{
+ int i, rc;
+
+	/* Sanity check: there must be at least as many MSI-X vectors as
+	 * prepared RSS queues.
+	 */
+ if (QEDE_RSS_CNT(edev) > edev->int_info.msix_cnt) {
+ DP_ERR(edev,
+ "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
+ QEDE_RSS_CNT(edev), edev->int_info.msix_cnt);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < QEDE_RSS_CNT(edev); i++) {
+ rc = request_irq(edev->int_info.msix[i].vector,
+ qede_msix_fp_int, 0, edev->fp_array[i].name,
+ &edev->fp_array[i]);
+ if (rc) {
+ DP_ERR(edev, "Request fp %d irq failed\n", i);
+ qede_sync_free_irqs(edev);
+ return rc;
+ }
+ DP_VERBOSE(edev, NETIF_MSG_INTR,
+ "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
+ edev->fp_array[i].name, i,
+ &edev->fp_array[i]);
+ edev->int_info.used_cnt++;
+ }
+
+ return 0;
+}
+
+static void qede_simd_fp_handler(void *cookie)
+{
+ struct qede_fastpath *fp = (struct qede_fastpath *)cookie;
+
+ napi_schedule_irqoff(&fp->napi);
+}
+
+static int qede_setup_irqs(struct qede_dev *edev)
+{
+ int i, rc = 0;
+
+ /* Learn Interrupt configuration */
+ rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
+ if (rc)
+ return rc;
+
+ if (edev->int_info.msix_cnt) {
+ rc = qede_req_msix_irqs(edev);
+ if (rc)
+ return rc;
+ edev->ndev->irq = edev->int_info.msix[0].vector;
+ } else {
+ const struct qed_common_ops *ops;
+
+		/* Let qed learn the RSS ids and callbacks */
+ ops = edev->ops->common;
+ for (i = 0; i < QEDE_RSS_CNT(edev); i++)
+ ops->simd_handler_config(edev->cdev,
+ &edev->fp_array[i], i,
+ qede_simd_fp_handler);
+ edev->int_info.used_cnt = QEDE_RSS_CNT(edev);
+ }
+ return 0;
+}
+
+static int qede_drain_txq(struct qede_dev *edev,
+ struct qede_tx_queue *txq,
+ bool allow_drain)
+{
+ int rc, cnt = 1000;
+
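+	/* Poll for up to roughly 1-2 seconds (1000 iterations of 1-2ms) for FW
+	 * to complete all pending Tx; if the queue is stuck, optionally ask
+	 * the MCP to drain it and retry once.
+	 */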
+ while (txq->sw_tx_cons != txq->sw_tx_prod) {
+ if (!cnt) {
+ if (allow_drain) {
+ DP_NOTICE(edev,
+ "Tx queue[%d] is stuck, requesting MCP to drain\n",
+ txq->index);
+ rc = edev->ops->common->drain(edev->cdev);
+ if (rc)
+ return rc;
+ return qede_drain_txq(edev, txq, false);
+ }
+ DP_NOTICE(edev,
+ "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
+ txq->index, txq->sw_tx_prod,
+ txq->sw_tx_cons);
+ return -ENODEV;
+ }
+ cnt--;
+ usleep_range(1000, 2000);
+ barrier();
+ }
+
+ /* FW finished processing, wait for HW to transmit all tx packets */
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+
+static int qede_stop_queues(struct qede_dev *edev)
+{
+ struct qed_update_vport_params vport_update_params;
+ struct qed_dev *cdev = edev->cdev;
+ int rc, tc, i;
+
+ /* Disable the vport */
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.vport_id = 0;
+ vport_update_params.update_vport_active_flg = 1;
+ vport_update_params.vport_active_flg = 0;
+ vport_update_params.update_rss_flg = 0;
+
+ rc = edev->ops->vport_update(cdev, &vport_update_params);
+ if (rc) {
+ DP_ERR(edev, "Failed to update vport\n");
+ return rc;
+ }
+
+ /* Flush Tx queues. If needed, request drain from MCP */
+ for_each_rss(i) {
+ struct qede_fastpath *fp = &edev->fp_array[i];
+
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ struct qede_tx_queue *txq = &fp->txqs[tc];
+
+ rc = qede_drain_txq(edev, txq, true);
+ if (rc)
+ return rc;
+ }
+ }
+
+	/* Stop all queues in reverse order */
+ for (i = QEDE_RSS_CNT(edev) - 1; i >= 0; i--) {
+ struct qed_stop_rxq_params rx_params;
+
+		/* Stop the Tx queue(s) */
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ struct qed_stop_txq_params tx_params;
+
+ tx_params.rss_id = i;
+ tx_params.tx_queue_id = tc * QEDE_RSS_CNT(edev) + i;
+ rc = edev->ops->q_tx_stop(cdev, &tx_params);
+ if (rc) {
+ DP_ERR(edev, "Failed to stop TXQ #%d\n",
+ tx_params.tx_queue_id);
+ return rc;
+ }
+ }
+
+		/* Stop the Rx queue */
+ memset(&rx_params, 0, sizeof(rx_params));
+ rx_params.rss_id = i;
+ rx_params.rx_queue_id = i;
+
+ rc = edev->ops->q_rx_stop(cdev, &rx_params);
+ if (rc) {
+ DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
+ return rc;
+ }
+ }
+
+ /* Stop the vport */
+ rc = edev->ops->vport_stop(cdev, 0);
+ if (rc)
+ DP_ERR(edev, "Failed to stop VPORT\n");
+
+ return rc;
+}
+
+static int qede_start_queues(struct qede_dev *edev)
+{
+ int rc, tc, i;
+ int vport_id = 0, drop_ttl0_flg = 1, vlan_removal_en = 1;
+ struct qed_dev *cdev = edev->cdev;
+ struct qed_update_vport_rss_params *rss_params = &edev->rss_params;
+ struct qed_update_vport_params vport_update_params;
+ struct qed_queue_start_common_params q_params;
+
+ if (!edev->num_rss) {
+ DP_ERR(edev,
+		       "Cannot update V-PORT to active as there are no Rx queues\n");
+ return -EINVAL;
+ }
+
+ rc = edev->ops->vport_start(cdev, vport_id,
+ edev->ndev->mtu,
+ drop_ttl0_flg,
+ vlan_removal_en);
+
+ if (rc) {
+ DP_ERR(edev, "Start V-PORT failed %d\n", rc);
+ return rc;
+ }
+
+ DP_VERBOSE(edev, NETIF_MSG_IFUP,
+ "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
+ vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
+
+ for_each_rss(i) {
+ struct qede_fastpath *fp = &edev->fp_array[i];
+ dma_addr_t phys_table = fp->rxq->rx_comp_ring.pbl.p_phys_table;
+
+ memset(&q_params, 0, sizeof(q_params));
+ q_params.rss_id = i;
+ q_params.queue_id = i;
+ q_params.vport_id = 0;
+ q_params.sb = fp->sb_info->igu_sb_id;
+ q_params.sb_idx = RX_PI;
+
+ rc = edev->ops->q_rx_start(cdev, &q_params,
+ fp->rxq->rx_buf_size,
+ fp->rxq->rx_bd_ring.p_phys_addr,
+ phys_table,
+ fp->rxq->rx_comp_ring.page_cnt,
+ &fp->rxq->hw_rxq_prod_addr);
+ if (rc) {
+ DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc);
+ return rc;
+ }
+
+ fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
+
+ qede_update_rx_prod(edev, fp->rxq);
+
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ struct qede_tx_queue *txq = &fp->txqs[tc];
+ int txq_index = tc * QEDE_RSS_CNT(edev) + i;
+
+ memset(&q_params, 0, sizeof(q_params));
+ q_params.rss_id = i;
+ q_params.queue_id = txq_index;
+ q_params.vport_id = 0;
+ q_params.sb = fp->sb_info->igu_sb_id;
+ q_params.sb_idx = TX_PI(tc);
+
+ rc = edev->ops->q_tx_start(cdev, &q_params,
+ txq->tx_pbl.pbl.p_phys_table,
+ txq->tx_pbl.page_cnt,
+ &txq->doorbell_addr);
+ if (rc) {
+ DP_ERR(edev, "Start TXQ #%d failed %d\n",
+ txq_index, rc);
+ return rc;
+ }
+
+ txq->hw_cons_ptr =
+ &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
+ SET_FIELD(txq->tx_db.data.params,
+ ETH_DB_DATA_DEST, DB_DEST_XCM);
+ SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
+ DB_AGG_CMD_SET);
+ SET_FIELD(txq->tx_db.data.params,
+ ETH_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_ETH_TX_BD_PROD_CMD);
+
+ txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+ }
+ }
+
+ /* Prepare and send the vport enable */
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.vport_id = vport_id;
+ vport_update_params.update_vport_active_flg = 1;
+ vport_update_params.vport_active_flg = 1;
+
+ /* Fill struct with RSS params */
+ if (QEDE_RSS_CNT(edev) > 1) {
+ vport_update_params.update_rss_flg = 1;
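+		/* Spread the 128-entry indirection table evenly across the
+		 * active RSS queues and use the kernel's random RSS hash key.
+		 */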
+ for (i = 0; i < 128; i++)
+ rss_params->rss_ind_table[i] =
+ ethtool_rxfh_indir_default(i, QEDE_RSS_CNT(edev));
+ netdev_rss_key_fill(rss_params->rss_key,
+ sizeof(rss_params->rss_key));
+ } else {
+ memset(rss_params, 0, sizeof(*rss_params));
+ }
+ memcpy(&vport_update_params.rss_params, rss_params,
+ sizeof(*rss_params));
+
+ rc = edev->ops->vport_update(cdev, &vport_update_params);
+ if (rc) {
+ DP_ERR(edev, "Update V-PORT failed %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int qede_set_mcast_rx_mac(struct qede_dev *edev,
+ enum qed_filter_xcast_params_type opcode,
+ unsigned char *mac, int num_macs)
+{
+ struct qed_filter_params filter_cmd;
+ int i;
+
+ memset(&filter_cmd, 0, sizeof(filter_cmd));
+ filter_cmd.type = QED_FILTER_TYPE_MCAST;
+ filter_cmd.filter.mcast.type = opcode;
+ filter_cmd.filter.mcast.num = num_macs;
+
+ for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
+ ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);
+
+ return edev->ops->filter_config(edev->cdev, &filter_cmd);
+}
+
+enum qede_unload_mode {
+ QEDE_UNLOAD_NORMAL,
+};
+
+static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
+{
+ struct qed_link_params link_params;
+ int rc;
+
+ DP_INFO(edev, "Starting qede unload\n");
+
+ mutex_lock(&edev->qede_lock);
+ edev->state = QEDE_STATE_CLOSED;
+
+ /* Close OS Tx */
+ netif_tx_disable(edev->ndev);
+ netif_carrier_off(edev->ndev);
+
+ /* Reset the link */
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = false;
+ edev->ops->common->set_link(edev->cdev, &link_params);
+ rc = qede_stop_queues(edev);
+ if (rc) {
+ qede_sync_free_irqs(edev);
+ goto out;
+ }
+
+ DP_INFO(edev, "Stopped Queues\n");
+
+ edev->ops->fastpath_stop(edev->cdev);
+
+ /* Release the interrupts */
+ qede_sync_free_irqs(edev);
+ edev->ops->common->set_fp_int(edev->cdev, 0);
+
+ qede_napi_disable_remove(edev);
+
+ qede_free_mem_load(edev);
+ qede_free_fp_array(edev);
+
+out:
+ mutex_unlock(&edev->qede_lock);
+ DP_INFO(edev, "Ending qede unload\n");
+}
+
+enum qede_load_mode {
+ QEDE_LOAD_NORMAL,
+};
+
+static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
+{
+ struct qed_link_params link_params;
+ struct qed_link_output link_output;
+ int rc;
+
+ DP_INFO(edev, "Starting qede load\n");
+
+ rc = qede_set_num_queues(edev);
+ if (rc)
+ goto err0;
+
+ rc = qede_alloc_fp_array(edev);
+ if (rc)
+ goto err0;
+
+ qede_init_fp(edev);
+
+ rc = qede_alloc_mem_load(edev);
+ if (rc)
+ goto err1;
+ DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
+ QEDE_RSS_CNT(edev), edev->num_tc);
+
+ rc = qede_set_real_num_queues(edev);
+ if (rc)
+ goto err2;
+
+ qede_napi_add_enable(edev);
+ DP_INFO(edev, "Napi added and enabled\n");
+
+ rc = qede_setup_irqs(edev);
+ if (rc)
+ goto err3;
+ DP_INFO(edev, "Setup IRQs succeeded\n");
+
+ rc = qede_start_queues(edev);
+ if (rc)
+ goto err4;
+ DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
+
+ /* Add primary mac and set Rx filters */
+ ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr);
+
+ mutex_lock(&edev->qede_lock);
+ edev->state = QEDE_STATE_OPEN;
+ mutex_unlock(&edev->qede_lock);
+
+ /* Ask for link-up using current configuration */
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = true;
+ edev->ops->common->set_link(edev->cdev, &link_params);
+
+ /* Query whether link is already-up */
+ memset(&link_output, 0, sizeof(link_output));
+ edev->ops->common->get_link(edev->cdev, &link_output);
+ qede_link_update(edev, &link_output);
+
+	DP_INFO(edev, "qede load completed successfully\n");
+
+ return 0;
+
+err4:
+ qede_sync_free_irqs(edev);
+	memset(&edev->int_info, 0, sizeof(edev->int_info));
+err3:
+ qede_napi_disable_remove(edev);
+err2:
+ qede_free_mem_load(edev);
+err1:
+ edev->ops->common->set_fp_int(edev->cdev, 0);
+ qede_free_fp_array(edev);
+ edev->num_rss = 0;
+err0:
+ return rc;
+}
+
+void qede_reload(struct qede_dev *edev,
+ void (*func)(struct qede_dev *, union qede_reload_args *),
+ union qede_reload_args *args)
+{
+ qede_unload(edev, QEDE_UNLOAD_NORMAL);
+ /* Call function handler to update parameters
+ * needed for function load.
+ */
+ if (func)
+ func(edev, args);
+
+ qede_load(edev, QEDE_LOAD_NORMAL);
+
+ mutex_lock(&edev->qede_lock);
+ qede_config_rx_mode(edev->ndev);
+ mutex_unlock(&edev->qede_lock);
+}
+
+/* called with rtnl_lock */
+static int qede_open(struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ netif_carrier_off(ndev);
+
+ edev->ops->common->set_power_state(edev->cdev, PCI_D0);
+
+ return qede_load(edev, QEDE_LOAD_NORMAL);
+}
+
+static int qede_close(struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ qede_unload(edev, QEDE_UNLOAD_NORMAL);
+
+ return 0;
+}
+
+static void qede_link_update(void *dev, struct qed_link_output *link)
+{
+ struct qede_dev *edev = dev;
+
+ if (!netif_running(edev->ndev)) {
+ DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
+ return;
+ }
+
+ if (link->link_up) {
+ DP_NOTICE(edev, "Link is up\n");
+ netif_tx_start_all_queues(edev->ndev);
+ netif_carrier_on(edev->ndev);
+ } else {
+ DP_NOTICE(edev, "Link is down\n");
+ netif_tx_disable(edev->ndev);
+ netif_carrier_off(edev->ndev);
+ }
+}
+
+static int qede_set_mac_addr(struct net_device *ndev, void *p)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ struct sockaddr *addr = p;
+ int rc;
+
+ ASSERT_RTNL(); /* @@@TBD To be removed */
+
+ DP_INFO(edev, "Set_mac_addr called\n");
+
+ if (!is_valid_ether_addr(addr->sa_data)) {
+ DP_NOTICE(edev, "The MAC address is not valid\n");
+ return -EFAULT;
+ }
+
+ ether_addr_copy(ndev->dev_addr, addr->sa_data);
+
+ if (!netif_running(ndev)) {
+ DP_NOTICE(edev, "The device is currently down\n");
+ return 0;
+ }
+
+ /* Remove the previous primary mac */
+ rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
+ edev->primary_mac);
+ if (rc)
+ return rc;
+
+ /* Add MAC filter according to the new unicast HW MAC address */
+ ether_addr_copy(edev->primary_mac, ndev->dev_addr);
+ return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
+ edev->primary_mac);
+}
+
+static int
+qede_configure_mcast_filtering(struct net_device *ndev,
+ enum qed_filter_rx_mode_type *accept_flags)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ unsigned char *mc_macs, *temp;
+ struct netdev_hw_addr *ha;
+ int rc = 0, mc_count;
+ size_t size;
+
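+	/* The device supports at most 64 multicast MAC filters; beyond that
+	 * (or with IFF_ALLMULTI) fall back to multicast-promiscuous mode.
+	 */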
+ size = 64 * ETH_ALEN;
+
+ mc_macs = kzalloc(size, GFP_KERNEL);
+ if (!mc_macs) {
+ DP_NOTICE(edev,
+ "Failed to allocate memory for multicast MACs\n");
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ temp = mc_macs;
+
+ /* Remove all previously configured MAC filters */
+ rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
+ mc_macs, 1);
+ if (rc)
+ goto exit;
+
+ netif_addr_lock_bh(ndev);
+
+ mc_count = netdev_mc_count(ndev);
+ if (mc_count < 64) {
+ netdev_for_each_mc_addr(ha, ndev) {
+ ether_addr_copy(temp, ha->addr);
+ temp += ETH_ALEN;
+ }
+ }
+
+ netif_addr_unlock_bh(ndev);
+
+ /* Check for all multicast @@@TBD resource allocation */
+ if ((ndev->flags & IFF_ALLMULTI) ||
+ (mc_count > 64)) {
+ if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
+	pr_notice("qede_remove completed successfully\n");
+ } else {
+ /* Add all multicast MAC filters */
+ rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
+ mc_macs, mc_count);
+ }
+
+exit:
+ kfree(mc_macs);
+ return rc;
+}
+
+static void qede_set_rx_mode(struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ DP_INFO(edev, "qede_set_rx_mode called\n");
+
+ if (edev->state != QEDE_STATE_OPEN) {
+ DP_INFO(edev,
+ "qede_set_rx_mode called while interface is down\n");
+ } else {
+ set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+ }
+}
+
+/* Must be called with qede_lock held */
+static void qede_config_rx_mode(struct net_device *ndev)
+{
+	enum qed_filter_rx_mode_type accept_flags =
+		QED_FILTER_RX_MODE_TYPE_REGULAR;
+ struct qede_dev *edev = netdev_priv(ndev);
+ struct qed_filter_params rx_mode;
+ unsigned char *uc_macs, *temp;
+ struct netdev_hw_addr *ha;
+ int rc, uc_count;
+ size_t size;
+
+ netif_addr_lock_bh(ndev);
+
+ uc_count = netdev_uc_count(ndev);
+ size = uc_count * ETH_ALEN;
+
+ uc_macs = kzalloc(size, GFP_ATOMIC);
+ if (!uc_macs) {
+ DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
+ netif_addr_unlock_bh(ndev);
+ return;
+ }
+
+ temp = uc_macs;
+ netdev_for_each_uc_addr(ha, ndev) {
+ ether_addr_copy(temp, ha->addr);
+ temp += ETH_ALEN;
+ }
+
+ netif_addr_unlock_bh(ndev);
+
+ /* Configure the struct for the Rx mode */
+ memset(&rx_mode, 0, sizeof(struct qed_filter_params));
+ rx_mode.type = QED_FILTER_TYPE_RX_MODE;
+
+ /* Remove all previous unicast secondary macs and multicast macs
+	 * (configure / leave the primary mac)
+ */
+ rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
+ edev->primary_mac);
+ if (rc)
+ goto out;
+
+ /* Check for promiscuous */
+ if ((ndev->flags & IFF_PROMISC) ||
+ (uc_count > 15)) { /* @@@TBD resource allocation - 1 */
+ accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
+ } else {
+ /* Add MAC filters according to the unicast secondary macs */
+ int i;
+
+ temp = uc_macs;
+ for (i = 0; i < uc_count; i++) {
+ rc = qede_set_ucast_rx_mac(edev,
+ QED_FILTER_XCAST_TYPE_ADD,
+ temp);
+ if (rc)
+ goto out;
+
+ temp += ETH_ALEN;
+ }
+
+ rc = qede_configure_mcast_filtering(ndev, &accept_flags);
+ if (rc)
+ goto out;
+ }
+
+ rx_mode.filter.accept_flags = accept_flags;
+ edev->ops->filter_config(edev->cdev, &rx_mode);
+out:
+ kfree(uc_macs);
+}
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 4847713211ca..b09a6b80d107 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -1736,8 +1736,6 @@ static void ql_get_drvinfo(struct net_device *ndev,
sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->regdump_len = 0;
- drvinfo->eedump_len = 0;
}
static u32 ql_get_msglevel(struct net_device *ndev)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index d6696cfa11d2..46bbea8e023c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1092,7 +1092,7 @@ struct qlcnic_filter_hash {
struct qlcnic_mailbox {
struct workqueue_struct *work_q;
struct qlcnic_adapter *adapter;
- struct qlcnic_mbx_ops *ops;
+ const struct qlcnic_mbx_ops *ops;
struct work_struct work;
struct completion completion;
struct list_head cmd_q;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 9f0bdd993955..37a731be7d39 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -4048,7 +4048,7 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox,
work);
struct qlcnic_adapter *adapter = mbx->adapter;
- struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
+ const struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
struct device *dev = &adapter->pdev->dev;
atomic_t *rsp_status = &mbx->rsp_status;
struct list_head *head = &mbx->cmd_q;
@@ -4098,7 +4098,7 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
}
}
-static struct qlcnic_mbx_ops qlcnic_83xx_mbx_ops = {
+static const struct qlcnic_mbx_ops qlcnic_83xx_mbx_ops = {
.enqueue_cmd = qlcnic_83xx_enqueue_mbx_cmd,
.dequeue_cmd = qlcnic_83xx_dequeue_mbx_cmd,
.decode_resp = qlcnic_83xx_decode_mbx_rsp,
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index c3c514e332b5..5dade1fd08b8 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -415,13 +415,6 @@ static void ql_get_drvinfo(struct net_device *ndev,
(qdev->fw_rev_id & 0x000000ff));
strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->n_stats = 0;
- drvinfo->testinfo_len = 0;
- if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
- drvinfo->regdump_len = sizeof(struct ql_mpi_coredump);
- else
- drvinfo->regdump_len = sizeof(struct ql_reg_dump);
- drvinfo->eedump_len = 0;
}
static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 686334f4588d..deae10d7426d 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -175,7 +175,7 @@ enum {
LastFrag = (1 << 28), /* Final segment of a packet */
LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */
MSSShift = 16, /* MSS value position */
- MSSMask = 0xfff, /* MSS value: 11 bits */
+ MSSMask = 0x7ff, /* MSS value: 11 bits */
TxError = (1 << 23), /* Tx error summary */
RxError = (1 << 20), /* Rx error summary */
IPCS = (1 << 18), /* Calculate IP checksum */
@@ -754,10 +754,16 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
mss = skb_shinfo(skb)->gso_size;
+ if (mss > MSSMask) {
+ WARN_ONCE(1, "Net bug: GSO size %d too large for 8139CP\n",
+ mss);
+ goto out_dma_error;
+ }
+
opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
opts1 = DescOwn;
if (mss)
- opts1 |= LargeSend | ((mss & MSSMask) << MSSShift);
+ opts1 |= LargeSend | (mss << MSSShift);
else if (skb->ip_summed == CHECKSUM_PARTIAL) {
const struct iphdr *ip = ip_hdr(skb);
if (ip->protocol == IPPROTO_TCP)
@@ -1852,6 +1858,15 @@ static void cp_set_d3_state (struct cp_private *cp)
pci_set_power_state (cp->pdev, PCI_D3hot);
}
+static netdev_features_t cp_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ if (skb_shinfo(skb)->gso_size > MSSMask)
+ features &= ~NETIF_F_TSO;
+
+ return vlan_features_check(skb, features);
+}
static const struct net_device_ops cp_netdev_ops = {
.ndo_open = cp_open,
.ndo_stop = cp_close,
@@ -1864,6 +1879,7 @@ static const struct net_device_ops cp_netdev_ops = {
.ndo_tx_timeout = cp_tx_timeout,
.ndo_set_features = cp_set_features,
.ndo_change_mtu = cp_change_mtu,
+ .ndo_features_check = cp_features_check,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cp_poll_controller,
@@ -1983,12 +1999,12 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
dev->ethtool_ops = &cp_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+ dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
if (pci_using_dac)
dev->features |= NETIF_F_HIGHDMA;
- /* disabled by default until verified */
dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 78bb4ceb1cdd..ef668d300800 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2388,7 +2388,6 @@ static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
- info->regdump_len = tp->regs_len;
}
static int rtl8139_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index a157aaaaff6a..0623fff932e4 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -766,6 +766,11 @@ struct ravb_ptp {
struct ravb_ptp_perout perout[N_PER_OUT];
};
+enum ravb_chip_id {
+ RCAR_GEN2,
+ RCAR_GEN3,
+};
+
struct ravb_private {
struct net_device *ndev;
struct platform_device *pdev;
@@ -806,6 +811,8 @@ struct ravb_private {
int msg_enable;
int speed;
int duplex;
+ int emac_irq;
+ enum ravb_chip_id chip_id;
unsigned no_avb_link:1;
unsigned avb_link_active_low:1;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 450899e9cea2..aa7b2083cb53 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -201,7 +201,7 @@ static void ravb_ring_free(struct net_device *ndev, int q)
if (priv->rx_ring[q]) {
ring_size = sizeof(struct ravb_ex_rx_desc) *
(priv->num_rx_ring[q] + 1);
- dma_free_coherent(NULL, ring_size, priv->rx_ring[q],
+ dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
priv->rx_desc_dma[q]);
priv->rx_ring[q] = NULL;
}
@@ -209,7 +209,7 @@ static void ravb_ring_free(struct net_device *ndev, int q)
if (priv->tx_ring[q]) {
ring_size = sizeof(struct ravb_tx_desc) *
(priv->num_tx_ring[q] * NUM_TX_DESC + 1);
- dma_free_coherent(NULL, ring_size, priv->tx_ring[q],
+ dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
priv->tx_desc_dma[q]);
priv->tx_ring[q] = NULL;
}
@@ -240,13 +240,13 @@ static void ravb_ring_format(struct net_device *ndev, int q)
rx_desc = &priv->rx_ring[q][i];
/* The size of the buffer should be on 16-byte boundary. */
rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
- dma_addr = dma_map_single(&ndev->dev, priv->rx_skb[q][i]->data,
+ dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
ALIGN(PKT_BUF_SZ, 16),
DMA_FROM_DEVICE);
/* We just set the data size to 0 for a failed mapping which
* should prevent DMA from happening...
*/
- if (dma_mapping_error(&ndev->dev, dma_addr))
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
rx_desc->ds_cc = cpu_to_le16(0);
rx_desc->dptr = cpu_to_le32(dma_addr);
rx_desc->die_dt = DT_FEMPTY;
@@ -309,7 +309,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
/* Allocate all RX descriptors. */
ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
- priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size,
+ priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
&priv->rx_desc_dma[q],
GFP_KERNEL);
if (!priv->rx_ring[q])
@@ -320,7 +320,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
/* Allocate all TX descriptors. */
ring_size = sizeof(struct ravb_tx_desc) *
(priv->num_tx_ring[q] * NUM_TX_DESC + 1);
- priv->tx_ring[q] = dma_alloc_coherent(NULL, ring_size,
+ priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
&priv->tx_desc_dma[q],
GFP_KERNEL);
if (!priv->tx_ring[q])
@@ -443,7 +443,7 @@ static int ravb_tx_free(struct net_device *ndev, int q)
size = le16_to_cpu(desc->ds_tagl) & TX_DS;
/* Free the original skb. */
if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
- dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+ dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
size, DMA_TO_DEVICE);
/* Last packet descriptor? */
if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
@@ -546,7 +546,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
skb = priv->rx_skb[q][entry];
priv->rx_skb[q][entry] = NULL;
- dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+ dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
ALIGN(PKT_BUF_SZ, 16),
DMA_FROM_DEVICE);
get_ts &= (q == RAVB_NC) ?
@@ -586,14 +586,14 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
if (!skb)
break; /* Better luck next round. */
ravb_set_buffer_align(skb);
- dma_addr = dma_map_single(&ndev->dev, skb->data,
+ dma_addr = dma_map_single(ndev->dev.parent, skb->data,
le16_to_cpu(desc->ds_cc),
DMA_FROM_DEVICE);
skb_checksum_none_assert(skb);
/* We just set the data size to 0 for a failed mapping
* which should prevent DMA from happening...
*/
- if (dma_mapping_error(&ndev->dev, dma_addr))
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
desc->ds_cc = cpu_to_le16(0);
desc->dptr = cpu_to_le32(dma_addr);
priv->rx_skb[q][entry] = skb;
@@ -889,6 +889,22 @@ static int ravb_phy_init(struct net_device *ndev)
return -ENOENT;
}
+	/* This driver only supports 10/100Mbit speeds on Gen3
+ * at this time.
+ */
+ if (priv->chip_id == RCAR_GEN3) {
+ int err;
+
+ err = phy_set_max_speed(phydev, SPEED_100);
+ if (err) {
+ netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n");
+ phy_disconnect(phydev);
+ return err;
+ }
+
+ netdev_info(ndev, "limited PHY to 100Mbit/s\n");
+ }
+
netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
phydev->addr, phydev->irq, phydev->drv->name);
@@ -1197,6 +1213,15 @@ static int ravb_open(struct net_device *ndev)
goto out_napi_off;
}
+ if (priv->chip_id == RCAR_GEN3) {
+ error = request_irq(priv->emac_irq, ravb_interrupt,
+ IRQF_SHARED, ndev->name, ndev);
+ if (error) {
+ netdev_err(ndev, "cannot request IRQ\n");
+ goto out_free_irq;
+ }
+ }
+
/* Device init */
error = ravb_dmac_init(ndev);
if (error)
@@ -1220,6 +1245,7 @@ out_ptp_stop:
ravb_ptp_stop(ndev);
out_free_irq:
free_irq(ndev->irq, ndev);
+ free_irq(priv->emac_irq, ndev);
out_napi_off:
napi_disable(&priv->napi[RAVB_NC]);
napi_disable(&priv->napi[RAVB_BE]);
@@ -1300,8 +1326,8 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
entry / NUM_TX_DESC * DPTR_ALIGN;
len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
memcpy(buffer, skb->data, len);
- dma_addr = dma_map_single(&ndev->dev, buffer, len, DMA_TO_DEVICE);
- if (dma_mapping_error(&ndev->dev, dma_addr))
+ dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
goto drop;
desc = &priv->tx_ring[q][entry];
@@ -1310,8 +1336,8 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
buffer = skb->data + len;
len = skb->len - len;
- dma_addr = dma_map_single(&ndev->dev, buffer, len, DMA_TO_DEVICE);
- if (dma_mapping_error(&ndev->dev, dma_addr))
+ dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
goto unmap;
desc++;
@@ -1323,7 +1349,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
if (!ts_skb) {
desc--;
- dma_unmap_single(&ndev->dev, dma_addr, len,
+ dma_unmap_single(ndev->dev.parent, dma_addr, len,
DMA_TO_DEVICE);
goto unmap;
}
@@ -1358,7 +1384,7 @@ exit:
return NETDEV_TX_OK;
unmap:
- dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+ dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
drop:
dev_kfree_skb_any(skb);
@@ -1625,10 +1651,20 @@ static int ravb_mdio_release(struct ravb_private *priv)
return 0;
}
+static const struct of_device_id ravb_match_table[] = {
+ { .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 },
+ { .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 },
+ { .compatible = "renesas,etheravb-r8a7795", .data = (void *)RCAR_GEN3 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ravb_match_table);
+
static int ravb_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *match;
struct ravb_private *priv;
+ enum ravb_chip_id chip_id;
struct net_device *ndev;
int error, irq, q;
struct resource *res;
@@ -1657,7 +1693,14 @@ static int ravb_probe(struct platform_device *pdev)
/* The Ether-specific entries in the device structure. */
ndev->base_addr = res->start;
ndev->dma = -1;
- irq = platform_get_irq(pdev, 0);
+
+ match = of_match_device(of_match_ptr(ravb_match_table), &pdev->dev);
+ chip_id = (enum ravb_chip_id)match->data;
+
+ if (chip_id == RCAR_GEN3)
+ irq = platform_get_irq_byname(pdev, "ch22");
+ else
+ irq = platform_get_irq(pdev, 0);
if (irq < 0) {
error = irq;
goto out_release;
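[Editor's note -- illustrative sketch, not part of the patch; all names are hypothetical.] The chip_id handling above relies on the usual OF pattern of carrying per-SoC information in the match table's .data pointer and reading it back in probe:

enum example_chip_id { EXAMPLE_GEN2, EXAMPLE_GEN3 };

static const struct of_device_id example_match_table[] = {
	{ .compatible = "vendor,example-gen2", .data = (void *)EXAMPLE_GEN2 },
	{ .compatible = "vendor,example-gen3", .data = (void *)EXAMPLE_GEN3 },
	{ /* sentinel */ }
};

static int example_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	enum example_chip_id chip_id;
	int irq;

	match = of_match_device(of_match_ptr(example_match_table), &pdev->dev);
	if (!match)
		return -EINVAL;
	chip_id = (enum example_chip_id)(uintptr_t)match->data;

	/* Newer SoCs expose several named interrupts instead of a single one. */
	if (chip_id == EXAMPLE_GEN3)
		irq = platform_get_irq_byname(pdev, "ch22");
	else
		irq = platform_get_irq(pdev, 0);
	return irq < 0 ? irq : 0;
}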
@@ -1688,6 +1731,17 @@ static int ravb_probe(struct platform_device *pdev)
priv->avb_link_active_low =
of_property_read_bool(np, "renesas,ether-link-active-low");
+ if (chip_id == RCAR_GEN3) {
+ irq = platform_get_irq_byname(pdev, "ch24");
+ if (irq < 0) {
+ error = irq;
+ goto out_release;
+ }
+ priv->emac_irq = irq;
+ }
+
+ priv->chip_id = chip_id;
+
/* Set function */
ndev->netdev_ops = &ravb_netdev_ops;
ndev->ethtool_ops = &ravb_ethtool_ops;
@@ -1708,10 +1762,10 @@ static int ravb_probe(struct platform_device *pdev)
/* Allocate descriptor base address table */
priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
- priv->desc_bat = dma_alloc_coherent(NULL, priv->desc_bat_size,
+ priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
&priv->desc_bat_dma, GFP_KERNEL);
if (!priv->desc_bat) {
- dev_err(&ndev->dev,
+ dev_err(&pdev->dev,
"Cannot allocate desc base address table (size %d bytes)\n",
priv->desc_bat_size);
error = -ENOMEM;
@@ -1738,7 +1792,7 @@ static int ravb_probe(struct platform_device *pdev)
/* MDIO bus init */
error = ravb_mdio_init(priv);
if (error) {
- dev_err(&ndev->dev, "failed to initialize MDIO\n");
+ dev_err(&pdev->dev, "failed to initialize MDIO\n");
goto out_dma_free;
}
@@ -1763,7 +1817,7 @@ out_napi_del:
netif_napi_del(&priv->napi[RAVB_BE]);
ravb_mdio_release(priv);
out_dma_free:
- dma_free_coherent(NULL, priv->desc_bat_size, priv->desc_bat,
+ dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
priv->desc_bat_dma);
out_release:
if (ndev)
@@ -1779,7 +1833,7 @@ static int ravb_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct ravb_private *priv = netdev_priv(ndev);
- dma_free_coherent(NULL, priv->desc_bat_size, priv->desc_bat,
+ dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
priv->desc_bat_dma);
/* Set reset mode */
ravb_write(ndev, CCC_OPC_RESET, CCC);
@@ -1818,13 +1872,6 @@ static const struct dev_pm_ops ravb_dev_pm_ops = {
#define RAVB_PM_OPS NULL
#endif
-static const struct of_device_id ravb_match_table[] = {
- { .compatible = "renesas,etheravb-r8a7790" },
- { .compatible = "renesas,etheravb-r8a7794" },
- { }
-};
-MODULE_DEVICE_TABLE(of, ravb_match_table);
-
static struct platform_driver ravb_driver = {
.probe = ravb_probe,
.remove = ravb_remove,
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index a484d8beb855..6150a235b72c 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1173,7 +1173,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
/* Mark the last entry as wrapping the ring. */
- rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
+ rxdesc->status |= cpu_to_edmac(mdp, RD_RDLE);
memset(mdp->tx_ring, 0, tx_ringsize);
@@ -1212,15 +1212,15 @@ static int sh_eth_ring_init(struct net_device *ndev)
mdp->rx_buf_sz += NET_IP_ALIGN;
/* Allocate RX and TX skb rings */
- mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
- sizeof(*mdp->rx_skbuff), GFP_KERNEL);
+ mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff),
+ GFP_KERNEL);
if (!mdp->rx_skbuff) {
ret = -ENOMEM;
return ret;
}
- mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
- sizeof(*mdp->tx_skbuff), GFP_KERNEL);
+ mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff),
+ GFP_KERNEL);
if (!mdp->tx_skbuff) {
ret = -ENOMEM;
goto skb_ring_free;
@@ -1232,7 +1232,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
GFP_KERNEL);
if (!mdp->rx_ring) {
ret = -ENOMEM;
- goto desc_ring_free;
+ goto skb_ring_free;
}
mdp->dirty_rx = 0;
@@ -1416,7 +1416,7 @@ static int sh_eth_txfree(struct net_device *ndev)
if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
break;
/* TACT bit must be checked before all the following reads */
- rmb();
+ dma_rmb();
netif_info(mdp, tx_done, ndev,
"tx entry %d status 0x%08x\n",
entry, edmac_to_cpu(mdp, txdesc->status));
@@ -1458,7 +1458,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
rxdesc = &mdp->rx_ring[entry];
while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
/* RACT bit must be checked before all the following reads */
- rmb();
+ dma_rmb();
desc_status = edmac_to_cpu(mdp, rxdesc->status);
pkt_len = rxdesc->frame_length;
@@ -1544,10 +1544,10 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
skb_checksum_none_assert(skb);
rxdesc->addr = dma_addr;
}
- wmb(); /* RACT bit must be set after all the above writes */
+ dma_wmb(); /* RACT bit must be set after all the above writes */
if (entry >= mdp->num_rx_ring - 1)
rxdesc->status |=
- cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
+ cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDLE);
else
rxdesc->status |=
cpu_to_edmac(mdp, RD_RACT | RD_RFP);
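[Editor's note -- illustrative sketch, not part of the patch; the descriptor layout and names are hypothetical.] The rmb()/wmb() -> dma_rmb()/dma_wmb() conversions above encode one ordering rule for device-visible descriptors: check the ownership bit before reading the other fields, and fill every field before publishing ownership back to the device.

struct example_desc {
	u32 status;
	u32 addr;
	u32 len;
};

/* Consumer side: check ownership, then read the payload fields. */
static bool example_desc_done(struct example_desc *desc, u32 own_bit,
			      u32 *addr, u32 *len)
{
	if (READ_ONCE(desc->status) & own_bit)
		return false;		/* still owned by the device */
	dma_rmb();			/* status read before addr/len reads */
	*addr = desc->addr;
	*len = desc->len;
	return true;
}

/* Producer side: fill the descriptor, then hand it back to the device. */
static void example_desc_publish(struct example_desc *desc, u32 own_bit,
				 u32 addr, u32 len)
{
	desc->addr = addr;
	desc->len = len;
	dma_wmb();			/* addr/len writes before the own bit */
	desc->status |= own_bit;
}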
@@ -2403,7 +2403,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
txdesc->buffer_length = skb->len;
- wmb(); /* TACT bit must be set after all the above writes */
+ dma_wmb(); /* TACT bit must be set after all the above writes */
if (entry >= mdp->num_tx_ring - 1)
txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
else
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 06dbbe5201cb..50382b1c9ddc 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -285,7 +285,7 @@ enum DMAC_IM_BIT {
/* Receive descriptor bit */
enum RD_STS_BIT {
- RD_RACT = 0x80000000, RD_RDEL = 0x40000000,
+ RD_RACT = 0x80000000, RD_RDLE = 0x40000000,
RD_RFP1 = 0x20000000, RD_RFP0 = 0x10000000,
RD_RFE = 0x08000000, RD_RFS10 = 0x00000200,
RD_RFS9 = 0x00000100, RD_RFS8 = 0x00000080,
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 34ac41ac9e61..32a80d2df7ff 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -152,8 +152,9 @@ struct rocker_fdb_tbl_entry {
struct hlist_node entry;
u32 key_crc32; /* key */
bool learned;
+ unsigned long touched;
struct rocker_fdb_tbl_key {
- u32 pport;
+ struct rocker_port *rocker_port;
u8 addr[ETH_ALEN];
__be16 vlan_id;
} key;
@@ -220,13 +221,13 @@ struct rocker_port {
__be16 internal_vlan_id;
int stp_state;
u32 brport_flags;
+ unsigned long ageing_time;
bool ctrls[ROCKER_CTRL_MAX];
unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN];
struct napi_struct napi_tx;
struct napi_struct napi_rx;
struct rocker_dma_ring_info tx_ring;
struct rocker_dma_ring_info rx_ring;
- struct list_head trans_mem;
};
struct rocker {
@@ -246,6 +247,7 @@ struct rocker {
u64 flow_tbl_next_cookie;
DECLARE_HASHTABLE(group_tbl, 16);
spinlock_t group_tbl_lock; /* for group tbl accesses */
+ struct timer_list fdb_cleanup_timer;
DECLARE_HASHTABLE(fdb_tbl, 16);
spinlock_t fdb_tbl_lock; /* for fdb tbl accesses */
unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
@@ -340,74 +342,63 @@ static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
#define ROCKER_OP_FLAG_REFRESH BIT(3)
static void *__rocker_port_mem_alloc(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
size_t size)
{
- struct list_head *elem = NULL;
+ struct switchdev_trans_item *elem = NULL;
gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
GFP_ATOMIC : GFP_KERNEL;
/* If in transaction prepare phase, allocate the memory
- * and enqueue it on a per-port list. If in transaction
- * commit phase, dequeue the memory from the per-port list
+ * and enqueue it on a transaction. If in transaction
+ * commit phase, dequeue the memory from the transaction
* rather than re-allocating the memory. The idea is the
* driver code paths for prepare and commit are identical
* so the memory allocated in the prepare phase is the
* memory used in the commit phase.
*/
- switch (trans) {
- case SWITCHDEV_TRANS_PREPARE:
+ if (!trans) {
+ elem = kzalloc(size + sizeof(*elem), gfp_flags);
+ } else if (switchdev_trans_ph_prepare(trans)) {
elem = kzalloc(size + sizeof(*elem), gfp_flags);
if (!elem)
return NULL;
- list_add_tail(elem, &rocker_port->trans_mem);
- break;
- case SWITCHDEV_TRANS_COMMIT:
- BUG_ON(list_empty(&rocker_port->trans_mem));
- elem = rocker_port->trans_mem.next;
- list_del_init(elem);
- break;
- case SWITCHDEV_TRANS_NONE:
- elem = kzalloc(size + sizeof(*elem), gfp_flags);
- if (elem)
- INIT_LIST_HEAD(elem);
- break;
- default:
- break;
+ switchdev_trans_item_enqueue(trans, elem, kfree, elem);
+ } else {
+ elem = switchdev_trans_item_dequeue(trans);
}
return elem ? elem + 1 : NULL;
}
static void *rocker_port_kzalloc(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
size_t size)
{
return __rocker_port_mem_alloc(rocker_port, trans, flags, size);
}
static void *rocker_port_kcalloc(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
size_t n, size_t size)
{
return __rocker_port_mem_alloc(rocker_port, trans, flags, n * size);
}
-static void rocker_port_kfree(enum switchdev_trans trans, const void *mem)
+static void rocker_port_kfree(struct switchdev_trans *trans, const void *mem)
{
- struct list_head *elem;
+ struct switchdev_trans_item *elem;
/* Frees are ignored if in transaction prepare phase. The
* memory remains on the per-port list until freed in the
* commit phase.
*/
- if (trans == SWITCHDEV_TRANS_PREPARE)
+ if (switchdev_trans_ph_prepare(trans))
return;
- elem = (struct list_head *)mem - 1;
- BUG_ON(!list_empty(elem));
+ elem = (struct switchdev_trans_item *) mem - 1;
kfree(elem);
}
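[Editor's note -- illustrative sketch, not part of the patch; names are hypothetical.] The switchdev_trans conversion above keeps the two-phase model described in the comment: memory is allocated and parked on the transaction during prepare, so the commit phase can reuse it and never fail with -ENOMEM. A minimal version of that pattern, assuming a transactional caller:

struct example_entry {
	struct switchdev_trans_item tritem;	/* first member, so dequeue returns the entry */
	u32 value;
};

static int example_obj_add(struct switchdev_trans *trans, u32 value)
{
	struct example_entry *entry;

	if (switchdev_trans_ph_prepare(trans)) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			return -ENOMEM;
		/* Park the allocation on the transaction; kfree'd on abort. */
		switchdev_trans_item_enqueue(trans, entry, kfree, &entry->tritem);
		return 0;
	}

	/* Commit phase: reuse the memory allocated during prepare. */
	entry = switchdev_trans_item_dequeue(trans);
	entry->value = value;
	/* ... program the hardware and track 'entry' in driver state ... */
	return 0;
}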
@@ -430,7 +421,7 @@ static void rocker_wait_init(struct rocker_wait *wait)
}
static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
+ struct switchdev_trans *trans,
int flags)
{
struct rocker_wait *wait;
@@ -442,7 +433,7 @@ static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
return wait;
}
-static void rocker_wait_destroy(enum switchdev_trans trans,
+static void rocker_wait_destroy(struct switchdev_trans *trans,
struct rocker_wait *wait)
{
rocker_port_kfree(trans, wait);
@@ -1408,7 +1399,7 @@ static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
wait = rocker_desc_cookie_ptr_get(desc_info);
if (wait->nowait) {
rocker_desc_gen_clear(desc_info);
- rocker_wait_destroy(SWITCHDEV_TRANS_NONE, wait);
+ rocker_wait_destroy(NULL, wait);
} else {
rocker_wait_wake_up(wait);
}
@@ -1463,7 +1454,7 @@ static int rocker_event_link_change(const struct rocker *rocker,
}
static int rocker_port_fdb(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
+ struct switchdev_trans *trans,
const unsigned char *addr,
__be16 vlan_id, int flags);
@@ -1496,8 +1487,7 @@ static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
rocker_port->stp_state != BR_STATE_FORWARDING)
return 0;
- return rocker_port_fdb(rocker_port, SWITCHDEV_TRANS_NONE,
- addr, vlan_id, flags);
+ return rocker_port_fdb(rocker_port, NULL, addr, vlan_id, flags);
}
static int rocker_event_process(const struct rocker *rocker,
@@ -1582,7 +1572,7 @@ typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
void *priv);
static int rocker_cmd_exec(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
rocker_cmd_prep_cb_t prepare, void *prepare_priv,
rocker_cmd_proc_cb_t process, void *process_priv)
{
@@ -1615,7 +1605,7 @@ static int rocker_cmd_exec(struct rocker_port *rocker_port,
rocker_desc_cookie_ptr_set(desc_info, wait);
- if (trans != SWITCHDEV_TRANS_PREPARE)
+ if (!switchdev_trans_ph_prepare(trans))
rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
@@ -1623,7 +1613,7 @@ static int rocker_cmd_exec(struct rocker_port *rocker_port,
if (nowait)
return 0;
- if (trans != SWITCHDEV_TRANS_PREPARE)
+ if (!switchdev_trans_ph_prepare(trans))
if (!rocker_wait_event_timeout(wait, HZ / 10))
return -EIO;
@@ -1875,7 +1865,7 @@ rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
struct ethtool_cmd *ecmd)
{
- return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
+ return rocker_cmd_exec(rocker_port, NULL, 0,
rocker_cmd_get_port_settings_prep, NULL,
rocker_cmd_get_port_settings_ethtool_proc,
ecmd);
@@ -1884,7 +1874,7 @@ static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
unsigned char *macaddr)
{
- return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
+ return rocker_cmd_exec(rocker_port, NULL, 0,
rocker_cmd_get_port_settings_prep, NULL,
rocker_cmd_get_port_settings_macaddr_proc,
macaddr);
@@ -1893,7 +1883,7 @@ static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
struct ethtool_cmd *ecmd)
{
- return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
+ return rocker_cmd_exec(rocker_port, NULL, 0,
rocker_cmd_set_port_settings_ethtool_prep,
ecmd, NULL, NULL);
}
@@ -1901,7 +1891,7 @@ static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
unsigned char *macaddr)
{
- return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
+ return rocker_cmd_exec(rocker_port, NULL, 0,
rocker_cmd_set_port_settings_macaddr_prep,
macaddr, NULL, NULL);
}
@@ -1909,13 +1899,13 @@ static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
int mtu)
{
- return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
+ return rocker_cmd_exec(rocker_port, NULL, 0,
rocker_cmd_set_port_settings_mtu_prep,
&mtu, NULL, NULL);
}
static int rocker_port_set_learning(struct rocker_port *rocker_port,
- enum switchdev_trans trans)
+ struct switchdev_trans *trans)
{
return rocker_cmd_exec(rocker_port, trans, 0,
rocker_cmd_set_port_learning_prep,
@@ -2433,7 +2423,7 @@ rocker_flow_tbl_find(const struct rocker *rocker,
}
static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
struct rocker_flow_tbl_entry *match)
{
struct rocker *rocker = rocker_port->rocker;
@@ -2449,7 +2439,7 @@ static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
if (found) {
match->cookie = found->cookie;
- if (trans != SWITCHDEV_TRANS_PREPARE)
+ if (!switchdev_trans_ph_prepare(trans))
hash_del(&found->entry);
rocker_port_kfree(trans, found);
found = match;
@@ -2460,7 +2450,7 @@ static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
}
- if (trans != SWITCHDEV_TRANS_PREPARE)
+ if (!switchdev_trans_ph_prepare(trans))
hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
@@ -2470,7 +2460,7 @@ static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
}
static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
struct rocker_flow_tbl_entry *match)
{
struct rocker *rocker = rocker_port->rocker;
@@ -2486,7 +2476,7 @@ static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
found = rocker_flow_tbl_find(rocker, match);
if (found) {
- if (trans != SWITCHDEV_TRANS_PREPARE)
+ if (!switchdev_trans_ph_prepare(trans))
hash_del(&found->entry);
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
}
@@ -2506,7 +2496,7 @@ static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
}
static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
struct rocker_flow_tbl_entry *entry)
{
if (flags & ROCKER_OP_FLAG_REMOVE)
@@ -2516,7 +2506,7 @@ static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
}
static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
u32 in_pport, u32 in_pport_mask,
enum rocker_of_dpa_table_id goto_tbl)
{
@@ -2536,7 +2526,7 @@ static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
}
static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
u32 in_pport, __be16 vlan_id,
__be16 vlan_id_mask,
enum rocker_of_dpa_table_id goto_tbl,
@@ -2562,7 +2552,7 @@ static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
}
static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
+ struct switchdev_trans *trans,
u32 in_pport, u32 in_pport_mask,
__be16 eth_type, const u8 *eth_dst,
const u8 *eth_dst_mask, __be16 vlan_id,
@@ -2599,7 +2589,7 @@ static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
}
static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
const u8 *eth_dst, const u8 *eth_dst_mask,
__be16 vlan_id, u32 tunnel_id,
enum rocker_of_dpa_table_id goto_tbl,
@@ -2653,7 +2643,7 @@ static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
}
static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
+ struct switchdev_trans *trans,
__be16 eth_type, __be32 dst,
__be32 dst_mask, u32 priority,
enum rocker_of_dpa_table_id goto_tbl,
@@ -2679,7 +2669,7 @@ static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
}
static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
u32 in_pport, u32 in_pport_mask,
const u8 *eth_src, const u8 *eth_src_mask,
const u8 *eth_dst, const u8 *eth_dst_mask,
@@ -2744,7 +2734,7 @@ rocker_group_tbl_find(const struct rocker *rocker,
return NULL;
}
-static void rocker_group_tbl_entry_free(enum switchdev_trans trans,
+static void rocker_group_tbl_entry_free(struct switchdev_trans *trans,
struct rocker_group_tbl_entry *entry)
{
switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
@@ -2759,7 +2749,7 @@ static void rocker_group_tbl_entry_free(enum switchdev_trans trans,
}
static int rocker_group_tbl_add(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
struct rocker_group_tbl_entry *match)
{
struct rocker *rocker = rocker_port->rocker;
@@ -2771,7 +2761,7 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port,
found = rocker_group_tbl_find(rocker, match);
if (found) {
- if (trans != SWITCHDEV_TRANS_PREPARE)
+ if (!switchdev_trans_ph_prepare(trans))
hash_del(&found->entry);
rocker_group_tbl_entry_free(trans, found);
found = match;
@@ -2781,7 +2771,7 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port,
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
}
- if (trans != SWITCHDEV_TRANS_PREPARE)
+ if (!switchdev_trans_ph_prepare(trans))
hash_add(rocker->group_tbl, &found->entry, found->group_id);
spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
@@ -2791,7 +2781,7 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port,
}
static int rocker_group_tbl_del(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
struct rocker_group_tbl_entry *match)
{
struct rocker *rocker = rocker_port->rocker;
@@ -2804,7 +2794,7 @@ static int rocker_group_tbl_del(struct rocker_port *rocker_port,
found = rocker_group_tbl_find(rocker, match);
if (found) {
- if (trans != SWITCHDEV_TRANS_PREPARE)
+ if (!switchdev_trans_ph_prepare(trans))
hash_del(&found->entry);
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
}
@@ -2824,7 +2814,7 @@ static int rocker_group_tbl_del(struct rocker_port *rocker_port,
}
static int rocker_group_tbl_do(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
struct rocker_group_tbl_entry *entry)
{
if (flags & ROCKER_OP_FLAG_REMOVE)
@@ -2834,7 +2824,7 @@ static int rocker_group_tbl_do(struct rocker_port *rocker_port,
}
static int rocker_group_l2_interface(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
__be16 vlan_id, u32 out_pport,
int pop_vlan)
{
@@ -2851,7 +2841,7 @@ static int rocker_group_l2_interface(struct rocker_port *rocker_port,
}
static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
+ struct switchdev_trans *trans,
int flags, u8 group_count,
const u32 *group_ids, u32 group_id)
{
@@ -2876,7 +2866,7 @@ static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
}
static int rocker_group_l2_flood(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
__be16 vlan_id, u8 group_count,
const u32 *group_ids, u32 group_id)
{
@@ -2886,7 +2876,7 @@ static int rocker_group_l2_flood(struct rocker_port *rocker_port,
}
static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
u32 index, const u8 *src_mac, const u8 *dst_mac,
__be16 vlan_id, bool ttl_check, u32 pport)
{
@@ -2922,22 +2912,22 @@ rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
}
static void _rocker_neigh_add(struct rocker *rocker,
- enum switchdev_trans trans,
+ struct switchdev_trans *trans,
struct rocker_neigh_tbl_entry *entry)
{
- if (trans != SWITCHDEV_TRANS_COMMIT)
+ if (!switchdev_trans_ph_commit(trans))
entry->index = rocker->neigh_tbl_next_index++;
- if (trans == SWITCHDEV_TRANS_PREPARE)
+ if (switchdev_trans_ph_prepare(trans))
return;
entry->ref_count++;
hash_add(rocker->neigh_tbl, &entry->entry,
be32_to_cpu(entry->ip_addr));
}
-static void _rocker_neigh_del(enum switchdev_trans trans,
+static void _rocker_neigh_del(struct switchdev_trans *trans,
struct rocker_neigh_tbl_entry *entry)
{
- if (trans == SWITCHDEV_TRANS_PREPARE)
+ if (switchdev_trans_ph_prepare(trans))
return;
if (--entry->ref_count == 0) {
hash_del(&entry->entry);
@@ -2946,19 +2936,19 @@ static void _rocker_neigh_del(enum switchdev_trans trans,
}
static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
- enum switchdev_trans trans,
+ struct switchdev_trans *trans,
const u8 *eth_dst, bool ttl_check)
{
if (eth_dst) {
ether_addr_copy(entry->eth_dst, eth_dst);
entry->ttl_check = ttl_check;
- } else if (trans != SWITCHDEV_TRANS_PREPARE) {
+ } else if (!switchdev_trans_ph_prepare(trans)) {
entry->ref_count++;
}
}
static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
+ struct switchdev_trans *trans,
int flags, __be32 ip_addr, const u8 *eth_dst)
{
struct rocker *rocker = rocker_port->rocker;
@@ -3050,7 +3040,8 @@ err_out:
}
static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
- enum switchdev_trans trans, __be32 ip_addr)
+ struct switchdev_trans *trans,
+ __be32 ip_addr)
{
struct net_device *dev = rocker_port->dev;
struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
@@ -3078,7 +3069,7 @@ static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
}
static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
__be32 ip_addr, u32 *index)
{
struct rocker *rocker = rocker_port->rocker;
@@ -3137,7 +3128,7 @@ static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
}
static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
+ struct switchdev_trans *trans,
int flags, __be16 vlan_id)
{
struct rocker_port *p;
@@ -3186,7 +3177,7 @@ no_ports_in_vlan:
}
static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
__be16 vlan_id, bool pop_vlan)
{
const struct rocker *rocker = rocker_port->rocker;
@@ -3292,7 +3283,7 @@ static struct rocker_ctrl {
};
static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
u32 in_pport = rocker_port->pport;
@@ -3325,7 +3316,8 @@ static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
}
static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans,
+ int flags,
const struct rocker_ctrl *ctrl,
__be16 vlan_id)
{
@@ -3350,7 +3342,7 @@ static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
}
static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
u32 in_pport_mask = 0xffffffff;
@@ -3374,7 +3366,7 @@ static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
}
static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
if (ctrl->acl)
@@ -3392,7 +3384,7 @@ static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
}
static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
__be16 vlan_id)
{
int err = 0;
@@ -3411,7 +3403,7 @@ static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
}
static int rocker_port_ctrl(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
const struct rocker_ctrl *ctrl)
{
u16 vid;
@@ -3430,7 +3422,7 @@ static int rocker_port_ctrl(struct rocker_port *rocker_port,
}
static int rocker_port_vlan(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags, u16 vid)
+ struct switchdev_trans *trans, int flags, u16 vid)
{
enum rocker_of_dpa_table_id goto_tbl =
ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
@@ -3487,14 +3479,14 @@ static int rocker_port_vlan(struct rocker_port *rocker_port,
"Error (%d) port VLAN table\n", err);
err_out:
- if (trans == SWITCHDEV_TRANS_PREPARE)
+ if (switchdev_trans_ph_prepare(trans))
change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
return err;
}
static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags)
+ struct switchdev_trans *trans, int flags)
{
enum rocker_of_dpa_table_id goto_tbl;
u32 in_pport;
@@ -3522,7 +3514,7 @@ static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
struct rocker_fdb_learn_work {
struct work_struct work;
struct rocker_port *rocker_port;
- enum switchdev_trans trans;
+ struct switchdev_trans *trans;
int flags;
u8 addr[ETH_ALEN];
u16 vid;
@@ -3550,7 +3542,7 @@ static void rocker_port_fdb_learn_work(struct work_struct *work)
}
static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
const u8 *addr, __be16 vlan_id)
{
struct rocker_fdb_learn_work *lw;
@@ -3592,7 +3584,7 @@ static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
ether_addr_copy(lw->addr, addr);
lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
- if (trans == SWITCHDEV_TRANS_PREPARE)
+ if (switchdev_trans_ph_prepare(trans))
rocker_port_kfree(trans, lw);
else
schedule_work(&lw->work);
@@ -3614,7 +3606,7 @@ rocker_fdb_tbl_find(const struct rocker *rocker,
}
static int rocker_port_fdb(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
+ struct switchdev_trans *trans,
const unsigned char *addr,
__be16 vlan_id, int flags)
{
@@ -3629,7 +3621,8 @@ static int rocker_port_fdb(struct rocker_port *rocker_port,
return -ENOMEM;
fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
- fdb->key.pport = rocker_port->pport;
+ fdb->touched = jiffies;
+ fdb->key.rocker_port = rocker_port;
ether_addr_copy(fdb->key.addr, addr);
fdb->key.vlan_id = vlan_id;
fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
@@ -3638,13 +3631,17 @@ static int rocker_port_fdb(struct rocker_port *rocker_port,
found = rocker_fdb_tbl_find(rocker, fdb);
- if (removing && found) {
- rocker_port_kfree(trans, fdb);
- if (trans != SWITCHDEV_TRANS_PREPARE)
- hash_del(&found->entry);
- } else if (!removing && !found) {
- if (trans != SWITCHDEV_TRANS_PREPARE)
- hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32);
+ if (found) {
+ found->touched = jiffies;
+ if (removing) {
+ rocker_port_kfree(trans, fdb);
+ if (!switchdev_trans_ph_prepare(trans))
+ hash_del(&found->entry);
+ }
+ } else if (!removing) {
+ if (!switchdev_trans_ph_prepare(trans))
+ hash_add(rocker->fdb_tbl, &fdb->entry,
+ fdb->key_crc32);
}
spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
@@ -3662,7 +3659,7 @@ static int rocker_port_fdb(struct rocker_port *rocker_port,
}
static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags)
+ struct switchdev_trans *trans, int flags)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_fdb_tbl_entry *found;
@@ -3675,12 +3672,12 @@ static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
rocker_port->stp_state == BR_STATE_FORWARDING)
return 0;
- flags |= ROCKER_OP_FLAG_REMOVE;
+ flags |= ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
- if (found->key.pport != rocker_port->pport)
+ if (found->key.rocker_port != rocker_port)
continue;
if (!found->learned)
continue;
@@ -3689,7 +3686,7 @@ static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
found->key.vlan_id);
if (err)
goto err_out;
- if (trans != SWITCHDEV_TRANS_PREPARE)
+ if (!switchdev_trans_ph_prepare(trans))
hash_del(&found->entry);
}
@@ -3699,8 +3696,43 @@ err_out:
return err;
}
+static void rocker_fdb_cleanup(unsigned long data)
+{
+ struct rocker *rocker = (struct rocker *)data;
+ struct rocker_port *rocker_port;
+ struct rocker_fdb_tbl_entry *entry;
+ struct hlist_node *tmp;
+ unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME;
+ unsigned long expires;
+ unsigned long lock_flags;
+ int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
+ ROCKER_OP_FLAG_LEARNED;
+ int bkt;
+
+ spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
+
+ hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, entry, entry) {
+ if (!entry->learned)
+ continue;
+ rocker_port = entry->key.rocker_port;
+ expires = entry->touched + rocker_port->ageing_time;
+ if (time_before_eq(expires, jiffies)) {
+ rocker_port_fdb_learn(rocker_port, NULL,
+ flags, entry->key.addr,
+ entry->key.vlan_id);
+ hash_del(&entry->entry);
+ } else if (time_before(expires, next_timer)) {
+ next_timer = expires;
+ }
+ }
+
+ spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
+
+ mod_timer(&rocker->fdb_cleanup_timer, round_jiffies_up(next_timer));
+}
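[Editor's note -- illustrative sketch, not part of the patch; names are hypothetical and locking is omitted.] The fdb_cleanup_timer and per-port ageing_time above implement the usual ageing scheme: stamp entries when learned or refreshed, expire stale ones from a timer, and rearm the timer for the next soonest expiry. A stripped-down list-based variant (rocker itself walks a hashtable under fdb_tbl_lock):

struct example_fdb_entry {
	struct list_head list;
	unsigned long touched;		/* jiffies of last learn/refresh */
};

struct example_fdb {
	struct list_head entries;
	struct timer_list gc_timer;	/* setup_timer(..., example_fdb_gc, (unsigned long)fdb) */
	unsigned long ageing_time;	/* in jiffies */
};

static void example_fdb_gc(unsigned long data)
{
	struct example_fdb *fdb = (struct example_fdb *)data;
	unsigned long next = jiffies + fdb->ageing_time;
	struct example_fdb_entry *e, *tmp;

	list_for_each_entry_safe(e, tmp, &fdb->entries, list) {
		unsigned long expires = e->touched + fdb->ageing_time;

		if (time_before_eq(expires, jiffies)) {
			list_del(&e->list);	/* expired: forget the entry */
			kfree(e);
		} else if (time_before(expires, next)) {
			next = expires;		/* earliest remaining expiry */
		}
	}

	mod_timer(&fdb->gc_timer, round_jiffies_up(next));
}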
+
static int rocker_port_router_mac(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
__be16 vlan_id)
{
u32 in_pport_mask = 0xffffffff;
@@ -3733,7 +3765,7 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port,
}
static int rocker_port_fwding(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags)
+ struct switchdev_trans *trans, int flags)
{
bool pop_vlan;
u32 out_pport;
@@ -3772,16 +3804,16 @@ static int rocker_port_fwding(struct rocker_port *rocker_port,
}
static int rocker_port_stp_update(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
u8 state)
{
bool want[ROCKER_CTRL_MAX] = { 0, };
bool prev_ctrls[ROCKER_CTRL_MAX];
- u8 prev_state;
+ u8 uninitialized_var(prev_state);
int err;
int i;
- if (trans == SWITCHDEV_TRANS_PREPARE) {
+ if (switchdev_trans_ph_prepare(trans)) {
memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
prev_state = rocker_port->stp_state;
}
@@ -3833,7 +3865,7 @@ static int rocker_port_stp_update(struct rocker_port *rocker_port,
err = rocker_port_fwding(rocker_port, trans, flags);
err_out:
- if (trans == SWITCHDEV_TRANS_PREPARE) {
+ if (switchdev_trans_ph_prepare(trans)) {
memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
rocker_port->stp_state = prev_state;
}
@@ -3842,7 +3874,7 @@ err_out:
}
static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags)
+ struct switchdev_trans *trans, int flags)
{
if (rocker_port_is_bridged(rocker_port))
/* bridge STP will enable port */
@@ -3854,7 +3886,7 @@ static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
}
static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags)
+ struct switchdev_trans *trans, int flags)
{
if (rocker_port_is_bridged(rocker_port))
/* bridge STP will disable port */
@@ -3952,7 +3984,7 @@ not_found:
}
static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
- enum switchdev_trans trans, __be32 dst,
+ struct switchdev_trans *trans, __be32 dst,
int dst_len, const struct fib_info *fi,
u32 tb_id, int flags)
{
@@ -4026,7 +4058,7 @@ static int rocker_port_open(struct net_device *dev)
goto err_request_rx_irq;
}
- err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
+ err = rocker_port_fwd_enable(rocker_port, NULL, 0);
if (err)
goto err_fwd_enable;
@@ -4054,7 +4086,7 @@ static int rocker_port_stop(struct net_device *dev)
rocker_port_set_enable(rocker_port, false);
napi_disable(&rocker_port->napi_rx);
napi_disable(&rocker_port->napi_tx);
- rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE,
+ rocker_port_fwd_disable(rocker_port, NULL,
ROCKER_OP_FLAG_NOWAIT);
free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
@@ -4240,7 +4272,7 @@ static int rocker_port_get_phys_port_name(struct net_device *dev,
struct port_name name = { .buf = buf, .len = len };
int err;
- err = rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
+ err = rocker_cmd_exec(rocker_port, NULL, 0,
rocker_cmd_get_port_settings_prep, NULL,
rocker_cmd_get_port_settings_phys_name_proc,
&name);
@@ -4265,7 +4297,7 @@ static void rocker_port_neigh_destroy(struct neighbour *n)
int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
__be32 ip_addr = *(__be32 *)n->primary_key;
- rocker_port_ipv4_neigh(rocker_port, SWITCHDEV_TRANS_NONE,
+ rocker_port_ipv4_neigh(rocker_port, NULL,
flags, ip_addr, n->ha);
}
@@ -4297,11 +4329,11 @@ static int rocker_port_attr_get(struct net_device *dev,
const struct rocker *rocker = rocker_port->rocker;
switch (attr->id) {
- case SWITCHDEV_ATTR_PORT_PARENT_ID:
+ case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
attr->u.ppid.id_len = sizeof(rocker->hw.id);
memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
break;
- case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
attr->u.brport_flags = rocker_port->brport_flags;
break;
default:
@@ -4311,18 +4343,8 @@ static int rocker_port_attr_get(struct net_device *dev,
return 0;
}
-static void rocker_port_trans_abort(const struct rocker_port *rocker_port)
-{
- struct list_head *mem, *tmp;
-
- list_for_each_safe(mem, tmp, &rocker_port->trans_mem) {
- list_del(mem);
- kfree(mem);
- }
-}
-
static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
+ struct switchdev_trans *trans,
unsigned long brport_flags)
{
unsigned long orig_flags;
@@ -4333,39 +4355,44 @@ static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
err = rocker_port_set_learning(rocker_port, trans);
- if (trans == SWITCHDEV_TRANS_PREPARE)
+ if (switchdev_trans_ph_prepare(trans))
rocker_port->brport_flags = orig_flags;
return err;
}
+static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
+ struct switchdev_trans *trans,
+ u32 ageing_time)
+{
+ if (!switchdev_trans_ph_prepare(trans)) {
+ rocker_port->ageing_time = clock_t_to_jiffies(ageing_time);
+ mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies);
+ }
+
+ return 0;
+}
+
static int rocker_port_attr_set(struct net_device *dev,
- struct switchdev_attr *attr)
+ const struct switchdev_attr *attr,
+ struct switchdev_trans *trans)
{
struct rocker_port *rocker_port = netdev_priv(dev);
int err = 0;
- switch (attr->trans) {
- case SWITCHDEV_TRANS_PREPARE:
- BUG_ON(!list_empty(&rocker_port->trans_mem));
- break;
- case SWITCHDEV_TRANS_ABORT:
- rocker_port_trans_abort(rocker_port);
- return 0;
- default:
- break;
- }
-
switch (attr->id) {
- case SWITCHDEV_ATTR_PORT_STP_STATE:
- err = rocker_port_stp_update(rocker_port, attr->trans,
- ROCKER_OP_FLAG_NOWAIT,
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ err = rocker_port_stp_update(rocker_port, trans, 0,
attr->u.stp_state);
break;
- case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
- err = rocker_port_brport_flags_set(rocker_port, attr->trans,
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ err = rocker_port_brport_flags_set(rocker_port, trans,
attr->u.brport_flags);
break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ err = rocker_port_bridge_ageing_time(rocker_port, trans,
+ attr->u.ageing_time);
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -4375,7 +4402,8 @@ static int rocker_port_attr_set(struct net_device *dev,
}
static int rocker_port_vlan_add(struct rocker_port *rocker_port,
- enum switchdev_trans trans, u16 vid, u16 flags)
+ struct switchdev_trans *trans,
+ u16 vid, u16 flags)
{
int err;
@@ -4394,8 +4422,8 @@ static int rocker_port_vlan_add(struct rocker_port *rocker_port,
}
static int rocker_port_vlans_add(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
- const struct switchdev_obj_vlan *vlan)
+ struct switchdev_trans *trans,
+ const struct switchdev_obj_port_vlan *vlan)
{
u16 vid;
int err;
@@ -4411,8 +4439,8 @@ static int rocker_port_vlans_add(struct rocker_port *rocker_port,
}
static int rocker_port_fdb_add(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
- const struct switchdev_obj_fdb *fdb)
+ struct switchdev_trans *trans,
+ const struct switchdev_obj_port_fdb *fdb)
{
__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
int flags = 0;
@@ -4424,36 +4452,27 @@ static int rocker_port_fdb_add(struct rocker_port *rocker_port,
}
static int rocker_port_obj_add(struct net_device *dev,
- struct switchdev_obj *obj)
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans)
{
struct rocker_port *rocker_port = netdev_priv(dev);
const struct switchdev_obj_ipv4_fib *fib4;
int err = 0;
- switch (obj->trans) {
- case SWITCHDEV_TRANS_PREPARE:
- BUG_ON(!list_empty(&rocker_port->trans_mem));
- break;
- case SWITCHDEV_TRANS_ABORT:
- rocker_port_trans_abort(rocker_port);
- return 0;
- default:
- break;
- }
-
switch (obj->id) {
- case SWITCHDEV_OBJ_PORT_VLAN:
- err = rocker_port_vlans_add(rocker_port, obj->trans,
- &obj->u.vlan);
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = rocker_port_vlans_add(rocker_port, trans,
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
- case SWITCHDEV_OBJ_IPV4_FIB:
- fib4 = &obj->u.ipv4_fib;
- err = rocker_port_fib_ipv4(rocker_port, obj->trans,
+ case SWITCHDEV_OBJ_ID_IPV4_FIB:
+ fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
+ err = rocker_port_fib_ipv4(rocker_port, trans,
htonl(fib4->dst), fib4->dst_len,
- fib4->fi, fib4->tb_id, 0);
+ &fib4->fi, fib4->tb_id, 0);
break;
- case SWITCHDEV_OBJ_PORT_FDB:
- err = rocker_port_fdb_add(rocker_port, obj->trans, &obj->u.fdb);
+ case SWITCHDEV_OBJ_ID_PORT_FDB:
+ err = rocker_port_fdb_add(rocker_port, trans,
+ SWITCHDEV_OBJ_PORT_FDB(obj));
break;
default:
err = -EOPNOTSUPP;
@@ -4468,17 +4487,17 @@ static int rocker_port_vlan_del(struct rocker_port *rocker_port,
{
int err;
- err = rocker_port_router_mac(rocker_port, SWITCHDEV_TRANS_NONE,
+ err = rocker_port_router_mac(rocker_port, NULL,
ROCKER_OP_FLAG_REMOVE, htons(vid));
if (err)
return err;
- return rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE,
+ return rocker_port_vlan(rocker_port, NULL,
ROCKER_OP_FLAG_REMOVE, vid);
}
static int rocker_port_vlans_del(struct rocker_port *rocker_port,
- const struct switchdev_obj_vlan *vlan)
+ const struct switchdev_obj_port_vlan *vlan)
{
u16 vid;
int err;
@@ -4493,11 +4512,11 @@ static int rocker_port_vlans_del(struct rocker_port *rocker_port,
}
static int rocker_port_fdb_del(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
- const struct switchdev_obj_fdb *fdb)
+ struct switchdev_trans *trans,
+ const struct switchdev_obj_port_fdb *fdb)
{
__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
- int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
+ int flags = ROCKER_OP_FLAG_REMOVE;
if (!rocker_port_is_bridged(rocker_port))
return -EINVAL;
@@ -4506,25 +4525,27 @@ static int rocker_port_fdb_del(struct rocker_port *rocker_port,
}
static int rocker_port_obj_del(struct net_device *dev,
- struct switchdev_obj *obj)
+ const struct switchdev_obj *obj)
{
struct rocker_port *rocker_port = netdev_priv(dev);
const struct switchdev_obj_ipv4_fib *fib4;
int err = 0;
switch (obj->id) {
- case SWITCHDEV_OBJ_PORT_VLAN:
- err = rocker_port_vlans_del(rocker_port, &obj->u.vlan);
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = rocker_port_vlans_del(rocker_port,
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
- case SWITCHDEV_OBJ_IPV4_FIB:
- fib4 = &obj->u.ipv4_fib;
- err = rocker_port_fib_ipv4(rocker_port, SWITCHDEV_TRANS_NONE,
+ case SWITCHDEV_OBJ_ID_IPV4_FIB:
+ fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
+ err = rocker_port_fib_ipv4(rocker_port, NULL,
htonl(fib4->dst), fib4->dst_len,
- fib4->fi, fib4->tb_id,
+ &fib4->fi, fib4->tb_id,
ROCKER_OP_FLAG_REMOVE);
break;
- case SWITCHDEV_OBJ_PORT_FDB:
- err = rocker_port_fdb_del(rocker_port, obj->trans, &obj->u.fdb);
+ case SWITCHDEV_OBJ_ID_PORT_FDB:
+ err = rocker_port_fdb_del(rocker_port, NULL,
+ SWITCHDEV_OBJ_PORT_FDB(obj));
break;
default:
err = -EOPNOTSUPP;
@@ -4535,10 +4556,10 @@ static int rocker_port_obj_del(struct net_device *dev,
}
static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
- struct switchdev_obj *obj)
+ struct switchdev_obj_port_fdb *fdb,
+ switchdev_obj_dump_cb_t *cb)
{
struct rocker *rocker = rocker_port->rocker;
- struct switchdev_obj_fdb *fdb = &obj->u.fdb;
struct rocker_fdb_tbl_entry *found;
struct hlist_node *tmp;
unsigned long lock_flags;
@@ -4547,13 +4568,13 @@ static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
- if (found->key.pport != rocker_port->pport)
+ if (found->key.rocker_port != rocker_port)
continue;
- fdb->addr = found->key.addr;
+ ether_addr_copy(fdb->addr, found->key.addr);
fdb->ndm_state = NUD_REACHABLE;
fdb->vid = rocker_port_vlan_to_vid(rocker_port,
found->key.vlan_id);
- err = obj->cb(rocker_port->dev, obj);
+ err = cb(&fdb->obj);
if (err)
break;
}
@@ -4563,9 +4584,9 @@ static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
}
static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
- struct switchdev_obj *obj)
+ struct switchdev_obj_port_vlan *vlan,
+ switchdev_obj_dump_cb_t *cb)
{
- struct switchdev_obj_vlan *vlan = &obj->u.vlan;
u16 vid;
int err = 0;
@@ -4576,7 +4597,7 @@ static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
if (rocker_vlan_id_is_internal(htons(vid)))
vlan->flags |= BRIDGE_VLAN_INFO_PVID;
vlan->vid_begin = vlan->vid_end = vid;
- err = obj->cb(rocker_port->dev, obj);
+ err = cb(&vlan->obj);
if (err)
break;
}
@@ -4585,17 +4606,20 @@ static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
}
static int rocker_port_obj_dump(struct net_device *dev,
- struct switchdev_obj *obj)
+ struct switchdev_obj *obj,
+ switchdev_obj_dump_cb_t *cb)
{
const struct rocker_port *rocker_port = netdev_priv(dev);
int err = 0;
switch (obj->id) {
- case SWITCHDEV_OBJ_PORT_FDB:
- err = rocker_port_fdb_dump(rocker_port, obj);
+ case SWITCHDEV_OBJ_ID_PORT_FDB:
+ err = rocker_port_fdb_dump(rocker_port,
+ SWITCHDEV_OBJ_PORT_FDB(obj), cb);
break;
- case SWITCHDEV_OBJ_PORT_VLAN:
- err = rocker_port_vlan_dump(rocker_port, obj);
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = rocker_port_vlan_dump(rocker_port,
+ SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
break;
default:
err = -EOPNOTSUPP;
@@ -4738,7 +4762,7 @@ rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
void *priv)
{
- return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
+ return rocker_cmd_exec(rocker_port, NULL, 0,
rocker_cmd_get_port_stats_prep, NULL,
rocker_cmd_get_port_stats_ethtool_proc,
priv);
@@ -4930,8 +4954,7 @@ static void rocker_remove_ports(const struct rocker *rocker)
rocker_port = rocker->ports[i];
if (!rocker_port)
continue;
- rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
- ROCKER_OP_FLAG_REMOVE);
+ rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
unregister_netdev(rocker_port->dev);
free_netdev(rocker_port->dev);
}
@@ -4969,7 +4992,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
rocker_port->port_number = port_number;
rocker_port->pport = port_number + 1;
rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
- INIT_LIST_HEAD(&rocker_port->trans_mem);
+ rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME;
rocker_port_dev_addr_init(rocker_port);
dev->netdev_ops = &rocker_port_netdev_ops;
@@ -4992,9 +5015,9 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);
- rocker_port_set_learning(rocker_port, SWITCHDEV_TRANS_NONE);
+ rocker_port_set_learning(rocker_port, NULL);
- err = rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 0);
+ err = rocker_port_ig_tbl(rocker_port, NULL, 0);
if (err) {
netdev_err(rocker_port->dev, "install ig port table failed\n");
goto err_port_ig_tbl;
@@ -5003,8 +5026,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
rocker_port->internal_vlan_id =
rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
- err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
- untagged_vid, 0);
+ err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
if (err) {
netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
goto err_untagged_vlan;
@@ -5013,8 +5035,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
return 0;
err_untagged_vlan:
- rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
- ROCKER_OP_FLAG_REMOVE);
+ rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
err_port_ig_tbl:
rocker->ports[port_number] = NULL;
unregister_netdev(dev);
@@ -5183,6 +5204,10 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_init_tbls;
}
+ setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
+ (unsigned long) rocker);
+ mod_timer(&rocker->fdb_cleanup_timer, jiffies);
+
err = rocker_probe_ports(rocker);
if (err) {
dev_err(&pdev->dev, "failed to probe ports\n");
@@ -5195,6 +5220,7 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_probe_ports:
+ del_timer_sync(&rocker->fdb_cleanup_timer);
rocker_free_tbls(rocker);
err_init_tbls:
free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
@@ -5222,6 +5248,7 @@ static void rocker_remove(struct pci_dev *pdev)
{
struct rocker *rocker = pci_get_drvdata(pdev);
+ del_timer_sync(&rocker->fdb_cleanup_timer);
rocker_free_tbls(rocker);
rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
rocker_remove_ports(rocker);
@@ -5275,8 +5302,7 @@ static int rocker_port_bridge_join(struct rocker_port *rocker_port,
rocker_port->bridge_dev = bridge;
switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);
- return rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
- untagged_vid, 0);
+ return rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
}
static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
@@ -5298,14 +5324,12 @@ static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
false);
rocker_port->bridge_dev = NULL;
- err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
- untagged_vid, 0);
+ err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
if (err)
return err;
if (rocker_port->dev->flags & IFF_UP)
- err = rocker_port_fwd_enable(rocker_port,
- SWITCHDEV_TRANS_NONE, 0);
+ err = rocker_port_fwd_enable(rocker_port, NULL, 0);
return err;
}
@@ -5318,10 +5342,10 @@ static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
rocker_port->bridge_dev = master;
- err = rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
+ err = rocker_port_fwd_disable(rocker_port, NULL, 0);
if (err)
return err;
- err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
+ err = rocker_port_fwd_enable(rocker_port, NULL, 0);
return err;
}
@@ -5399,8 +5423,7 @@ static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
ROCKER_OP_FLAG_NOWAIT;
__be32 ip_addr = *(__be32 *)n->primary_key;
- return rocker_port_ipv4_neigh(rocker_port, SWITCHDEV_TRANS_NONE,
- flags, ip_addr, n->ha);
+ return rocker_port_ipv4_neigh(rocker_port, NULL, flags, ip_addr, n->ha);
}
static int rocker_netevent_event(struct notifier_block *unused,
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index ff649ebef637..bc6d21b471be 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1604,6 +1604,22 @@ efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
memcpy(outbuf, pdu + offset, outlen);
}
+static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ /* All our allocations have been reset */
+ efx_ef10_reset_mc_allocations(efx);
+
+ /* The datapath firmware might have been changed */
+ nic_data->must_check_datapath_caps = true;
+
+ /* MAC statistics have been cleared on the NIC; clear the local
+ * statistic that we update with efx_update_diff_stat().
+ */
+ nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;
+}
+
static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -1623,17 +1639,7 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
return 0;
nic_data->warm_boot_count = rc;
-
- /* All our allocations have been reset */
- efx_ef10_reset_mc_allocations(efx);
-
- /* The datapath firmware might have been changed */
- nic_data->must_check_datapath_caps = true;
-
- /* MAC statistics have been cleared on the NIC; clear the local
- * statistic that we update with efx_update_diff_stat().
- */
- nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;
+ efx_ef10_mcdi_reboot_detected(efx);
return -EIO;
}
@@ -1849,7 +1855,9 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
unsigned int write_ptr;
efx_qword_t *txd;
- BUG_ON(tx_queue->write_count == tx_queue->insert_count);
+ tx_queue->xmit_more_available = false;
+ if (unlikely(tx_queue->write_count == tx_queue->insert_count))
+ return;
do {
write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
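[Editor's note -- illustrative sketch, not part of the patch.] The change above accommodates xmit_more batching on the transmit side: the doorbell may be deferred, so tx_write can legitimately run with nothing new to push and must return quietly rather than hit a BUG_ON. A sketch of the assumed transmit-side counterpart, using sfc helpers as they exist in this kernel (treat it as a sketch, not the driver's actual xmit path):

static void example_xmit_tail(struct efx_tx_queue *tx_queue,
			      struct sk_buff *skb)
{
	if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
		/* Ring the doorbell now; this also flushes deferred work. */
		efx_nic_push_buffers(tx_queue);
	} else {
		/* Defer the doorbell; remember that work is pending. */
		tx_queue->xmit_more_available = true;
	}
}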
@@ -4670,6 +4678,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.mcdi_poll_response = efx_ef10_mcdi_poll_response,
.mcdi_read_response = efx_ef10_mcdi_read_response,
.mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
+ .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
.irq_enable_master = efx_port_dummy_op_void,
.irq_test_generate = efx_ef10_irq_test_generate,
.irq_disable_non_ev = efx_port_dummy_op_void,
@@ -4774,6 +4783,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.mcdi_poll_response = efx_ef10_mcdi_poll_response,
.mcdi_read_response = efx_ef10_mcdi_read_response,
.mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
+ .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
.irq_enable_master = efx_port_dummy_op_void,
.irq_test_generate = efx_ef10_irq_test_generate,
.irq_disable_non_ev = efx_port_dummy_op_void,
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 974637d3ae25..6e11ee6173ce 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2062,7 +2062,7 @@ static void efx_init_napi_channel(struct efx_channel *channel)
netif_napi_add(channel->napi_dev, &channel->napi_str,
efx_poll, napi_weight);
napi_hash_add(&channel->napi_str);
- efx_channel_init_lock(channel);
+ efx_channel_busy_poll_init(channel);
}
static void efx_init_napi(struct efx_nic *efx)
@@ -2125,7 +2125,7 @@ static int efx_busy_poll(struct napi_struct *napi)
if (!netif_running(efx->net_dev))
return LL_FLUSH_FAILED;
- if (!efx_channel_lock_poll(channel))
+ if (!efx_channel_try_lock_poll(channel))
return LL_FLUSH_BUSY;
old_rx_packets = channel->rx_queue.rx_packets;
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index f08266f0eca2..5a1c5a8f278a 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -321,7 +321,9 @@ void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
unsigned write_ptr;
unsigned old_write_count = tx_queue->write_count;
- BUG_ON(tx_queue->write_count == tx_queue->insert_count);
+ tx_queue->xmit_more_available = false;
+ if (unlikely(tx_queue->write_count == tx_queue->insert_count))
+ return;
do {
write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index a9b9460de0d6..41fb6b60a3f0 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1028,10 +1028,21 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
/* Consume the status word since efx_mcdi_rpc_finish() won't */
for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
- if (efx_mcdi_poll_reboot(efx))
+ rc = efx_mcdi_poll_reboot(efx);
+ if (rc)
break;
udelay(MCDI_STATUS_DELAY_US);
}
+
+ /* On EF10, a CODE_MC_REBOOT event can be received without the
+ * reboot detection in efx_mcdi_poll_reboot() being triggered.
+ * If zero was returned from the final call to
+ * efx_mcdi_poll_reboot(), the MC reboot wasn't noticed but the
+ * MC has definitely rebooted so prepare for the reset.
+ */
+ if (!rc && efx->type->mcdi_reboot_detected)
+ efx->type->mcdi_reboot_detected(efx);
+
mcdi->new_epoch = true;
/* Nobody was waiting for an MCDI request, so trigger a reset */
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index c530e1c4cb4f..a8ddd122f685 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -219,6 +219,7 @@ struct efx_tx_buffer {
* @tso_packets: Number of packets via the TSO xmit path
* @pushes: Number of times the TX push feature has been used
* @pio_packets: Number of times the TX PIO feature has been used
+ * @xmit_more_available: Are any packets waiting to be pushed to the NIC
* @empty_read_count: If the completion path has seen the queue as empty
* and the transmission path has not yet checked this, the value of
* @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
@@ -253,6 +254,7 @@ struct efx_tx_queue {
unsigned int tso_packets;
unsigned int pushes;
unsigned int pio_packets;
+ bool xmit_more_available;
/* Statistics to supplement MAC stats */
unsigned long tx_packets;
@@ -431,21 +433,8 @@ struct efx_channel {
struct net_device *napi_dev;
struct napi_struct napi_str;
#ifdef CONFIG_NET_RX_BUSY_POLL
- unsigned int state;
- spinlock_t state_lock;
-#define EFX_CHANNEL_STATE_IDLE 0
-#define EFX_CHANNEL_STATE_NAPI (1 << 0) /* NAPI owns this channel */
-#define EFX_CHANNEL_STATE_POLL (1 << 1) /* poll owns this channel */
-#define EFX_CHANNEL_STATE_DISABLED (1 << 2) /* channel is disabled */
-#define EFX_CHANNEL_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this channel */
-#define EFX_CHANNEL_STATE_POLL_YIELD (1 << 4) /* poll yielded this channel */
-#define EFX_CHANNEL_OWNED \
- (EFX_CHANNEL_STATE_NAPI | EFX_CHANNEL_STATE_POLL)
-#define EFX_CHANNEL_LOCKED \
- (EFX_CHANNEL_OWNED | EFX_CHANNEL_STATE_DISABLED)
-#define EFX_CHANNEL_USER_PEND \
- (EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_POLL_YIELD)
-#endif /* CONFIG_NET_RX_BUSY_POLL */
+ unsigned long busy_poll_state;
+#endif
struct efx_special_buffer eventq;
unsigned int eventq_mask;
unsigned int eventq_read_ptr;
@@ -480,98 +469,94 @@ struct efx_channel {
};
#ifdef CONFIG_NET_RX_BUSY_POLL
-static inline void efx_channel_init_lock(struct efx_channel *channel)
+enum efx_channel_busy_poll_state {
+ EFX_CHANNEL_STATE_IDLE = 0,
+ EFX_CHANNEL_STATE_NAPI = BIT(0),
+ EFX_CHANNEL_STATE_NAPI_REQ_BIT = 1,
+ EFX_CHANNEL_STATE_NAPI_REQ = BIT(1),
+ EFX_CHANNEL_STATE_POLL_BIT = 2,
+ EFX_CHANNEL_STATE_POLL = BIT(2),
+ EFX_CHANNEL_STATE_DISABLE_BIT = 3,
+};
+
+static inline void efx_channel_busy_poll_init(struct efx_channel *channel)
{
- spin_lock_init(&channel->state_lock);
+ WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE);
}
/* Called from the device poll routine to get ownership of a channel. */
static inline bool efx_channel_lock_napi(struct efx_channel *channel)
{
- bool rc = true;
-
- spin_lock_bh(&channel->state_lock);
- if (channel->state & EFX_CHANNEL_LOCKED) {
- WARN_ON(channel->state & EFX_CHANNEL_STATE_NAPI);
- channel->state |= EFX_CHANNEL_STATE_NAPI_YIELD;
- rc = false;
- } else {
- /* we don't care if someone yielded */
- channel->state = EFX_CHANNEL_STATE_NAPI;
+ unsigned long prev, old = READ_ONCE(channel->busy_poll_state);
+
+ while (1) {
+ switch (old) {
+ case EFX_CHANNEL_STATE_POLL:
+ /* Ensure efx_channel_try_lock_poll() won't starve us */
+ set_bit(EFX_CHANNEL_STATE_NAPI_REQ_BIT,
+ &channel->busy_poll_state);
+ /* fallthrough */
+ case EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_NAPI_REQ:
+ return false;
+ default:
+ break;
+ }
+ prev = cmpxchg(&channel->busy_poll_state, old,
+ EFX_CHANNEL_STATE_NAPI);
+ if (unlikely(prev != old)) {
+ /* This is likely to mean we've just entered polling
+ * state. Go back round to set the REQ bit.
+ */
+ old = prev;
+ continue;
+ }
+ return true;
}
- spin_unlock_bh(&channel->state_lock);
- return rc;
}
static inline void efx_channel_unlock_napi(struct efx_channel *channel)
{
- spin_lock_bh(&channel->state_lock);
- WARN_ON(channel->state &
- (EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_NAPI_YIELD));
-
- channel->state &= EFX_CHANNEL_STATE_DISABLED;
- spin_unlock_bh(&channel->state_lock);
+ /* Make sure write has completed from efx_channel_lock_napi() */
+ smp_wmb();
+ WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE);
}
/* Called from efx_busy_poll(). */
-static inline bool efx_channel_lock_poll(struct efx_channel *channel)
+static inline bool efx_channel_try_lock_poll(struct efx_channel *channel)
{
- bool rc = true;
-
- spin_lock_bh(&channel->state_lock);
- if ((channel->state & EFX_CHANNEL_LOCKED)) {
- channel->state |= EFX_CHANNEL_STATE_POLL_YIELD;
- rc = false;
- } else {
- /* preserve yield marks */
- channel->state |= EFX_CHANNEL_STATE_POLL;
- }
- spin_unlock_bh(&channel->state_lock);
- return rc;
+ return cmpxchg(&channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE,
+ EFX_CHANNEL_STATE_POLL) == EFX_CHANNEL_STATE_IDLE;
}
-/* Returns true if NAPI tried to get the channel while it was locked. */
static inline void efx_channel_unlock_poll(struct efx_channel *channel)
{
- spin_lock_bh(&channel->state_lock);
- WARN_ON(channel->state & EFX_CHANNEL_STATE_NAPI);
-
- /* will reset state to idle, unless channel is disabled */
- channel->state &= EFX_CHANNEL_STATE_DISABLED;
- spin_unlock_bh(&channel->state_lock);
+ clear_bit_unlock(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state);
}
-/* True if a socket is polling, even if it did not get the lock. */
static inline bool efx_channel_busy_polling(struct efx_channel *channel)
{
- WARN_ON(!(channel->state & EFX_CHANNEL_OWNED));
- return channel->state & EFX_CHANNEL_USER_PEND;
+ return test_bit(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state);
}
static inline void efx_channel_enable(struct efx_channel *channel)
{
- spin_lock_bh(&channel->state_lock);
- channel->state = EFX_CHANNEL_STATE_IDLE;
- spin_unlock_bh(&channel->state_lock);
+ clear_bit_unlock(EFX_CHANNEL_STATE_DISABLE_BIT,
+ &channel->busy_poll_state);
}
-/* False if the channel is currently owned. */
+/* Stop further polling or NAPI access.
+ * Returns false if the channel is currently busy polling.
+ */
static inline bool efx_channel_disable(struct efx_channel *channel)
{
- bool rc = true;
-
- spin_lock_bh(&channel->state_lock);
- if (channel->state & EFX_CHANNEL_OWNED)
- rc = false;
- channel->state |= EFX_CHANNEL_STATE_DISABLED;
- spin_unlock_bh(&channel->state_lock);
-
- return rc;
+ set_bit(EFX_CHANNEL_STATE_DISABLE_BIT, &channel->busy_poll_state);
+ /* Implicit barrier in efx_channel_busy_polling() */
+ return !efx_channel_busy_polling(channel);
}
#else /* CONFIG_NET_RX_BUSY_POLL */
-static inline void efx_channel_init_lock(struct efx_channel *channel)
+static inline void efx_channel_busy_poll_init(struct efx_channel *channel)
{
}
@@ -584,7 +569,7 @@ static inline void efx_channel_unlock_napi(struct efx_channel *channel)
{
}
-static inline bool efx_channel_lock_poll(struct efx_channel *channel)
+static inline bool efx_channel_try_lock_poll(struct efx_channel *channel)
{
return false;
}
@@ -1277,6 +1262,7 @@ struct efx_nic_type {
void (*mcdi_read_response)(struct efx_nic *efx, efx_dword_t *pdu,
size_t pdu_offset, size_t pdu_len);
int (*mcdi_poll_reboot)(struct efx_nic *efx);
+ void (*mcdi_reboot_detected)(struct efx_nic *efx);
void (*irq_enable_master)(struct efx_nic *efx);
void (*irq_test_generate)(struct efx_nic *efx);
void (*irq_disable_non_ev)(struct efx_nic *efx);
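The spinlocked channel state above is replaced by a single busy_poll_state word driven with cmpxchg and bit operations, so NAPI and socket busy-poll contend without taking a lock. A rough usage sketch follows (not taken from this patch; the retry loop on disable is an assumption about how callers drain busy-pollers):

	/* NAPI poll routine */
	if (efx_channel_lock_napi(channel)) {
		/* process event queue / RX completions */
		efx_channel_unlock_napi(channel);
	}

	/* ndo_busy_poll path */
	if (efx_channel_try_lock_poll(channel)) {
		/* poll the RX queue directly */
		efx_channel_unlock_poll(channel);
	}

	/* teardown: set the disable bit, then wait for busy-poll to let go */
	while (!efx_channel_disable(channel))
		usleep_range(1000, 20000);

The NAPI_REQ bit is what stops a continuously busy-polling socket from starving NAPI: once NAPI has requested the channel, the state word is no longer IDLE, so efx_channel_try_lock_poll()'s cmpxchg cannot succeed until NAPI has had its turn and dropped back to idle.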
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 1833a0146571..67f6afaa022f 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -431,8 +431,20 @@ finish_packet:
efx_tx_maybe_stop_queue(tx_queue);
/* Pass off to hardware */
- if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq))
+ if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
+ struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
+
+ /* There could be packets left on the partner queue if those
+ * SKBs had skb->xmit_more set. If we do not push those, they
+ * could be left for a long time and cause a netdev watchdog.
+ */
+ if (txq2->xmit_more_available)
+ efx_nic_push_buffers(txq2);
+
efx_nic_push_buffers(tx_queue);
+ } else {
+ tx_queue->xmit_more_available = skb->xmit_more;
+ }
tx_queue->tx_packets++;
@@ -722,6 +734,7 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->read_count = 0;
tx_queue->old_read_count = 0;
tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
+ tx_queue->xmit_more_available = false;
/* Set up TX descriptor ring */
efx_nic_init_tx(tx_queue);
@@ -747,6 +760,7 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
++tx_queue->read_count;
}
+ tx_queue->xmit_more_available = false;
netdev_tx_reset_queue(tx_queue->core_txq);
}
@@ -1302,8 +1316,20 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
efx_tx_maybe_stop_queue(tx_queue);
/* Pass off to hardware */
- if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq))
+ if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
+ struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
+
+ /* There could be packets left on the partner queue if those
+ * SKBs had skb->xmit_more set. If we do not push those, they
+ * could be left for a long time and cause a netdev watchdog.
+ */
+ if (txq2->xmit_more_available)
+ efx_nic_push_buffers(txq2);
+
efx_nic_push_buffers(tx_queue);
+ } else {
+ tx_queue->xmit_more_available = skb->xmit_more;
+ }
tx_queue->tso_bursts++;
return NETDEV_TX_OK;
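Both the standard and TSO transmit paths above now follow the usual xmit_more doorbell-batching pattern: write the doorbell only when the stack stops batching (or the core queue has been stopped), and record a deferred push so a later flush can cover it. A minimal sketch of the pattern, with my_txq and my_push_doorbell as hypothetical stand-ins for the driver's queue type and push helper:

	if (!skb->xmit_more || netif_xmit_stopped(txq->core_txq)) {
		my_push_doorbell(txq);			/* push all queued descriptors */
		txq->xmit_more_available = false;
	} else {
		txq->xmit_more_available = true;	/* doorbell deferred for later */
	}

The extra push of efx_tx_queue_partner() covers the partner hardware queue behind the same core netdev queue; without it, descriptors queued there with xmit_more set could sit unpushed until the netdev watchdog fires, as the comment explains.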
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 630f0b7800e4..0e2fc1a844ab 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2018,10 +2018,18 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
lp->cfg.flags |= SMC91X_USE_DMA;
# endif
if (lp->cfg.flags & SMC91X_USE_DMA) {
- int dma = pxa_request_dma(dev->name, DMA_PRIO_LOW,
- smc_pxa_dma_irq, NULL);
- if (dma >= 0)
- dev->dma = dma;
+ dma_cap_mask_t mask;
+ struct pxad_param param;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ param.prio = PXAD_PRIO_LOWEST;
+ param.drcmr = -1UL;
+
+ lp->dma_chan =
+ dma_request_slave_channel_compat(mask, pxad_filter_fn,
+ &param, &dev->dev,
+ "data");
}
#endif
@@ -2032,8 +2040,8 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
version_string, revision_register & 0x0f,
lp->base, dev->irq);
- if (dev->dma != (unsigned char)-1)
- pr_cont(" DMA %d", dev->dma);
+ if (lp->dma_chan)
+ pr_cont(" DMA %p", lp->dma_chan);
pr_cont("%s%s\n",
lp->cfg.flags & SMC91X_NOWAIT ? " [nowait]" : "",
@@ -2058,8 +2066,8 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
err_out:
#ifdef CONFIG_ARCH_PXA
- if (retval && dev->dma != (unsigned char)-1)
- pxa_free_dma(dev->dma);
+ if (retval && lp->dma_chan)
+ dma_release_channel(lp->dma_chan);
#endif
return retval;
}
@@ -2370,6 +2378,7 @@ static int smc_drv_probe(struct platform_device *pdev)
struct smc_local *lp = netdev_priv(ndev);
lp->device = &pdev->dev;
lp->physaddr = res->start;
+
}
#endif
@@ -2406,8 +2415,8 @@ static int smc_drv_remove(struct platform_device *pdev)
free_irq(ndev->irq, ndev);
#ifdef CONFIG_ARCH_PXA
- if (ndev->dma != (unsigned char)-1)
- pxa_free_dma(ndev->dma);
+ if (lp->dma_chan)
+ dma_release_channel(lp->dma_chan);
#endif
iounmap(lp->base);
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 3a18501d1068..a3c129e1e40a 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -33,6 +33,7 @@
#ifndef _SMC91X_H_
#define _SMC91X_H_
+#include <linux/dmaengine.h>
#include <linux/smc91x.h>
/*
@@ -244,6 +245,7 @@ struct smc_local {
u_long physaddr;
struct device *device;
#endif
+ struct dma_chan *dma_chan;
void __iomem *base;
void __iomem *datacs;
@@ -265,21 +267,47 @@ struct smc_local {
* as RX which can overrun memory and lose packets.
*/
#include <linux/dma-mapping.h>
-#include <mach/dma.h>
+#include <linux/dma/pxa-dma.h>
#ifdef SMC_insl
#undef SMC_insl
#define SMC_insl(a, r, p, l) \
smc_pxa_dma_insl(a, lp, r, dev->dma, p, l)
static inline void
+smc_pxa_dma_inpump(struct smc_local *lp, u_char *buf, int len)
+{
+ dma_addr_t dmabuf;
+ struct dma_async_tx_descriptor *tx;
+ dma_cookie_t cookie;
+ enum dma_status status;
+ struct dma_tx_state state;
+
+ dmabuf = dma_map_single(lp->device, buf, len, DMA_FROM_DEVICE);
+ tx = dmaengine_prep_slave_single(lp->dma_chan, dmabuf, len,
+ DMA_DEV_TO_MEM, 0);
+ if (tx) {
+ cookie = dmaengine_submit(tx);
+ dma_async_issue_pending(lp->dma_chan);
+ do {
+ status = dmaengine_tx_status(lp->dma_chan, cookie,
+ &state);
+ cpu_relax();
+ } while (status != DMA_COMPLETE && status != DMA_ERROR &&
+ state.residue);
+ dmaengine_terminate_all(lp->dma_chan);
+ }
+ dma_unmap_single(lp->device, dmabuf, len, DMA_FROM_DEVICE);
+}
+
+static inline void
smc_pxa_dma_insl(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
u_char *buf, int len)
{
- u_long physaddr = lp->physaddr;
- dma_addr_t dmabuf;
+ struct dma_slave_config config;
+ int ret;
/* fallback if no DMA available */
- if (dma == (unsigned char)-1) {
+ if (!lp->dma_chan) {
readsl(ioaddr + reg, buf, len);
return;
}
@@ -291,18 +319,22 @@ smc_pxa_dma_insl(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
len--;
}
+ memset(&config, 0, sizeof(config));
+ config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.src_addr = lp->physaddr + reg;
+ config.dst_addr = lp->physaddr + reg;
+ config.src_maxburst = 32;
+ config.dst_maxburst = 32;
+ ret = dmaengine_slave_config(lp->dma_chan, &config);
+ if (ret) {
+ dev_err(lp->device, "dma channel configuration failed: %d\n",
+ ret);
+ return;
+ }
+
len *= 4;
- dmabuf = dma_map_single(lp->device, buf, len, DMA_FROM_DEVICE);
- DCSR(dma) = DCSR_NODESC;
- DTADR(dma) = dmabuf;
- DSADR(dma) = physaddr + reg;
- DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 |
- DCMD_WIDTH4 | (DCMD_LENGTH & len));
- DCSR(dma) = DCSR_NODESC | DCSR_RUN;
- while (!(DCSR(dma) & DCSR_STOPSTATE))
- cpu_relax();
- DCSR(dma) = 0;
- dma_unmap_single(lp->device, dmabuf, len, DMA_FROM_DEVICE);
+ smc_pxa_dma_inpump(lp, buf, len);
}
#endif
@@ -314,11 +346,11 @@ static inline void
smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
u_char *buf, int len)
{
- u_long physaddr = lp->physaddr;
- dma_addr_t dmabuf;
+ struct dma_slave_config config;
+ int ret;
/* fallback if no DMA available */
- if (dma == (unsigned char)-1) {
+ if (!lp->dma_chan) {
readsw(ioaddr + reg, buf, len);
return;
}
@@ -330,26 +362,25 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
len--;
}
+ memset(&config, 0, sizeof(config));
+ config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ config.src_addr = lp->physaddr + reg;
+ config.dst_addr = lp->physaddr + reg;
+ config.src_maxburst = 32;
+ config.dst_maxburst = 32;
+ ret = dmaengine_slave_config(lp->dma_chan, &config);
+ if (ret) {
+ dev_err(lp->device, "dma channel configuration failed: %d\n",
+ ret);
+ return;
+ }
+
len *= 2;
- dmabuf = dma_map_single(lp->device, buf, len, DMA_FROM_DEVICE);
- DCSR(dma) = DCSR_NODESC;
- DTADR(dma) = dmabuf;
- DSADR(dma) = physaddr + reg;
- DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 |
- DCMD_WIDTH2 | (DCMD_LENGTH & len));
- DCSR(dma) = DCSR_NODESC | DCSR_RUN;
- while (!(DCSR(dma) & DCSR_STOPSTATE))
- cpu_relax();
- DCSR(dma) = 0;
- dma_unmap_single(lp->device, dmabuf, len, DMA_FROM_DEVICE);
+ smc_pxa_dma_inpump(lp, buf, len);
}
#endif
-static void
-smc_pxa_dma_irq(int dma, void *dummy)
-{
- DCSR(dma) = 0;
-}
#endif /* CONFIG_ARCH_PXA */
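The PXA DMA register interface (DCSR/DSADR/DTADR/DCMD) is replaced above by the generic dmaengine slave API. Stripped of error handling, the sequence smc_pxa_dma_inpump() and its callers follow is the standard slave-RX flow:

	struct dma_slave_config cfg = { 0 };
	/* widths, src_addr/dst_addr and maxburst are filled in by the caller */
	dmaengine_slave_config(chan, &cfg);

	dma_addr_t buf = dma_map_single(dev, vaddr, len, DMA_FROM_DEVICE);
	struct dma_async_tx_descriptor *tx =
		dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM, 0);
	dma_cookie_t cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	/* poll dmaengine_tx_status(chan, cookie, &state) until it completes */
	dma_unmap_single(dev, buf, len, DMA_FROM_DEVICE);

The driver polls for completion with cpu_relax() rather than using a completion callback, which keeps the receive path synchronous just as the old register-banging loop was.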
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 3b4cd8a263de..c860c9007e49 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1052,6 +1052,7 @@ static int smsc911x_mii_probe(struct net_device *dev)
#ifdef USE_PHY_WORK_AROUND
if (smsc911x_phy_loopbacktest(dev) < 0) {
SMSC_WARN(pdata, hw, "Failed Loop Back Test");
+ phy_disconnect(phydev);
return -ENODEV;
}
SMSC_TRACE(pdata, hw, "Passed Loop Back Test");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 771cda2a48b2..2e51b816a7e8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -721,10 +721,13 @@ static int stmmac_get_ts_info(struct net_device *dev,
{
struct stmmac_priv *priv = netdev_priv(dev);
- if ((priv->hwts_tx_en) && (priv->hwts_rx_en)) {
+ if ((priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) {
- info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (priv->ptp_clock)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 925f2f8659b8..64d8aa4e0cad 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -424,7 +424,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
struct stmmac_priv *priv = netdev_priv(dev);
struct hwtstamp_config config;
- struct timespec now;
+ struct timespec64 now;
u64 temp = 0;
u32 ptp_v2 = 0;
u32 tstamp_all = 0;
@@ -621,8 +621,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
priv->default_addend);
/* initialize system time */
- getnstimeofday(&now);
- priv->hw->ptp->init_systime(priv->ioaddr, now.tv_sec,
+ ktime_get_real_ts64(&now);
+
+ /* lower 32 bits of tv_sec are safe until y2106 */
+ priv->hw->ptp->init_systime(priv->ioaddr, (u32)now.tv_sec,
now.tv_nsec);
}
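The cast to u32 is safe for the reason the comment gives: 2^32 seconds is roughly 136 years, so tv_sec counted from the 1970 epoch only exceeds 32 bits in the year 2106.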
@@ -1945,7 +1947,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
unsigned int txsize = priv->dma_tx_size;
- unsigned int entry;
+ int entry;
int i, csum_insertion = 0, is_jumbo = 0;
int nfrags = skb_shinfo(skb)->nr_frags;
struct dma_desc *desc, *first;
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 6ce973187225..062bce9acde6 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -4529,9 +4529,6 @@ static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
- info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
- cp->casreg_len : CAS_MAX_REGS;
- info->n_stats = CAS_NUM_STAT_KEYS;
}
static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index a9cac8413e49..14c9d1baa85c 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -2182,11 +2182,6 @@ bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(priv->pdev),
sizeof(drvinfo->bus_info));
-
- drvinfo->n_stats = ((priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0);
- drvinfo->testinfo_len = 0;
- drvinfo->regdump_len = 0;
- drvinfo->eedump_len = 0;
}
/*
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index cba3d9fcb465..77d26fe286c0 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -899,7 +899,6 @@ static void cpmac_get_drvinfo(struct net_device *dev,
strlcpy(info->driver, "cpmac", sizeof(info->driver));
strlcpy(info->version, CPMAC_VERSION, sizeof(info->version));
snprintf(info->bus_info, sizeof(info->bus_info), "%s", "cpmac");
- info->regdump_len = 0;
}
static const struct ethtool_ops cpmac_ethtool_ops = {
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c
index f59509486113..c08be62bceba 100644
--- a/drivers/net/ethernet/ti/cpsw-common.c
+++ b/drivers/net/ethernet/ti/cpsw-common.c
@@ -19,11 +19,38 @@
#include "cpsw.h"
-#define AM33XX_CTRL_MAC_LO_REG(offset, id) ((offset) + 0x8 * (id))
-#define AM33XX_CTRL_MAC_HI_REG(offset, id) ((offset) + 0x8 * (id) + 0x4)
+#define CTRL_MAC_LO_REG(offset, id) ((offset) + 0x8 * (id))
+#define CTRL_MAC_HI_REG(offset, id) ((offset) + 0x8 * (id) + 0x4)
-int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave,
- u8 *mac_addr)
+static int davinci_emac_3517_get_macid(struct device *dev, u16 offset,
+ int slave, u8 *mac_addr)
+{
+ u32 macid_lsb;
+ u32 macid_msb;
+ struct regmap *syscon;
+
+ syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
+ if (IS_ERR(syscon)) {
+ if (PTR_ERR(syscon) == -ENODEV)
+ return 0;
+ return PTR_ERR(syscon);
+ }
+
+ regmap_read(syscon, CTRL_MAC_LO_REG(offset, slave), &macid_lsb);
+ regmap_read(syscon, CTRL_MAC_HI_REG(offset, slave), &macid_msb);
+
+ mac_addr[0] = (macid_msb >> 16) & 0xff;
+ mac_addr[1] = (macid_msb >> 8) & 0xff;
+ mac_addr[2] = macid_msb & 0xff;
+ mac_addr[3] = (macid_lsb >> 16) & 0xff;
+ mac_addr[4] = (macid_lsb >> 8) & 0xff;
+ mac_addr[5] = macid_lsb & 0xff;
+
+ return 0;
+}
+
+static int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave,
+ u8 *mac_addr)
{
u32 macid_lo;
u32 macid_hi;
@@ -36,10 +63,8 @@ int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave,
return PTR_ERR(syscon);
}
- regmap_read(syscon, AM33XX_CTRL_MAC_LO_REG(offset, slave),
- &macid_lo);
- regmap_read(syscon, AM33XX_CTRL_MAC_HI_REG(offset, slave),
- &macid_hi);
+ regmap_read(syscon, CTRL_MAC_LO_REG(offset, slave), &macid_lo);
+ regmap_read(syscon, CTRL_MAC_HI_REG(offset, slave), &macid_hi);
mac_addr[5] = (macid_lo >> 8) & 0xff;
mac_addr[4] = macid_lo & 0xff;
@@ -50,6 +75,27 @@ int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave,
return 0;
}
-EXPORT_SYMBOL_GPL(cpsw_am33xx_cm_get_macid);
+
+int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr)
+{
+ if (of_machine_is_compatible("ti,am33xx"))
+ return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr);
+
+ if (of_device_is_compatible(dev->of_node, "ti,am3517-emac"))
+ return davinci_emac_3517_get_macid(dev, 0x110, slave, mac_addr);
+
+ if (of_device_is_compatible(dev->of_node, "ti,dm816-emac"))
+ return cpsw_am33xx_cm_get_macid(dev, 0x30, slave, mac_addr);
+
+ if (of_machine_is_compatible("ti,am4372"))
+ return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr);
+
+ if (of_machine_is_compatible("ti,dra7"))
+ return davinci_emac_3517_get_macid(dev, 0x514, slave, mac_addr);
+
+ dev_err(dev, "incompatible machine/device type for reading mac address\n");
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(ti_cm_get_macid);
MODULE_LICENSE("GPL");
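As a worked example of the 3517 decode (register values hypothetical): with macid_msb = 0x00123456 and macid_lsb = 0x00789abc, davinci_emac_3517_get_macid() yields the address 12:34:56:78:9a:bc. The AM33xx-style reader uses a different byte layout in its registers, which is why the two helpers remain separate behind the new ti_cm_get_macid() dispatcher.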
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index 0ea78326cc21..e9cc61e1ec74 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -2,6 +2,8 @@
*
* Copyright (C) 2013 Texas Instruments
*
+ * Module Author: Mugunthan V N <mugunthanvnm@ti.com>
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
@@ -13,7 +15,7 @@
*/
#include <linux/platform_device.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/of.h>
@@ -173,7 +175,6 @@ static const struct of_device_id cpsw_phy_sel_id_table[] = {
},
{}
};
-MODULE_DEVICE_TABLE(of, cpsw_phy_sel_id_table);
static int cpsw_phy_sel_probe(struct platform_device *pdev)
{
@@ -214,7 +215,4 @@ static struct platform_driver cpsw_phy_sel_driver = {
.of_match_table = cpsw_phy_sel_id_table,
},
};
-
-module_platform_driver(cpsw_phy_sel_driver);
-MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(cpsw_phy_sel_driver);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 874fb297e96c..040fbc1e5508 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -29,6 +29,7 @@
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
+#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
@@ -1789,7 +1790,6 @@ static void cpsw_get_drvinfo(struct net_device *ndev,
strlcpy(info->driver, "cpsw", sizeof(info->driver));
strlcpy(info->version, "1.0", sizeof(info->version));
strlcpy(info->bus_info, priv->pdev->name, sizeof(info->bus_info));
- info->regdump_len = cpsw_get_regs_len(ndev);
}
static u32 cpsw_get_msglevel(struct net_device *ndev)
@@ -2064,13 +2064,10 @@ no_phy_slave:
if (mac_addr) {
memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
} else {
- if (of_machine_is_compatible("ti,am33xx")) {
- ret = cpsw_am33xx_cm_get_macid(&pdev->dev,
- 0x630, i,
- slave_data->mac_addr);
- if (ret)
- return ret;
- }
+ ret = ti_cm_get_macid(&pdev->dev, i,
+ slave_data->mac_addr);
+ if (ret)
+ return ret;
}
if (data->dual_emac) {
if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
@@ -2214,6 +2211,7 @@ static int cpsw_probe(struct platform_device *pdev)
void __iomem *ss_regs;
struct resource *res, *ss_res;
const struct of_device_id *of_id;
+ struct gpio_descs *mode;
u32 slave_offset, sliver_offset, slave_size;
int ret = 0, i;
int irq;
@@ -2239,6 +2237,13 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_ndev_ret;
}
+ mode = devm_gpiod_get_array_optional(&pdev->dev, "mode", GPIOD_OUT_LOW);
+ if (IS_ERR(mode)) {
+ ret = PTR_ERR(mode);
+ dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
+ goto clean_ndev_ret;
+ }
+
/*
* This may be required here for child devices.
*/
@@ -2585,17 +2590,7 @@ static struct platform_driver cpsw_driver = {
.remove = cpsw_remove,
};
-static int __init cpsw_init(void)
-{
- return platform_driver_register(&cpsw_driver);
-}
-late_initcall(cpsw_init);
-
-static void __exit cpsw_exit(void)
-{
- platform_driver_unregister(&cpsw_driver);
-}
-module_exit(cpsw_exit);
+module_platform_driver(cpsw_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>");
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index ca90efafd156..442a7038e660 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -41,7 +41,6 @@ struct cpsw_platform_data {
};
void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave);
-int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave,
- u8 *mac_addr);
+int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr);
#endif /* __CPSW_H__ */
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index a21c77bc1b27..33bd3b902304 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1861,8 +1861,12 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
pdata->no_bd_ram = of_property_read_bool(np, "ti,davinci-no-bd-ram");
priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
- if (!priv->phy_node)
- pdata->phy_id = NULL;
+ if (!priv->phy_node) {
+ if (!of_phy_is_fixed_link(np))
+ pdata->phy_id = NULL;
+ else if (of_phy_register_fixed_link(np) >= 0)
+ priv->phy_node = of_node_get(np);
+ }
auxdata = pdev->dev.platform_data;
if (auxdata) {
@@ -1882,51 +1886,13 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
return pdata;
}
-static int davinci_emac_3517_get_macid(struct device *dev, u16 offset,
- int slave, u8 *mac_addr)
-{
- u32 macid_lsb;
- u32 macid_msb;
- struct regmap *syscon;
-
- syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
- if (IS_ERR(syscon)) {
- if (PTR_ERR(syscon) == -ENODEV)
- return 0;
- return PTR_ERR(syscon);
- }
-
- regmap_read(syscon, offset, &macid_lsb);
- regmap_read(syscon, offset + 4, &macid_msb);
-
- mac_addr[0] = (macid_msb >> 16) & 0xff;
- mac_addr[1] = (macid_msb >> 8) & 0xff;
- mac_addr[2] = macid_msb & 0xff;
- mac_addr[3] = (macid_lsb >> 16) & 0xff;
- mac_addr[4] = (macid_lsb >> 8) & 0xff;
- mac_addr[5] = macid_lsb & 0xff;
-
- return 0;
-}
-
static int davinci_emac_try_get_mac(struct platform_device *pdev,
int instance, u8 *mac_addr)
{
- int error = -EINVAL;
-
if (!pdev->dev.of_node)
- return error;
-
- if (of_device_is_compatible(pdev->dev.of_node, "ti,am3517-emac"))
- error = davinci_emac_3517_get_macid(&pdev->dev, 0x110,
- 0, mac_addr);
- else if (of_device_is_compatible(pdev->dev.of_node,
- "ti,dm816-emac"))
- error = cpsw_am33xx_cm_get_macid(&pdev->dev, 0x30,
- instance,
- mac_addr);
-
- return error;
+ return -EINVAL;
+
+ return ti_cm_get_macid(&pdev->dev, instance, mac_addr);
}
/**
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 691ec936e88d..a274cd49afe9 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -791,7 +791,6 @@ static void tlan_get_drvinfo(struct net_device *dev,
sizeof(info->bus_info));
else
strlcpy(info->bus_info, "EISA", sizeof(info->bus_info));
- info->eedump_len = TLAN_EEPROM_SIZE;
}
static int tlan_get_eeprom_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/xilinx/ll_temac_mdio.c b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
index 8cf9d4f56bb2..415de1eaf641 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_mdio.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
@@ -59,16 +59,15 @@ static int temac_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
int temac_mdio_setup(struct temac_local *lp, struct device_node *np)
{
struct mii_bus *bus;
- const u32 *bus_hz;
+ u32 bus_hz;
int clk_div;
- int rc, size;
+ int rc;
struct resource res;
/* Calculate a reasonable divisor for the clock rate */
clk_div = 0x3f; /* worst-case default setting */
- bus_hz = of_get_property(np, "clock-frequency", &size);
- if (bus_hz && size >= sizeof(*bus_hz)) {
- clk_div = (*bus_hz) / (2500 * 1000 * 2) - 1;
+ if (of_property_read_u32(np, "clock-frequency", &bus_hz) == 0) {
+ clk_div = bus_hz / (2500 * 1000 * 2) - 1;
if (clk_div < 1)
clk_div = 1;
if (clk_div > 0x3f)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index d95f9aae95e7..4684644703cc 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1135,7 +1135,6 @@ static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
{
strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
- ed->regdump_len = sizeof(u32) * AXIENET_REGS_N;
}
/**
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 2a5a16834c01..507bbb0355c2 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -129,7 +129,6 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
{
int ret;
u32 clk_div, host_clock;
- u32 *property_p;
struct mii_bus *bus;
struct resource res;
struct device_node *np1;
@@ -168,8 +167,7 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
clk_div = DEFAULT_CLOCK_DIVISOR;
goto issue;
}
- property_p = (u32 *) of_get_property(np1, "clock-frequency", NULL);
- if (!property_p) {
+ if (of_property_read_u32(np1, "clock-frequency", &host_clock)) {
netdev_warn(lp->ndev, "clock-frequency property not found.\n");
netdev_warn(lp->ndev,
"Setting MDIO clock divisor to default %d\n",
@@ -179,7 +177,6 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
goto issue;
}
- host_clock = be32_to_cpup(property_p);
clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1;
/* If there is any remainder from the division of
* fHOST / (MAX_MDIO_FREQ * 2), then we need to add