author    Linus Torvalds <torvalds@linux-foundation.org>    2013-09-05 14:54:29 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2013-09-05 14:54:29 -0700
commit    cc998ff8811530be521f6b316f37ab7676a07938 (patch)
tree      a054b3bf4b2ef406bf756a6cfc9be2f9115f17ae
parent    57d730924d5cc2c3e280af16a9306587c3a511db (diff)
parent    0d40f75bdab241868c0eb6f97aef9f8b3a66f7b3 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next

Pull networking changes from David Miller:
 "Noteworthy changes this time around:

   1) Multicast rejoin support for team driver, from Jiri Pirko.

   2) Centralize and simplify TCP RTT measurement handling in order to
      reduce the impact of bad RTO seeding from SYN/ACKs.  Also, when
      both timestamps and local RTT measurements are available prefer
      the latter because there are broken middleware devices which
      scramble the timestamp.  From Yuchung Cheng.

   3) Add TCP_NOTSENT_LOWAT socket option to limit the amount of kernel
      memory consumed to queue up unsent user data.  From Eric Dumazet.

   4) Add a "physical port ID" abstraction for network devices, from
      Jiri Pirko.

   5) Add a "suppress" operation to influence fib_rules lookups, from
      Stefan Tomanek.

   6) Add a networking development FAQ, from Paul Gortmaker.

   7) Extend the information provided by tcp_probe and add ipv6 support,
      from Daniel Borkmann.

   8) Use RCU locking more extensively in openvswitch data paths, from
      Pravin B Shelar.

   9) Add SCTP support to openvswitch, from Joe Stringer.

  10) Add EF10 chip support to SFC driver, from Ben Hutchings.

  11) Add new SYNPROXY netfilter target, from Patrick McHardy.

  12) Compute a rate approximation for sending in TCP sockets, and use
      this to more intelligently coalesce TSO frames.  Furthermore, add
      a new packet scheduler which takes advantage of this estimate when
      available.  From Eric Dumazet.

  13) Allow AF_PACKET fanouts with random selection, from Daniel
      Borkmann.

  14) Add ipv6 support to vxlan driver, from Cong Wang"

Resolved conflicts as per discussion.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1218 commits)
  openvswitch: Fix alignment of struct sw_flow_key.
  netfilter: Fix build errors with xt_socket.c
  tcp: Add missing braces to do_tcp_setsockopt
  caif: Add missing braces to multiline if in cfctrl_linkup_request
  bnx2x: Add missing braces in bnx2x:bnx2x_link_initialize
  vxlan: Fix kernel panic on device delete.
  net: mvneta: implement ->ndo_do_ioctl() to support PHY ioctls
  net: mvneta: properly disable HW PHY polling and ensure adjust_link() works
  icplus: Use netif_running to determine device state
  ethernet/arc/arc_emac: Fix huge delays in large file copies
  tuntap: orphan frags before trying to set tx timestamp
  tuntap: purge socket error queue on detach
  qlcnic: use standard NAPI weights
  ipv6:introduce function to find route for redirect
  bnx2x: VF RSS support - VF side
  bnx2x: VF RSS support - PF side
  vxlan: Notify drivers for listening UDP port changes
  net: usbnet: update addr_assign_type if appropriate
  driver/net: enic: update enic maintainers and driver
  driver/net: enic: Exposing symbols for Cisco's low latency driver
  ...
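For readers unfamiliar with the TCP_NOTSENT_LOWAT option mentioned in item 3,
the sketch below shows how a userspace program might opt in.  It is
illustrative only and not part of this merge: it assumes a Linux toolchain,
and the fallback define (the option's value is 25 on Linux) covers older libc
headers that do not yet expose the constant.

    /* Minimal sketch: cap unsent data queued in the kernel for one TCP
     * socket.  Hypothetical standalone program, not from the patch. */
    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    #ifndef TCP_NOTSENT_LOWAT
    #define TCP_NOTSENT_LOWAT 25	/* assumed value on Linux */
    #endif

    int main(void)
    {
    	int lowat = 128 * 1024;	/* keep at most ~128KB unsent per socket */
    	int fd = socket(AF_INET, SOCK_STREAM, 0);

    	if (fd < 0) {
    		perror("socket");
    		return 1;
    	}
    	if (setsockopt(fd, IPPROTO_TCP, TCP_NOTSENT_LOWAT,
    		       &lowat, sizeof(lowat)) < 0)
    		perror("setsockopt(TCP_NOTSENT_LOWAT)");
    	close(fd);
    	return 0;
    }

Roughly speaking, with the option set, poll()/select() report the socket
writable only while the unsent queue sits below the threshold, which is the
memory-bounding behavior the changelog describes.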
-rw-r--r--Documentation/DocBook/80211.tmpl1
-rw-r--r--Documentation/devicetree/bindings/net/micrel-ksz9021.txt49
-rw-r--r--Documentation/devicetree/bindings/net/moxa,moxart-mac.txt21
-rw-r--r--Documentation/devicetree/bindings/net/stmmac.txt5
-rw-r--r--Documentation/networking/00-INDEX2
-rw-r--r--Documentation/networking/e100.txt4
-rw-r--r--Documentation/networking/e1000.txt12
-rw-r--r--Documentation/networking/e1000e.txt16
-rw-r--r--Documentation/networking/igb.txt67
-rw-r--r--Documentation/networking/igbvf.txt8
-rw-r--r--Documentation/networking/ip-sysctl.txt55
-rw-r--r--Documentation/networking/ixgb.txt14
-rw-r--r--Documentation/networking/ixgbe.txt109
-rw-r--r--Documentation/networking/ixgbevf.txt6
-rw-r--r--Documentation/networking/netdev-FAQ.txt224
-rw-r--r--Documentation/networking/openvswitch.txt40
-rw-r--r--Documentation/networking/packet_mmap.txt8
-rw-r--r--Documentation/networking/sctp.txt5
-rw-r--r--Documentation/networking/stmmac.txt3
-rw-r--r--Documentation/networking/tproxy.txt5
-rw-r--r--Documentation/sysctl/net.txt13
-rw-r--r--MAINTAINERS14
-rw-r--r--arch/arm/boot/dts/sama5d3xmb.dtsi8
-rw-r--r--arch/arm/mach-pxa/icontrol.c3
-rw-r--r--arch/arm/mach-pxa/zeus.c46
-rw-r--r--arch/arm/mach-shmobile/board-armadillo800eva.c1
-rw-r--r--arch/arm/mach-shmobile/board-bockw.c1
-rw-r--r--arch/sh/boards/board-espt.c1
-rw-r--r--arch/sh/boards/board-sh7757lcr.c4
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c1
-rw-r--r--arch/sh/boards/mach-se/7724/setup.c3
-rw-r--r--arch/sh/boards/mach-sh7763rdp/setup.c1
-rw-r--r--arch/sh/kernel/cpu/sh2/setup-sh7619.c11
-rw-r--r--arch/tile/gxio/iorpc_mpipe.c66
-rw-r--r--arch/tile/gxio/iorpc_mpipe_info.c18
-rw-r--r--arch/tile/gxio/mpipe.c43
-rw-r--r--arch/tile/include/gxio/iorpc_mpipe.h14
-rw-r--r--arch/tile/include/gxio/iorpc_mpipe_info.h4
-rw-r--r--arch/tile/include/gxio/mpipe.h143
-rw-r--r--arch/tile/include/hv/drv_mpipe_intf.h3
-rw-r--r--drivers/atm/he.c11
-rw-r--r--drivers/atm/nicstar.c26
-rw-r--r--drivers/bcma/Kconfig10
-rw-r--r--drivers/bcma/driver_pci.c65
-rw-r--r--drivers/bcma/driver_pci_host.c6
-rw-r--r--drivers/bcma/main.c2
-rw-r--r--drivers/bcma/scan.c28
-rw-r--r--drivers/bluetooth/btmrvl_debugfs.c6
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c4
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c12
-rw-r--r--drivers/net/bonding/bond_3ad.c52
-rw-r--r--drivers/net/bonding/bond_alb.c144
-rw-r--r--drivers/net/bonding/bond_alb.h3
-rw-r--r--drivers/net/bonding/bond_main.c961
-rw-r--r--drivers/net/bonding/bond_procfs.c12
-rw-r--r--drivers/net/bonding/bond_sysfs.c90
-rw-r--r--drivers/net/bonding/bonding.h96
-rw-r--r--drivers/net/caif/caif_serial.c4
-rw-r--r--drivers/net/can/at91_can.c2
-rw-r--r--drivers/net/can/c_can/c_can_platform.c2
-rw-r--r--drivers/net/can/flexcan.c83
-rw-r--r--drivers/net/can/mcp251x.c98
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c23
-rw-r--r--drivers/net/can/mscan/mscan.c25
-rw-r--r--drivers/net/can/mscan/mscan.h3
-rw-r--r--drivers/net/ethernet/8390/Kconfig2
-rw-r--r--drivers/net/ethernet/8390/ax88796.c6
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c6
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c12
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c2
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c6
-rw-r--r--drivers/net/ethernet/arc/emac_main.c4
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig2
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c23
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c317
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h39
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c106
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h11
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c93
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c309
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h18
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c396
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h39
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c221
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h41
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c189
-rw-r--r--drivers/net/ethernet/broadcom/cnic.h69
-rw-r--r--drivers/net/ethernet/broadcom/cnic_defs.h6
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h12
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c146
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h12
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_tx_rx.c10
-rw-r--r--drivers/net/ethernet/brocade/bna/cna.h4
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.c2
-rw-r--r--drivers/net/ethernet/cadence/macb.c53
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c195
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c3
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/Makefile3
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h55
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_api.c48
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_api.h30
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_dev.h1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c257
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c329
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_res.h9
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.c10
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.h1
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_devcmd.h176
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.c5
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.h5
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.c3
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.h14
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c8
-rw-r--r--drivers/net/ethernet/dlink/sundance.c14
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h76
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c503
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h97
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c25
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c777
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.c8
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.h4
-rw-r--r--drivers/net/ethernet/ethoc.c4
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c7
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c8
-rw-r--r--drivers/net/ethernet/freescale/fec.h3
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c245
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx_phy.c4
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c21
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c4
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c168
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h16
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c74
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c4
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.h4
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c12
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c4
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c4
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.h19
-rw-r--r--drivers/net/ethernet/icplus/ipg.c2
-rw-r--r--drivers/net/ethernet/intel/e100.c15
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c6
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h3
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c107
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h11
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c140
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h6
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c130
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c198
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h42
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h3
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c155
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c11
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c80
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.h1
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c31
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h8
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c132
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c148
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c8
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c34
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c321
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c157
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c133
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c180
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c542
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h46
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c105
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h14
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c4
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c4
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c90
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c177
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c104
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h13
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c12
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c4
-rw-r--r--drivers/net/ethernet/moxa/Kconfig30
-rw-r--r--drivers/net/ethernet/moxa/Makefile5
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c559
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.h330
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c213
-rw-r--r--drivers/net/ethernet/netx-eth.c2
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c15
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h15
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c1
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c67
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c98
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h1
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c20
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.h2
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig11
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/Makefile2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h304
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c743
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h50
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c292
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c40
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c237
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c1179
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h41
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c223
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c18
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h11
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c179
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c454
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c13
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c165
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c21
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c19
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c9
-rw-r--r--drivers/net/ethernet/renesas/Kconfig2
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c71
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h10
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c2
-rw-r--r--drivers/net/ethernet/sfc/Kconfig9
-rw-r--r--drivers/net/ethernet/sfc/Makefile7
-rw-r--r--drivers/net/ethernet/sfc/bitfield.h8
-rw-r--r--drivers/net/ethernet/sfc/ef10.c3043
-rw-r--r--drivers/net/ethernet/sfc/ef10_regs.h415
-rw-r--r--drivers/net/ethernet/sfc/efx.c500
-rw-r--r--drivers/net/ethernet/sfc/efx.h129
-rw-r--r--drivers/net/ethernet/sfc/enum.h10
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c399
-rw-r--r--drivers/net/ethernet/sfc/falcon.c1171
-rw-r--r--drivers/net/ethernet/sfc/falcon_boards.c4
-rw-r--r--drivers/net/ethernet/sfc/falcon_xmac.c362
-rw-r--r--drivers/net/ethernet/sfc/farch.c2942
-rw-r--r--drivers/net/ethernet/sfc/farch_regs.h (renamed from drivers/net/ethernet/sfc/regs.h)272
-rw-r--r--drivers/net/ethernet/sfc/filter.c1274
-rw-r--r--drivers/net/ethernet/sfc/filter.h238
-rw-r--r--drivers/net/ethernet/sfc/io.h50
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c1262
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h313
-rw-r--r--drivers/net/ethernet/sfc/mcdi_mac.c130
-rw-r--r--drivers/net/ethernet/sfc/mcdi_mon.c274
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h5540
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c (renamed from drivers/net/ethernet/sfc/mcdi_phy.c)345
-rw-r--r--drivers/net/ethernet/sfc/mdio_10g.c2
-rw-r--r--drivers/net/ethernet/sfc/mdio_10g.h2
-rw-r--r--drivers/net/ethernet/sfc/mtd.c634
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h408
-rw-r--r--drivers/net/ethernet/sfc/nic.c1902
-rw-r--r--drivers/net/ethernet/sfc/nic.h539
-rw-r--r--drivers/net/ethernet/sfc/phy.h19
-rw-r--r--drivers/net/ethernet/sfc/ptp.c95
-rw-r--r--drivers/net/ethernet/sfc/qt202x_phy.c4
-rw-r--r--drivers/net/ethernet/sfc/rx.c176
-rw-r--r--drivers/net/ethernet/sfc/selftest.c15
-rw-r--r--drivers/net/ethernet/sfc/selftest.h4
-rw-r--r--drivers/net/ethernet/sfc/siena.c711
-rw-r--r--drivers/net/ethernet/sfc/siena_sriov.c102
-rw-r--r--drivers/net/ethernet/sfc/spi.h99
-rw-r--r--drivers/net/ethernet/sfc/tenxpress.c2
-rw-r--r--drivers/net/ethernet/sfc/tx.c35
-rw-r--r--drivers/net/ethernet/sfc/txc43128_phy.c2
-rw-r--r--drivers/net/ethernet/sfc/vfdi.h2
-rw-r--r--drivers/net/ethernet/sfc/workarounds.h22
-rw-r--r--drivers/net/ethernet/sgi/meth.c5
-rw-r--r--drivers/net/ethernet/sis/sis190.c3
-rw-r--r--drivers/net/ethernet/sis/sis900.c28
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c2
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c9
-rw-r--r--drivers/net/ethernet/sun/niu.c8
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c4
-rw-r--r--drivers/net/ethernet/sun/sunhme.c12
-rw-r--r--drivers/net/ethernet/ti/cpmac.c6
-rw-r--r--drivers/net/ethernet/ti/cpsw.c263
-rw-r--r--drivers/net/ethernet/ti/cpsw.h (renamed from include/linux/platform_data/cpsw.h)12
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c1
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c2
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c7
-rw-r--r--drivers/net/ethernet/tile/Kconfig11
-rw-r--r--drivers/net/ethernet/tile/tilegx.c1116
-rw-r--r--drivers/net/ethernet/tile/tilepro.c241
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c10
-rw-r--r--drivers/net/ethernet/via/via-rhine.c2
-rw-r--r--drivers/net/ethernet/via/via-velocity.c20
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c2
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c12
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c14
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c4
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c2
-rw-r--r--drivers/net/fddi/defxx.c6
-rw-r--r--drivers/net/irda/ali-ircc.c8
-rw-r--r--drivers/net/irda/nsc-ircc.c8
-rw-r--r--drivers/net/irda/pxaficp_ir.c2
-rw-r--r--drivers/net/irda/smsc-ircc2.c8
-rw-r--r--drivers/net/irda/via-ircc.c8
-rw-r--r--drivers/net/irda/w83977af_ir.c8
-rw-r--r--drivers/net/macvlan.c7
-rw-r--r--drivers/net/macvtap.c105
-rw-r--r--drivers/net/netconsole.c13
-rw-r--r--drivers/net/phy/mdio-gpio.c2
-rw-r--r--drivers/net/phy/mdio-mux-gpio.c2
-rw-r--r--drivers/net/phy/mdio-mux-mmioreg.c2
-rw-r--r--drivers/net/phy/mdio-octeon.c4
-rw-r--r--drivers/net/phy/mdio-sun4i.c18
-rw-r--r--drivers/net/phy/micrel.c105
-rw-r--r--drivers/net/ppp/pptp.c12
-rw-r--r--drivers/net/team/team.c203
-rw-r--r--drivers/net/tun.c223
-rw-r--r--drivers/net/usb/Kconfig8
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/asix.h2
-rw-r--r--drivers/net/usb/asix_devices.c5
-rw-r--r--drivers/net/usb/ax88172a.c8
-rw-r--r--drivers/net/usb/ax88179_178a.c24
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/r8152.c845
-rw-r--r--drivers/net/usb/sr9700.c560
-rw-r--r--drivers/net/usb/sr9700.h173
-rw-r--r--drivers/net/usb/usbnet.c58
-rw-r--r--drivers/net/virtio_net.c44
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c211
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h10
-rw-r--r--drivers/net/vxlan.c1308
-rw-r--r--drivers/net/wan/sbni.c2
-rw-r--r--drivers/net/wireless/airo.c2
-rw-r--r--drivers/net/wireless/ath/ath.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c12
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c321
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h58
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c87
-rw-r--r--drivers/net/wireless/ath/ath10k/hif.h49
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c61
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.h8
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c27
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c43
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c14
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c488
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c356
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h15
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c127
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h24
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h1
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c59
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h2
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c24
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c25
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/testmode.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/testmode.h7
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig12
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.h13
-rw-r--r--drivers/net/wireless/ath/ath9k/antenna.c672
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c67
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.h10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c31
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c39
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c190
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h7
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h68
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c21
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c67
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c310
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h33
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c19
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c38
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h11
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c115
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h11
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c68
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c45
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c157
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.h7
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c494
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c533
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile3
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/trace.h22
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c160
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h20
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h27
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c22
-rw-r--r--drivers/net/wireless/b43/dma.c6
-rw-r--r--drivers/net/wireless/b43/main.c14
-rw-r--r--drivers/net/wireless/b43legacy/dma.c7
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c16
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c1
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h31
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h8
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c279
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c481
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fweh.c4
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h21
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c228
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/p2p.c4
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c1
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c65
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.c21
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/ampdu.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/dma.c15
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c16
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c399
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c405
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.h1
-rw-r--r--drivers/net/wireless/cw1200/bh.c4
-rw-r--r--drivers/net/wireless/cw1200/main.c2
-rw-r--r--drivers/net/wireless/cw1200/wsm.h2
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c6
-rw-r--r--drivers/net/wireless/iwlegacy/3945-rs.c1
-rw-r--r--drivers/net/wireless/iwlegacy/3945.c31
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c18
-rw-r--r--drivers/net/wireless/iwlegacy/4965-rs.c1
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig30
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/agn.h6
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/debugfs.c15
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/dev.h7
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c172
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c62
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c6
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rxon.c6
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/scan.c105
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c21
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c67
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h25
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/Makefile2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/bt-coex.c162
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/constants.h80
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c217
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c271
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h49
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-power.h147
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h255
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c55
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c82
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h112
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c58
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power.c383
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power_legacy.c319
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/quota.c27
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c653
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.h80
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c158
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c103
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tt.c32
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c13
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c23
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c30
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h1
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c45
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c144
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c60
-rw-r--r--drivers/net/wireless/libertas/mesh.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c15
-rw-r--r--drivers/net/wireless/mwifiex/11n.c16
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c7
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c177
-rw-r--r--drivers/net/wireless/mwifiex/cfp.c42
-rw-r--r--drivers/net/wireless/mwifiex/decl.h12
-rw-r--r--drivers/net/wireless/mwifiex/fw.h72
-rw-r--r--drivers/net/wireless/mwifiex/ie.c2
-rw-r--r--drivers/net/wireless/mwifiex/init.c15
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h43
-rw-r--r--drivers/net/wireless/mwifiex/join.c1
-rw-r--r--drivers/net/wireless/mwifiex/main.c110
-rw-r--r--drivers/net/wireless/mwifiex/main.h11
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c39
-rw-r--r--drivers/net/wireless/mwifiex/scan.c63
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c219
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c77
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c4
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c10
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c11
-rw-r--r--drivers/net/wireless/mwifiex/sta_rx.c49
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c130
-rw-r--r--drivers/net/wireless/mwifiex/uap_txrx.c70
-rw-r--r--drivers/net/wireless/mwifiex/usb.c58
-rw-r--r--drivers/net/wireless/mwifiex/util.c4
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c16
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig6
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h279
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c1655
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c19
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c43
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c6
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/grf5101.c2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/grf5101.h2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/max2820.c2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/max2820.h2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/rtl8225.c4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/sa2400.c2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/sa2400.h2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c6
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/rtl8187.h4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/rtl8225.c4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/rtl8225.h4
-rw-r--r--drivers/net/wireless/rtl818x/rtl818x.h4
-rw-r--r--drivers/net/wireless/rtlwifi/base.c2
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c18
-rw-r--r--drivers/net/wireless/rtlwifi/rc.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/trx.c9
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.h3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.h3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/trx.c20
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c13
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.h3
-rw-r--r--drivers/net/wireless/zd1201.c8
-rw-r--r--drivers/net/xen-netback/common.h150
-rw-r--r--drivers/net/xen-netback/interface.c135
-rw-r--r--drivers/net/xen-netback/netback.c833
-rw-r--r--drivers/nfc/nfcsim.c6
-rw-r--r--drivers/nfc/pn533.c389
-rw-r--r--drivers/nfc/pn544/i2c.c360
-rw-r--r--drivers/nfc/pn544/mei.c2
-rw-r--r--drivers/nfc/pn544/pn544.c20
-rw-r--r--drivers/nfc/pn544/pn544.h7
-rw-r--r--drivers/pci/pci.c43
-rw-r--r--drivers/pci/pci.h3
-rw-r--r--drivers/pci/probe.c4
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_hsi.h12
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c12
-rw-r--r--drivers/ssb/Kconfig2
-rw-r--r--drivers/ssb/driver_chipcommon_sflash.c8
-rw-r--r--drivers/staging/vt6655/hostap.c2
-rw-r--r--drivers/staging/vt6655/ioctl.c2
-rw-r--r--drivers/staging/vt6655/wpactl.c2
-rw-r--r--drivers/vhost/net.c92
-rw-r--r--drivers/vhost/vhost.c56
-rw-r--r--include/linux/bcma/bcma.h17
-rw-r--r--include/linux/bcma/bcma_driver_pci.h24
-rw-r--r--include/linux/can/platform/mcp251x.h15
-rw-r--r--include/linux/dm9000.h4
-rw-r--r--include/linux/dma-mapping.h5
-rw-r--r--include/linux/etherdevice.h15
-rw-r--r--include/linux/fs_enet_pd.h6
-rw-r--r--include/linux/ieee80211.h72
-rw-r--r--include/linux/if_team.h14
-rw-r--r--include/linux/igmp.h1
-rw-r--r--include/linux/ipv6.h3
-rw-r--r--include/linux/mlx4/cmd.h1
-rw-r--r--include/linux/mlx4/device.h12
-rw-r--r--include/linux/mlx4/qp.h5
-rw-r--r--include/linux/mv643xx_eth.h3
-rw-r--r--include/linux/netdevice.h54
-rw-r--r--include/linux/netfilter.h15
-rw-r--r--include/linux/pci.h15
-rw-r--r--include/linux/pci_hotplug.h13
-rw-r--r--include/linux/platform_data/brcmfmac-sdio.h6
-rw-r--r--include/linux/sh_eth.h10
-rw-r--r--include/linux/skbuff.h19
-rw-r--r--include/linux/smsc911x.h3
-rw-r--r--include/linux/socket.h2
-rw-r--r--include/linux/stmmac.h1
-rw-r--r--include/linux/tcp.h2
-rw-r--r--include/linux/usb/usbnet.h3
-rw-r--r--include/linux/uwb/spec.h5
-rw-r--r--include/media/tveeprom.h4
-rw-r--r--include/net/9p/transport.h3
-rw-r--r--include/net/act_api.h60
-rw-r--r--include/net/addrconf.h180
-rw-r--r--include/net/af_rxrpc.h35
-rw-r--r--include/net/af_unix.h17
-rw-r--r--include/net/af_vsock.h (renamed from net/vmw_vsock/af_vsock.h)0
-rw-r--r--include/net/arp.h30
-rw-r--r--include/net/ax25.h215
-rw-r--r--include/net/bluetooth/bluetooth.h8
-rw-r--r--include/net/bluetooth/hci.h7
-rw-r--r--include/net/bluetooth/hci_core.h10
-rw-r--r--include/net/bluetooth/sco.h1
-rw-r--r--include/net/cfg80211.h239
-rw-r--r--include/net/checksum.h10
-rw-r--r--include/net/cls_cgroup.h2
-rw-r--r--include/net/dst.h12
-rw-r--r--include/net/fib_rules.h14
-rw-r--r--include/net/ieee80211_radiotap.h4
-rw-r--r--include/net/if_inet6.h9
-rw-r--r--include/net/ip.h12
-rw-r--r--include/net/ip6_route.h2
-rw-r--r--include/net/ip6_tunnel.h2
-rw-r--r--include/net/ip_tunnels.h11
-rw-r--r--include/net/ipv6.h3
-rw-r--r--include/net/irda/irlan_common.h3
-rw-r--r--include/net/llc_if.h30
-rw-r--r--include/net/mac80211.h192
-rw-r--r--include/net/mld.h51
-rw-r--r--include/net/ndisc.h5
-rw-r--r--include/net/neighbour.h98
-rw-r--r--include/net/net_namespace.h37
-rw-r--r--include/net/netfilter/nf_conntrack.h9
-rw-r--r--include/net/netfilter/nf_conntrack_extend.h6
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h7
-rw-r--r--include/net/netfilter/nf_conntrack_seqadj.h51
-rw-r--r--include/net/netfilter/nf_conntrack_synproxy.h77
-rw-r--r--include/net/netfilter/nf_nat.h10
-rw-r--r--include/net/netfilter/nf_nat_helper.h19
-rw-r--r--include/net/netfilter/nf_tproxy_core.h210
-rw-r--r--include/net/netfilter/nfnetlink_queue.h8
-rw-r--r--include/net/netns/ipv4.h1
-rw-r--r--include/net/netns/ipv6.h1
-rw-r--r--include/net/netprio_cgroup.h2
-rw-r--r--include/net/nfc/nfc.h3
-rw-r--r--include/net/pkt_cls.h42
-rw-r--r--include/net/pkt_sched.h53
-rw-r--r--include/net/sch_generic.h54
-rw-r--r--include/net/sctp/auth.h8
-rw-r--r--include/net/sctp/checksum.h23
-rw-r--r--include/net/sctp/command.h18
-rw-r--r--include/net/sctp/constants.h8
-rw-r--r--include/net/sctp/sctp.h10
-rw-r--r--include/net/sctp/sm.h8
-rw-r--r--include/net/sctp/structs.h29
-rw-r--r--include/net/sctp/tsnmap.h8
-rw-r--r--include/net/sctp/ulpevent.h8
-rw-r--r--include/net/sctp/ulpqueue.h8
-rw-r--r--include/net/sock.h29
-rw-r--r--include/net/tcp.h44
-rw-r--r--include/net/udp.h1
-rw-r--r--include/net/vsock_addr.h (renamed from net/vmw_vsock/vsock_addr.h)0
-rw-r--r--include/net/vxlan.h40
-rw-r--r--include/net/xfrm.h2
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--include/uapi/linux/can/gw.h9
-rw-r--r--include/uapi/linux/dn.h3
-rw-r--r--include/uapi/linux/fib_rules.h4
-rw-r--r--include/uapi/linux/icmpv6.h2
-rw-r--r--include/uapi/linux/if_bridge.h3
-rw-r--r--include/uapi/linux/if_link.h3
-rw-r--r--include/uapi/linux/if_packet.h1
-rw-r--r--include/uapi/linux/if_pppox.h2
-rw-r--r--include/uapi/linux/if_tun.h6
-rw-r--r--include/uapi/linux/in.h49
-rw-r--r--include/uapi/linux/in6.h36
-rw-r--r--include/uapi/linux/ip.h2
-rw-r--r--include/uapi/linux/ipv6.h3
-rw-r--r--include/uapi/linux/libc-compat.h103
-rw-r--r--include/uapi/linux/netfilter/Kbuild2
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_common.h3
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_conntrack.h15
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_queue.h1
-rw-r--r--include/uapi/linux/netfilter/xt_HMARK.h (renamed from include/linux/netfilter/xt_HMARK.h)0
-rw-r--r--include/uapi/linux/netfilter/xt_SYNPROXY.h16
-rw-r--r--include/uapi/linux/netfilter/xt_rpfilter.h (renamed from include/linux/netfilter/xt_rpfilter.h)0
-rw-r--r--include/uapi/linux/netfilter_bridge/ebt_802_3.h5
-rw-r--r--include/uapi/linux/netfilter_ipv4/ipt_CLUSTERIP.h3
-rw-r--r--include/uapi/linux/nfc.h20
-rw-r--r--include/uapi/linux/nl80211.h199
-rw-r--r--include/uapi/linux/openvswitch.h26
-rw-r--r--include/uapi/linux/pkt_sched.h41
-rw-r--r--include/uapi/linux/sctp.h2
-rw-r--r--include/uapi/linux/snmp.h4
-rw-r--r--include/uapi/linux/tcp.h1
-rw-r--r--include/uapi/linux/virtio_net.h6
-rw-r--r--include/uapi/linux/wimax/i2400m.h4
-rw-r--r--net/8021q/vlan.c13
-rw-r--r--net/8021q/vlan_dev.c8
-rw-r--r--net/9p/client.c9
-rw-r--r--net/9p/trans_rdma.c11
-rw-r--r--net/Kconfig2
-rw-r--r--net/appletalk/atalk_proc.c2
-rw-r--r--net/batman-adv/bat_iv_ogm.c32
-rw-r--r--net/batman-adv/gateway_client.c27
-rw-r--r--net/batman-adv/gateway_client.h1
-rw-r--r--net/batman-adv/icmp_socket.c1
-rw-r--r--net/batman-adv/main.c58
-rw-r--r--net/batman-adv/main.h5
-rw-r--r--net/batman-adv/routing.c20
-rw-r--r--net/batman-adv/send.c1
-rw-r--r--net/batman-adv/soft-interface.c2
-rw-r--r--net/batman-adv/sysfs.c4
-rw-r--r--net/batman-adv/translation-table.c5
-rw-r--r--net/batman-adv/unicast.c2
-rw-r--r--net/batman-adv/vis.c2
-rw-r--r--net/bluetooth/hci_conn.c62
-rw-r--r--net/bluetooth/hci_core.c14
-rw-r--r--net/bluetooth/hci_event.c29
-rw-r--r--net/bluetooth/hidp/core.c40
-rw-r--r--net/bluetooth/l2cap_core.c3
-rw-r--r--net/bluetooth/rfcomm/tty.c271
-rw-r--r--net/bluetooth/sco.c85
-rw-r--r--net/bridge/br_device.c12
-rw-r--r--net/bridge/br_if.c6
-rw-r--r--net/bridge/br_mdb.c6
-rw-r--r--net/bridge/br_multicast.c17
-rw-r--r--net/bridge/br_notify.c5
-rw-r--r--net/bridge/br_private.h22
-rw-r--r--net/caif/cfctrl.c3
-rw-r--r--net/can/gw.c35
-rw-r--r--net/ceph/messenger.c2
-rw-r--r--net/core/datagram.c72
-rw-r--r--net/core/dev.c371
-rw-r--r--net/core/fib_rules.c25
-rw-r--r--net/core/flow_dissector.c6
-rw-r--r--net/core/iovec.c24
-rw-r--r--net/core/neighbour.c2
-rw-r--r--net/core/net-sysfs.c23
-rw-r--r--net/core/pktgen.c61
-rw-r--r--net/core/rtnetlink.c29
-rw-r--r--net/core/skbuff.c19
-rw-r--r--net/core/sock.c166
-rw-r--r--net/core/stream.c2
-rw-r--r--net/core/sysctl_net_core.c30
-rw-r--r--net/dccp/proto.c4
-rw-r--r--net/dsa/slave.c2
-rw-r--r--net/ieee802154/6lowpan.c286
-rw-r--r--net/ieee802154/6lowpan.h20
-rw-r--r--net/ipv4/Kconfig16
-rw-r--r--net/ipv4/af_inet.c12
-rw-r--r--net/ipv4/arp.c2
-rw-r--r--net/ipv4/devinet.c17
-rw-r--r--net/ipv4/fib_rules.c25
-rw-r--r--net/ipv4/igmp.c80
-rw-r--r--net/ipv4/ip_gre.c4
-rw-r--r--net/ipv4/ip_input.c8
-rw-r--r--net/ipv4/ip_tunnel.c71
-rw-r--r--net/ipv4/ip_tunnel_core.c10
-rw-r--r--net/ipv4/ip_vti.c528
-rw-r--r--net/ipv4/ipip.c3
-rw-r--r--net/ipv4/ipmr.c18
-rw-r--r--net/ipv4/netfilter/Kconfig13
-rw-r--r--net/ipv4/netfilter/Makefile1
-rw-r--r--net/ipv4/netfilter/ipt_MASQUERADE.c2
-rw-r--r--net/ipv4/netfilter/ipt_REJECT.c21
-rw-r--r--net/ipv4/netfilter/ipt_SYNPROXY.c476
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c7
-rw-r--r--net/ipv4/ping.c2
-rw-r--r--net/ipv4/proc.c7
-rw-r--r--net/ipv4/raw.c2
-rw-r--r--net/ipv4/route.c24
-rw-r--r--net/ipv4/syncookies.c29
-rw-r--r--net/ipv4/sysctl_net_ipv4.c17
-rw-r--r--net/ipv4/tcp.c46
-rw-r--r--net/ipv4/tcp_fastopen.c13
-rw-r--r--net/ipv4/tcp_input.c201
-rw-r--r--net/ipv4/tcp_ipv4.c32
-rw-r--r--net/ipv4/tcp_metrics.c42
-rw-r--r--net/ipv4/tcp_minisocks.c8
-rw-r--r--net/ipv4/tcp_output.c5
-rw-r--r--net/ipv4/tcp_probe.c87
-rw-r--r--net/ipv4/udp.c18
-rw-r--r--net/ipv6/addrconf.c165
-rw-r--r--net/ipv6/addrconf_core.c50
-rw-r--r--net/ipv6/addrlabel.c48
-rw-r--r--net/ipv6/af_inet6.c15
-rw-r--r--net/ipv6/ah6.c2
-rw-r--r--net/ipv6/datagram.c2
-rw-r--r--net/ipv6/esp6.c2
-rw-r--r--net/ipv6/fib6_rules.c37
-rw-r--r--net/ipv6/icmp.c12
-rw-r--r--net/ipv6/ip6_fib.c16
-rw-r--r--net/ipv6/ip6_gre.c14
-rw-r--r--net/ipv6/ip6_input.c6
-rw-r--r--net/ipv6/ip6_offload.c4
-rw-r--r--net/ipv6/ip6_output.c25
-rw-r--r--net/ipv6/ip6_tunnel.c46
-rw-r--r--net/ipv6/ip6mr.c14
-rw-r--r--net/ipv6/ipcomp6.c2
-rw-r--r--net/ipv6/mcast.c289
-rw-r--r--net/ipv6/ndisc.c45
-rw-r--r--net/ipv6/netfilter/Kconfig13
-rw-r--r--net/ipv6/netfilter/Makefile3
-rw-r--r--net/ipv6/netfilter/ip6t_MASQUERADE.c2
-rw-r--r--net/ipv6/netfilter/ip6t_REJECT.c20
-rw-r--r--net/ipv6/netfilter/ip6t_SYNPROXY.c499
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c7
-rw-r--r--net/ipv6/output_core.c48
-rw-r--r--net/ipv6/proc.c4
-rw-r--r--net/ipv6/raw.c9
-rw-r--r--net/ipv6/route.c105
-rw-r--r--net/ipv6/sit.c15
-rw-r--r--net/ipv6/syncookies.c25
-rw-r--r--net/ipv6/tcp_ipv6.c15
-rw-r--r--net/ipv6/udp_offload.c105
-rw-r--r--net/ipx/ipx_proc.c2
-rw-r--r--net/irda/irttp.c50
-rw-r--r--net/key/af_key.c14
-rw-r--r--net/llc/af_llc.c6
-rw-r--r--net/llc/llc_conn.c6
-rw-r--r--net/llc/llc_proc.c2
-rw-r--r--net/llc/llc_sap.c4
-rw-r--r--net/mac80211/cfg.c247
-rw-r--r--net/mac80211/chan.c58
-rw-r--r--net/mac80211/debugfs_sta.c9
-rw-r--r--net/mac80211/driver-ops.h13
-rw-r--r--net/mac80211/ht.c53
-rw-r--r--net/mac80211/ibss.c356
-rw-r--r--net/mac80211/ieee80211_i.h70
-rw-r--r--net/mac80211/iface.c30
-rw-r--r--net/mac80211/key.c154
-rw-r--r--net/mac80211/led.c19
-rw-r--r--net/mac80211/led.h4
-rw-r--r--net/mac80211/main.c18
-rw-r--r--net/mac80211/mesh.c10
-rw-r--r--net/mac80211/mesh_plink.c2
-rw-r--r--net/mac80211/mlme.c118
-rw-r--r--net/mac80211/rate.c69
-rw-r--r--net/mac80211/rate.h22
-rw-r--r--net/mac80211/rc80211_minstrel.c33
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c17
-rw-r--r--net/mac80211/rc80211_pid_algo.c1
-rw-r--r--net/mac80211/rx.c504
-rw-r--r--net/mac80211/scan.c72
-rw-r--r--net/mac80211/status.c90
-rw-r--r--net/mac80211/trace.h26
-rw-r--r--net/mac80211/tx.c122
-rw-r--r--net/mac80211/util.c218
-rw-r--r--net/netfilter/Kconfig26
-rw-r--r--net/netfilter/Makefile6
-rw-r--r--net/netfilter/core.c7
-rw-r--r--net/netfilter/ipvs/ip_vs_lblcr.c8
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_sctp.c23
-rw-r--r--net/netfilter/ipvs/ip_vs_sh.c6
-rw-r--r--net/netfilter/nf_conntrack_core.c89
-rw-r--r--net/netfilter/nf_conntrack_labels.c4
-rw-r--r--net/netfilter/nf_conntrack_netlink.c384
-rw-r--r--net/netfilter/nf_conntrack_proto.c4
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c36
-rw-r--r--net/netfilter/nf_conntrack_seqadj.c238
-rw-r--r--net/netfilter/nf_nat_core.c22
-rw-r--r--net/netfilter/nf_nat_helper.c230
-rw-r--r--net/netfilter/nf_nat_proto_sctp.c8
-rw-r--r--net/netfilter/nf_nat_sip.c3
-rw-r--r--net/netfilter/nf_synproxy_core.c432
-rw-r--r--net/netfilter/nf_tproxy_core.c62
-rw-r--r--net/netfilter/nfnetlink_queue_core.c11
-rw-r--r--net/netfilter/nfnetlink_queue_ct.c23
-rw-r--r--net/netfilter/xt_TCPMSS.c2
-rw-r--r--net/netfilter/xt_TPROXY.c169
-rw-r--r--net/netfilter/xt_addrtype.c2
-rw-r--r--net/netfilter/xt_socket.c66
-rw-r--r--net/netlink/af_netlink.c101
-rw-r--r--net/netlink/af_netlink.h3
-rw-r--r--net/nfc/core.c22
-rw-r--r--net/nfc/hci/core.c2
-rw-r--r--net/nfc/netlink.c95
-rw-r--r--net/nfc/nfc.h5
-rw-r--r--net/openvswitch/Kconfig14
-rw-r--r--net/openvswitch/Makefile9
-rw-r--r--net/openvswitch/actions.c45
-rw-r--r--net/openvswitch/datapath.c176
-rw-r--r--net/openvswitch/datapath.h6
-rw-r--r--net/openvswitch/flow.c1486
-rw-r--r--net/openvswitch/flow.h89
-rw-r--r--net/openvswitch/vport-gre.c7
-rw-r--r--net/openvswitch/vport-netdev.c20
-rw-r--r--net/openvswitch/vport-vxlan.c204
-rw-r--r--net/openvswitch/vport.c6
-rw-r--r--net/openvswitch/vport.h1
-rw-r--r--net/packet/af_packet.c65
-rw-r--r--net/phonet/socket.c2
-rw-r--r--net/rfkill/rfkill-regulator.c8
-rw-r--r--net/sched/Kconfig14
-rw-r--r--net/sched/Makefile1
-rw-r--r--net/sched/sch_api.c53
-rw-r--r--net/sched/sch_fq.c793
-rw-r--r--net/sched/sch_generic.c20
-rw-r--r--net/sched/sch_mq.c2
-rw-r--r--net/sched/sch_mqprio.c2
-rw-r--r--net/sched/sch_netem.c5
-rw-r--r--net/sctp/associola.c8
-rw-r--r--net/sctp/auth.c8
-rw-r--r--net/sctp/bind_addr.c8
-rw-r--r--net/sctp/chunk.c12
-rw-r--r--net/sctp/command.c8
-rw-r--r--net/sctp/debug.c8
-rw-r--r--net/sctp/endpointola.c8
-rw-r--r--net/sctp/input.c18
-rw-r--r--net/sctp/inqueue.c8
-rw-r--r--net/sctp/ipv6.c10
-rw-r--r--net/sctp/objcnt.c8
-rw-r--r--net/sctp/output.c8
-rw-r--r--net/sctp/outqueue.c8
-rw-r--r--net/sctp/primitive.c8
-rw-r--r--net/sctp/probe.c27
-rw-r--r--net/sctp/proc.c12
-rw-r--r--net/sctp/protocol.c10
-rw-r--r--net/sctp/sm_make_chunk.c133
-rw-r--r--net/sctp/sm_sideeffect.c8
-rw-r--r--net/sctp/sm_statefuns.c8
-rw-r--r--net/sctp/sm_statetable.c8
-rw-r--r--net/sctp/socket.c8
-rw-r--r--net/sctp/ssnmap.c8
-rw-r--r--net/sctp/sysctl.c8
-rw-r--r--net/sctp/transport.c8
-rw-r--r--net/sctp/tsnmap.c8
-rw-r--r--net/sctp/ulpevent.c8
-rw-r--r--net/sctp/ulpqueue.c8
-rw-r--r--net/sunrpc/svcsock.c2
-rw-r--r--net/sunrpc/xprtsock.c2
-rw-r--r--net/unix/af_unix.c70
-rw-r--r--net/vmw_vsock/af_vsock.c3
-rw-r--r--net/vmw_vsock/vmci_transport.c2
-rw-r--r--net/vmw_vsock/vmci_transport.h4
-rw-r--r--net/vmw_vsock/vsock_addr.c3
-rw-r--r--net/wireless/core.c9
-rw-r--r--net/wireless/core.h2
-rw-r--r--net/wireless/mesh.c5
-rw-r--r--net/wireless/mlme.c4
-rw-r--r--net/wireless/nl80211.c560
-rw-r--r--net/wireless/nl80211.h4
-rw-r--r--net/wireless/rdev-ops.h17
-rw-r--r--net/wireless/scan.c35
-rw-r--r--net/wireless/trace.h53
-rw-r--r--net/wireless/util.c14
-rw-r--r--net/x25/x25_facilities.c4
-rw-r--r--net/xfrm/xfrm_policy.c12
-rw-r--r--net/xfrm/xfrm_state.c15
-rw-r--r--security/selinux/include/xfrm.h7
974 files changed, 56727 insertions, 23917 deletions
diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl
index 49267ea97568..f403ec3c5c9a 100644
--- a/Documentation/DocBook/80211.tmpl
+++ b/Documentation/DocBook/80211.tmpl
@@ -325,6 +325,7 @@
<title>functions/definitions</title>
!Finclude/net/mac80211.h ieee80211_rx_status
!Finclude/net/mac80211.h mac80211_rx_flags
+!Finclude/net/mac80211.h mac80211_tx_info_flags
!Finclude/net/mac80211.h mac80211_tx_control_flags
!Finclude/net/mac80211.h mac80211_rate_control_flags
!Finclude/net/mac80211.h ieee80211_tx_rate
diff --git a/Documentation/devicetree/bindings/net/micrel-ksz9021.txt b/Documentation/devicetree/bindings/net/micrel-ksz9021.txt
new file mode 100644
index 000000000000..997a63f1aea1
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/micrel-ksz9021.txt
@@ -0,0 +1,49 @@
+Micrel KSZ9021 Gigabit Ethernet PHY
+
+Some boards require special tuning values, particularly when it comes to
+clock delays. You can specify clock delay values by adding
+micrel-specific properties to an Ethernet OF device node.
+
+All skew control options are specified in picoseconds. The minimum
+value is 0, and the maximum value is 3000.
+
+Optional properties:
+ - rxc-skew-ps : Skew control of RXC pad
+ - rxdv-skew-ps : Skew control of RX CTL pad
+ - txc-skew-ps : Skew control of TXC pad
+ - txen-skew-ps : Skew control of TX_CTL pad
+ - rxd0-skew-ps : Skew control of RX data 0 pad
+ - rxd1-skew-ps : Skew control of RX data 1 pad
+ - rxd2-skew-ps : Skew control of RX data 2 pad
+ - rxd3-skew-ps : Skew control of RX data 3 pad
+ - txd0-skew-ps : Skew control of TX data 0 pad
+ - txd1-skew-ps : Skew control of TX data 1 pad
+ - txd2-skew-ps : Skew control of TX data 2 pad
+ - txd3-skew-ps : Skew control of TX data 3 pad
+
+Examples:
+
+ /* Attach to an Ethernet device with autodetected PHY */
+ &enet {
+ rxc-skew-ps = <3000>;
+ rxdv-skew-ps = <0>;
+ txc-skew-ps = <3000>;
+ txen-skew-ps = <0>;
+ status = "okay";
+ };
+
+ /* Attach to an explicitly-specified PHY */
+ mdio {
+ phy0: ethernet-phy@0 {
+ rxc-skew-ps = <3000>;
+ rxdv-skew-ps = <0>;
+ txc-skew-ps = <3000>;
+ txen-skew-ps = <0>;
+ reg = <0>;
+ };
+ };
+ ethernet@70000 {
+ status = "okay";
+ phy = <&phy0>;
+ phy-mode = "rgmii-id";
+ };
diff --git a/Documentation/devicetree/bindings/net/moxa,moxart-mac.txt b/Documentation/devicetree/bindings/net/moxa,moxart-mac.txt
new file mode 100644
index 000000000000..583418b2c127
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/moxa,moxart-mac.txt
@@ -0,0 +1,21 @@
+MOXA ART Ethernet Controller
+
+Required properties:
+
+- compatible : Must be "moxa,moxart-mac"
+- reg : Should contain register location and length
+- interrupts : Should contain the mac interrupt number
+
+Example:
+
+ mac0: mac@90900000 {
+ compatible = "moxa,moxart-mac";
+ reg = <0x90900000 0x100>;
+ interrupts = <25 0>;
+ };
+
+ mac1: mac@92000000 {
+ compatible = "moxa,moxart-mac";
+ reg = <0x92000000 0x100>;
+ interrupts = <27 0>;
+ };
diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt
index 261c563b5f06..eba0e5e59ebe 100644
--- a/Documentation/devicetree/bindings/net/stmmac.txt
+++ b/Documentation/devicetree/bindings/net/stmmac.txt
@@ -22,6 +22,11 @@ Required properties:
- snps,pbl Programmable Burst Length
- snps,fixed-burst Program the DMA to use the fixed burst mode
- snps,mixed-burst Program the DMA to use the mixed burst mode
+- snps,force_thresh_dma_mode Force DMA to use the threshold mode for
+ both tx and rx
+- snps,force_sf_dma_mode Force DMA to use the Store and Forward
+ mode for both tx and rx. This flag is
+ ignored if force_thresh_dma_mode is set.
Optional properties:
- mac-address: 6 bytes, mac address
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index 32dfbd924121..18b64b2b8a68 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -124,6 +124,8 @@ multiqueue.txt
- HOWTO for multiqueue network device support.
netconsole.txt
- The network console module netconsole.ko: configuration and notes.
+netdev-FAQ.txt
+ - FAQ describing how to submit net changes to the netdev mailing list.
netdev-features.txt
- Network interface features API description.
netdevices.txt
diff --git a/Documentation/networking/e100.txt b/Documentation/networking/e100.txt
index fcb6c71cdb69..13a32124bca0 100644
--- a/Documentation/networking/e100.txt
+++ b/Documentation/networking/e100.txt
@@ -1,7 +1,7 @@
Linux* Base Driver for the Intel(R) PRO/100 Family of Adapters
==============================================================
-November 15, 2005
+March 15, 2011
Contents
========
@@ -122,7 +122,7 @@ Additional Configurations
NOTE: This setting is not saved across reboots.
- Ethtool
+ ethtool
-------
The driver utilizes the ethtool interface for driver configuration and
diff --git a/Documentation/networking/e1000.txt b/Documentation/networking/e1000.txt
index 71ca95855671..437b2099cced 100644
--- a/Documentation/networking/e1000.txt
+++ b/Documentation/networking/e1000.txt
@@ -1,8 +1,8 @@
-Linux* Base Driver for the Intel(R) PRO/1000 Family of Adapters
-===============================================================
+Linux* Base Driver for Intel(R) Ethernet Network Connection
+===========================================================
Intel Gigabit Linux driver.
-Copyright(c) 1999 - 2010 Intel Corporation.
+Copyright(c) 1999 - 2013 Intel Corporation.
Contents
========
@@ -420,15 +420,15 @@ Additional Configurations
- The maximum MTU setting for Jumbo Frames is 16110. This value coincides
with the maximum Jumbo Frames size of 16128.
- - Using Jumbo Frames at 10 or 100 Mbps may result in poor performance or
- loss of link.
+ - Using Jumbo frames at 10 or 100 Mbps is not supported and may result in
+ poor performance or loss of link.
- Adapters based on the Intel(R) 82542 and 82573V/E controller do not
support Jumbo Frames. These correspond to the following product names:
Intel(R) PRO/1000 Gigabit Server Adapter
Intel(R) PRO/1000 PM Network Connection
- Ethtool
+ ethtool
-------
The driver utilizes the ethtool interface for driver configuration and
diagnostics, as well as displaying statistical information. The ethtool
diff --git a/Documentation/networking/e1000e.txt b/Documentation/networking/e1000e.txt
index 97b5ba942ebf..ad2d9f38ce14 100644
--- a/Documentation/networking/e1000e.txt
+++ b/Documentation/networking/e1000e.txt
@@ -1,8 +1,8 @@
-Linux* Driver for Intel(R) Network Connection
-=============================================
+Linux* Driver for Intel(R) Ethernet Network Connection
+======================================================
Intel Gigabit Linux driver.
-Copyright(c) 1999 - 2010 Intel Corporation.
+Copyright(c) 1999 - 2013 Intel Corporation.
Contents
========
@@ -259,13 +259,16 @@ Additional Configurations
- The maximum MTU setting for Jumbo Frames is 9216. This value coincides
with the maximum Jumbo Frames size of 9234 bytes.
- - Using Jumbo Frames at 10 or 100 Mbps is not supported and may result in
+ - Using Jumbo frames at 10 or 100 Mbps is not supported and may result in
poor performance or loss of link.
- Some adapters limit Jumbo Frames sized packets to a maximum of
4096 bytes and some adapters do not support Jumbo Frames.
- Ethtool
+ - Jumbo Frames cannot be configured on an 82579-based Network device, if
+ MACSec is enabled on the system.
+
+ ethtool
-------
The driver utilizes the ethtool interface for driver configuration and
diagnostics, as well as displaying statistical information. We
@@ -273,6 +276,9 @@ Additional Configurations
http://ftp.kernel.org/pub/software/network/ethtool/
+ NOTE: When validating enable/disable tests on some parts (82578, for example)
+ you need to add a few seconds between tests when working with ethtool.
+
Speed and Duplex
----------------
Speed and Duplex are configured through the ethtool* utility. For
diff --git a/Documentation/networking/igb.txt b/Documentation/networking/igb.txt
index 9a2a037194a5..4ebbd659256f 100644
--- a/Documentation/networking/igb.txt
+++ b/Documentation/networking/igb.txt
@@ -1,8 +1,8 @@
-Linux* Base Driver for Intel(R) Network Connection
-==================================================
+Linux* Base Driver for Intel(R) Ethernet Network Connection
+===========================================================
Intel Gigabit Linux driver.
-Copyright(c) 1999 - 2010 Intel Corporation.
+Copyright(c) 1999 - 2013 Intel Corporation.
Contents
========
@@ -36,6 +36,53 @@ Default Value: 0
This parameter adds support for SR-IOV. It causes the driver to spawn up to
max_vfs worth of virtual functions.
+QueuePairs
+----------
+Valid Range: 0-1
+Default Value: 1 (TX and RX will be paired onto one interrupt vector)
+
+If set to 0, when MSI-X is enabled, the TX and RX will attempt to occupy
+separate vectors.
+
+This option can be overridden to 1 if there are not sufficient interrupts
+available. This can occur if any combination of RSS, VMDQ, and max_vfs
+results in more than 4 queues being used.
+
+Node
+----
+Valid Range: 0-n
+Default Value: -1 (off)
+
+ 0 - n: where n is the number of the NUMA node that should be used to
+ allocate memory for this adapter port.
+ -1: uses the driver default of allocating memory on whichever processor is
+ running insmod/modprobe.
+
+ The Node parameter will allow you to pick which NUMA node you want to have
+ the adapter allocate memory from. All driver structures, in-memory queues,
+ and receive buffers will be allocated on the node specified. This parameter
+ is only useful when interrupt affinity is specified, otherwise some portion
+ of the time the interrupt could run on a different core than the memory is
+ allocated on, causing slower memory access and impacting throughput, CPU, or
+ both.
+
+EEE
+---
+Valid Range: 0-1
+Default Value: 1 (enabled)
+
+ A link between two EEE-compliant devices will result in periodic bursts of
+ data followed by long periods wherein the link is in an idle state. This Low
+ Power Idle (LPI) state is supported at both 1Gbps and 100Mbps link speeds.
+ NOTE: EEE support requires autonegotiation.
+
+DMAC
+----
+Valid Range: 0-1
+Default Value: 1 (enabled)
+ Enables or disables the DMA Coalescing feature.
+
+
Additional Configurations
=========================
@@ -55,10 +102,10 @@ Additional Configurations
- The maximum MTU setting for Jumbo Frames is 9216. This value coincides
with the maximum Jumbo Frames size of 9234 bytes.
- - Using Jumbo Frames at 10 or 100 Mbps may result in poor performance or
- loss of link.
+ - Using Jumbo frames at 10 or 100 Mbps is not supported and may result in
+ poor performance or loss of link.
- Ethtool
+ ethtool
-------
The driver utilizes the ethtool interface for driver configuration and
diagnostics, as well as displaying statistical information. The latest
@@ -106,6 +153,14 @@ Additional Configurations
Where n=the VF that attempted to do the spoofing.
+ Setting MAC Address, VLAN and Rate Limit Using IProute2 Tool
+ ------------------------------------------------------------
+ You can set the MAC address of a Virtual Function (VF), a default VLAN, and
+ a rate limit using the iproute2 tool. Download the latest version of the
+ iproute2 tool from Sourceforge if your version does not have all the
+ features you require.
+
+
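For illustration, the iproute2 invocations implied above might look like the
following (ethX, the VF index, and all values are placeholders):

    ip link set dev ethX vf 0 mac 00:11:22:33:44:55
    ip link set dev ethX vf 0 vlan 100
    ip link set dev ethX vf 0 rate 1000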
Support
=======
diff --git a/Documentation/networking/igbvf.txt b/Documentation/networking/igbvf.txt
index cbfe4ee65533..40db17a6665b 100644
--- a/Documentation/networking/igbvf.txt
+++ b/Documentation/networking/igbvf.txt
@@ -1,8 +1,8 @@
-Linux* Base Driver for Intel(R) Network Connection
-==================================================
+Linux* Base Driver for Intel(R) Ethernet Network Connection
+===========================================================
Intel Gigabit Linux driver.
-Copyright(c) 1999 - 2010 Intel Corporation.
+Copyright(c) 1999 - 2013 Intel Corporation.
Contents
========
@@ -55,7 +55,7 @@ networking link on the left to search for your adapter:
Additional Configurations
=========================
- Ethtool
+ ethtool
-------
The driver utilizes the ethtool interface for driver configuration and
diagnostics, as well as displaying statistical information. The ethtool
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 10742902146f..a46d78583ae1 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -440,6 +440,10 @@ tcp_syncookies - BOOLEAN
SYN flood warnings in logs not being really flooded, your server
is seriously misconfigured.
+ If you want to test what effect syncookies have on your
+ network connections, you can set this knob to 2 to enable
+ unconditional generation of syncookies.
+
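For example, enabling unconditional syncookie generation for such a test is a
single sysctl write (illustrative invocation):

    sysctl -w net.ipv4.tcp_syncookies=2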
tcp_fastopen - INTEGER
Enable TCP Fast Open feature (draft-ietf-tcpm-fastopen) to send data
in the opening SYN packet. To use this feature, the client application
@@ -478,6 +482,15 @@ tcp_syn_retries - INTEGER
tcp_timestamps - BOOLEAN
Enable timestamps as defined in RFC1323.
+tcp_min_tso_segs - INTEGER
+ Minimal number of segments per TSO frame.
+ Since linux-3.12, TCP does an automatic sizing of TSO frames,
+ depending on flow rate, instead of filling 64Kbyte packets.
+ For specific usages, it's possible to force TCP to build big
+ TSO frames. Note that the TCP stack might split overly large
+ TSO packets if the available window is too small.
+ Default: 2
+
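For example, forcing larger TSO frames for a specific workload is a single
sysctl write (the value 8 is an arbitrary illustration):

    sysctl -w net.ipv4.tcp_min_tso_segs=8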
tcp_tso_win_divisor - INTEGER
This allows control over what percentage of the congestion window
can be consumed by a single TSO frame.
@@ -516,6 +529,19 @@ tcp_wmem - vector of 3 INTEGERs: min, default, max
this value is ignored.
Default: between 64K and 4MB, depending on RAM size.
+tcp_notsent_lowat - UNSIGNED INTEGER
+ A TCP socket can control the amount of unsent bytes in its write queue,
+ thanks to the TCP_NOTSENT_LOWAT socket option. poll()/select()/epoll()
+ report POLLOUT events if the amount of unsent bytes is below a per
+ socket value, and if the write queue is not full. sendmsg() will
+ also not add new buffers if the limit is hit.
+
+ This global variable controls the amount of unsent data for
+ sockets not using TCP_NOTSENT_LOWAT. For these sockets, a change
+ to the global variable has immediate effect.
+
+ Default: UINT_MAX (0xFFFFFFFF)
+
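A minimal userspace sketch of the per-socket option described above (the
128KB threshold is an arbitrary illustration; the fallback #define mirrors
the value in the kernel uapi headers for toolchains that predate it):

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <stdio.h>
    #include <sys/socket.h>

    #ifndef TCP_NOTSENT_LOWAT
    #define TCP_NOTSENT_LOWAT 25    /* value from the kernel uapi headers */
    #endif

    /* Cap unsent data on 'fd'; poll() reports POLLOUT only below the cap. */
    static int set_notsent_lowat(int fd)
    {
            unsigned int lowat = 128 * 1024;

            if (setsockopt(fd, IPPROTO_TCP, TCP_NOTSENT_LOWAT,
                           &lowat, sizeof(lowat)) < 0) {
                    perror("setsockopt(TCP_NOTSENT_LOWAT)");
                    return -1;
            }
            return 0;
    }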
tcp_workaround_signed_windows - BOOLEAN
If set, assume no receipt of a window scaling option means the
remote TCP is broken and treats the window as a signed quantity.
@@ -1022,7 +1048,15 @@ disable_policy - BOOLEAN
disable_xfrm - BOOLEAN
Disable IPSEC encryption on this interface, whatever the policy
+igmpv2_unsolicited_report_interval - INTEGER
+ The interval in milliseconds in which the next unsolicited
+ IGMPv1 or IGMPv2 report retransmit will take place.
+ Default: 10000 (10 seconds)
+
+igmpv3_unsolicited_report_interval - INTEGER
+ The interval in milliseconds in which the next unsolicited
+ IGMPv3 report retransmit will take place.
+ Default: 1000 (1 second)
tag - INTEGER
Allows you to write a number, which can be used as required.
@@ -1314,6 +1348,27 @@ ndisc_notify - BOOLEAN
1 - Generate unsolicited neighbour advertisements when device is brought
up or hardware address changes.
+mldv1_unsolicited_report_interval - INTEGER
+ The interval in milliseconds in which the next unsolicited
+ MLDv1 report retransmit will take place.
+ Default: 10000 (10 seconds)
+
+mldv2_unsolicited_report_interval - INTEGER
+ The interval in milliseconds in which the next unsolicited
+ MLDv2 report retransmit will take place.
+ Default: 1000 (1 second)
+
+force_mld_version - INTEGER
+ 0 - (default) No enforcement of an MLD version, MLDv1 fallback allowed
+ 1 - Enforce the use of MLD version 1
+ 2 - Enforce the use of MLD version 2
+
+suppress_frag_ndisc - INTEGER
+ Control RFC 6980 (Security Implications of IPv6 Fragmentation
+ with IPv6 Neighbor Discovery) behavior:
+ 1 - (default) discard fragmented neighbor discovery packets
+ 0 - allow fragmented neighbor discovery packets
+
icmp/*:
ratelimit - INTEGER
Limit the maximal rates for sending ICMPv6 packets.
diff --git a/Documentation/networking/ixgb.txt b/Documentation/networking/ixgb.txt
index d75a1f9565bb..1e0c045e89f7 100644
--- a/Documentation/networking/ixgb.txt
+++ b/Documentation/networking/ixgb.txt
@@ -1,7 +1,7 @@
-Linux Base Driver for 10 Gigabit Intel(R) Network Connection
-=============================================================
+Linux Base Driver for 10 Gigabit Intel(R) Ethernet Network Connection
+=====================================================================
-October 9, 2007
+March 14, 2011
Contents
@@ -274,9 +274,9 @@ Additional Configurations
-------------------------------------------------
Configuring a network driver to load properly when the system is started is
distribution dependent. Typically, the configuration process involves adding
- an alias line to files in /etc/modprobe.d/ as well as editing other system
- startup scripts and/or configuration files. Many popular Linux distributions
- ship with tools to make these changes for you. To learn the proper way to
+ an alias line to /etc/modprobe.conf as well as editing other system startup
+ scripts and/or configuration files. Many popular Linux distributions ship
+ with tools to make these changes for you. To learn the proper way to
configure a network device for your system, refer to your distribution
documentation. If during this process you are asked for the driver or module
name, the name for the Linux Base Driver for the Intel 10GbE Family of
@@ -306,7 +306,7 @@ Additional Configurations
with the maximum Jumbo Frames size of 16128.
- Ethtool
+ ethtool
-------
The driver utilizes the ethtool interface for driver configuration and
diagnostics, as well as displaying statistical information. The ethtool
diff --git a/Documentation/networking/ixgbe.txt b/Documentation/networking/ixgbe.txt
index af77ed3c4172..96cccebb839b 100644
--- a/Documentation/networking/ixgbe.txt
+++ b/Documentation/networking/ixgbe.txt
@@ -1,8 +1,9 @@
-Linux Base Driver for 10 Gigabit PCI Express Intel(R) Network Connection
-========================================================================
+Linux* Base Driver for the Intel(R) Ethernet 10 Gigabit PCI Express Family of
+Adapters
+=============================================================================
-Intel Gigabit Linux driver.
-Copyright(c) 1999 - 2010 Intel Corporation.
+Intel 10 Gigabit Linux driver.
+Copyright(c) 1999 - 2013 Intel Corporation.
Contents
========
@@ -16,8 +17,8 @@ Contents
Identifying Your Adapter
========================
-The driver in this release is compatible with 82598 and 82599-based Intel
-Network Connections.
+The driver in this release is compatible with 82598, 82599 and X540-based
+Intel Network Connections.
For more information on how to identify your adapter, go to the Adapter &
Driver ID Guide at:
@@ -72,7 +73,7 @@ cables that comply with SFF-8431 v4.1 and SFF-8472 v10.4 specifications.
Laser turns off for SFP+ when ifconfig down
-------------------------------------------
"ifconfig down" turns off the laser for 82599-based SFP+ fiber adapters.
-"ifconfig up" turns on the later.
+"ifconfig up" turns on the laser.
82598-BASED ADAPTERS
@@ -118,6 +119,93 @@ NOTE: For 82598 backplane cards entering 1 gig mode, flow control default
behavior is changed to off. Flow control in 1 gig mode on these devices can
lead to Tx hangs.
+Intel(R) Ethernet Flow Director
+-------------------------------
+Supports advanced filters that direct receive packets by their flows to
+different queues. Enables tight control over routing a flow in the platform.
+Matches flows and CPU cores for flow affinity. Supports multiple parameters
+for flexible flow classification and load balancing.
+
+Flow director is enabled only if the kernel is multiple TX queue capable.
+
+An included script (set_irq_affinity.sh) automates setting the IRQ to CPU
+affinity.
+
+You can verify that the driver is using Flow Director by looking at the
+counters in ethtool: fdir_miss and fdir_match.
+
+Other ethtool Commands:
+To enable Flow Director
+ ethtool -K ethX ntuple on
+To add a filter
+ Use the -U switch, e.g., ethtool -U ethX flow-type tcp4 src-ip 0x178000a
+ action 1
+To see the list of filters currently present:
+ ethtool -u ethX
+
+Perfect Filter: Perfect filter is an interface to load the filter table that
+funnels all flows into queue_0 unless an alternative queue is specified using
+"action". In that case, any flow that matches the filter criteria will be
+directed to the appropriate queue.
+
+If the queue is defined as -1, the filter will drop matching packets.
+
+To account for filter matches and misses, there are two stats in ethtool:
+fdir_match and fdir_miss. In addition, rx_queue_N_packets shows the number of
+packets processed by the Nth queue.
+
+NOTE: Receive Packet Steering (RPS) and Receive Flow Steering (RFS) are not
+compatible with Flow Director. If Flow Director is enabled, these will be
+disabled.
+
+The following three parameters impact Flow Director.
+
+FdirMode
+--------
+Valid Range: 0-2 (0=off, 1=ATR, 2=Perfect filter mode)
+Default Value: 1
+
+ Flow Director filtering modes.
+
+FdirPballoc
+-----------
+Valid Range: 0-2 (0=64k, 1=128k, 2=256k)
+Default Value: 0
+
+ Flow Director allocated packet buffer size.
+
+AtrSampleRate
+--------------
+Valid Range: 1-100
+Default Value: 20
+
+ Software ATR Tx packet sample rate. For example, when set to 20, every 20th
+ packet, looks to see if the packet will create a new flow.
+
+Node
+----
+Valid Range: 0-n
+Default Value: -1 (off)
+
+ 0 - n: where n is the number of NUMA nodes (i.e. 0 - 3) currently online in
+ your system
+ -1: turns this option off
+
+ The Node parameter will allow you to pick which NUMA node you want to have
+ the adapter allocate memory on.
+
+max_vfs
+-------
+Valid Range: 1-63
+Default Value: 0
+
+ If the value is greater than 0 it will also force the VMDq parameter to be 1
+ or more.
+
+ This parameter adds support for SR-IOV. It causes the driver to spawn up to
+ max_vfs worth of virtual functions.
+
+
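These are module parameters, supplied at load time; an illustrative
invocation (meaningful only where the loaded driver build actually exposes
the parameter):

    modprobe ixgbe max_vfs=4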
Additional Configurations
=========================
@@ -221,9 +309,10 @@ http://www.redhat.com/promo/summit/2008/downloads/pdf/Thursday/Mark_Wagner.pdf
Known Issues
============
- Enabling SR-IOV in a 32-bit Microsoft* Windows* Server 2008 Guest OS using
- Intel (R) 82576-based GbE or Intel (R) 82599-based 10GbE controller under KVM
- -----------------------------------------------------------------------------
+ Enabling SR-IOV in a 32-bit or 64-bit Microsoft* Windows* Server 2008/R2
+ Guest OS using Intel (R) 82576-based GbE or Intel (R) 82599-based 10GbE
+ controller under KVM
+ ------------------------------------------------------------------------
KVM Hypervisor/VMM supports direct assignment of a PCIe device to a VM. This
includes traditional PCIe devices, as well as SR-IOV-capable devices using
Intel 82576-based and 82599-based controllers.
diff --git a/Documentation/networking/ixgbevf.txt b/Documentation/networking/ixgbevf.txt
index 5a91a41fa946..53d8d2a5a6a3 100644
--- a/Documentation/networking/ixgbevf.txt
+++ b/Documentation/networking/ixgbevf.txt
@@ -1,8 +1,8 @@
-Linux* Base Driver for Intel(R) Network Connection
-==================================================
+Linux* Base Driver for Intel(R) Ethernet Network Connection
+===========================================================
Intel Gigabit Linux driver.
-Copyright(c) 1999 - 2010 Intel Corporation.
+Copyright(c) 1999 - 2013 Intel Corporation.
Contents
========
diff --git a/Documentation/networking/netdev-FAQ.txt b/Documentation/networking/netdev-FAQ.txt
new file mode 100644
index 000000000000..d9112f01c44a
--- /dev/null
+++ b/Documentation/networking/netdev-FAQ.txt
@@ -0,0 +1,224 @@
+
+Information you need to know about netdev
+-----------------------------------------
+
+Q: What is netdev?
+
+A: It is a mailing list for all network related linux stuff. This includes
+ anything found under net/ (i.e. core code like IPv6) and drivers/net
+ (i.e. hardware specific drivers) in the linux source tree.
+
+ Note that some subsystems (e.g. wireless drivers) which have a high volume
+ of traffic have their own specific mailing lists.
+
+ The netdev list is managed (like many other linux mailing lists) through
+ VGER ( http://vger.kernel.org/ ) and archives can be found below:
+
+ http://marc.info/?l=linux-netdev
+ http://www.spinics.net/lists/netdev/
+
+ Aside from subsystems like those mentioned above, all network related linux
+ development (i.e. RFC, review, comments, etc) takes place on netdev.
+
+Q: How do the changes posted to netdev make their way into linux?
+
+A: There are always two trees (git repositories) in play. Both are driven
+ by David Miller, the main network maintainer. There is the "net" tree,
+ and the "net-next" tree. As you can probably guess from the names, the
+ net tree is for fixes to existing code already in the mainline tree from
+ Linus, and net-next is where the new code goes for the future release.
+ You can find the trees here:
+
+ http://git.kernel.org/?p=linux/kernel/git/davem/net.git
+ http://git.kernel.org/?p=linux/kernel/git/davem/net-next.git
+
+Q: How often do changes from these trees make it to the mainline Linus tree?
+
+A: To understand this, you need to know a bit of background information
+ on the cadence of linux development. Each new release starts off with
+ a two week "merge window" where the main maintainers feed their new
+ stuff to Linus for merging into the mainline tree. After the two weeks,
+ the merge window is closed, and it is called/tagged "-rc1". No new
+ features get mainlined after this -- only fixes to the rc1 content
+ are expected. After roughly a week of collecting fixes to the rc1
+ content, rc2 is released. This repeats on a roughly weekly basis
+ until rc7 (typically; sometimes rc6 if things are quiet, or rc8 if
+ things are in a state of churn), and a week after the last vX.Y-rcN
+ was done, the official "vX.Y" is released.
+
+ Relating that to netdev: At the beginning of the 2 week merge window,
+ the net-next tree will be closed - no new changes/features. The
+ accumulated new content of the past ~10 weeks will be passed onto
+ mainline/Linus via a pull request for vX.Y -- at the same time,
+ the "net" tree will start accumulating fixes for this pulled content
+ relating to vX.Y
+
+ An announcement indicating when net-next has been closed is usually
+ sent to netdev, but knowing the above, you can predict that in advance.
+
+ IMPORTANT: Do not send new net-next content to netdev during the
+ period when the net-next tree is closed.
+
+ Shortly after the two weeks have passed, (and vX.Y-rc1 is released) the
+ tree for net-next reopens to collect content for the next (vX.Y+1) release.
+
+ If you aren't subscribed to netdev and/or are simply unsure if net-next
+ has re-opened yet, check the net-next git repository link above for
+ any new networking related commits.
+
+ The "net" tree continues to collect fixes for the vX.Y content, and
+ is fed back to Linus at regular (~weekly) intervals, meaning that the
+ focus for "net" is on stabilization and bugfixes.
+
+ Finally, the vX.Y gets released, and the whole cycle starts over.
+
+Q: So where are we now in this cycle?
+
+A: Load the mainline (Linus) page here:
+
+ http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git
+
+ and note the top of the "tags" section. If it is rc1, it is early
+ in the dev cycle. If it was tagged rc7 a week ago, then a release
+ is probably imminent.
+
+Q: How do I indicate which tree (net vs. net-next) my patch should be in?
+
+A: Firstly, think whether you have a bug fix or new "next-like" content.
+ Then once decided, assuming that you use git, use the prefix flag, i.e.
+
+ git format-patch --subject-prefix='PATCH net-next' start..finish
+
+ Use "net" instead of "net-next" (always lower case) in the above for
+ bug-fix net content. If you don't use git, then note the only magic in
+ the above is just the subject text of the outgoing e-mail, and you can
+ manually change it yourself with whatever MUA you are comfortable with.
+
+Q: I sent a patch and I'm wondering what happened to it. How can I tell
+ whether it got merged?
+
+A: Start by looking at the main patchworks queue for netdev:
+
+ http://patchwork.ozlabs.org/project/netdev/list/
+
+ The "State" field will tell you exactly where things are at with
+ your patch.
+
+Q: The above only says "Under Review". How can I find out more?
+
+A: Generally speaking, the patches get triaged quickly (in less than 48h).
+ So be patient. Asking the maintainer for status updates on your
+ patch is a good way to ensure your patch is ignored or pushed to
+ the bottom of the priority list.
+
+Q: How can I tell what patches are queued up for backporting to the
+ various stable releases?
+
+A: Normally Greg Kroah-Hartman collects stable commits himself, but
+ for networking, Dave collects up patches he deems critical for the
+ networking subsystem, and then hands them off to Greg.
+
+ There is a patchworks queue that you can see here:
+ http://patchwork.ozlabs.org/bundle/davem/stable/?state=*
+
+ It contains the patches which Dave has selected, but not yet handed
+ off to Greg. If Greg already has the patch, then it will be here:
+ http://git.kernel.org/cgit/linux/kernel/git/stable/stable-queue.git
+
+ A quick way to find whether the patch is in this stable-queue is
+ to simply clone the repo, and then git grep the mainline commit ID, e.g.
+
+ stable-queue$ git grep -l 284041ef21fdf2e
+ releases/3.0.84/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
+ releases/3.4.51/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
+ releases/3.9.8/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
+ stable/stable-queue$
+
+Q: I see a network patch and I think it should be backported to stable.
+ Should I request it via "stable@vger.kernel.org" like the references in
+ the kernel's Documentation/stable_kernel_rules.txt file say?
+
+A: No, not for networking. Check the stable queues as per above 1st to see
+ if it is already queued. If not, then send a mail to netdev, listing
+ the upstream commit ID and why you think it should be a stable candidate.
+
+ Before you jump to go do the above, do note that the normal stable rules
+ in Documentation/stable_kernel_rules.txt still apply. So you need to
+ explicitly indicate why it is a critical fix and exactly what users are
+ impacted. In addition, you need to convince yourself that you _really_
+ think it has been overlooked, vs. having been considered and rejected.
+
+ Generally speaking, the longer it has had a chance to "soak" in mainline,
+ the better the odds that it is an OK candidate for stable. So scrambling
+ to request a commit be added the day after it appears should be avoided.
+
+Q: I have created a network patch and I think it should be backported to
+ stable. Should I add a "Cc: stable@vger.kernel.org" like the references
+ in the kernel's Documentation/ directory say?
+
+A: No. See above answer. In short, if you think it really belongs in
+ stable, then ensure you write a decent commit log that describes who
+ gets impacted by the bugfix and how it manifests itself, and when the
+ bug was introduced. If you do that properly, then the commit will
+ get handled appropriately and most likely get put in the patchworks
+ stable queue if it really warrants it.
+
+ If you think there is some valid information relating to it being in
+ stable that does _not_ belong in the commit log, then use the three
+ dash marker line as described in Documentation/SubmittingPatches to
+ temporarily embed that information into the patch that you send.
+
+Q: Someone said that the comment style and coding convention is different
+ for the networking content. Is this true?
+
+A: Yes, in a largely trivial way. Instead of this:
+
+ /*
+ * foobar blah blah blah
+ * another line of text
+ */
+
+ it is requested that you make it look like this:
+
+ /* foobar blah blah blah
+ * another line of text
+ */
+
+Q: I am working in existing code that has the former comment style and not the
+ latter. Should I submit new code in the former style or the latter?
+
+A: Make it the latter style, so that eventually all code in the domain of
+ netdev is of this format.
+
+Q: I found a bug that might have possible security implications or similar.
+ Should I mail the main netdev maintainer off-list?
+
+A: No. The current netdev maintainer has consistently requested that people
+ use the mailing lists and not reach out directly. If you aren't OK with
+ that, then perhaps consider mailing "security@kernel.org" or reading about
+ http://oss-security.openwall.org/wiki/mailing-lists/distros
+ as possible alternative mechanisms.
+
+Q: What level of testing is expected before I submit my change?
+
+A: If your changes are against net-next, the expectation is that you
+ have tested by layering your changes on top of net-next. Ideally you
+ will have done run-time testing specific to your change, but at a
+ minimum, your changes should survive an "allyesconfig" and an
+ "allmodconfig" build without new warnings or failures.
+
+Q: Any other tips to help ensure my net/net-next patch gets OK'd?
+
+A: Attention to detail. Re-read your own work as if you were the
+ reviewer. You can start with using checkpatch.pl, perhaps even
+ with the "--strict" flag. But do not be mindlessly robotic in
+ doing so. If your change is a bug-fix, make sure your commit log
+ indicates the end-user visible symptom, the underlying reason as
+ to why it happens, and then if necessary, explain why the fix proposed
+ is the best way to get things done. Don't mangle whitespace, and as
+ is common, don't mis-indent function arguments that span multiple lines.
+ If it is your 1st patch, mail it to yourself so you can test apply
+ it to an unpatched tree to confirm infrastructure didn't mangle it.
+
+ Finally, go back and read Documentation/SubmittingPatches to be
+ sure you are not repeating some common mistake documented there.
diff --git a/Documentation/networking/openvswitch.txt b/Documentation/networking/openvswitch.txt
index 8fa2dd1e792e..37c20ee2455e 100644
--- a/Documentation/networking/openvswitch.txt
+++ b/Documentation/networking/openvswitch.txt
@@ -91,6 +91,46 @@ Often we ellipsize arguments not important to the discussion, e.g.:
in_port(1), eth(...), eth_type(0x0800), ipv4(...), tcp(...)
+Wildcarded flow key format
+--------------------------
+
+A wildcarded flow is described with two sequences of Netlink attributes
+passed over the Netlink socket: a flow key, exactly as described above, and
+an optional corresponding flow mask.
+
+A wildcarded flow can represent a group of exact match flows. Each '1' bit
+in the mask specifies an exact match with the corresponding bit in the flow
+key. A '0' bit specifies a don't-care bit, which will match either a '1' or
+'0' bit of an incoming packet. Using wildcarded flows can improve the flow
+setup rate by reducing the number of new flows that need to be processed by
+the user space program.
+
+Support for the mask Netlink attribute is optional for both the kernel and user
+space program. The kernel can ignore the mask attribute, installing an exact
+match flow, or reduce the number of don't care bits in the kernel to less than
+what was specified by the user space program. In this case, variations in bits
+that the kernel does not implement will simply result in additional flow setups.
+The kernel module will also work with user space programs that neither support
+nor supply flow mask attributes.
+
+Since the kernel may ignore or modify wildcard bits, it can be difficult for
+the userspace program to know exactly what matches are installed. There are
+two possible approaches: reactively install flows as they miss the kernel
+flow table (and therefore not attempt to determine wildcard changes at all)
+or use the kernel's response messages to determine the installed wildcards.
+
+When interacting with userspace, the kernel should maintain the match portion
+of the key exactly as originally installed. This provides a handle to
+identify the flow for all future operations. However, when reporting the
+mask of an installed flow, the mask should include any restrictions imposed
+by the kernel.
+
+The behavior when using overlapping wildcarded flows is undefined. It is the
+responsibility of the user space program to ensure that any incoming packet
+can match at most one flow, wildcarded or not. The current implementation
+performs best-effort detection of overlapping wildcarded flows and may reject
+some but not all of them. However, this behavior may change in future versions.
+
+
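For illustration, in the ellipsized notation used earlier in this document
(addresses and ports invented for the example), a flow key of

    in_port(1), eth_type(0x0800), ipv4(src=10.1.2.3, ...)

combined with a mask of

    in_port(0xffffffff), eth_type(0xffff), ipv4(src=255.255.255.0, ...)

matches any IPv4 packet received on port 1 whose source address falls within
10.1.2.0/24.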
Basic rule for evolving flow keys
---------------------------------
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index 8572796b1eb6..c01223628a87 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -543,6 +543,14 @@ TPACKET_V2 --> TPACKET_V3:
In the AF_PACKET fanout mode, packet reception can be load balanced among
processes. This also works in combination with mmap(2) on packet sockets.
+Currently implemented fanout policies are:
+
+ - PACKET_FANOUT_HASH: schedule to socket by skb's rxhash
+ - PACKET_FANOUT_LB: schedule to socket by round-robin
+ - PACKET_FANOUT_CPU: schedule to socket by CPU packet arrives on
+ - PACKET_FANOUT_RND: schedule to socket by random selection
+ - PACKET_FANOUT_ROLLOVER: if one socket is full, rollover to another
+
Minimal example code by David S. Miller (try things like "./test eth0 hash",
"./test eth0 lb", etc.):
diff --git a/Documentation/networking/sctp.txt b/Documentation/networking/sctp.txt
index 0c790a76910e..97b810ca9082 100644
--- a/Documentation/networking/sctp.txt
+++ b/Documentation/networking/sctp.txt
@@ -19,7 +19,6 @@ of SCTP that is RFC 2960 compliant and provides an programming interface
referred to as the UDP-style API of the Sockets Extensions for SCTP, as
proposed in IETF Internet-Drafts.
-
Caveats:
-lksctp can be built as statically or as a module. However, be aware that
@@ -33,6 +32,4 @@ For more information, please visit the lksctp project website:
http://www.sf.net/projects/lksctp
Or contact the lksctp developers through the mailing list:
- <lksctp-developers@lists.sourceforge.net>
-
-
+ <linux-sctp@vger.kernel.org>
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index 654d2e55c8cb..457b8bbafb08 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -123,6 +123,7 @@ struct plat_stmmacenet_data {
int bugged_jumbo;
int pmt;
int force_sf_dma_mode;
+ int force_thresh_dma_mode;
int riwt_off;
void (*fix_mac_speed)(void *priv, unsigned int speed);
void (*bus_setup)(void __iomem *ioaddr);
@@ -159,6 +160,8 @@ Where:
o pmt: core has the embedded power module (optional).
o force_sf_dma_mode: force DMA to use the Store and Forward mode
instead of the Threshold.
+ o force_thresh_dma_mode: force DMA to use the Threshold mode instead of
+ the Store and Forward mode.
o riwt_off: force to disable the RX watchdog feature and switch to NAPI mode.
o fix_mac_speed: this callback is used for modifying some syscfg registers
(on ST SoCs) according to the link speed negotiated by the
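A board file might set the newly documented flag in its platform data; a
hedged sketch (only the relevant members shown, values illustrative):

    #include <linux/stmmac.h>

    static struct plat_stmmacenet_data board_stmmac_pdata = {
            .bus_id = 0,
            /* Use Threshold mode rather than Store and Forward. */
            .force_thresh_dma_mode = 1,
    };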
diff --git a/Documentation/networking/tproxy.txt b/Documentation/networking/tproxy.txt
index 7b5996d9357e..ec11429e1d42 100644
--- a/Documentation/networking/tproxy.txt
+++ b/Documentation/networking/tproxy.txt
@@ -2,9 +2,8 @@ Transparent proxy support
=========================
This feature adds Linux 2.2-like transparent proxy support to current kernels.
-To use it, enable NETFILTER_TPROXY, the socket match and the TPROXY target in
-your kernel config. You will need policy routing too, so be sure to enable that
-as well.
+To use it, enable the socket match and the TPROXY target in your kernel config.
+You will need policy routing too, so be sure to enable that as well.
1. Making non-local sockets work
diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
index d569f2a424d5..9a0319a82470 100644
--- a/Documentation/sysctl/net.txt
+++ b/Documentation/sysctl/net.txt
@@ -50,6 +50,19 @@ The maximum number of packets that kernel can handle on a NAPI interrupt,
it's a Per-CPU variable.
Default: 64
+default_qdisc
+--------------
+
+The default queuing discipline to use for network devices. This allows
+overriding the default queue discipline of pfifo_fast with an
+alternative. Since the default queuing discipline is created without
+additional parameters, it is best suited to queuing disciplines that
+work well without configuration, like stochastic fair queue (sfq),
+CoDel (codel) or fair queue CoDel (fq_codel). Don't use queuing disciplines
+like Hierarchical Token Bucket or Deficit Round Robin, which require setting
+up classes and bandwidths.
+Default: pfifo_fast
+
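For example, making fq_codel the default for subsequently created devices is
a single sysctl write (illustrative invocation):

    sysctl -w net.core.default_qdisc=fq_codel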
busy_read
----------------
Low latency busy poll timeout for socket reads. (needs CONFIG_NET_RX_BUSY_POLL)
diff --git a/MAINTAINERS b/MAINTAINERS
index c9fedf12f144..9d8ab7c681f9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2108,7 +2108,8 @@ F: drivers/usb/chipidea/
CISCO VIC ETHERNET NIC DRIVER
M: Christian Benvenuti <benve@cisco.com>
-M: Roopa Prabhu <roprabhu@cisco.com>
+M: Sujith Sankar <ssujith@cisco.com>
+M: Govindarajulu Varadarajan <govindarajulu90@gmail.com>
M: Neel Patel <neepatel@cisco.com>
M: Nishank Trivedi <nistrive@cisco.com>
S: Supported
@@ -4404,7 +4405,7 @@ F: drivers/net/wireless/iwlegacy/
INTEL WIRELESS WIFI LINK (iwlwifi)
M: Johannes Berg <johannes.berg@intel.com>
-M: Wey-Yi Guy <wey-yi.w.guy@intel.com>
+M: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
M: Intel Linux Wireless <ilw@linux.intel.com>
L: linux-wireless@vger.kernel.org
W: http://intellinuxwireless.org
@@ -5831,7 +5832,7 @@ M: Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
M: Samuel Ortiz <sameo@linux.intel.com>
L: linux-wireless@vger.kernel.org
L: linux-nfc@lists.01.org (moderated for non-subscribers)
-S: Maintained
+S: Supported
F: net/nfc/
F: include/net/nfc/
F: include/uapi/linux/nfc.h
@@ -7274,6 +7275,7 @@ W: http://lksctp.sourceforge.net
S: Maintained
F: Documentation/networking/sctp.txt
F: include/linux/sctp.h
+F: include/uapi/linux/sctp.h
F: include/net/sctp/
F: net/sctp/
@@ -8022,6 +8024,12 @@ F: arch/m68k/sun3*/
F: arch/m68k/include/asm/sun3*
F: drivers/net/ethernet/i825xx/sun3*
+SUNDANCE NETWORK DRIVER
+M: Denis Kirjanov <kda@linux-powerpc.org>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/dlink/sundance.c
+
SUPERH
M: Paul Mundt <lethal@linux-sh.org>
L: linux-sh@vger.kernel.org
diff --git a/arch/arm/boot/dts/sama5d3xmb.dtsi b/arch/arm/boot/dts/sama5d3xmb.dtsi
index 8a9e05d8a4b8..dba739b6ef36 100644
--- a/arch/arm/boot/dts/sama5d3xmb.dtsi
+++ b/arch/arm/boot/dts/sama5d3xmb.dtsi
@@ -81,6 +81,14 @@
macb1: ethernet@f802c000 {
phy-mode = "rmii";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy0: ethernet-phy@1 {
+ interrupt-parent = <&pioE>;
+ interrupts = <30 IRQ_TYPE_EDGE_FALLING>;
+ reg = <1>;
+ };
};
pinctrl@fffff200 {
diff --git a/arch/arm/mach-pxa/icontrol.c b/arch/arm/mach-pxa/icontrol.c
index fe31bfcbb8df..c98511c5abd1 100644
--- a/arch/arm/mach-pxa/icontrol.c
+++ b/arch/arm/mach-pxa/icontrol.c
@@ -73,9 +73,6 @@ static struct pxa2xx_spi_chip mcp251x_chip_info4 = {
static struct mcp251x_platform_data mcp251x_info = {
.oscillator_frequency = 16E6,
- .board_specific_setup = NULL,
- .power_enable = NULL,
- .transceiver_enable = NULL
};
static struct spi_board_info mcp251x_board_info[] = {
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index f5d436434566..04a0aea23873 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -29,6 +29,8 @@
#include <linux/i2c/pca953x.h>
#include <linux/apm-emulation.h>
#include <linux/can/platform/mcp251x.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
#include <asm/mach-types.h>
#include <asm/suspend.h>
@@ -391,33 +393,34 @@ static struct pxa2xx_spi_master pxa2xx_spi_ssp3_master_info = {
};
/* CAN bus on SPI */
-static int zeus_mcp2515_setup(struct spi_device *sdev)
-{
- int err;
-
- err = gpio_request(ZEUS_CAN_SHDN_GPIO, "CAN shutdown");
- if (err)
- return err;
+static struct regulator_consumer_supply can_regulator_consumer =
+ REGULATOR_SUPPLY("vdd", "spi3.0");
- err = gpio_direction_output(ZEUS_CAN_SHDN_GPIO, 1);
- if (err) {
- gpio_free(ZEUS_CAN_SHDN_GPIO);
- return err;
- }
+static struct regulator_init_data can_regulator_init_data = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .consumer_supplies = &can_regulator_consumer,
+ .num_consumer_supplies = 1,
+};
- return 0;
-}
+static struct fixed_voltage_config can_regulator_pdata = {
+ .supply_name = "CAN_SHDN",
+ .microvolts = 3300000,
+ .gpio = ZEUS_CAN_SHDN_GPIO,
+ .init_data = &can_regulator_init_data,
+};
-static int zeus_mcp2515_transceiver_enable(int enable)
-{
- gpio_set_value(ZEUS_CAN_SHDN_GPIO, !enable);
- return 0;
-}
+static struct platform_device can_regulator_device = {
+ .name = "reg-fixed-voltage",
+ .id = -1,
+ .dev = {
+ .platform_data = &can_regulator_pdata,
+ },
+};
static struct mcp251x_platform_data zeus_mcp2515_pdata = {
.oscillator_frequency = 16*1000*1000,
- .board_specific_setup = zeus_mcp2515_setup,
- .power_enable = zeus_mcp2515_transceiver_enable,
};
static struct spi_board_info zeus_spi_board_info[] = {
@@ -516,6 +519,7 @@ static struct platform_device *zeus_devices[] __initdata = {
&zeus_leds_device,
&zeus_pcmcia_device,
&zeus_max6369_device,
+ &can_regulator_device,
};
/* AC'97 */
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index c5be60d85e4b..3a6ffa250fb1 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -358,7 +358,6 @@ static struct platform_device usbhsf_device = {
static struct sh_eth_plat_data sh_eth_platdata = {
.phy = 0x00, /* LAN8710A */
.edmac_endian = EDMAC_LITTLE_ENDIAN,
- .register_type = SH_ETH_REG_GIGABIT,
.phy_interface = PHY_INTERFACE_MODE_MII,
};
diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c
index 24f26ee97f42..35dd7f201a16 100644
--- a/arch/arm/mach-shmobile/board-bockw.c
+++ b/arch/arm/mach-shmobile/board-bockw.c
@@ -91,7 +91,6 @@ static struct sh_mobile_sdhi_info sdhi0_info = {
static struct sh_eth_plat_data ether_platform_data __initdata = {
.phy = 0x01,
.edmac_endian = EDMAC_LITTLE_ENDIAN,
- .register_type = SH_ETH_REG_FAST_RCAR,
.phy_interface = PHY_INTERFACE_MODE_RMII,
/*
* Although the LINK signal is available on the board, it's connected to
diff --git a/arch/sh/boards/board-espt.c b/arch/sh/boards/board-espt.c
index 4d94dff9015c..7291e2f11a47 100644
--- a/arch/sh/boards/board-espt.c
+++ b/arch/sh/boards/board-espt.c
@@ -80,7 +80,6 @@ static struct resource sh_eth_resources[] = {
static struct sh_eth_plat_data sh7763_eth_pdata = {
.phy = 0,
.edmac_endian = EDMAC_LITTLE_ENDIAN,
- .register_type = SH_ETH_REG_GIGABIT,
.phy_interface = PHY_INTERFACE_MODE_MII,
};
diff --git a/arch/sh/boards/board-sh7757lcr.c b/arch/sh/boards/board-sh7757lcr.c
index 4f114d1cd019..25c5a932f9fe 100644
--- a/arch/sh/boards/board-sh7757lcr.c
+++ b/arch/sh/boards/board-sh7757lcr.c
@@ -77,7 +77,6 @@ static struct resource sh_eth0_resources[] = {
static struct sh_eth_plat_data sh7757_eth0_pdata = {
.phy = 1,
.edmac_endian = EDMAC_LITTLE_ENDIAN,
- .register_type = SH_ETH_REG_FAST_SH4,
.set_mdio_gate = sh7757_eth_set_mdio_gate,
};
@@ -106,7 +105,6 @@ static struct resource sh_eth1_resources[] = {
static struct sh_eth_plat_data sh7757_eth1_pdata = {
.phy = 1,
.edmac_endian = EDMAC_LITTLE_ENDIAN,
- .register_type = SH_ETH_REG_FAST_SH4,
.set_mdio_gate = sh7757_eth_set_mdio_gate,
};
@@ -151,7 +149,6 @@ static struct resource sh_eth_giga0_resources[] = {
static struct sh_eth_plat_data sh7757_eth_giga0_pdata = {
.phy = 18,
.edmac_endian = EDMAC_LITTLE_ENDIAN,
- .register_type = SH_ETH_REG_GIGABIT,
.set_mdio_gate = sh7757_eth_giga_set_mdio_gate,
.phy_interface = PHY_INTERFACE_MODE_RGMII_ID,
};
@@ -186,7 +183,6 @@ static struct resource sh_eth_giga1_resources[] = {
static struct sh_eth_plat_data sh7757_eth_giga1_pdata = {
.phy = 19,
.edmac_endian = EDMAC_LITTLE_ENDIAN,
- .register_type = SH_ETH_REG_GIGABIT,
.set_mdio_gate = sh7757_eth_giga_set_mdio_gate,
.phy_interface = PHY_INTERFACE_MODE_RGMII_ID,
};
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 61fade0ffa96..a4f630f04ea3 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -159,7 +159,6 @@ static struct resource sh_eth_resources[] = {
static struct sh_eth_plat_data sh_eth_plat = {
.phy = 0x1f, /* SMSC LAN8700 */
.edmac_endian = EDMAC_LITTLE_ENDIAN,
- .register_type = SH_ETH_REG_FAST_SH4,
.phy_interface = PHY_INTERFACE_MODE_MII,
.ether_link_active_low = 1
};
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index b70180ef3e29..21e4230659a5 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -365,7 +365,7 @@ static struct platform_device keysc_device = {
static struct resource sh_eth_resources[] = {
[0] = {
.start = SH_ETH_ADDR,
- .end = SH_ETH_ADDR + 0x1FC,
+ .end = SH_ETH_ADDR + 0x1FC - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -377,6 +377,7 @@ static struct resource sh_eth_resources[] = {
static struct sh_eth_plat_data sh_eth_plat = {
.phy = 0x1f, /* SMSC LAN8187 */
.edmac_endian = EDMAC_LITTLE_ENDIAN,
+ .phy_interface = PHY_INTERFACE_MODE_MII,
};
static struct platform_device sh_eth_device = {
diff --git a/arch/sh/boards/mach-sh7763rdp/setup.c b/arch/sh/boards/mach-sh7763rdp/setup.c
index 50ba481fa240..2c8fb04685d4 100644
--- a/arch/sh/boards/mach-sh7763rdp/setup.c
+++ b/arch/sh/boards/mach-sh7763rdp/setup.c
@@ -88,7 +88,6 @@ static struct resource sh_eth_resources[] = {
static struct sh_eth_plat_data sh7763_eth_pdata = {
.phy = 1,
.edmac_endian = EDMAC_LITTLE_ENDIAN,
- .register_type = SH_ETH_REG_GIGABIT,
.phy_interface = PHY_INTERFACE_MODE_MII,
};
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
index bb11e1925178..4df4d4ffe39b 100644
--- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c
+++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
+#include <linux/sh_eth.h>
#include <linux/sh_timer.h>
#include <linux/io.h>
@@ -110,10 +111,16 @@ static struct platform_device scif2_device = {
},
};
+static struct sh_eth_plat_data eth_platform_data = {
+ .phy = 1,
+ .edmac_endian = EDMAC_LITTLE_ENDIAN,
+ .phy_interface = PHY_INTERFACE_MODE_MII,
+};
+
static struct resource eth_resources[] = {
[0] = {
.start = 0xfb000000,
- .end = 0xfb0001c8,
+ .end = 0xfb0001c7,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -127,7 +134,7 @@ static struct platform_device eth_device = {
.name = "sh7619-ether",
.id = -1,
.dev = {
- .platform_data = (void *)1,
+ .platform_data = &eth_platform_data,
},
.num_resources = ARRAY_SIZE(eth_resources),
.resource = eth_resources,
diff --git a/arch/tile/gxio/iorpc_mpipe.c b/arch/tile/gxio/iorpc_mpipe.c
index 31b87bf8c027..4f8f3d619c4a 100644
--- a/arch/tile/gxio/iorpc_mpipe.c
+++ b/arch/tile/gxio/iorpc_mpipe.c
@@ -387,6 +387,27 @@ int gxio_mpipe_link_close_aux(gxio_mpipe_context_t * context, int mac)
EXPORT_SYMBOL(gxio_mpipe_link_close_aux);
+struct link_set_attr_aux_param {
+ int mac;
+ uint32_t attr;
+ int64_t val;
+};
+
+int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t * context, int mac,
+ uint32_t attr, int64_t val)
+{
+ struct link_set_attr_aux_param temp;
+ struct link_set_attr_aux_param *params = &temp;
+
+ params->mac = mac;
+ params->attr = attr;
+ params->val = val;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params), GXIO_MPIPE_OP_LINK_SET_ATTR_AUX);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_link_set_attr_aux);
struct get_timestamp_aux_param {
uint64_t sec;
@@ -454,6 +475,51 @@ int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context,
EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_aux);
+struct adjust_timestamp_freq_param {
+ int32_t ppb;
+};
+
+int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t * context,
+ int32_t ppb)
+{
+ struct adjust_timestamp_freq_param temp;
+ struct adjust_timestamp_freq_param *params = &temp;
+
+ params->ppb = ppb;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params),
+ GXIO_MPIPE_OP_ADJUST_TIMESTAMP_FREQ);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_freq);
+
+struct config_edma_ring_blks_param {
+ unsigned int ering;
+ unsigned int max_blks;
+ unsigned int min_snf_blks;
+ unsigned int db;
+};
+
+int gxio_mpipe_config_edma_ring_blks(gxio_mpipe_context_t * context,
+ unsigned int ering, unsigned int max_blks,
+ unsigned int min_snf_blks, unsigned int db)
+{
+ struct config_edma_ring_blks_param temp;
+ struct config_edma_ring_blks_param *params = &temp;
+
+ params->ering = ering;
+ params->max_blks = max_blks;
+ params->min_snf_blks = min_snf_blks;
+ params->db = db;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params),
+ GXIO_MPIPE_OP_CONFIG_EDMA_RING_BLKS);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_config_edma_ring_blks);
+
struct arm_pollfd_param {
union iorpc_pollfd pollfd;
};
diff --git a/arch/tile/gxio/iorpc_mpipe_info.c b/arch/tile/gxio/iorpc_mpipe_info.c
index d0254aa60cba..64883aabeb9c 100644
--- a/arch/tile/gxio/iorpc_mpipe_info.c
+++ b/arch/tile/gxio/iorpc_mpipe_info.c
@@ -16,6 +16,24 @@
#include "gxio/iorpc_mpipe_info.h"
+struct instance_aux_param {
+ _gxio_mpipe_link_name_t name;
+};
+
+int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t * context,
+ _gxio_mpipe_link_name_t name)
+{
+ struct instance_aux_param temp;
+ struct instance_aux_param *params = &temp;
+
+ params->name = name;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params), GXIO_MPIPE_INFO_OP_INSTANCE_AUX);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_info_instance_aux);
+
struct enumerate_aux_param {
_gxio_mpipe_link_name_t name;
_gxio_mpipe_link_mac_t mac;
diff --git a/arch/tile/gxio/mpipe.c b/arch/tile/gxio/mpipe.c
index e71c63390acc..5301a9ffbae1 100644
--- a/arch/tile/gxio/mpipe.c
+++ b/arch/tile/gxio/mpipe.c
@@ -36,8 +36,14 @@ int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
int fd;
int i;
+ if (mpipe_index >= GXIO_MPIPE_INSTANCE_MAX)
+ return -EINVAL;
+
snprintf(file, sizeof(file), "mpipe/%d/iorpc", mpipe_index);
fd = hv_dev_open((HV_VirtAddr) file, 0);
+
+ context->fd = fd;
+
if (fd < 0) {
if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
return fd;
@@ -45,8 +51,6 @@ int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
return -ENODEV;
}
- context->fd = fd;
-
/* Map in the MMIO space. */
context->mmio_cfg_base = (void __force *)
iorpc_ioremap(fd, HV_MPIPE_CONFIG_MMIO_OFFSET,
@@ -64,12 +68,15 @@ int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
for (i = 0; i < 8; i++)
context->__stacks.stacks[i] = 255;
+ context->instance = mpipe_index;
+
return 0;
fast_failed:
iounmap((void __force __iomem *)(context->mmio_cfg_base));
cfg_failed:
hv_dev_close(context->fd);
+ context->fd = -1;
return -ENODEV;
}
@@ -383,7 +390,7 @@ EXPORT_SYMBOL_GPL(gxio_mpipe_iqueue_init);
int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
gxio_mpipe_context_t *context,
- unsigned int edma_ring_id,
+ unsigned int ering,
unsigned int channel,
void *mem, unsigned int mem_size,
unsigned int mem_flags)
@@ -394,7 +401,7 @@ int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
/* Offset used to read number of completed commands. */
MPIPE_EDMA_POST_REGION_ADDR_t offset;
- int result = gxio_mpipe_init_edma_ring(context, edma_ring_id, channel,
+ int result = gxio_mpipe_init_edma_ring(context, ering, channel,
mem, mem_size, mem_flags);
if (result < 0)
return result;
@@ -405,7 +412,7 @@ int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
offset.region =
MPIPE_MMIO_ADDR__REGION_VAL_EDMA -
MPIPE_MMIO_ADDR__REGION_VAL_IDMA;
- offset.ring = edma_ring_id;
+ offset.ring = ering;
__gxio_dma_queue_init(&equeue->dma_queue,
context->mmio_fast_base + offset.word,
@@ -413,6 +420,9 @@ int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
equeue->edescs = mem;
equeue->mask_num_entries = num_entries - 1;
equeue->log2_num_entries = __builtin_ctz(num_entries);
+ equeue->context = context;
+ equeue->ering = ering;
+ equeue->channel = channel;
return 0;
}
@@ -493,6 +503,20 @@ static gxio_mpipe_context_t *_gxio_get_link_context(void)
return contextp;
}
+int gxio_mpipe_link_instance(const char *link_name)
+{
+ _gxio_mpipe_link_name_t name;
+ gxio_mpipe_context_t *context = _gxio_get_link_context();
+
+ if (!context)
+ return GXIO_ERR_NO_DEVICE;
+
+ strncpy(name.name, link_name, sizeof(name.name));
+ name.name[GXIO_MPIPE_LINK_NAME_LEN - 1] = '\0';
+
+ return gxio_mpipe_info_instance_aux(context, name);
+}
+
int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac)
{
int rv;
@@ -543,3 +567,12 @@ int gxio_mpipe_link_close(gxio_mpipe_link_t *link)
}
EXPORT_SYMBOL_GPL(gxio_mpipe_link_close);
+
+int gxio_mpipe_link_set_attr(gxio_mpipe_link_t *link, uint32_t attr,
+ int64_t val)
+{
+ return gxio_mpipe_link_set_attr_aux(link->context, link->mac, attr,
+ val);
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_link_set_attr);
diff --git a/arch/tile/include/gxio/iorpc_mpipe.h b/arch/tile/include/gxio/iorpc_mpipe.h
index 9d50fce1b1a7..fdd07f88cfd7 100644
--- a/arch/tile/include/gxio/iorpc_mpipe.h
+++ b/arch/tile/include/gxio/iorpc_mpipe.h
@@ -44,10 +44,13 @@
#define GXIO_MPIPE_OP_REGISTER_CLIENT_MEMORY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1210)
#define GXIO_MPIPE_OP_LINK_OPEN_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1211)
#define GXIO_MPIPE_OP_LINK_CLOSE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1212)
+#define GXIO_MPIPE_OP_LINK_SET_ATTR_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1213)
-#define GXIO_MPIPE_OP_GET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x121e)
-#define GXIO_MPIPE_OP_SET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x121f)
-#define GXIO_MPIPE_OP_ADJUST_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1220)
+#define GXIO_MPIPE_OP_GET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x121e)
+#define GXIO_MPIPE_OP_SET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x121f)
+#define GXIO_MPIPE_OP_ADJUST_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1220)
+#define GXIO_MPIPE_OP_CONFIG_EDMA_RING_BLKS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1221)
+#define GXIO_MPIPE_OP_ADJUST_TIMESTAMP_FREQ IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1222)
#define GXIO_MPIPE_OP_ARM_POLLFD IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9000)
#define GXIO_MPIPE_OP_CLOSE_POLLFD IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9001)
#define GXIO_MPIPE_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
@@ -114,6 +117,8 @@ int gxio_mpipe_link_open_aux(gxio_mpipe_context_t * context,
int gxio_mpipe_link_close_aux(gxio_mpipe_context_t * context, int mac);
+int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t * context, int mac,
+ uint32_t attr, int64_t val);
int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t * context, uint64_t * sec,
uint64_t * nsec, uint64_t * cycles);
@@ -124,6 +129,9 @@ int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t * context, uint64_t sec,
int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context,
int64_t nsec);
+int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t * context,
+ int32_t ppb);
+
int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie);
int gxio_mpipe_close_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie);
diff --git a/arch/tile/include/gxio/iorpc_mpipe_info.h b/arch/tile/include/gxio/iorpc_mpipe_info.h
index 0bcf3f71ce8b..476c5e5ca22c 100644
--- a/arch/tile/include/gxio/iorpc_mpipe_info.h
+++ b/arch/tile/include/gxio/iorpc_mpipe_info.h
@@ -27,11 +27,15 @@
#include <asm/pgtable.h>
+#define GXIO_MPIPE_INFO_OP_INSTANCE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1250)
#define GXIO_MPIPE_INFO_OP_ENUMERATE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1251)
#define GXIO_MPIPE_INFO_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
#define GXIO_MPIPE_INFO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
+int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t * context,
+ _gxio_mpipe_link_name_t name);
+
int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t * context,
unsigned int idx,
_gxio_mpipe_link_name_t * name,
diff --git a/arch/tile/include/gxio/mpipe.h b/arch/tile/include/gxio/mpipe.h
index b74f470ed11e..e37cf4f0cffd 100644
--- a/arch/tile/include/gxio/mpipe.h
+++ b/arch/tile/include/gxio/mpipe.h
@@ -220,6 +220,13 @@ typedef MPIPE_PDESC_t gxio_mpipe_idesc_t;
*/
typedef MPIPE_EDMA_DESC_t gxio_mpipe_edesc_t;
+/*
+ * Max # of mpipe instances. 2 currently.
+ */
+#define GXIO_MPIPE_INSTANCE_MAX HV_MPIPE_INSTANCE_MAX
+
+#define NR_MPIPE_MAX GXIO_MPIPE_INSTANCE_MAX
+
/* Get the "va" field from an "idesc".
*
* This is the address at which the ingress hardware copied the first
@@ -311,6 +318,9 @@ typedef struct {
/* File descriptor for calling up to Linux (and thus the HV). */
int fd;
+ /* Corresponding mpipe instance #. */
+ int instance;
+
/* The VA at which configuration registers are mapped. */
char *mmio_cfg_base;
@@ -810,7 +820,7 @@ extern int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context,
/* Initialize an eDMA ring, using the given memory and size.
*
* @param context An initialized mPIPE context.
- * @param ring The eDMA ring index.
+ * @param ering The eDMA ring index.
* @param channel The channel to use. This must be one of the channels
* associated with the context's set of open links.
* @param mem A physically contiguous region of memory to be filled
@@ -823,10 +833,37 @@ extern int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context,
* ::GXIO_ERR_INVAL_MEMORY_SIZE on failure.
*/
extern int gxio_mpipe_init_edma_ring(gxio_mpipe_context_t *context,
- unsigned int ring, unsigned int channel,
+ unsigned int ering, unsigned int channel,
void *mem, size_t mem_size,
unsigned int mem_flags);
+/* Set the "max_blks", "min_snf_blks", and "db" fields of
+ * ::MPIPE_EDMA_RG_INIT_DAT_THRESH_t for a given edma ring.
+ *
+ * The global pool of dynamic blocks will be automatically adjusted.
+ *
+ * This function should not be called after any egress has been done
+ * on the edma ring.
+ *
+ * Most applications should just use gxio_mpipe_equeue_set_snf_size().
+ *
+ * @param context An initialized mPIPE context.
+ * @param ering The eDMA ring index.
+ * @param max_blks The number of blocks to dedicate to the ring
+ * (normally min_snf_blks + 1). Must be greater than min_snf_blks.
+ * @param min_snf_blks The number of blocks which must be stored
+ * prior to starting to send the packet (normally 12).
+ * @param db Whether to allow use of dynamic blocks by the ring
+ * (normally 1).
+ *
+ * @return 0 on success, negative on error.
+ */
+extern int gxio_mpipe_config_edma_ring_blks(gxio_mpipe_context_t *context,
+ unsigned int ering,
+ unsigned int max_blks,
+ unsigned int min_snf_blks,
+ unsigned int db);
+
/*****************************************************************
* Classifier Program *
******************************************************************/
@@ -1288,15 +1325,39 @@ typedef struct {
/* The log2() of the number of entries. */
unsigned long log2_num_entries;
+ /* The context. */
+ gxio_mpipe_context_t *context;
+
+ /* The ering. */
+ unsigned int ering;
+
+ /* The channel. */
+ unsigned int channel;
+
} gxio_mpipe_equeue_t;
/* Initialize an "equeue".
*
- * Takes the equeue plus the same args as gxio_mpipe_init_edma_ring().
+ * This function uses gxio_mpipe_init_edma_ring() to initialize the
+ * underlying edma_ring using the provided arguments.
+ *
+ * @param equeue An egress queue to be initialized.
+ * @param context An initialized mPIPE context.
+ * @param ering The eDMA ring index.
+ * @param channel The channel to use. This must be one of the channels
+ * associated with the context's set of open links.
+ * @param mem A physically contiguous region of memory to be filled
+ * with a ring of ::gxio_mpipe_edesc_t structures.
+ * @param mem_size Number of bytes in the ring. Must be 512, 2048,
+ * 8192 or 65536, times 16 (i.e. sizeof(gxio_mpipe_edesc_t)).
+ * @param mem_flags ::gxio_mpipe_mem_flags_e memory flags.
+ *
+ * @return 0 on success, ::GXIO_MPIPE_ERR_BAD_EDMA_RING or
+ * ::GXIO_ERR_INVAL_MEMORY_SIZE on failure.
*/
extern int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
gxio_mpipe_context_t *context,
- unsigned int edma_ring_id,
+ unsigned int ering,
unsigned int channel,
void *mem, unsigned int mem_size,
unsigned int mem_flags);
@@ -1494,6 +1555,37 @@ static inline int gxio_mpipe_equeue_is_complete(gxio_mpipe_equeue_t *equeue,
completion_slot, update);
}
+/* Set the snf (store and forward) size for an equeue.
+ *
+ * The snf size for an equeue defaults to 1536, and encodes the size
+ * of the largest packet for which egress is guaranteed to avoid
+ * transmission underruns and/or corrupt checksums under heavy load.
+ *
+ * The snf size affects a global resource pool which cannot support,
+ * for example, all 24 equeues each requesting an snf size of 8K.
+ *
+ * To ensure that jumbo packets can be egressed properly, the snf size
+ * should be set to the size of the largest possible packet, which
+ * will usually be limited by the size of the app's largest buffer.
+ *
+ * This is a convenience wrapper around
+ * gxio_mpipe_config_edma_ring_blks().
+ *
+ * This function should not be called after any egress has been done
+ * on the equeue.
+ *
+ * @param equeue An egress queue initialized via gxio_mpipe_equeue_init().
+ * @param size The snf size, in bytes.
+ * @return Zero on success, negative error otherwise.
+ */
+static inline int gxio_mpipe_equeue_set_snf_size(gxio_mpipe_equeue_t *equeue,
+ size_t size)
+{
+ int blks = (size + 127) / 128;
+ return gxio_mpipe_config_edma_ring_blks(equeue->context, equeue->ering,
+ blks + 1, blks, 1);
+}
+
/*****************************************************************
* Link Management *
******************************************************************/
@@ -1634,6 +1726,24 @@ typedef struct {
uint8_t mac;
} gxio_mpipe_link_t;
+/* Translate a link name to the instance number of the mPIPE shim which is
+ * connected to that link. This call does not verify whether the link is
+ * currently available, and does not reserve any link resources;
+ * gxio_mpipe_link_open() must be called to perform those functions.
+ *
+ * Typically applications will call this function to translate a link name
+ * to an mPIPE instance number; call gxio_mpipe_init(), passing it that
+ * instance number, to initialize the mPIPE shim; and then call
+ * gxio_mpipe_link_open(), passing it the same link name plus the mPIPE
+ * context, to configure the link.
+ *
+ * @param link_name Name of the link; see @ref gxio_mpipe_link_names.
+ * @return The mPIPE instance number which is associated with the named
+ * link, or a negative error code (::GXIO_ERR_NO_DEVICE) if the link does
+ * not exist.
+ */
+extern int gxio_mpipe_link_instance(const char *link_name);
+
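The documented sequence, sketched with a hypothetical link name "gbe0" and error handling elided (the gxio_mpipe_init() and gxio_mpipe_link_open() signatures are assumed from the same header):

    int inst = gxio_mpipe_link_instance("gbe0");
    if (inst < 0)
            return inst;                    /* e.g. GXIO_ERR_NO_DEVICE */

    gxio_mpipe_context_t ctx;
    gxio_mpipe_init(&ctx, inst);            /* bind to that mPIPE shim */

    gxio_mpipe_link_t link;
    gxio_mpipe_link_open(&link, &ctx, "gbe0", 0);   /* same name, flags 0 */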
/* Retrieve one of this system's legal link names, and its MAC address.
*
* @param index Link name index. If a system supports N legal link names,
@@ -1697,6 +1807,17 @@ static inline int gxio_mpipe_link_channel(gxio_mpipe_link_t *link)
return link->channel;
}
+/* Set a link attribute.
+ *
+ * @param link A properly initialized link state object.
+ * @param attr An attribute from the set of @ref gxio_mpipe_link_attrs.
+ * @param val New value of the attribute.
+ * @return 0 if the attribute was successfully set, or a negative error
+ * code.
+ */
+extern int gxio_mpipe_link_set_attr(gxio_mpipe_link_t *link, uint32_t attr,
+ int64_t val);
+
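For instance, a driver enabling jumbo reception might set an attribute roughly as follows; GXIO_MPIPE_LINK_RECEIVE_JUMBO is assumed to be one of the gxio_mpipe_link_attrs constants and is used purely as an illustration:

    /* "link" opened via gxio_mpipe_link_open() as sketched earlier. */
    int rc = gxio_mpipe_link_set_attr(&link, GXIO_MPIPE_LINK_RECEIVE_JUMBO, 1);
    if (rc < 0)
            return rc;      /* attribute unsupported or value out of range */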
///////////////////////////////////////////////////////////////////
// Timestamp //
///////////////////////////////////////////////////////////////////
@@ -1733,4 +1854,18 @@ extern int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context,
extern int gxio_mpipe_adjust_timestamp(gxio_mpipe_context_t *context,
int64_t delta);
+/** Adjust the mPIPE timestamp clock frequency.
+ *
+ * @param context An initialized mPIPE context.
+ * @param ppb A 32-bit signed PPB (Parts Per Billion) value to adjust.
+ * The absolute value of ppb must be less than or equal to 1000000000.
+ * Values less than about 30000 will generally cause a GXIO_ERR_INVAL
+ * return due to the granularity of the hardware that converts reference
+ * clock cycles into seconds and nanoseconds.
+ * @return If the call was successful, zero; otherwise, a negative error
+ * code.
+ */
+extern int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t *context,
+ int32_t ppb);
+
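Worked example (illustrative): to correct an oscillator measured to run 50 ppm fast, pass -50000 ppb; a -10000 ppb request would fall under the roughly 30000 ppb granularity floor described above and likely return GXIO_ERR_INVAL:

    /* Slow the timestamp clock by 50 ppm (50 ppm == 50000 ppb);
     * "ctx" assumed initialized via gxio_mpipe_init(). */
    int rc = gxio_mpipe_adjust_timestamp_freq(&ctx, -50000);
    if (rc == GXIO_ERR_INVAL)
            return rc;      /* step finer than the hardware can encode */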
#endif /* !_GXIO_MPIPE_H_ */
diff --git a/arch/tile/include/hv/drv_mpipe_intf.h b/arch/tile/include/hv/drv_mpipe_intf.h
index 6cdae3bf046e..c97e416dd963 100644
--- a/arch/tile/include/hv/drv_mpipe_intf.h
+++ b/arch/tile/include/hv/drv_mpipe_intf.h
@@ -23,6 +23,9 @@
#include <arch/mpipe_constants.h>
+/** Number of mPIPE instances supported. */
+#define HV_MPIPE_INSTANCE_MAX (2)
+
/** Number of buffer stacks (32). */
#define HV_MPIPE_NUM_BUFFER_STACKS \
(MPIPE_MMIO_INIT_DAT_GX36_1__BUFFER_STACK_MASK_WIDTH)
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 507362a76a73..449f6298dc89 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -1088,15 +1088,8 @@ static int he_start(struct atm_dev *dev)
for (i = 0; i < 6; ++i)
dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);
- hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
- he_dev->prod_id,
- he_dev->media & 0x40 ? "SM" : "MM",
- dev->esi[0],
- dev->esi[1],
- dev->esi[2],
- dev->esi[3],
- dev->esi[4],
- dev->esi[5]);
+ hprintk("%s%s, %pM\n", he_dev->prod_id,
+ he_dev->media & 0x40 ? "SM" : "MM", dev->esi);
he_dev->atm_dev->link_rate = he_is622(he_dev) ?
ATM_OC12_PCR : ATM_OC3_PCR;
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 6587dc295eb0..409502a78e7e 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -153,7 +153,6 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg);
static void which_list(ns_dev * card, struct sk_buff *skb);
#endif
static void ns_poll(unsigned long arg);
-static int ns_parse_mac(char *mac, unsigned char *esi);
static void ns_phy_put(struct atm_dev *dev, unsigned char value,
unsigned long addr);
static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr);
@@ -779,7 +778,7 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
return error;
}
- if (ns_parse_mac(mac[i], card->atmdev->esi)) {
+ if (mac[i] == NULL || !mac_pton(mac[i], card->atmdev->esi)) {
nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET,
card->atmdev->esi, 6);
if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) ==
@@ -2802,29 +2801,6 @@ static void ns_poll(unsigned long arg)
PRINTK("nicstar: Leaving ns_poll().\n");
}
-static int ns_parse_mac(char *mac, unsigned char *esi)
-{
- int i, j;
- short byte1, byte0;
-
- if (mac == NULL || esi == NULL)
- return -1;
- j = 0;
- for (i = 0; i < 6; i++) {
- if ((byte1 = hex_to_bin(mac[j++])) < 0)
- return -1;
- if ((byte0 = hex_to_bin(mac[j++])) < 0)
- return -1;
- esi[i] = (unsigned char)(byte1 * 16 + byte0);
- if (i < 5) {
- if (mac[j++] != ':')
- return -1;
- }
- }
- return 0;
-}
-
-
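
The removed parser duplicated a generic helper: mac_pton() (lib/net_utils.c) returns true on success, which is why the replacement above negates it before falling back to the EEPROM. Illustrative semantics:

    u8 esi[6];

    if (mac_pton("00:11:22:33:44:55", esi)) {
            /* esi now holds { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } */
    }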
static void ns_phy_put(struct atm_dev *dev, unsigned char value,
unsigned long addr)
{
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index 380a2003231e..7c081b38ef3e 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -35,8 +35,14 @@ config BCMA_DRIVER_PCI_HOSTMODE
PCI core hostmode operation (external PCI bus).
config BCMA_HOST_SOC
- bool
- depends on BCMA_DRIVER_MIPS
+ bool "Support for BCMA in a SoC"
+ depends on BCMA
+ help
+ Host interface for a Broadcom AXI bus directly mapped into
+ memory. This only works with the Broadcom SoCs from the
+ BCM47XX line.
+
+ If unsure, say N.
config BCMA_DRIVER_MIPS
bool "BCMA Broadcom MIPS core driver"
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index cf7a476a519f..c9fd6943ce45 100644
--- a/drivers/bcma/driver_pci.c
+++ b/drivers/bcma/driver_pci.c
@@ -31,7 +31,7 @@ static void bcma_pcie_write(struct bcma_drv_pci *pc, u32 address, u32 data)
pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_DATA, data);
}
-static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u8 phy)
+static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u16 phy)
{
u32 v;
int i;
@@ -55,7 +55,7 @@ static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u8 phy)
}
}
-static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u8 device, u8 address)
+static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u16 device, u8 address)
{
int max_retries = 10;
u16 ret = 0;
@@ -98,7 +98,7 @@ static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u8 device, u8 address)
return ret;
}
-static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u8 device,
+static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u16 device,
u8 address, u16 data)
{
int max_retries = 10;
@@ -137,6 +137,13 @@ static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u8 device,
pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
}
+static u16 bcma_pcie_mdio_writeread(struct bcma_drv_pci *pc, u16 device,
+ u8 address, u16 data)
+{
+ bcma_pcie_mdio_write(pc, device, address, data);
+ return bcma_pcie_mdio_read(pc, device, address);
+}
+
/**************************************************
* Workarounds.
**************************************************/
@@ -203,6 +210,25 @@ static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
}
}
+static void bcma_core_pci_power_save(struct bcma_drv_pci *pc, bool up)
+{
+ u16 data;
+
+ if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) {
+ data = up ? 0x74 : 0x7C;
+ bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+ BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7F64);
+ bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+ BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
+ } else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) {
+ data = up ? 0x75 : 0x7D;
+ bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+ BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7E65);
+ bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+ BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
+ }
+}
+
/**************************************************
* Init.
**************************************************/
@@ -262,7 +288,7 @@ out:
}
EXPORT_SYMBOL_GPL(bcma_core_pci_irq_ctl);
-void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
+static void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
{
u32 w;
@@ -274,4 +300,33 @@ void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
bcma_pcie_write(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG, w);
bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG);
}
-EXPORT_SYMBOL_GPL(bcma_core_pci_extend_L1timer);
+
+void bcma_core_pci_up(struct bcma_bus *bus)
+{
+ struct bcma_drv_pci *pc;
+
+ if (bus->hosttype != BCMA_HOSTTYPE_PCI)
+ return;
+
+ pc = &bus->drv_pci[0];
+
+ bcma_core_pci_power_save(pc, true);
+
+ bcma_core_pci_extend_L1timer(pc, true);
+}
+EXPORT_SYMBOL_GPL(bcma_core_pci_up);
+
+void bcma_core_pci_down(struct bcma_bus *bus)
+{
+ struct bcma_drv_pci *pc;
+
+ if (bus->hosttype != BCMA_HOSTTYPE_PCI)
+ return;
+
+ pc = &bus->drv_pci[0];
+
+ bcma_core_pci_extend_L1timer(pc, false);
+
+ bcma_core_pci_power_save(pc, false);
+}
+EXPORT_SYMBOL_GPL(bcma_core_pci_down);
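
Client drivers are expected to bracket device activity with this pair; a hedged sketch of a typical caller (the driver-side function names are invented):

    static int mydrv_up(struct bcma_device *core)
    {
            /* power up the PCIe PHY and extend the L1 timer before DMA */
            bcma_core_pci_up(core->bus);
            return 0;
    }

    static void mydrv_down(struct bcma_device *core)
    {
            /* quiesce DMA first, then let the PHY enter power save */
            bcma_core_pci_down(core->bus);
    }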
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index 30629a3d44cc..c3d7b03c2fdc 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -581,6 +581,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_addresses);
int bcma_core_pci_plat_dev_init(struct pci_dev *dev)
{
struct bcma_drv_pci_host *pc_host;
+ int readrq;
if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
/* This is not a device on the PCI-core bridge. */
@@ -595,6 +596,11 @@ int bcma_core_pci_plat_dev_init(struct pci_dev *dev)
dev->irq = bcma_core_irq(pc_host->pdev->core);
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+ readrq = pcie_get_readrq(dev);
+ if (readrq > 128) {
+ pr_info("change PCIe max read request size from %i to 128\n", readrq);
+ pcie_set_readrq(dev, 128);
+ }
return 0;
}
EXPORT_SYMBOL(bcma_core_pci_plat_dev_init);
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 0067422ec17d..90ee350442a9 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -237,7 +237,7 @@ int bcma_bus_register(struct bcma_bus *bus)
err = bcma_bus_scan(bus);
if (err) {
bcma_err(bus, "Failed to scan: %d\n", err);
- return -1;
+ return err;
}
/* Early init CC core */
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
index 8bffa5c9818c..cd6b20fce680 100644
--- a/drivers/bcma/scan.c
+++ b/drivers/bcma/scan.c
@@ -32,6 +32,18 @@ static const struct bcma_device_id_name bcma_bcm_device_names[] = {
{ BCMA_CORE_4706_CHIPCOMMON, "BCM4706 ChipCommon" },
{ BCMA_CORE_4706_SOC_RAM, "BCM4706 SOC RAM" },
{ BCMA_CORE_4706_MAC_GBIT, "BCM4706 GBit MAC" },
+ { BCMA_CORE_PCIEG2, "PCIe Gen 2" },
+ { BCMA_CORE_DMA, "DMA" },
+ { BCMA_CORE_SDIO3, "SDIO3" },
+ { BCMA_CORE_USB20, "USB 2.0" },
+ { BCMA_CORE_USB30, "USB 3.0" },
+ { BCMA_CORE_A9JTAG, "ARM Cortex A9 JTAG" },
+ { BCMA_CORE_DDR23, "Denali DDR2/DDR3 memory controller" },
+ { BCMA_CORE_ROM, "ROM" },
+ { BCMA_CORE_NAND, "NAND flash controller" },
+ { BCMA_CORE_QSPI, "SPI flash controller" },
+ { BCMA_CORE_CHIPCOMMON_B, "Chipcommon B" },
+ { BCMA_CORE_ARMCA9, "ARM Cortex A9 core (ihost)" },
{ BCMA_CORE_AMEMC, "AMEMC (DDR)" },
{ BCMA_CORE_ALTA, "ALTA (I2S)" },
{ BCMA_CORE_INVALID, "Invalid" },
@@ -201,7 +213,7 @@ static s32 bcma_erom_get_mst_port(struct bcma_bus *bus, u32 __iomem **eromptr)
return ent;
}
-static s32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
+static u32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
u32 type, u8 port)
{
u32 addrl, addrh, sizel, sizeh = 0;
@@ -213,7 +225,7 @@ static s32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
((ent & SCAN_ADDR_TYPE) != type) ||
(((ent & SCAN_ADDR_PORT) >> SCAN_ADDR_PORT_SHIFT) != port)) {
bcma_erom_push_ent(eromptr);
- return -EINVAL;
+ return (u32)-EINVAL;
}
addrl = ent & SCAN_ADDR_ADDR;
@@ -261,7 +273,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
struct bcma_device_id *match, int core_num,
struct bcma_device *core)
{
- s32 tmp;
+ u32 tmp;
u8 i, j;
s32 cia, cib;
u8 ports[2], wrappers[2];
@@ -339,11 +351,11 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
* the main register space for the core
*/
tmp = bcma_erom_get_addr_desc(bus, eromptr, SCAN_ADDR_TYPE_SLAVE, 0);
- if (tmp <= 0) {
+ if (tmp == 0 || IS_ERR_VALUE(tmp)) {
/* Try again to see if it is a bridge */
tmp = bcma_erom_get_addr_desc(bus, eromptr,
SCAN_ADDR_TYPE_BRIDGE, 0);
- if (tmp <= 0) {
+ if (tmp == 0 || IS_ERR_VALUE(tmp)) {
return -EILSEQ;
} else {
bcma_info(bus, "Bridge found\n");
@@ -357,7 +369,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
for (j = 0; ; j++) {
tmp = bcma_erom_get_addr_desc(bus, eromptr,
SCAN_ADDR_TYPE_SLAVE, i);
- if (tmp < 0) {
+ if (IS_ERR_VALUE(tmp)) {
/* no more entries for port _i_ */
/* pr_debug("erom: slave port %d "
* "has %d descriptors\n", i, j); */
@@ -374,7 +386,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
for (j = 0; ; j++) {
tmp = bcma_erom_get_addr_desc(bus, eromptr,
SCAN_ADDR_TYPE_MWRAP, i);
- if (tmp < 0) {
+ if (IS_ERR_VALUE(tmp)) {
/* no more entries for port _i_ */
/* pr_debug("erom: master wrapper %d "
* "has %d descriptors\n", i, j); */
@@ -392,7 +404,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
for (j = 0; ; j++) {
tmp = bcma_erom_get_addr_desc(bus, eromptr,
SCAN_ADDR_TYPE_SWRAP, i + hack);
- if (tmp < 0) {
+ if (IS_ERR_VALUE(tmp)) {
/* no more entries for port _i_ */
/* pr_debug("erom: master wrapper %d "
* has %d descriptors\n", i, j); */
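
The switch to u32 exists so that descriptors with bit 31 set (addresses at or above 0x80000000) survive the error check. On the 32-bit SoCs this driver targets, IS_ERR_VALUE() is roughly:

    #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)

so (u32)-EINVAL (0xFFFFFFEA) still tests as an error, while 0x80000000, which the old signed check (tmp < 0) wrongly rejected, passes through as a valid address.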
diff --git a/drivers/bluetooth/btmrvl_debugfs.c b/drivers/bluetooth/btmrvl_debugfs.c
index db2c3c305df8..023d35e3c7a7 100644
--- a/drivers/bluetooth/btmrvl_debugfs.c
+++ b/drivers/bluetooth/btmrvl_debugfs.c
@@ -43,7 +43,7 @@ static ssize_t btmrvl_hscfgcmd_write(struct file *file,
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
- ret = strict_strtol(buf, 10, &result);
+ ret = kstrtol(buf, 10, &result);
if (ret)
return ret;
@@ -89,7 +89,7 @@ static ssize_t btmrvl_pscmd_write(struct file *file, const char __user *ubuf,
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
- ret = strict_strtol(buf, 10, &result);
+ ret = kstrtol(buf, 10, &result);
if (ret)
return ret;
@@ -135,7 +135,7 @@ static ssize_t btmrvl_hscmd_write(struct file *file, const char __user *ubuf,
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
- ret = strict_strtol(buf, 10, &result);
+ ret = kstrtol(buf, 10, &result);
if (ret)
return ret;
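
strict_strtol() was a deprecated wrapper around kstrtol(); the contract is unchanged: zero on success, -EINVAL or -ERANGE on failure, with a single trailing newline tolerated. Illustrative:

    long result;
    int ret = kstrtol("42\n", 10, &result);   /* ret == 0, result == 42 */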
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 75c262694632..00da6df9f71e 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -486,7 +486,7 @@ static int btmrvl_sdio_download_fw_w_helper(struct btmrvl_sdio_card *card)
if (firmwarelen - offset < txlen)
txlen = firmwarelen - offset;
- tx_blocks = (txlen + blksz_dl - 1) / blksz_dl;
+ tx_blocks = DIV_ROUND_UP(txlen, blksz_dl);
memcpy(fwbuf, &firmware[offset], txlen);
}
@@ -873,7 +873,7 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv,
}
blksz = SDIO_BLOCK_SIZE;
- buf_block_len = (nb + blksz - 1) / blksz;
+ buf_block_len = DIV_ROUND_UP(nb, blksz);
sdio_claim_host(card->func);
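
DIV_ROUND_UP() from <linux/kernel.h> expands to ((n) + (d) - 1) / (d), so these conversions are behavior-preserving, for example:

    tx_blocks = DIV_ROUND_UP(1000, 256);      /* (1000 + 255) / 256 == 4 */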
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index a7e4939787c9..7f910c76ca0a 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -1307,11 +1307,11 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol)
}
if (fifo2 & 2) {
hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B2;
- hc->hw.int_m1 &= ~(HFCPCI_INTS_B2TRANS +
+ hc->hw.int_m1 &= ~(HFCPCI_INTS_B2TRANS |
HFCPCI_INTS_B2REC);
} else {
hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B1;
- hc->hw.int_m1 &= ~(HFCPCI_INTS_B1TRANS +
+ hc->hw.int_m1 &= ~(HFCPCI_INTS_B1TRANS |
HFCPCI_INTS_B1REC);
}
#ifdef REVERSE_BITORDER
@@ -1346,14 +1346,14 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol)
if (fifo2 & 2) {
hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
if (!tics)
- hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS +
+ hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS |
HFCPCI_INTS_B2REC);
hc->hw.ctmt |= 2;
hc->hw.conn &= ~0x18;
} else {
hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
if (!tics)
- hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS +
+ hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS |
HFCPCI_INTS_B1REC);
hc->hw.ctmt |= 1;
hc->hw.conn &= ~0x03;
@@ -1375,14 +1375,14 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol)
if (fifo2 & 2) {
hc->hw.last_bfifo_cnt[1] = 0;
hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
- hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS +
+ hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS |
HFCPCI_INTS_B2REC);
hc->hw.ctmt &= ~2;
hc->hw.conn &= ~0x18;
} else {
hc->hw.last_bfifo_cnt[0] = 0;
hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
- hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS +
+ hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS |
HFCPCI_INTS_B1REC);
hc->hw.ctmt &= ~1;
hc->hw.conn &= ~0x03;
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 390061d09693..0d8f427ade93 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -143,10 +143,9 @@ static inline struct bonding *__get_bond_by_port(struct port *port)
*/
static inline struct port *__get_first_port(struct bonding *bond)
{
- if (bond->slave_cnt == 0)
- return NULL;
+ struct slave *first_slave = bond_first_slave(bond);
- return &(SLAVE_AD_INFO(bond->first_slave).port);
+ return first_slave ? &(SLAVE_AD_INFO(first_slave).port) : NULL;
}
/**
@@ -159,13 +158,16 @@ static inline struct port *__get_first_port(struct bonding *bond)
static inline struct port *__get_next_port(struct port *port)
{
struct bonding *bond = __get_bond_by_port(port);
- struct slave *slave = port->slave;
+ struct slave *slave = port->slave, *slave_next;
// If there's no bond for this port, or this is the last slave
- if ((bond == NULL) || (slave->next == bond->first_slave))
+ if (bond == NULL)
+ return NULL;
+ slave_next = bond_next_slave(bond, slave);
+ if (!slave_next || bond_is_first_slave(bond, slave_next))
return NULL;
- return &(SLAVE_AD_INFO(slave->next).port);
+ return &(SLAVE_AD_INFO(slave_next).port);
}
/**
@@ -178,12 +180,14 @@ static inline struct port *__get_next_port(struct port *port)
static inline struct aggregator *__get_first_agg(struct port *port)
{
struct bonding *bond = __get_bond_by_port(port);
+ struct slave *first_slave;
// If there's no bond for this port, or bond has no slaves
- if ((bond == NULL) || (bond->slave_cnt == 0))
+ if (bond == NULL)
return NULL;
+ first_slave = bond_first_slave(bond);
- return &(SLAVE_AD_INFO(bond->first_slave).aggregator);
+ return first_slave ? &(SLAVE_AD_INFO(first_slave).aggregator) : NULL;
}
/**
@@ -195,14 +199,17 @@ static inline struct aggregator *__get_first_agg(struct port *port)
*/
static inline struct aggregator *__get_next_agg(struct aggregator *aggregator)
{
- struct slave *slave = aggregator->slave;
+ struct slave *slave = aggregator->slave, *slave_next;
struct bonding *bond = bond_get_bond_by_slave(slave);
// If there's no bond for this aggregator, or this is the last slave
- if ((bond == NULL) || (slave->next == bond->first_slave))
+ if (bond == NULL)
+ return NULL;
+ slave_next = bond_next_slave(bond, slave);
+ if (!slave_next || bond_is_first_slave(bond, slave_next))
return NULL;
- return &(SLAVE_AD_INFO(slave->next).aggregator);
+ return &(SLAVE_AD_INFO(slave_next).aggregator);
}
/*
@@ -2110,7 +2117,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
read_lock(&bond->lock);
//check if there are any slaves
- if (bond->slave_cnt == 0)
+ if (list_empty(&bond->slave_list))
goto re_arm;
// check if agg_select_timer timer after initialize is timed out
@@ -2336,8 +2343,12 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
int bond_3ad_set_carrier(struct bonding *bond)
{
struct aggregator *active;
+ struct slave *first_slave;
- active = __get_active_agg(&(SLAVE_AD_INFO(bond->first_slave).aggregator));
+ first_slave = bond_first_slave(bond);
+ if (!first_slave)
+ return 0;
+ active = __get_active_agg(&(SLAVE_AD_INFO(first_slave).aggregator));
if (active) {
/* are enough slaves available to consider link up? */
if (active->num_of_ports < bond->params.min_links) {
@@ -2415,6 +2426,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
struct ad_info ad_info;
int res = 1;
+ read_lock(&bond->lock);
if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n",
dev->name);
@@ -2432,7 +2444,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
slave_agg_no = bond->xmit_hash_policy(skb, slaves_in_agg);
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator;
if (agg && (agg->aggregator_identifier == agg_id)) {
@@ -2464,6 +2476,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
}
out:
+ read_unlock(&bond->lock);
if (res) {
/* no suitable interface, frame not sent */
kfree_skb(skb);
@@ -2501,18 +2514,13 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
*/
void bond_3ad_update_lacp_rate(struct bonding *bond)
{
- int i;
- struct slave *slave;
struct port *port = NULL;
+ struct slave *slave;
int lacp_fast;
- write_lock_bh(&bond->lock);
lacp_fast = bond->params.lacp_fast;
-
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
port = &(SLAVE_AD_INFO(slave).port);
- if (port->slave == NULL)
- continue;
__get_state_machine_lock(port);
if (lacp_fast)
port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT;
@@ -2520,6 +2528,4 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
port->actor_oper_port_state &= ~AD_STATE_LACP_TIMEOUT;
__release_state_machine_lock(port);
}
-
- write_unlock_bh(&bond->lock);
}
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 4ea8ed150d46..91f179d5135c 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -224,13 +224,12 @@ static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
{
struct slave *slave, *least_loaded;
long long max_gap;
- int i;
least_loaded = NULL;
max_gap = LLONG_MIN;
/* Find the slave with the largest gap */
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
if (SLAVE_IS_OK(slave)) {
long long gap = compute_gap(slave);
@@ -386,11 +385,10 @@ static struct slave *rlb_next_rx_slave(struct bonding *bond)
struct slave *rx_slave, *slave, *start_at;
int i = 0;
- if (bond_info->next_rx_slave) {
+ if (bond_info->next_rx_slave)
start_at = bond_info->next_rx_slave;
- } else {
- start_at = bond->first_slave;
- }
+ else
+ start_at = bond_first_slave(bond);
rx_slave = NULL;
@@ -405,7 +403,8 @@ static struct slave *rlb_next_rx_slave(struct bonding *bond)
}
if (rx_slave) {
- bond_info->next_rx_slave = rx_slave->next;
+ slave = bond_next_slave(bond, rx_slave);
+ bond_info->next_rx_slave = slave;
}
return rx_slave;
@@ -513,7 +512,7 @@ static void rlb_update_client(struct rlb_client_info *client_info)
skb->dev = client_info->slave->dev;
- if (client_info->tag) {
+ if (client_info->vlan_id) {
skb = vlan_put_tag(skb, htons(ETH_P_8021Q), client_info->vlan_id);
if (!skb) {
pr_err("%s: Error: failed to insert VLAN tag\n",
@@ -695,10 +694,8 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
client_info->ntt = 0;
}
- if (bond_vlan_used(bond)) {
- if (!vlan_get_tag(skb, &client_info->vlan_id))
- client_info->tag = 1;
- }
+ if (vlan_get_tag(skb, &client_info->vlan_id))
+ client_info->vlan_id = 0;
if (!client_info->assigned) {
u32 prev_tbl_head = bond_info->rx_hashtbl_used_head;
@@ -804,7 +801,7 @@ static void rlb_init_table_entry_dst(struct rlb_client_info *entry)
entry->used_prev = RLB_NULL_INDEX;
entry->assigned = 0;
entry->slave = NULL;
- entry->tag = 0;
+ entry->vlan_id = 0;
}
static void rlb_init_table_entry_src(struct rlb_client_info *entry)
{
@@ -961,7 +958,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
struct rlb_client_info *curr = &(bond_info->rx_hashtbl[curr_index]);
u32 next_index = bond_info->rx_hashtbl[curr_index].used_next;
- if (curr->tag && (curr->vlan_id == vlan_id))
+ if (curr->vlan_id == vlan_id)
rlb_delete_table_entry(bond, curr_index);
curr_index = next_index;
@@ -972,58 +969,62 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
/*********************** tlb/rlb shared functions *********************/
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
+static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
+ u16 vid)
{
- struct bonding *bond = bond_get_bond_by_slave(slave);
struct learning_pkt pkt;
+ struct sk_buff *skb;
int size = sizeof(struct learning_pkt);
- int i;
+ char *data;
memset(&pkt, 0, size);
memcpy(pkt.mac_dst, mac_addr, ETH_ALEN);
memcpy(pkt.mac_src, mac_addr, ETH_ALEN);
pkt.type = cpu_to_be16(ETH_P_LOOP);
- for (i = 0; i < MAX_LP_BURST; i++) {
- struct sk_buff *skb;
- char *data;
+ skb = dev_alloc_skb(size);
+ if (!skb)
+ return;
+
+ data = skb_put(skb, size);
+ memcpy(data, &pkt, size);
+
+ skb_reset_mac_header(skb);
+ skb->network_header = skb->mac_header + ETH_HLEN;
+ skb->protocol = pkt.type;
+ skb->priority = TC_PRIO_CONTROL;
+ skb->dev = slave->dev;
- skb = dev_alloc_skb(size);
+ if (vid) {
+ skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vid);
if (!skb) {
+ pr_err("%s: Error: failed to insert VLAN tag\n",
+ slave->bond->dev->name);
return;
}
+ }
- data = skb_put(skb, size);
- memcpy(data, &pkt, size);
-
- skb_reset_mac_header(skb);
- skb->network_header = skb->mac_header + ETH_HLEN;
- skb->protocol = pkt.type;
- skb->priority = TC_PRIO_CONTROL;
- skb->dev = slave->dev;
+ dev_queue_xmit(skb);
+}
- if (bond_vlan_used(bond)) {
- struct vlan_entry *vlan;
- vlan = bond_next_vlan(bond,
- bond->alb_info.current_alb_vlan);
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
+{
+ struct bonding *bond = bond_get_bond_by_slave(slave);
+ struct net_device *upper;
+ struct list_head *iter;
- bond->alb_info.current_alb_vlan = vlan;
- if (!vlan) {
- kfree_skb(skb);
- continue;
- }
+ /* send untagged */
+ alb_send_lp_vid(slave, mac_addr, 0);
- skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vlan->vlan_id);
- if (!skb) {
- pr_err("%s: Error: failed to insert VLAN tag\n",
- bond->dev->name);
- continue;
- }
- }
-
- dev_queue_xmit(skb);
+ /* loop through vlans and send one packet for each */
+ rcu_read_lock();
+ netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
+ if (upper->priv_flags & IFF_802_1Q_VLAN)
+ alb_send_lp_vid(slave, mac_addr,
+ vlan_dev_vlan_id(upper));
}
+ rcu_read_unlock();
}
static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
@@ -1173,9 +1174,8 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
{
struct slave *tmp_slave1, *free_mac_slave = NULL;
struct slave *has_bond_addr = bond->curr_active_slave;
- int i;
- if (bond->slave_cnt == 0) {
+ if (list_empty(&bond->slave_list)) {
/* this is the first slave */
return 0;
}
@@ -1196,7 +1196,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
/* The slave's address is equal to the address of the bond.
* Search for a spare address in the bond for this slave.
*/
- bond_for_each_slave(bond, tmp_slave1, i) {
+ bond_for_each_slave(bond, tmp_slave1) {
if (!bond_slave_has_mac(bond, tmp_slave1->perm_hwaddr)) {
/* no slave has tmp_slave1's perm addr
* as its curr addr
@@ -1246,17 +1246,15 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
*/
static int alb_set_mac_address(struct bonding *bond, void *addr)
{
- struct sockaddr sa;
- struct slave *slave, *stop_at;
char tmp_addr[ETH_ALEN];
+ struct slave *slave;
+ struct sockaddr sa;
int res;
- int i;
- if (bond->alb_info.rlb_enabled) {
+ if (bond->alb_info.rlb_enabled)
return 0;
- }
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
/* save net_device's current hw address */
memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
@@ -1276,8 +1274,7 @@ unwind:
sa.sa_family = bond->dev->type;
/* unwind from head to the slave that failed */
- stop_at = slave;
- bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) {
+ bond_for_each_slave_continue_reverse(bond, slave) {
memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
dev_set_mac_address(slave->dev, &sa);
memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN);
@@ -1342,6 +1339,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
/* make sure that the curr_active_slave do not change during tx
*/
+ read_lock(&bond->lock);
read_lock(&bond->curr_slave_lock);
switch (ntohs(skb->protocol)) {
@@ -1446,11 +1444,12 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
}
read_unlock(&bond->curr_slave_lock);
-
+ read_unlock(&bond->lock);
if (res) {
/* no suitable interface, frame not sent */
kfree_skb(skb);
}
+
return NETDEV_TX_OK;
}
@@ -1460,11 +1459,10 @@ void bond_alb_monitor(struct work_struct *work)
alb_work.work);
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct slave *slave;
- int i;
read_lock(&bond->lock);
- if (bond->slave_cnt == 0) {
+ if (list_empty(&bond->slave_list)) {
bond_info->tx_rebalance_counter = 0;
bond_info->lp_counter = 0;
goto re_arm;
@@ -1482,9 +1480,8 @@ void bond_alb_monitor(struct work_struct *work)
*/
read_lock(&bond->curr_slave_lock);
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave)
alb_send_learning_packets(slave, slave->dev->dev_addr);
- }
read_unlock(&bond->curr_slave_lock);
@@ -1496,7 +1493,7 @@ void bond_alb_monitor(struct work_struct *work)
read_lock(&bond->curr_slave_lock);
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
tlb_clear_slave(bond, slave, 1);
if (slave == bond->curr_active_slave) {
SLAVE_TLB_INFO(slave).load =
@@ -1602,9 +1599,8 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
*/
void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave)
{
- if (bond->slave_cnt > 1) {
+ if (!list_empty(&bond->slave_list))
alb_change_hw_addr_on_detach(bond, slave);
- }
tlb_clear_slave(bond, slave, 0);
@@ -1661,9 +1657,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
{
struct slave *swap_slave;
- if (bond->curr_active_slave == new_slave) {
+ if (bond->curr_active_slave == new_slave)
return;
- }
if (bond->curr_active_slave && bond->alb_info.primary_is_promisc) {
dev_set_promiscuity(bond->curr_active_slave->dev, -1);
@@ -1672,11 +1667,10 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
}
swap_slave = bond->curr_active_slave;
- bond->curr_active_slave = new_slave;
+ rcu_assign_pointer(bond->curr_active_slave, new_slave);
- if (!new_slave || (bond->slave_cnt == 0)) {
+ if (!new_slave || list_empty(&bond->slave_list))
return;
- }
/* set the new curr_active_slave to the bonds mac address
* i.e. swap mac addresses of old curr_active_slave and new curr_active_slave
@@ -1689,9 +1683,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
* ignored so we can mess with their MAC addresses without
* fear of interference from transmit activity.
*/
- if (swap_slave) {
+ if (swap_slave)
tlb_clear_slave(bond, swap_slave, 1);
- }
tlb_clear_slave(bond, new_slave, 1);
write_unlock_bh(&bond->curr_slave_lock);
@@ -1768,11 +1761,6 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
{
- if (bond->alb_info.current_alb_vlan &&
- (bond->alb_info.current_alb_vlan->vlan_id == vlan_id)) {
- bond->alb_info.current_alb_vlan = NULL;
- }
-
if (bond->alb_info.rlb_enabled) {
rlb_clear_vlan(bond, vlan_id);
}
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index e7a5b8b37ea3..28d8e4c7dc06 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -53,7 +53,6 @@ struct slave;
#define TLB_NULL_INDEX 0xffffffff
-#define MAX_LP_BURST 3
/* rlb defs */
#define RLB_HASH_TABLE_SIZE 256
@@ -126,7 +125,6 @@ struct rlb_client_info {
u8 assigned; /* checking whether this entry is assigned */
u8 ntt; /* flag - need to transmit client info */
struct slave *slave; /* the slave assigned to this client */
- u8 tag; /* flag - need to tag skb */
unsigned short vlan_id; /* VLAN tag associated with IP address */
};
@@ -170,7 +168,6 @@ struct alb_bond_info {
* rx traffic should be
* rebalanced
*/
- struct vlan_entry *current_alb_vlan;
};
int bond_alb_initialize(struct bonding *bond, int rlb_enabled);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e48cb339c0c6..39e5b1c7ffe2 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -77,6 +77,7 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/pkt_sched.h>
+#include <linux/rculist.h>
#include "bonding.h"
#include "bond_3ad.h"
#include "bond_alb.h"
@@ -106,7 +107,7 @@ static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
static char *arp_validate;
static char *arp_all_targets;
static char *fail_over_mac;
-static int all_slaves_active = 0;
+static int all_slaves_active;
static struct bond_params bonding_defaults;
static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
@@ -273,7 +274,7 @@ const char *bond_mode_name(int mode)
[BOND_MODE_ALB] = "adaptive load balancing",
};
- if (mode < 0 || mode > BOND_MODE_ALB)
+ if (mode < BOND_MODE_ROUNDROBIN || mode > BOND_MODE_ALB)
return "unknown";
return names[mode];
@@ -282,116 +283,6 @@ const char *bond_mode_name(int mode)
/*---------------------------------- VLAN -----------------------------------*/
/**
- * bond_add_vlan - add a new vlan id on bond
- * @bond: bond that got the notification
- * @vlan_id: the vlan id to add
- *
- * Returns -ENOMEM if allocation failed.
- */
-static int bond_add_vlan(struct bonding *bond, unsigned short vlan_id)
-{
- struct vlan_entry *vlan;
-
- pr_debug("bond: %s, vlan id %d\n",
- (bond ? bond->dev->name : "None"), vlan_id);
-
- vlan = kzalloc(sizeof(struct vlan_entry), GFP_KERNEL);
- if (!vlan)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&vlan->vlan_list);
- vlan->vlan_id = vlan_id;
-
- write_lock_bh(&bond->lock);
-
- list_add_tail(&vlan->vlan_list, &bond->vlan_list);
-
- write_unlock_bh(&bond->lock);
-
- pr_debug("added VLAN ID %d on bond %s\n", vlan_id, bond->dev->name);
-
- return 0;
-}
-
-/**
- * bond_del_vlan - delete a vlan id from bond
- * @bond: bond that got the notification
- * @vlan_id: the vlan id to delete
- *
- * returns -ENODEV if @vlan_id was not found in @bond.
- */
-static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id)
-{
- struct vlan_entry *vlan;
- int res = -ENODEV;
-
- pr_debug("bond: %s, vlan id %d\n", bond->dev->name, vlan_id);
-
- block_netpoll_tx();
- write_lock_bh(&bond->lock);
-
- list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- if (vlan->vlan_id == vlan_id) {
- list_del(&vlan->vlan_list);
-
- if (bond_is_lb(bond))
- bond_alb_clear_vlan(bond, vlan_id);
-
- pr_debug("removed VLAN ID %d from bond %s\n",
- vlan_id, bond->dev->name);
-
- kfree(vlan);
-
- res = 0;
- goto out;
- }
- }
-
- pr_debug("couldn't find VLAN ID %d in bond %s\n",
- vlan_id, bond->dev->name);
-
-out:
- write_unlock_bh(&bond->lock);
- unblock_netpoll_tx();
- return res;
-}
-
-/**
- * bond_next_vlan - safely skip to the next item in the vlans list.
- * @bond: the bond we're working on
- * @curr: item we're advancing from
- *
- * Returns %NULL if list is empty, bond->next_vlan if @curr is %NULL,
- * or @curr->next otherwise (even if it is @curr itself again).
- *
- * Caller must hold bond->lock
- */
-struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
-{
- struct vlan_entry *next, *last;
-
- if (list_empty(&bond->vlan_list))
- return NULL;
-
- if (!curr) {
- next = list_entry(bond->vlan_list.next,
- struct vlan_entry, vlan_list);
- } else {
- last = list_entry(bond->vlan_list.prev,
- struct vlan_entry, vlan_list);
- if (last == curr) {
- next = list_entry(bond->vlan_list.next,
- struct vlan_entry, vlan_list);
- } else {
- next = list_entry(curr->vlan_list.next,
- struct vlan_entry, vlan_list);
- }
- }
-
- return next;
-}
-
-/**
* bond_dev_queue_xmit - Prepare skb for xmit.
*
* @bond: bond device that got this skb for tx.
@@ -441,28 +332,20 @@ static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
__be16 proto, u16 vid)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave, *stop_at;
- int i, res;
+ struct slave *slave;
+ int res;
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
res = vlan_vid_add(slave->dev, proto, vid);
if (res)
goto unwind;
}
- res = bond_add_vlan(bond, vid);
- if (res) {
- pr_err("%s: Error: Failed to add vlan id %d\n",
- bond_dev->name, vid);
- return res;
- }
-
return 0;
unwind:
- /* unwind from head to the slave that failed */
- stop_at = slave;
- bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at)
+ /* unwind from the slave that failed */
+ bond_for_each_slave_continue_reverse(bond, slave)
vlan_vid_del(slave->dev, proto, vid);
return res;
@@ -478,48 +361,16 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
- int i, res;
- bond_for_each_slave(bond, slave, i)
+ bond_for_each_slave(bond, slave)
vlan_vid_del(slave->dev, proto, vid);
- res = bond_del_vlan(bond, vid);
- if (res) {
- pr_err("%s: Error: Failed to remove vlan id %d\n",
- bond_dev->name, vid);
- return res;
- }
+ if (bond_is_lb(bond))
+ bond_alb_clear_vlan(bond, vid);
return 0;
}
-static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *slave_dev)
-{
- struct vlan_entry *vlan;
- int res;
-
- list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- res = vlan_vid_add(slave_dev, htons(ETH_P_8021Q),
- vlan->vlan_id);
- if (res)
- pr_warning("%s: Failed to add vlan id %d to device %s\n",
- bond->dev->name, vlan->vlan_id,
- slave_dev->name);
- }
-}
-
-static void bond_del_vlans_from_slave(struct bonding *bond,
- struct net_device *slave_dev)
-{
- struct vlan_entry *vlan;
-
- list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- if (!vlan->vlan_id)
- continue;
- vlan_vid_del(slave_dev, htons(ETH_P_8021Q), vlan->vlan_id);
- }
-}
-
/*------------------------------- Link status -------------------------------*/
/*
@@ -532,15 +383,14 @@ static void bond_del_vlans_from_slave(struct bonding *bond,
static int bond_set_carrier(struct bonding *bond)
{
struct slave *slave;
- int i;
- if (bond->slave_cnt == 0)
+ if (list_empty(&bond->slave_list))
goto down;
if (bond->params.mode == BOND_MODE_8023AD)
return bond_3ad_set_carrier(bond);
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
if (slave->link == BOND_LINK_UP) {
if (!netif_carrier_ok(bond->dev)) {
netif_carrier_on(bond->dev);
@@ -681,8 +531,8 @@ static int bond_set_promiscuity(struct bonding *bond, int inc)
}
} else {
struct slave *slave;
- int i;
- bond_for_each_slave(bond, slave, i) {
+
+ bond_for_each_slave(bond, slave) {
err = dev_set_promiscuity(slave->dev, inc);
if (err)
return err;
@@ -705,8 +555,8 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
}
} else {
struct slave *slave;
- int i;
- bond_for_each_slave(bond, slave, i) {
+
+ bond_for_each_slave(bond, slave) {
err = dev_set_allmulti(slave->dev, inc);
if (err)
return err;
@@ -715,15 +565,6 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
return err;
}
-static void __bond_resend_igmp_join_requests(struct net_device *dev)
-{
- struct in_device *in_dev;
-
- in_dev = __in_dev_get_rcu(dev);
- if (in_dev)
- ip_mc_rejoin_groups(in_dev);
-}
-
/*
* Retrieve the list of registered multicast addresses for the bonding
* device and retransmit an IGMP JOIN request to the current active
@@ -731,33 +572,12 @@ static void __bond_resend_igmp_join_requests(struct net_device *dev)
*/
static void bond_resend_igmp_join_requests(struct bonding *bond)
{
- struct net_device *bond_dev, *vlan_dev, *upper_dev;
- struct vlan_entry *vlan;
-
- read_lock(&bond->lock);
- rcu_read_lock();
-
- bond_dev = bond->dev;
-
- /* rejoin all groups on bond device */
- __bond_resend_igmp_join_requests(bond_dev);
-
- /*
- * if bond is enslaved to a bridge,
- * then rejoin all groups on its master
- */
- upper_dev = netdev_master_upper_dev_get_rcu(bond_dev);
- if (upper_dev && upper_dev->priv_flags & IFF_EBRIDGE)
- __bond_resend_igmp_join_requests(upper_dev);
-
- /* rejoin all groups on vlan devices */
- list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- vlan_dev = __vlan_find_dev_deep(bond_dev, htons(ETH_P_8021Q),
- vlan->vlan_id);
- if (vlan_dev)
- __bond_resend_igmp_join_requests(vlan_dev);
+ if (!rtnl_trylock()) {
+ queue_delayed_work(bond->wq, &bond->mcast_work, 1);
+ return;
}
- rcu_read_unlock();
+ call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);
+ rtnl_unlock();
/* We use curr_slave_lock to protect against concurrent access to
* igmp_retrans from multiple running instances of this function and
@@ -769,7 +589,6 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
}
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
}
static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
@@ -808,6 +627,8 @@ static void bond_hw_addr_flush(struct net_device *bond_dev,
static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
struct slave *old_active)
{
+ ASSERT_RTNL();
+
if (old_active) {
if (bond->dev->flags & IFF_PROMISC)
dev_set_promiscuity(old_active->dev, -1);
@@ -966,9 +787,8 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
new_active = bond->curr_active_slave;
if (!new_active) { /* there were no active slaves left */
- if (bond->slave_cnt > 0) /* found one slave */
- new_active = bond->first_slave;
- else
+ new_active = bond_first_slave(bond);
+ if (!new_active)
return NULL; /* still no slave, return NULL */
}
@@ -1008,7 +828,6 @@ static bool bond_should_notify_peers(struct bonding *bond)
test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
return false;
- bond->send_peer_notif--;
return true;
}
@@ -1071,7 +890,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
if (new_active)
bond_set_slave_active_flags(new_active);
} else {
- bond->curr_active_slave = new_active;
+ rcu_assign_pointer(bond->curr_active_slave, new_active);
}
if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
@@ -1115,7 +934,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
((USES_PRIMARY(bond->params.mode) && new_active) ||
bond->params.mode == BOND_MODE_ROUNDROBIN)) {
bond->igmp_retrans = bond->params.resend_igmp;
- queue_delayed_work(bond->wq, &bond->mcast_work, 0);
+ queue_delayed_work(bond->wq, &bond->mcast_work, 1);
}
}
@@ -1161,17 +980,7 @@ void bond_select_active_slave(struct bonding *bond)
*/
static void bond_attach_slave(struct bonding *bond, struct slave *new_slave)
{
- if (bond->first_slave == NULL) { /* attaching the first slave */
- new_slave->next = new_slave;
- new_slave->prev = new_slave;
- bond->first_slave = new_slave;
- } else {
- new_slave->next = bond->first_slave;
- new_slave->prev = bond->first_slave->prev;
- new_slave->next->prev = new_slave;
- new_slave->prev->next = new_slave;
- }
-
+ list_add_tail_rcu(&new_slave->list, &bond->slave_list);
bond->slave_cnt++;
}
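
With slaves on a standard list_head, the iteration helpers used throughout this patch reduce to the generic list macros. A plausible shape for the updated bonding.h definitions (that header is outside this excerpt):

    #define bond_for_each_slave(bond, pos) \
            list_for_each_entry(pos, &(bond)->slave_list, list)

    #define bond_first_slave(bond) \
            list_first_entry_or_null(&(bond)->slave_list, struct slave, list)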
@@ -1187,22 +996,7 @@ static void bond_attach_slave(struct bonding *bond, struct slave *new_slave)
*/
static void bond_detach_slave(struct bonding *bond, struct slave *slave)
{
- if (slave->next)
- slave->next->prev = slave->prev;
-
- if (slave->prev)
- slave->prev->next = slave->next;
-
- if (bond->first_slave == slave) { /* slave is the first slave */
- if (bond->slave_cnt > 1) { /* there are more slave */
- bond->first_slave = slave->next;
- } else {
- bond->first_slave = NULL; /* slave was the last one */
- }
- }
-
- slave->next = NULL;
- slave->prev = NULL;
+ list_del_rcu(&slave->list);
bond->slave_cnt--;
}
@@ -1249,47 +1043,31 @@ static void bond_poll_controller(struct net_device *bond_dev)
{
}
-static void __bond_netpoll_cleanup(struct bonding *bond)
+static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
+ struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
- int i;
- bond_for_each_slave(bond, slave, i)
+ bond_for_each_slave(bond, slave)
if (IS_UP(slave->dev))
slave_disable_netpoll(slave);
}
-static void bond_netpoll_cleanup(struct net_device *bond_dev)
-{
- struct bonding *bond = netdev_priv(bond_dev);
-
- read_lock(&bond->lock);
- __bond_netpoll_cleanup(bond);
- read_unlock(&bond->lock);
-}
static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp)
{
struct bonding *bond = netdev_priv(dev);
struct slave *slave;
- int i, err = 0;
+ int err = 0;
- read_lock(&bond->lock);
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
err = slave_enable_netpoll(slave);
if (err) {
- __bond_netpoll_cleanup(bond);
+ bond_netpoll_cleanup(dev);
break;
}
}
- read_unlock(&bond->lock);
return err;
}
-
-static struct netpoll_info *bond_netpoll_info(struct bonding *bond)
-{
- return bond->dev->npinfo;
-}
-
#else
static inline int slave_enable_netpoll(struct slave *slave)
{
@@ -1306,34 +1084,29 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev)
/*---------------------------------- IOCTL ----------------------------------*/
static netdev_features_t bond_fix_features(struct net_device *dev,
- netdev_features_t features)
+ netdev_features_t features)
{
- struct slave *slave;
struct bonding *bond = netdev_priv(dev);
netdev_features_t mask;
- int i;
-
- read_lock(&bond->lock);
+ struct slave *slave;
- if (!bond->first_slave) {
+ if (list_empty(&bond->slave_list)) {
/* Disable adding VLANs to empty bond. But why? --mq */
features |= NETIF_F_VLAN_CHALLENGED;
- goto out;
+ return features;
}
mask = features;
features &= ~NETIF_F_ONE_FOR_ALL;
features |= NETIF_F_ALL_FOR_ALL;
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
features = netdev_increment_features(features,
slave->dev->features,
mask);
}
features = netdev_add_tso_features(features, mask);
-out:
- read_unlock(&bond->lock);
return features;
}
@@ -1343,21 +1116,18 @@ out:
static void bond_compute_features(struct bonding *bond)
{
- struct slave *slave;
- struct net_device *bond_dev = bond->dev;
+ unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
netdev_features_t vlan_features = BOND_VLAN_FEATURES;
unsigned short max_hard_header_len = ETH_HLEN;
unsigned int gso_max_size = GSO_MAX_SIZE;
+ struct net_device *bond_dev = bond->dev;
u16 gso_max_segs = GSO_MAX_SEGS;
- int i;
- unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
-
- read_lock(&bond->lock);
+ struct slave *slave;
- if (!bond->first_slave)
+ if (list_empty(&bond->slave_list))
goto done;
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
vlan_features = netdev_increment_features(vlan_features,
slave->dev->vlan_features, BOND_VLAN_FEATURES);
@@ -1378,8 +1148,6 @@ done:
flags = bond_dev->priv_flags & ~IFF_XMIT_DST_RELEASE;
bond_dev->priv_flags = flags | dst_release_flag;
- read_unlock(&bond->lock);
-
netdev_change_features(bond_dev);
}
@@ -1545,7 +1313,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
* bond ether type mutual exclusion - don't allow slaves of dissimilar
* ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond
*/
- if (bond->slave_cnt == 0) {
+ if (list_empty(&bond->slave_list)) {
if (bond_dev->type != slave_dev->type) {
pr_debug("%s: change device type from %d to %d\n",
bond_dev->name,
@@ -1584,7 +1352,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
if (slave_ops->ndo_set_mac_address == NULL) {
- if (bond->slave_cnt == 0) {
+ if (list_empty(&bond->slave_list)) {
pr_warning("%s: Warning: The first slave device specified does not support setting the MAC address. Setting fail_over_mac to active.",
bond_dev->name);
bond->params.fail_over_mac = BOND_FOM_ACTIVE;
@@ -1600,7 +1368,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
/* If this is the first slave, then we need to set the master's hardware
* address to be the same as the slave's. */
- if (!bond->slave_cnt && bond->dev->addr_assign_type == NET_ADDR_RANDOM)
+ if (list_empty(&bond->slave_list) &&
+ bond->dev->addr_assign_type == NET_ADDR_RANDOM)
bond_set_dev_addr(bond->dev, slave_dev);
new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
@@ -1608,7 +1377,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
res = -ENOMEM;
goto err_undo_flags;
}
-
+ INIT_LIST_HEAD(&new_slave->list);
/*
* Set the new_slave's queue_id to be zero. Queue ID mapping
* is set via sysfs or module option if desired.
@@ -1703,7 +1472,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
dev_mc_add(slave_dev, lacpdu_multicast);
}
- bond_add_vlans_on_slave(bond, slave_dev);
+ res = vlan_vids_add_by_dev(slave_dev, bond_dev);
+ if (res) {
+ pr_err("%s: Error: Couldn't add bond vlan ids to %s\n",
+ bond_dev->name, slave_dev->name);
+ goto err_close;
+ }
write_lock_bh(&bond->lock);
@@ -1794,15 +1568,18 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
*/
bond_set_slave_inactive_flags(new_slave);
/* if this is the first slave */
- if (bond->slave_cnt == 1) {
+ if (bond_first_slave(bond) == new_slave) {
SLAVE_AD_INFO(new_slave).id = 1;
/* Initialize AD with the number of times that the AD timer is called in 1 second
* can be called only after the mac address of the bond is set
*/
bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
} else {
+ struct slave *prev_slave;
+
+ prev_slave = bond_prev_slave(bond, new_slave);
SLAVE_AD_INFO(new_slave).id =
- SLAVE_AD_INFO(new_slave->prev).id + 1;
+ SLAVE_AD_INFO(prev_slave).id + 1;
}
bond_3ad_bind_slave(new_slave);
@@ -1824,7 +1601,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
* so we can change it without calling change_active_interface()
*/
if (!bond->curr_active_slave && new_slave->link == BOND_LINK_UP)
- bond->curr_active_slave = new_slave;
+ rcu_assign_pointer(bond->curr_active_slave, new_slave);
break;
} /* switch(bond_mode) */
@@ -1834,7 +1611,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_set_carrier(bond);
#ifdef CONFIG_NET_POLL_CONTROLLER
- slave_dev->npinfo = bond_netpoll_info(bond);
+ slave_dev->npinfo = bond->dev->npinfo;
if (slave_dev->npinfo) {
if (slave_enable_netpoll(new_slave)) {
read_unlock(&bond->lock);
@@ -1876,7 +1653,7 @@ err_detach:
if (!USES_PRIMARY(bond->params.mode))
bond_hw_addr_flush(bond_dev, slave_dev);
- bond_del_vlans_from_slave(bond, slave_dev);
+ vlan_vids_del_by_dev(slave_dev, bond_dev);
write_lock_bh(&bond->lock);
bond_detach_slave(bond, new_slave);
if (bond->primary_slave == new_slave)
@@ -1921,7 +1698,7 @@ err_free:
err_undo_flags:
bond_compute_features(bond);
/* Enslave of first slave has failed and we need to fix master's mac */
- if (bond->slave_cnt == 0 &&
+ if (list_empty(&bond->slave_list) &&
ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr))
eth_hw_addr_random(bond_dev);
@@ -1977,15 +1754,6 @@ static int __bond_release_one(struct net_device *bond_dev,
netdev_rx_handler_unregister(slave_dev);
write_lock_bh(&bond->lock);
- if (!all && !bond->params.fail_over_mac) {
- if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) &&
- bond->slave_cnt > 1)
- pr_warning("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
- bond_dev->name, slave_dev->name,
- slave->perm_hwaddr,
- bond_dev->name, slave_dev->name);
- }
-
/* Inform AD package of unbinding of slave. */
if (bond->params.mode == BOND_MODE_8023AD) {
/* must be called before the slave is
@@ -2006,6 +1774,15 @@ static int __bond_release_one(struct net_device *bond_dev,
/* release the slave from its bond */
bond_detach_slave(bond, slave);
+ if (!all && !bond->params.fail_over_mac) {
+ if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) &&
+ !list_empty(&bond->slave_list))
+ pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
+ bond_dev->name, slave_dev->name,
+ slave->perm_hwaddr,
+ bond_dev->name, slave_dev->name);
+ }
+
if (bond->primary_slave == slave)
bond->primary_slave = NULL;
@@ -2024,7 +1801,7 @@ static int __bond_release_one(struct net_device *bond_dev,
}
if (all) {
- bond->curr_active_slave = NULL;
+ rcu_assign_pointer(bond->curr_active_slave, NULL);
} else if (oldcurrent == slave) {
/*
* Note that we hold RTNL over this sequence, so there
@@ -2042,11 +1819,11 @@ static int __bond_release_one(struct net_device *bond_dev,
write_lock_bh(&bond->lock);
}
- if (bond->slave_cnt == 0) {
+ if (list_empty(&bond->slave_list)) {
bond_set_carrier(bond);
eth_hw_addr_random(bond_dev);
- if (bond_vlan_used(bond)) {
+ if (vlan_uses_dev(bond_dev)) {
pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
bond_dev->name, bond_dev->name);
pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
@@ -2056,8 +1833,9 @@ static int __bond_release_one(struct net_device *bond_dev,
write_unlock_bh(&bond->lock);
unblock_netpoll_tx();
+ synchronize_rcu();
- if (bond->slave_cnt == 0) {
+ if (list_empty(&bond->slave_list)) {
call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
}
@@ -2071,7 +1849,7 @@ static int __bond_release_one(struct net_device *bond_dev,
/* must do this from outside any spinlocks */
bond_destroy_slave_symlinks(bond_dev, slave_dev);
- bond_del_vlans_from_slave(bond, slave_dev);
+ vlan_vids_del_by_dev(slave_dev, bond_dev);
/* If the mode USES_PRIMARY, then this cases was handled above by
* bond_change_active_slave(..., NULL)
@@ -2128,7 +1906,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
int ret;
ret = bond_release(bond_dev, slave_dev);
- if ((ret == 0) && (bond->slave_cnt == 0)) {
+ if (ret == 0 && list_empty(&bond->slave_list)) {
bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
pr_info("%s: destroying bond %s.\n",
bond_dev->name, bond_dev->name);
@@ -2165,23 +1943,19 @@ static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_devi
read_lock(&bond->lock);
- read_lock(&bond->curr_slave_lock);
old_active = bond->curr_active_slave;
- read_unlock(&bond->curr_slave_lock);
-
new_active = bond_get_slave_by_dev(bond, slave_dev);
-
/*
* Changing to the current active: do nothing; return success.
*/
- if (new_active && (new_active == old_active)) {
+ if (new_active && new_active == old_active) {
read_unlock(&bond->lock);
return 0;
}
- if ((new_active) &&
- (old_active) &&
- (new_active->link == BOND_LINK_UP) &&
+ if (new_active &&
+ old_active &&
+ new_active->link == BOND_LINK_UP &&
IS_UP(new_active->dev)) {
block_netpoll_tx();
write_lock_bh(&bond->curr_slave_lock);
@@ -2213,13 +1987,12 @@ static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
{
struct bonding *bond = netdev_priv(bond_dev);
+ int i = 0, res = -ENODEV;
struct slave *slave;
- int i, res = -ENODEV;
read_lock(&bond->lock);
-
- bond_for_each_slave(bond, slave, i) {
- if (i == (int)info->slave_id) {
+ bond_for_each_slave(bond, slave) {
+ if (i++ == (int)info->slave_id) {
res = 0;
strcpy(info->slave_name, slave->dev->name);
info->link = slave->link;
@@ -2228,7 +2001,6 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
break;
}
}
-
read_unlock(&bond->lock);
return res;
@@ -2239,13 +2011,13 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
static int bond_miimon_inspect(struct bonding *bond)
{
+ int link_state, commit = 0;
struct slave *slave;
- int i, link_state, commit = 0;
bool ignore_updelay;
ignore_updelay = !bond->curr_active_slave ? true : false;
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
slave->new_link = BOND_LINK_NOCHANGE;
link_state = bond_check_dev_link(bond, slave->dev, 0);
@@ -2340,9 +2112,8 @@ static int bond_miimon_inspect(struct bonding *bond)
static void bond_miimon_commit(struct bonding *bond)
{
struct slave *slave;
- int i;
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
switch (slave->new_link) {
case BOND_LINK_NOCHANGE:
continue;
@@ -2447,7 +2218,7 @@ void bond_mii_monitor(struct work_struct *work)
delay = msecs_to_jiffies(bond->params.miimon);
- if (bond->slave_cnt == 0)
+ if (list_empty(&bond->slave_list))
goto re_arm;
should_notify_peers = bond_should_notify_peers(bond);
@@ -2479,35 +2250,32 @@ re_arm:
read_unlock(&bond->lock);
if (should_notify_peers) {
- if (!rtnl_trylock()) {
- read_lock(&bond->lock);
- bond->send_peer_notif++;
- read_unlock(&bond->lock);
+ if (!rtnl_trylock())
return;
- }
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
rtnl_unlock();
}
}
-static int bond_has_this_ip(struct bonding *bond, __be32 ip)
+static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
{
- struct vlan_entry *vlan;
- struct net_device *vlan_dev;
+ struct net_device *upper;
+ struct list_head *iter;
+ bool ret = false;
if (ip == bond_confirm_addr(bond->dev, 0, ip))
- return 1;
+ return true;
- list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- rcu_read_lock();
- vlan_dev = __vlan_find_dev_deep(bond->dev, htons(ETH_P_8021Q),
- vlan->vlan_id);
- rcu_read_unlock();
- if (vlan_dev && ip == bond_confirm_addr(vlan_dev, 0, ip))
- return 1;
+ rcu_read_lock();
+ netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
+ if (ip == bond_confirm_addr(upper, 0, ip)) {
+ ret = true;
+ break;
+ }
}
+ rcu_read_unlock();
- return 0;
+ return ret;
}
/*
@@ -2542,81 +2310,79 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_
static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
{
- int i, vlan_id;
- __be32 *targets = bond->params.arp_targets;
- struct vlan_entry *vlan;
- struct net_device *vlan_dev = NULL;
+ struct net_device *upper, *vlan_upper;
+ struct list_head *iter, *vlan_iter;
struct rtable *rt;
+ __be32 *targets = bond->params.arp_targets, addr;
+ int i, vlan_id;
- for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
- __be32 addr;
- if (!targets[i])
- break;
+ for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
pr_debug("basa: target %pI4\n", &targets[i]);
- if (!bond_vlan_used(bond)) {
- pr_debug("basa: empty vlan: arp_send\n");
- addr = bond_confirm_addr(bond->dev, targets[i], 0);
- bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
- addr, 0);
- continue;
- }
- /*
- * If VLANs are configured, we do a route lookup to
- * determine which VLAN interface would be used, so we
- * can tag the ARP with the proper VLAN tag.
- */
+ /* Find out through which dev should the packet go */
rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
RTO_ONLINK, 0);
if (IS_ERR(rt)) {
- if (net_ratelimit()) {
- pr_warning("%s: no route to arp_ip_target %pI4\n",
- bond->dev->name, &targets[i]);
- }
+ pr_debug("%s: no route to arp_ip_target %pI4\n",
+ bond->dev->name, &targets[i]);
continue;
}
- /*
- * This target is not on a VLAN
+ vlan_id = 0;
+
+ /* bond device itself */
+ if (rt->dst.dev == bond->dev)
+ goto found;
+
+ rcu_read_lock();
+ /* first we search only for vlan devices. for every vlan
+ * found we verify its upper dev list, searching for the
+ * rt->dst.dev. If found we save the tag of the vlan and
+ * proceed to send the packet.
+ *
+ * TODO: QinQ?
*/
- if (rt->dst.dev == bond->dev) {
- ip_rt_put(rt);
- pr_debug("basa: rtdev == bond->dev: arp_send\n");
- addr = bond_confirm_addr(bond->dev, targets[i], 0);
- bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
- addr, 0);
- continue;
+ netdev_for_each_upper_dev_rcu(bond->dev, vlan_upper, vlan_iter) {
+ if (!is_vlan_dev(vlan_upper))
+ continue;
+ netdev_for_each_upper_dev_rcu(vlan_upper, upper, iter) {
+ if (upper == rt->dst.dev) {
+ vlan_id = vlan_dev_vlan_id(vlan_upper);
+ rcu_read_unlock();
+ goto found;
+ }
+ }
}
- vlan_id = 0;
- list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- rcu_read_lock();
- vlan_dev = __vlan_find_dev_deep(bond->dev,
- htons(ETH_P_8021Q),
- vlan->vlan_id);
- rcu_read_unlock();
- if (vlan_dev == rt->dst.dev) {
- vlan_id = vlan->vlan_id;
- pr_debug("basa: vlan match on %s %d\n",
- vlan_dev->name, vlan_id);
- break;
+ /* if the device we're looking for is not on top of any of
+ * our upper vlans, then just search for any dev that
+ * matches, and in case it's a vlan - save the id
+ */
+ netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
+ if (upper == rt->dst.dev) {
+ /* if it's a vlan - get its VID */
+ if (is_vlan_dev(upper))
+ vlan_id = vlan_dev_vlan_id(upper);
+
+ rcu_read_unlock();
+ goto found;
}
}
+ rcu_read_unlock();
- if (vlan_id && vlan_dev) {
- ip_rt_put(rt);
- addr = bond_confirm_addr(vlan_dev, targets[i], 0);
- bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
- addr, vlan_id);
- continue;
- }
+ /* Not our device - skip */
+ pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
+ bond->dev->name, &targets[i],
+ rt->dst.dev ? rt->dst.dev->name : "NULL");
- if (net_ratelimit()) {
- pr_warning("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
- bond->dev->name, &targets[i],
- rt->dst.dev ? rt->dst.dev->name : "NULL");
- }
ip_rt_put(rt);
+ continue;
+
+found:
+ addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
+ ip_rt_put(rt);
+ bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
+ addr, vlan_id);
}
}
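
For reference, the lookup above does a two-pass walk: vlan uppers (and their
own upper lists) first, so the ARP can carry the right tag, then any direct
upper device. A minimal userspace sketch of the idea, flattened to one level;
the nested vlan-upper walk is omitted, and every name here (struct updev,
find_out_dev) is illustrative rather than kernel API:

#include <stdbool.h>
#include <stdio.h>

struct updev { int id; bool is_vlan; int vid; };

/* One-level model of bond_arp_send_all()'s device resolution:
 * routing via the bond itself means "untagged", a matching vlan
 * upper means "tag with its vid", no match means skip the target.
 */
static bool find_out_dev(const struct updev *uppers, int n,
			 int bond_id, int dst_dev, int *vlan_id)
{
	*vlan_id = 0;
	if (dst_dev == bond_id)			/* rt->dst.dev == bond->dev */
		return true;
	for (int i = 0; i < n; i++) {
		if (uppers[i].id != dst_dev)
			continue;
		if (uppers[i].is_vlan)		/* save the tag */
			*vlan_id = uppers[i].vid;
		return true;
	}
	return false;				/* not our device: skip */
}

int main(void)
{
	struct updev uppers[] = { { 10, false, 0 }, { 20, true, 100 } };
	int vid;

	if (find_out_dev(uppers, 2, 1, 20, &vid))
		printf("arp via vlan_id=%d\n", vid);	/* prints 100 */
	return 0;
}
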
@@ -2713,6 +2479,20 @@ out_unlock:
return RX_HANDLER_ANOTHER;
}
+/* function to verify if we're in the arp_interval timeslice; returns true if
+ * (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval +
+ * arp_interval/2). The arp_interval/2 slack is needed for really fast networks.
+ */
+static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
+ int mod)
+{
+ int delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
+
+ return time_in_range(jiffies,
+ last_act - delta_in_ticks,
+ last_act + mod * delta_in_ticks + delta_in_ticks/2);
+}
+
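
The helper's window math is easy to sanity-check in isolation. A minimal
userspace sketch of the same check, assuming one jiffy per millisecond (so
msecs_to_jiffies() becomes the identity) and ignoring the jiffies wraparound
that the kernel's time_in_range() handles:

#include <assert.h>
#include <stdbool.h>

/* Plain-integer mirror of time_in_range()/bond_time_in_interval() */
static bool in_range(unsigned long now, unsigned long lo, unsigned long hi)
{
	return now >= lo && now <= hi;
}

static bool time_in_interval(unsigned long arp_interval,
			     unsigned long last_act,
			     unsigned long now, int mod)
{
	unsigned long delta = arp_interval;	/* msecs_to_jiffies() ~ 1:1 */

	return in_range(now, last_act - delta,
			last_act + mod * delta + delta / 2);
}

int main(void)
{
	/* interval 100, last activity at t=1000: window is [900, 1150] */
	assert(time_in_interval(100, 1000, 1050, 1));
	assert(time_in_interval(100, 1000, 1150, 1));	/* half-interval slack */
	assert(!time_in_interval(100, 1000, 1151, 1));
	assert(time_in_interval(100, 1000, 1249, 2));	/* mod=2: [900, 1250] */
	return 0;
}
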
/*
* this function is called regularly to monitor each slave's link
* ensuring that traffic is being sent and received when arp monitoring
@@ -2726,21 +2506,13 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
arp_work.work);
struct slave *slave, *oldcurrent;
int do_failover = 0;
- int delta_in_ticks, extra_ticks;
- int i;
read_lock(&bond->lock);
- delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
- extra_ticks = delta_in_ticks / 2;
-
- if (bond->slave_cnt == 0)
+ if (list_empty(&bond->slave_list))
goto re_arm;
- read_lock(&bond->curr_slave_lock);
oldcurrent = bond->curr_active_slave;
- read_unlock(&bond->curr_slave_lock);
-
/* see if any of the previous devices are up now (i.e. they have
* xmt and rcv traffic). the curr_active_slave does not come into
* the picture unless it is null. also, slave->jiffies is not needed
@@ -2749,16 +2521,12 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
* TODO: what about up/down delay in arp mode? it wasn't here before
* so it can wait
*/
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
unsigned long trans_start = dev_trans_start(slave->dev);
if (slave->link != BOND_LINK_UP) {
- if (time_in_range(jiffies,
- trans_start - delta_in_ticks,
- trans_start + delta_in_ticks + extra_ticks) &&
- time_in_range(jiffies,
- slave->dev->last_rx - delta_in_ticks,
- slave->dev->last_rx + delta_in_ticks + extra_ticks)) {
+ if (bond_time_in_interval(bond, trans_start, 1) &&
+ bond_time_in_interval(bond, slave->dev->last_rx, 1)) {
slave->link = BOND_LINK_UP;
bond_set_active_slave(slave);
@@ -2786,12 +2554,8 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
* when the source ip is 0, so don't take the link down
* if we don't know our ip yet
*/
- if (!time_in_range(jiffies,
- trans_start - delta_in_ticks,
- trans_start + 2 * delta_in_ticks + extra_ticks) ||
- !time_in_range(jiffies,
- slave->dev->last_rx - delta_in_ticks,
- slave->dev->last_rx + 2 * delta_in_ticks + extra_ticks)) {
+ if (!bond_time_in_interval(bond, trans_start, 2) ||
+ !bond_time_in_interval(bond, slave->dev->last_rx, 2)) {
slave->link = BOND_LINK_DOWN;
bond_set_backup_slave(slave);
@@ -2831,7 +2595,8 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
re_arm:
if (bond->params.arp_interval)
- queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
+ queue_delayed_work(bond->wq, &bond->arp_work,
+ msecs_to_jiffies(bond->params.arp_interval));
read_unlock(&bond->lock);
}
@@ -2844,32 +2609,21 @@ re_arm:
*
* Called with bond->lock held for read.
*/
-static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
+static int bond_ab_arp_inspect(struct bonding *bond)
{
+ unsigned long trans_start, last_rx;
struct slave *slave;
- int i, commit = 0;
- unsigned long trans_start;
- int extra_ticks;
-
- /* All the time comparisons below need some extra time. Otherwise, on
- * fast networks the ARP probe/reply may arrive within the same jiffy
- * as it was sent. Then, the next time the ARP monitor is run, one
- * arp_interval will already have passed in the comparisons.
- */
- extra_ticks = delta_in_ticks / 2;
+ int commit = 0;
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
slave->new_link = BOND_LINK_NOCHANGE;
+ last_rx = slave_last_rx(bond, slave);
if (slave->link != BOND_LINK_UP) {
- if (time_in_range(jiffies,
- slave_last_rx(bond, slave) - delta_in_ticks,
- slave_last_rx(bond, slave) + delta_in_ticks + extra_ticks)) {
-
+ if (bond_time_in_interval(bond, last_rx, 1)) {
slave->new_link = BOND_LINK_UP;
commit++;
}
-
continue;
}
@@ -2878,9 +2632,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
* active. This avoids bouncing, as the last receive
* times need a full ARP monitor cycle to be updated.
*/
- if (time_in_range(jiffies,
- slave->jiffies - delta_in_ticks,
- slave->jiffies + 2 * delta_in_ticks + extra_ticks))
+ if (bond_time_in_interval(bond, slave->jiffies, 2))
continue;
/*
@@ -2898,10 +2650,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
*/
if (!bond_is_active_slave(slave) &&
!bond->current_arp_slave &&
- !time_in_range(jiffies,
- slave_last_rx(bond, slave) - delta_in_ticks,
- slave_last_rx(bond, slave) + 3 * delta_in_ticks + extra_ticks)) {
-
+ !bond_time_in_interval(bond, last_rx, 3)) {
slave->new_link = BOND_LINK_DOWN;
commit++;
}
@@ -2914,13 +2663,8 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
*/
trans_start = dev_trans_start(slave->dev);
if (bond_is_active_slave(slave) &&
- (!time_in_range(jiffies,
- trans_start - delta_in_ticks,
- trans_start + 2 * delta_in_ticks + extra_ticks) ||
- !time_in_range(jiffies,
- slave_last_rx(bond, slave) - delta_in_ticks,
- slave_last_rx(bond, slave) + 2 * delta_in_ticks + extra_ticks))) {
-
+ (!bond_time_in_interval(bond, trans_start, 2) ||
+ !bond_time_in_interval(bond, last_rx, 2))) {
slave->new_link = BOND_LINK_DOWN;
commit++;
}
@@ -2935,24 +2679,21 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
*
* Called with RTNL and bond->lock for read.
*/
-static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
+static void bond_ab_arp_commit(struct bonding *bond)
{
- struct slave *slave;
- int i;
unsigned long trans_start;
+ struct slave *slave;
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
switch (slave->new_link) {
case BOND_LINK_NOCHANGE:
continue;
case BOND_LINK_UP:
trans_start = dev_trans_start(slave->dev);
- if ((!bond->curr_active_slave &&
- time_in_range(jiffies,
- trans_start - delta_in_ticks,
- trans_start + delta_in_ticks + delta_in_ticks / 2)) ||
- bond->curr_active_slave != slave) {
+ if (bond->curr_active_slave != slave ||
+ (!bond->curr_active_slave &&
+ bond_time_in_interval(bond, trans_start, 1))) {
slave->link = BOND_LINK_UP;
if (bond->current_arp_slave) {
bond_set_slave_inactive_flags(
@@ -3014,7 +2755,7 @@ do_failover:
*/
static void bond_ab_arp_probe(struct bonding *bond)
{
- struct slave *slave;
+ struct slave *slave, *next_slave;
int i;
read_lock(&bond->curr_slave_lock);
@@ -3038,7 +2779,7 @@ static void bond_ab_arp_probe(struct bonding *bond)
*/
if (!bond->current_arp_slave) {
- bond->current_arp_slave = bond->first_slave;
+ bond->current_arp_slave = bond_first_slave(bond);
if (!bond->current_arp_slave)
return;
}
@@ -3046,7 +2787,8 @@ static void bond_ab_arp_probe(struct bonding *bond)
bond_set_slave_inactive_flags(bond->current_arp_slave);
/* search for next candidate */
- bond_for_each_slave_from(bond, slave, i, bond->current_arp_slave->next) {
+ next_slave = bond_next_slave(bond, bond->current_arp_slave);
+ bond_for_each_slave_from(bond, slave, i, next_slave) {
if (IS_UP(slave->dev)) {
slave->link = BOND_LINK_BACK;
bond_set_slave_active_flags(slave);
@@ -3087,12 +2829,12 @@ void bond_activebackup_arp_mon(struct work_struct *work)
delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
- if (bond->slave_cnt == 0)
+ if (list_empty(&bond->slave_list))
goto re_arm;
should_notify_peers = bond_should_notify_peers(bond);
- if (bond_ab_arp_inspect(bond, delta_in_ticks)) {
+ if (bond_ab_arp_inspect(bond)) {
read_unlock(&bond->lock);
/* Race avoidance with bond_close flush of workqueue */
@@ -3105,7 +2847,7 @@ void bond_activebackup_arp_mon(struct work_struct *work)
read_lock(&bond->lock);
- bond_ab_arp_commit(bond, delta_in_ticks);
+ bond_ab_arp_commit(bond);
read_unlock(&bond->lock);
rtnl_unlock();
@@ -3121,12 +2863,8 @@ re_arm:
read_unlock(&bond->lock);
if (should_notify_peers) {
- if (!rtnl_trylock()) {
- read_lock(&bond->lock);
- bond->send_peer_notif++;
- read_unlock(&bond->lock);
+ if (!rtnl_trylock())
return;
- }
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
rtnl_unlock();
}
@@ -3161,6 +2899,10 @@ static int bond_master_netdev_event(unsigned long event,
case NETDEV_REGISTER:
bond_create_proc_entry(event_bond);
break;
+ case NETDEV_NOTIFY_PEERS:
+ if (event_bond->send_peer_notif)
+ event_bond->send_peer_notif--;
+ break;
default:
break;
}
@@ -3234,6 +2976,10 @@ static int bond_slave_netdev_event(unsigned long event,
case NETDEV_FEAT_CHANGE:
bond_compute_features(bond);
break;
+ case NETDEV_RESEND_IGMP:
+ /* Propagate to master device */
+ call_netdevice_notifiers(event, slave->bond->dev);
+ break;
default:
break;
}
@@ -3403,13 +3149,12 @@ static int bond_open(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
- int i;
/* reset slave->backup and slave->inactive */
read_lock(&bond->lock);
- if (bond->slave_cnt > 0) {
+ if (!list_empty(&bond->slave_list)) {
read_lock(&bond->curr_slave_lock);
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
&& (slave != bond->curr_active_slave)) {
bond_set_slave_inactive_flags(slave);
@@ -3455,17 +3200,10 @@ static int bond_close(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- write_lock_bh(&bond->lock);
- bond->send_peer_notif = 0;
- write_unlock_bh(&bond->lock);
-
bond_work_cancel_all(bond);
- if (bond_is_lb(bond)) {
- /* Must be called only after all
- * slaves have been released
- */
+ bond->send_peer_notif = 0;
+ if (bond_is_lb(bond))
bond_alb_deinitialize(bond);
- }
bond->recv_probe = NULL;
return 0;
@@ -3477,13 +3215,11 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
struct bonding *bond = netdev_priv(bond_dev);
struct rtnl_link_stats64 temp;
struct slave *slave;
- int i;
memset(stats, 0, sizeof(*stats));
read_lock_bh(&bond->lock);
-
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
const struct rtnl_link_stats64 *sstats =
dev_get_stats(slave->dev, &temp);
@@ -3513,7 +3249,6 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors;
stats->tx_window_errors += sstats->tx_window_errors;
}
-
read_unlock_bh(&bond->lock);
return stats;
@@ -3652,41 +3387,35 @@ static void bond_set_rx_mode(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
- int i;
- read_lock(&bond->lock);
+ ASSERT_RTNL();
if (USES_PRIMARY(bond->params.mode)) {
- read_lock(&bond->curr_slave_lock);
- slave = bond->curr_active_slave;
+ slave = rtnl_dereference(bond->curr_active_slave);
if (slave) {
dev_uc_sync(slave->dev, bond_dev);
dev_mc_sync(slave->dev, bond_dev);
}
- read_unlock(&bond->curr_slave_lock);
} else {
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
dev_uc_sync_multiple(slave->dev, bond_dev);
dev_mc_sync_multiple(slave->dev, bond_dev);
}
}
-
- read_unlock(&bond->lock);
}
static int bond_neigh_init(struct neighbour *n)
{
struct bonding *bond = netdev_priv(n->dev);
- struct slave *slave = bond->first_slave;
const struct net_device_ops *slave_ops;
struct neigh_parms parms;
+ struct slave *slave;
int ret;
+ slave = bond_first_slave(bond);
if (!slave)
return 0;
-
slave_ops = slave->dev->netdev_ops;
-
if (!slave_ops->ndo_neigh_setup)
return 0;
@@ -3735,9 +3464,8 @@ static int bond_neigh_setup(struct net_device *dev,
static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave, *stop_at;
+ struct slave *slave;
int res = 0;
- int i;
pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond,
(bond_dev ? bond_dev->name : "None"), new_mtu);
@@ -3757,10 +3485,10 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
* call to the base driver.
*/
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
pr_debug("s %p s->p %p c_m %p\n",
slave,
- slave->prev,
+ bond_prev_slave(bond, slave),
slave->dev->netdev_ops->ndo_change_mtu);
res = dev_set_mtu(slave->dev, new_mtu);
@@ -3785,8 +3513,7 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
unwind:
/* unwind from head to the slave that failed */
- stop_at = slave;
- bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) {
+ bond_for_each_slave_continue_reverse(bond, slave) {
int tmp_res;
tmp_res = dev_set_mtu(slave->dev, bond_dev->mtu);
@@ -3810,9 +3537,8 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
{
struct bonding *bond = netdev_priv(bond_dev);
struct sockaddr *sa = addr, tmp_sa;
- struct slave *slave, *stop_at;
+ struct slave *slave;
int res = 0;
- int i;
if (bond->params.mode == BOND_MODE_ALB)
return bond_alb_set_mac_address(bond_dev, addr);
@@ -3845,7 +3571,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
* call to the base driver.
*/
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
const struct net_device_ops *slave_ops = slave->dev->netdev_ops;
pr_debug("slave %p %s\n", slave, slave->dev->name);
@@ -3877,8 +3603,7 @@ unwind:
tmp_sa.sa_family = bond_dev->type;
/* unwind from head to the slave that failed */
- stop_at = slave;
- bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) {
+ bond_for_each_slave_continue_reverse(bond, slave) {
int tmp_res;
tmp_res = dev_set_mac_address(slave->dev, &tmp_sa);
@@ -3891,12 +3616,50 @@ unwind:
return res;
}
+/**
+ * bond_xmit_slave_id - transmit skb through slave with slave_id
+ * @bond: bonding device that is transmitting
+ * @skb: buffer to transmit
+ * @slave_id: slave id up to slave_cnt-1 through which to transmit
+ *
+ * This function tries to transmit through the slave with slave_id, but if
+ * that fails, it tries to find the first available slave for transmission.
+ * The skb is consumed in all cases, thus the function is void.
+ */
+void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
+{
+ struct slave *slave;
+ int i = slave_id;
+
+ /* Here we start from the slave with slave_id */
+ bond_for_each_slave_rcu(bond, slave) {
+ if (--i < 0) {
+ if (slave_can_tx(slave)) {
+ bond_dev_queue_xmit(bond, skb, slave->dev);
+ return;
+ }
+ }
+ }
+
+ /* Here we start from the first slave up to slave_id */
+ i = slave_id;
+ bond_for_each_slave_rcu(bond, slave) {
+ if (--i < 0)
+ break;
+ if (slave_can_tx(slave)) {
+ bond_dev_queue_xmit(bond, skb, slave->dev);
+ return;
+ }
+ }
+ /* no slave that can tx has been found */
+ kfree_skb(skb);
+}
+
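
The two RCU loops above amount to a wrap-around search that starts at
slave_id and falls back to the front of the list. A minimal userspace sketch
of the same selection over an array, with can_tx[] standing in for
slave_can_tx() and a -1 result standing in for the kfree_skb() drop:

#include <stdbool.h>
#include <stdio.h>

static int pick_slave(const bool *can_tx, int n, int slave_id)
{
	for (int i = slave_id; i < n; i++)	/* from slave_id to the end */
		if (can_tx[i])
			return i;
	for (int i = 0; i < slave_id; i++)	/* wrap: front to slave_id-1 */
		if (can_tx[i])
			return i;
	return -1;				/* no slave that can tx */
}

int main(void)
{
	bool can_tx[] = { true, false, false, true };

	printf("%d\n", pick_slave(can_tx, 4, 1));	/* 3 */
	printf("%d\n", pick_slave(can_tx, 4, 0));	/* 0 */
	return 0;
}
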
static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave, *start_at;
- int i, slave_no, res = 1;
struct iphdr *iph = ip_hdr(skb);
+ struct slave *slave;
/*
* Start with the curr_active_slave that joined the bond as the
@@ -3905,50 +3668,20 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
* send the join/membership reports. The curr_active_slave found
* will send all of this type of traffic.
*/
- if ((iph->protocol == IPPROTO_IGMP) &&
- (skb->protocol == htons(ETH_P_IP))) {
-
- read_lock(&bond->curr_slave_lock);
- slave = bond->curr_active_slave;
- read_unlock(&bond->curr_slave_lock);
-
- if (!slave)
- goto out;
+ if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) {
+ slave = rcu_dereference(bond->curr_active_slave);
+ if (slave && slave_can_tx(slave))
+ bond_dev_queue_xmit(bond, skb, slave->dev);
+ else
+ bond_xmit_slave_id(bond, skb, 0);
} else {
- /*
- * Concurrent TX may collide on rr_tx_counter; we accept
- * that as being rare enough not to justify using an
- * atomic op here.
- */
- slave_no = bond->rr_tx_counter++ % bond->slave_cnt;
-
- bond_for_each_slave(bond, slave, i) {
- slave_no--;
- if (slave_no < 0)
- break;
- }
- }
-
- start_at = slave;
- bond_for_each_slave_from(bond, slave, i, start_at) {
- if (IS_UP(slave->dev) &&
- (slave->link == BOND_LINK_UP) &&
- bond_is_active_slave(slave)) {
- res = bond_dev_queue_xmit(bond, skb, slave->dev);
- break;
- }
- }
-
-out:
- if (res) {
- /* no suitable interface, frame not sent */
- kfree_skb(skb);
+ bond_xmit_slave_id(bond, skb,
+ bond->rr_tx_counter++ % bond->slave_cnt);
}
return NETDEV_TX_OK;
}
-
/*
* in active-backup mode, we know that bond->curr_active_slave is always valid if
* the bond has a usable interface.
@@ -3956,18 +3689,12 @@ out:
static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- int res = 1;
-
- read_lock(&bond->curr_slave_lock);
-
- if (bond->curr_active_slave)
- res = bond_dev_queue_xmit(bond, skb,
- bond->curr_active_slave->dev);
-
- read_unlock(&bond->curr_slave_lock);
+ struct slave *slave;
- if (res)
- /* no suitable interface, frame not sent */
+ slave = rcu_dereference(bond->curr_active_slave);
+ if (slave)
+ bond_dev_queue_xmit(bond, skb, slave->dev);
+ else
kfree_skb(skb);
return NETDEV_TX_OK;
@@ -3981,87 +3708,39 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave, *start_at;
- int slave_no;
- int i;
- int res = 1;
- slave_no = bond->xmit_hash_policy(skb, bond->slave_cnt);
-
- bond_for_each_slave(bond, slave, i) {
- slave_no--;
- if (slave_no < 0)
- break;
- }
-
- start_at = slave;
-
- bond_for_each_slave_from(bond, slave, i, start_at) {
- if (IS_UP(slave->dev) &&
- (slave->link == BOND_LINK_UP) &&
- bond_is_active_slave(slave)) {
- res = bond_dev_queue_xmit(bond, skb, slave->dev);
- break;
- }
- }
-
- if (res) {
- /* no suitable interface, frame not sent */
- kfree_skb(skb);
- }
+ bond_xmit_slave_id(bond, skb,
+ bond->xmit_hash_policy(skb, bond->slave_cnt));
return NETDEV_TX_OK;
}
-/*
- * in broadcast mode, we send everything to all usable interfaces.
- */
+/* in broadcast mode, we send everything to all usable interfaces. */
static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave, *start_at;
- struct net_device *tx_dev = NULL;
- int i;
- int res = 1;
-
- read_lock(&bond->curr_slave_lock);
- start_at = bond->curr_active_slave;
- read_unlock(&bond->curr_slave_lock);
-
- if (!start_at)
- goto out;
+ struct slave *slave = NULL;
- bond_for_each_slave_from(bond, slave, i, start_at) {
- if (IS_UP(slave->dev) &&
- (slave->link == BOND_LINK_UP) &&
- bond_is_active_slave(slave)) {
- if (tx_dev) {
- struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
- if (!skb2) {
- pr_err("%s: Error: bond_xmit_broadcast(): skb_clone() failed\n",
- bond_dev->name);
- continue;
- }
+ bond_for_each_slave_rcu(bond, slave) {
+ if (bond_is_last_slave(bond, slave))
+ break;
+ if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
+ struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
- res = bond_dev_queue_xmit(bond, skb2, tx_dev);
- if (res) {
- kfree_skb(skb2);
- continue;
- }
+ if (!skb2) {
+ pr_err("%s: Error: bond_xmit_broadcast(): skb_clone() failed\n",
+ bond_dev->name);
+ continue;
}
- tx_dev = slave->dev;
+ /* bond_dev_queue_xmit always returns 0 */
+ bond_dev_queue_xmit(bond, skb2, slave->dev);
}
}
-
- if (tx_dev)
- res = bond_dev_queue_xmit(bond, skb, tx_dev);
-
-out:
- if (res)
- /* no suitable interface, frame not sent */
+ if (slave && IS_UP(slave->dev) && slave->link == BOND_LINK_UP)
+ bond_dev_queue_xmit(bond, skb, slave->dev);
+ else
kfree_skb(skb);
- /* frame sent to all suitable interfaces */
return NETDEV_TX_OK;
}
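
The rewritten broadcast path clones the skb for every usable slave except
the last one and transmits the original on the last, saving one clone per
packet compared to the old tx_dev juggling. A minimal sketch of that
clone-for-all-but-last idiom, where send_buf() consumes its buffer the way
bond_dev_queue_xmit() consumes the skb:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void send_buf(int slave, char *buf)
{
	printf("slave %d: %s\n", slave, buf);
	free(buf);				/* consumed, like the skb */
}

static void broadcast(const int *slaves, int n, char *buf)
{
	for (int i = 0; i + 1 < n; i++)		/* all but the last slave */
		send_buf(slaves[i], strdup(buf));
	if (n > 0)
		send_buf(slaves[n - 1], buf);	/* original, no extra clone */
	else
		free(buf);			/* nobody usable: drop */
}

int main(void)
{
	int slaves[] = { 0, 2, 3 };

	broadcast(slaves, 3, strdup("hello"));
	return 0;
}
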
@@ -4089,15 +3768,15 @@ static void bond_set_xmit_hash_policy(struct bonding *bond)
static inline int bond_slave_override(struct bonding *bond,
struct sk_buff *skb)
{
- int i, res = 1;
struct slave *slave = NULL;
struct slave *check_slave;
+ int res = 1;
if (!skb->queue_mapping)
return 1;
/* Find out if any slaves have the same mapping as this skb. */
- bond_for_each_slave(bond, check_slave, i) {
+ bond_for_each_slave_rcu(bond, check_slave) {
if (check_slave->queue_id == skb->queue_mapping) {
slave = check_slave;
break;
@@ -4182,14 +3861,12 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (is_netpoll_tx_blocked(dev))
return NETDEV_TX_BUSY;
- read_lock(&bond->lock);
-
- if (bond->slave_cnt)
+ rcu_read_lock();
+ if (!list_empty(&bond->slave_list))
ret = __bond_start_xmit(skb, dev);
else
kfree_skb(skb);
-
- read_unlock(&bond->lock);
+ rcu_read_unlock();
return ret;
}
@@ -4230,9 +3907,8 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev,
struct ethtool_cmd *ecmd)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave;
- int i;
unsigned long speed = 0;
+ struct slave *slave;
ecmd->duplex = DUPLEX_UNKNOWN;
ecmd->port = PORT_OTHER;
@@ -4243,7 +3919,7 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev,
* this is an accurate maximum.
*/
read_lock(&bond->lock);
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
if (SLAVE_IS_OK(slave)) {
if (slave->speed != SPEED_UNKNOWN)
speed += slave->speed;
@@ -4254,6 +3930,7 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev,
}
ethtool_cmd_speed_set(ecmd, speed ? : SPEED_UNKNOWN);
read_unlock(&bond->lock);
+
return 0;
}
@@ -4317,12 +3994,11 @@ static void bond_setup(struct net_device *bond_dev)
/* initialize rwlocks */
rwlock_init(&bond->lock);
rwlock_init(&bond->curr_slave_lock);
-
+ INIT_LIST_HEAD(&bond->slave_list);
bond->params = bonding_defaults;
/* Initialize pointers */
bond->dev = bond_dev;
- INIT_LIST_HEAD(&bond->vlan_list);
/* Initialize the device entry points */
ether_setup(bond_dev);
@@ -4374,23 +4050,18 @@ static void bond_setup(struct net_device *bond_dev)
static void bond_uninit(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct vlan_entry *vlan, *tmp;
+ struct slave *slave, *tmp_slave;
bond_netpoll_cleanup(bond_dev);
/* Release the bonded slaves */
- while (bond->first_slave != NULL)
- __bond_release_one(bond_dev, bond->first_slave->dev, true);
+ list_for_each_entry_safe(slave, tmp_slave, &bond->slave_list, list)
+ __bond_release_one(bond_dev, slave->dev, true);
pr_info("%s: released all slaves\n", bond_dev->name);
list_del(&bond->bond_list);
bond_debug_unregister(bond);
-
- list_for_each_entry_safe(vlan, tmp, &bond->vlan_list, vlan_list) {
- list_del(&vlan->vlan_list);
- kfree(vlan);
- }
}
/*------------------------- Module initialization ---------------------------*/
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 4060d41f0ee7..20a6ee25bb63 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -12,7 +12,6 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
struct bonding *bond = seq->private;
loff_t off = 0;
struct slave *slave;
- int i;
/* make sure the bond won't be taken away */
rcu_read_lock();
@@ -21,10 +20,9 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
if (*pos == 0)
return SEQ_START_TOKEN;
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave)
if (++off == *pos)
return slave;
- }
return NULL;
}
@@ -36,11 +34,13 @@ static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
++*pos;
if (v == SEQ_START_TOKEN)
- return bond->first_slave;
+ return bond_first_slave(bond);
- slave = slave->next;
+ if (bond_is_last_slave(bond, slave))
+ return NULL;
+ slave = bond_next_slave(bond, slave);
- return (slave == bond->first_slave) ? NULL : slave;
+ return slave;
}
static void bond_info_seq_stop(struct seq_file *seq, void *v)
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index dc36a3d7d9e9..ce4677668e2c 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -209,12 +209,12 @@ void bond_destroy_slave_symlinks(struct net_device *master,
static ssize_t bonding_show_slaves(struct device *d,
struct device_attribute *attr, char *buf)
{
- struct slave *slave;
- int i, res = 0;
struct bonding *bond = to_bond(d);
+ struct slave *slave;
+ int res = 0;
read_lock(&bond->lock);
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
if (res > (PAGE_SIZE - IFNAMSIZ)) {
/* not enough space for another interface name */
if ((PAGE_SIZE - res) > 10)
@@ -227,6 +227,7 @@ static ssize_t bonding_show_slaves(struct device *d,
read_unlock(&bond->lock);
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
+
return res;
}
@@ -325,7 +326,7 @@ static ssize_t bonding_store_mode(struct device *d,
goto out;
}
- if (bond->slave_cnt > 0) {
+ if (!list_empty(&bond->slave_list)) {
pr_err("unable to update mode of %s because it has slaves.\n",
bond->dev->name);
ret = -EPERM;
@@ -501,20 +502,25 @@ static ssize_t bonding_store_fail_over_mac(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value;
+ int new_value, ret = count;
struct bonding *bond = to_bond(d);
- if (bond->slave_cnt != 0) {
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ if (!list_empty(&bond->slave_list)) {
pr_err("%s: Can't alter fail_over_mac with slaves in bond.\n",
bond->dev->name);
- return -EPERM;
+ ret = -EPERM;
+ goto out;
}
new_value = bond_parse_parm(buf, fail_over_mac_tbl);
if (new_value < 0) {
pr_err("%s: Ignoring invalid fail_over_mac value %s.\n",
bond->dev->name, buf);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
bond->params.fail_over_mac = new_value;
@@ -522,7 +528,9 @@ static ssize_t bonding_store_fail_over_mac(struct device *d,
bond->dev->name, fail_over_mac_tbl[new_value].modename,
new_value);
- return count;
+out:
+ rtnl_unlock();
+ return ret;
}
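
The rtnl_trylock()/restart_syscall() dance used here (and in
bonding_store_lacp() below) is the usual way to take RTNL from a sysfs store
handler without risking a deadlock against the sysfs teardown path: rather
than block, the handler bails out and the write(2) is transparently
restarted. A userspace analogue of the idiom, with a pthread mutex standing
in for RTNL and an EAGAIN-style return standing in for restart_syscall():

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

/* Never block on big_lock inside the attribute handler; report
 * "try again" and let the caller (the restarted syscall) retry.
 */
static int store_attr(const char *buf)
{
	if (pthread_mutex_trylock(&big_lock))
		return -EAGAIN;			/* kernel: restart_syscall() */

	printf("applying \"%s\" under big_lock\n", buf);
	pthread_mutex_unlock(&big_lock);
	return 0;
}

int main(void)
{
	while (store_attr("fail_over_mac=active") == -EAGAIN)
		;				/* retry as a restarted write(2) would */
	return 0;
}
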
static DEVICE_ATTR(fail_over_mac, S_IRUGO | S_IWUSR,
@@ -661,7 +669,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
&newtarget);
/* not to race with bond_arp_rcv */
write_lock_bh(&bond->lock);
- bond_for_each_slave(bond, slave, i)
+ bond_for_each_slave(bond, slave)
slave->target_last_arp_rx[ind] = jiffies;
targets[ind] = newtarget;
write_unlock_bh(&bond->lock);
@@ -687,7 +695,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
&newtarget);
write_lock_bh(&bond->lock);
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
targets_rx = slave->target_last_arp_rx;
j = ind;
for (; (j < BOND_MAX_ARP_TARGETS-1) && targets[j+1]; j++)
@@ -844,8 +852,11 @@ static ssize_t bonding_store_lacp(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
struct bonding *bond = to_bond(d);
+ int new_value, ret = count;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
if (bond->dev->flags & IFF_UP) {
pr_err("%s: Unable to update LACP rate because interface is up.\n",
@@ -875,6 +886,8 @@ static ssize_t bonding_store_lacp(struct device *d,
ret = -EINVAL;
}
out:
+ rtnl_unlock();
+
return ret;
}
static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR,
@@ -1078,10 +1091,9 @@ static ssize_t bonding_store_primary(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int i;
- struct slave *slave;
struct bonding *bond = to_bond(d);
char ifname[IFNAMSIZ];
+ struct slave *slave;
if (!rtnl_trylock())
return restart_syscall();
@@ -1107,7 +1119,7 @@ static ssize_t bonding_store_primary(struct device *d,
goto out;
}
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
pr_info("%s: Setting %s as primary slave.\n",
bond->dev->name, slave->dev->name);
@@ -1236,16 +1248,16 @@ static ssize_t bonding_show_active_slave(struct device *d,
struct device_attribute *attr,
char *buf)
{
- struct slave *curr;
struct bonding *bond = to_bond(d);
+ struct slave *curr;
int count = 0;
- read_lock(&bond->curr_slave_lock);
- curr = bond->curr_active_slave;
- read_unlock(&bond->curr_slave_lock);
-
+ rcu_read_lock();
+ curr = rcu_dereference(bond->curr_active_slave);
if (USES_PRIMARY(bond->params.mode) && curr)
count = sprintf(buf, "%s\n", curr->dev->name);
+ rcu_read_unlock();
+
return count;
}
@@ -1253,16 +1265,14 @@ static ssize_t bonding_store_active_slave(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int i;
- struct slave *slave;
- struct slave *old_active = NULL;
- struct slave *new_active = NULL;
+ struct slave *slave, *old_active, *new_active;
struct bonding *bond = to_bond(d);
char ifname[IFNAMSIZ];
if (!rtnl_trylock())
return restart_syscall();
+ old_active = new_active = NULL;
block_netpoll_tx();
read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
@@ -1279,12 +1289,12 @@ static ssize_t bonding_store_active_slave(struct device *d,
if (!strlen(ifname) || buf[0] == '\n') {
pr_info("%s: Clearing current active slave.\n",
bond->dev->name);
- bond->curr_active_slave = NULL;
+ rcu_assign_pointer(bond->curr_active_slave, NULL);
bond_select_active_slave(bond);
goto out;
}
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
old_active = bond->curr_active_slave;
new_active = slave;
@@ -1295,8 +1305,7 @@ static ssize_t bonding_store_active_slave(struct device *d,
bond->dev->name,
slave->dev->name);
goto out;
- }
- else {
+ } else {
if ((new_active) &&
(old_active) &&
(new_active->link == BOND_LINK_UP) &&
@@ -1307,8 +1316,7 @@ static ssize_t bonding_store_active_slave(struct device *d,
slave->dev->name);
bond_change_active_slave(bond,
new_active);
- }
- else {
+ } else {
pr_info("%s: Could not set %s as"
" active slave; either %s is"
" down or the link is down.\n",
@@ -1344,14 +1352,9 @@ static ssize_t bonding_show_mii_status(struct device *d,
struct device_attribute *attr,
char *buf)
{
- struct slave *curr;
struct bonding *bond = to_bond(d);
- read_lock(&bond->curr_slave_lock);
- curr = bond->curr_active_slave;
- read_unlock(&bond->curr_slave_lock);
-
- return sprintf(buf, "%s\n", curr ? "up" : "down");
+ return sprintf(buf, "%s\n", bond->curr_active_slave ? "up" : "down");
}
static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL);
@@ -1470,15 +1473,15 @@ static ssize_t bonding_show_queue_id(struct device *d,
struct device_attribute *attr,
char *buf)
{
- struct slave *slave;
- int i, res = 0;
struct bonding *bond = to_bond(d);
+ struct slave *slave;
+ int res = 0;
if (!rtnl_trylock())
return restart_syscall();
read_lock(&bond->lock);
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
if (res > (PAGE_SIZE - IFNAMSIZ - 6)) {
/* not enough space for another interface_name:queue_id pair */
if ((PAGE_SIZE - res) > 10)
@@ -1493,6 +1496,7 @@ static ssize_t bonding_show_queue_id(struct device *d,
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
rtnl_unlock();
+
return res;
}
@@ -1507,7 +1511,7 @@ static ssize_t bonding_store_queue_id(struct device *d,
struct slave *slave, *update_slave;
struct bonding *bond = to_bond(d);
u16 qid;
- int i, ret = count;
+ int ret = count;
char *delim;
struct net_device *sdev = NULL;
@@ -1542,7 +1546,7 @@ static ssize_t bonding_store_queue_id(struct device *d,
	/* Search for the slave and check for duplicate qids */
update_slave = NULL;
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
if (sdev == slave->dev)
/*
* We don't need to check the matching
@@ -1594,8 +1598,8 @@ static ssize_t bonding_store_slaves_active(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int i, new_value, ret = count;
struct bonding *bond = to_bond(d);
+ int new_value, ret = count;
struct slave *slave;
if (sscanf(buf, "%d", &new_value) != 1) {
@@ -1618,7 +1622,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
}
read_lock(&bond->lock);
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
if (!bond_is_active_slave(slave)) {
if (new_value)
slave->inactive = 0;
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 42d1c6599cba..f7ab16185f68 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -71,6 +71,28 @@
set_fs(fs); \
res; })
+/* slave list primitives */
+#define bond_to_slave(ptr) list_entry(ptr, struct slave, list)
+
+/* IMPORTANT: bond_first/last_slave can return NULL in case of an empty list */
+#define bond_first_slave(bond) \
+ list_first_entry_or_null(&(bond)->slave_list, struct slave, list)
+#define bond_last_slave(bond) \
+ (list_empty(&(bond)->slave_list) ? NULL : \
+ bond_to_slave((bond)->slave_list.prev))
+
+#define bond_is_first_slave(bond, pos) ((pos)->list.prev == &(bond)->slave_list)
+#define bond_is_last_slave(bond, pos) ((pos)->list.next == &(bond)->slave_list)
+
+/* Since bond_first/last_slave can return NULL, these can return NULL too */
+#define bond_next_slave(bond, pos) \
+ (bond_is_last_slave(bond, pos) ? bond_first_slave(bond) : \
+ bond_to_slave((pos)->list.next))
+
+#define bond_prev_slave(bond, pos) \
+ (bond_is_first_slave(bond, pos) ? bond_last_slave(bond) : \
+ bond_to_slave((pos)->list.prev))
+
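
These primitives change the empty-list behaviour relative to the old
first_slave/next/prev chain: bond_first_slave()/bond_last_slave() return
NULL on an empty bond, and callers switch from slave_cnt checks to
list_empty(). A toy userspace model of the circular semantics behind
bond_next_slave() (struct node and the helpers are illustrative only):

#include <stddef.h>
#include <stdio.h>

struct node { struct node *next, *prev; int id; };

static struct node head = { &head, &head, -1 };	/* list head, no payload */

static struct node *first(void)
{
	return head.next == &head ? NULL : head.next;	/* NULL if empty */
}

static struct node *next_of(struct node *p)
{
	return p->next == &head ? first() : p->next;	/* wrap to first */
}

static void push_tail(struct node *n)		/* like list_add_tail() */
{
	n->prev = head.prev;
	n->next = &head;
	head.prev->next = n;
	head.prev = n;
}

int main(void)
{
	struct node a = { .id = 0 }, b = { .id = 1 };

	push_tail(&a);
	push_tail(&b);
	printf("%d\n", next_of(&b)->id);	/* wraps around: prints 0 */
	return 0;
}
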
/**
* bond_for_each_slave_from - iterate the slaves list from a starting point
* @bond: the bond holding this list.
@@ -80,37 +102,33 @@
*
* Caller must hold bond->lock
*/
-#define bond_for_each_slave_from(bond, pos, cnt, start) \
- for (cnt = 0, pos = start; \
- cnt < (bond)->slave_cnt; \
- cnt++, pos = (pos)->next)
+#define bond_for_each_slave_from(bond, pos, cnt, start) \
+ for (cnt = 0, pos = start; pos && cnt < (bond)->slave_cnt; \
+ cnt++, pos = bond_next_slave(bond, pos))
/**
- * bond_for_each_slave_from_to - iterate the slaves list from start point to stop point
- * @bond: the bond holding this list.
- * @pos: current slave.
- * @cnt: counter for number max of moves
- * @start: start point.
- * @stop: stop point.
+ * bond_for_each_slave - iterate over all slaves
+ * @bond: the bond holding this list
+ * @pos: current slave
*
* Caller must hold bond->lock
*/
-#define bond_for_each_slave_from_to(bond, pos, cnt, start, stop) \
- for (cnt = 0, pos = start; \
- ((cnt < (bond)->slave_cnt) && (pos != (stop)->next)); \
- cnt++, pos = (pos)->next)
+#define bond_for_each_slave(bond, pos) \
+ list_for_each_entry(pos, &(bond)->slave_list, list)
+
+/* Caller must have rcu_read_lock */
+#define bond_for_each_slave_rcu(bond, pos) \
+ list_for_each_entry_rcu(pos, &(bond)->slave_list, list)
/**
- * bond_for_each_slave - iterate the slaves list from head
- * @bond: the bond holding this list.
- * @pos: current slave.
- * @cnt: counter for max number of moves
+ * bond_for_each_slave_continue_reverse - iterate in reverse from a given position
+ * @bond: the bond holding this list
+ * @pos: slave to continue from
*
* Caller must hold bond->lock
*/
-#define bond_for_each_slave(bond, pos, cnt) \
- bond_for_each_slave_from(bond, pos, cnt, (bond)->first_slave)
-
+#define bond_for_each_slave_continue_reverse(bond, pos) \
+ list_for_each_entry_continue_reverse(pos, &(bond)->slave_list, list)
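
bond_for_each_slave_continue_reverse() exists mainly for the unwind paths
converted earlier in this patch (bond_change_mtu() and
bond_set_mac_address()): walk forward applying a change, and on failure walk
backward from the failing slave undoing it. A minimal sketch of that
apply-then-unwind shape over an array, with set_mtu() standing in for
dev_set_mtu():

#include <stdbool.h>
#include <stdio.h>

static bool set_mtu(int slave, int mtu)
{
	if (mtu > 1500 && slave == 2)		/* pretend slave 2 rejects jumbo */
		return false;
	printf("slave %d: mtu=%d\n", slave, mtu);
	return true;
}

static bool apply_all(int n, int mtu, int old_mtu)
{
	int i;

	for (i = 0; i < n; i++)
		if (!set_mtu(i, mtu))
			goto unwind;
	return true;
unwind:
	while (--i >= 0)			/* reverse from the failure */
		set_mtu(i, old_mtu);		/* best effort, like tmp_res */
	return false;
}

int main(void)
{
	if (!apply_all(4, 9000, 1500))
		printf("change rejected, old mtu restored\n");
	return 0;
}
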
#ifdef CONFIG_NET_POLL_CONTROLLER
extern atomic_t netpoll_block_tx;
@@ -167,15 +185,9 @@ struct bond_parm_tbl {
#define BOND_MAX_MODENAME_LEN 20
-struct vlan_entry {
- struct list_head vlan_list;
- unsigned short vlan_id;
-};
-
struct slave {
struct net_device *dev; /* first - useful for panic debug */
- struct slave *next;
- struct slave *prev;
+ struct list_head list;
struct bonding *bond; /* our master */
int delay;
unsigned long jiffies;
@@ -215,7 +227,7 @@ struct slave {
*/
struct bonding {
struct net_device *dev; /* first - useful for panic debug */
- struct slave *first_slave;
+ struct list_head slave_list;
struct slave *curr_active_slave;
struct slave *current_arp_slave;
struct slave *primary_slave;
@@ -237,7 +249,6 @@ struct bonding {
struct ad_bond_info ad_info;
struct alb_bond_info alb_info;
struct bond_params params;
- struct list_head vlan_list;
struct workqueue_struct *wq;
struct delayed_work mii_work;
struct delayed_work arp_work;
@@ -250,11 +261,6 @@ struct bonding {
#endif /* CONFIG_DEBUG_FS */
};
-static inline bool bond_vlan_used(struct bonding *bond)
-{
- return !list_empty(&bond->vlan_list);
-}
-
#define bond_slave_get_rcu(dev) \
((struct slave *) rcu_dereference(dev->rx_handler_data))
@@ -270,13 +276,10 @@ static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
struct net_device *slave_dev)
{
struct slave *slave = NULL;
- int i;
- bond_for_each_slave(bond, slave, i) {
- if (slave->dev == slave_dev) {
+ bond_for_each_slave(bond, slave)
+ if (slave->dev == slave_dev)
return slave;
- }
- }
return NULL;
}
@@ -416,10 +419,20 @@ static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be3
return addr;
}
+static inline bool slave_can_tx(struct slave *slave)
+{
+ if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP &&
+ bond_is_active_slave(slave))
+ return true;
+ else
+ return false;
+}
+
struct bond_net;
struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
+void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id);
int bond_create(struct net *net, const char *name);
int bond_create_sysfs(struct bond_net *net);
void bond_destroy_sysfs(struct bond_net *net);
@@ -477,10 +490,9 @@ static inline void bond_destroy_proc_dir(struct bond_net *bn)
static inline struct slave *bond_slave_has_mac(struct bonding *bond,
const u8 *mac)
{
- int i = 0;
struct slave *tmp;
- bond_for_each_slave(bond, tmp, i)
+ bond_for_each_slave(bond, tmp)
if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
return tmp;
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 34dea95d58db..88a6a5810ec6 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -347,7 +347,9 @@ static int ldisc_open(struct tty_struct *tty)
/* release devices to avoid name collision */
ser_release(NULL);
- sprintf(name, "cf%s", tty->name);
+ result = snprintf(name, sizeof(name), "cf%s", tty->name);
+ if (result >= IFNAMSIZ)
+ return -EINVAL;
dev = alloc_netdev(sizeof(*ser), name, caifdev_setup);
if (!dev)
return -ENOMEM;
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index dbbe97ae121e..3b1ff6148702 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1355,7 +1355,7 @@ static int at91_can_probe(struct platform_device *pdev)
if (at91_is_sam9263(priv))
dev->sysfs_groups[0] = &at91_sysfs_attr_group;
- dev_set_drvdata(&pdev->dev, dev);
+ platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
err = register_candev(dev);
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index c6f838d922a5..294ced3cc227 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -195,7 +195,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
+ if (IS_ERR(priv->raminit_ctrlreg) || (int)priv->instance < 0)
dev_info(&pdev->dev, "control memory is not used for raminit\n");
else
priv->raminit = c_can_hw_raminit;
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 7b0be0910f4b..71c677e651d7 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -850,12 +850,17 @@ static int flexcan_open(struct net_device *dev)
struct flexcan_priv *priv = netdev_priv(dev);
int err;
- clk_prepare_enable(priv->clk_ipg);
- clk_prepare_enable(priv->clk_per);
+ err = clk_prepare_enable(priv->clk_ipg);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(priv->clk_per);
+ if (err)
+ goto out_disable_ipg;
err = open_candev(dev);
if (err)
- goto out;
+ goto out_disable_per;
err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev);
if (err)
@@ -875,8 +880,9 @@ static int flexcan_open(struct net_device *dev)
out_close:
close_candev(dev);
- out:
+ out_disable_per:
clk_disable_unprepare(priv->clk_per);
+ out_disable_ipg:
clk_disable_unprepare(priv->clk_ipg);
return err;
@@ -933,8 +939,13 @@ static int register_flexcandev(struct net_device *dev)
struct flexcan_regs __iomem *regs = priv->base;
u32 reg, err;
- clk_prepare_enable(priv->clk_ipg);
- clk_prepare_enable(priv->clk_per);
+ err = clk_prepare_enable(priv->clk_ipg);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(priv->clk_per);
+ if (err)
+ goto out_disable_ipg;
/* select "bus clock", chip must be disabled */
flexcan_chip_disable(priv);
@@ -959,15 +970,16 @@ static int register_flexcandev(struct net_device *dev)
if (!(reg & FLEXCAN_MCR_FEN)) {
netdev_err(dev, "Could not enable RX FIFO, unsupported core\n");
err = -ENODEV;
- goto out;
+ goto out_disable_per;
}
err = register_candev(dev);
- out:
+ out_disable_per:
/* disable core and turn off clocks */
flexcan_chip_disable(priv);
clk_disable_unprepare(priv->clk_per);
+ out_disable_ipg:
clk_disable_unprepare(priv->clk_ipg);
return err;
@@ -1001,7 +1013,6 @@ static int flexcan_probe(struct platform_device *pdev)
struct resource *mem;
struct clk *clk_ipg = NULL, *clk_per = NULL;
void __iomem *base;
- resource_size_t mem_size;
int err, irq;
u32 clock_freq = 0;
@@ -1013,43 +1024,25 @@ static int flexcan_probe(struct platform_device *pdev)
clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(clk_ipg)) {
dev_err(&pdev->dev, "no ipg clock defined\n");
- err = PTR_ERR(clk_ipg);
- goto failed_clock;
+ return PTR_ERR(clk_ipg);
}
clock_freq = clk_get_rate(clk_ipg);
clk_per = devm_clk_get(&pdev->dev, "per");
if (IS_ERR(clk_per)) {
dev_err(&pdev->dev, "no per clock defined\n");
- err = PTR_ERR(clk_per);
- goto failed_clock;
+ return PTR_ERR(clk_per);
}
}
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if (!mem || irq <= 0) {
- err = -ENODEV;
- goto failed_get;
- }
+ if (irq <= 0)
+ return -ENODEV;
- mem_size = resource_size(mem);
- if (!request_mem_region(mem->start, mem_size, pdev->name)) {
- err = -EBUSY;
- goto failed_get;
- }
-
- base = ioremap(mem->start, mem_size);
- if (!base) {
- err = -ENOMEM;
- goto failed_map;
- }
-
- dev = alloc_candev(sizeof(struct flexcan_priv), 1);
- if (!dev) {
- err = -ENOMEM;
- goto failed_alloc;
- }
+ base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
of_id = of_match_device(flexcan_of_match, &pdev->dev);
if (of_id) {
@@ -1058,10 +1051,13 @@ static int flexcan_probe(struct platform_device *pdev)
devtype_data = (struct flexcan_devtype_data *)
pdev->id_entry->driver_data;
} else {
- err = -ENODEV;
- goto failed_devtype;
+ return -ENODEV;
}
+ dev = alloc_candev(sizeof(struct flexcan_priv), 1);
+ if (!dev)
+ return -ENOMEM;
+
dev->netdev_ops = &flexcan_netdev_ops;
dev->irq = irq;
dev->flags |= IFF_ECHO;
@@ -1087,7 +1083,7 @@ static int flexcan_probe(struct platform_device *pdev)
netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
- dev_set_drvdata(&pdev->dev, dev);
+ platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
err = register_flexcandev(dev);
@@ -1104,28 +1100,15 @@ static int flexcan_probe(struct platform_device *pdev)
return 0;
failed_register:
- failed_devtype:
free_candev(dev);
- failed_alloc:
- iounmap(base);
- failed_map:
- release_mem_region(mem->start, mem_size);
- failed_get:
- failed_clock:
return err;
}
static int flexcan_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
- struct flexcan_priv *priv = netdev_priv(dev);
- struct resource *mem;
unregister_flexcandev(dev);
- iounmap(priv->base);
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(mem->start, resource_size(mem));
free_candev(dev);
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 8cda23bf0614..fe7dd696957e 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -37,9 +37,6 @@
*
* static struct mcp251x_platform_data mcp251x_info = {
* .oscillator_frequency = 8000000,
- * .board_specific_setup = &mcp251x_setup,
- * .power_enable = mcp251x_power_enable,
- * .transceiver_enable = NULL,
* };
*
* static struct spi_board_info spi_board_info[] = {
@@ -76,6 +73,7 @@
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/uaccess.h>
+#include <linux/regulator/consumer.h>
/* SPI interface instruction set */
#define INSTRUCTION_WRITE 0x02
@@ -264,6 +262,8 @@ struct mcp251x_priv {
#define AFTER_SUSPEND_POWER 4
#define AFTER_SUSPEND_RESTART 8
int restart_tx;
+ struct regulator *power;
+ struct regulator *transceiver;
};
#define MCP251X_IS(_model) \
@@ -667,16 +667,25 @@ static int mcp251x_hw_probe(struct spi_device *spi)
return (st1 == 0x80 && st2 == 0x07) ? 1 : 0;
}
+static int mcp251x_power_enable(struct regulator *reg, int enable)
+{
+ if (IS_ERR(reg))
+ return 0;
+
+ if (enable)
+ return regulator_enable(reg);
+ else
+ return regulator_disable(reg);
+}
+
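
mcp251x_power_enable() treats an IS_ERR() regulator as "supply not wired
up" and succeeds silently, which is what lets boards without controllable
supplies work after the platform-data callbacks are removed. A userspace
mock of that optional-resource idiom; ERR_PTR()/IS_ERR() are re-created
here for illustration and are not pulled from the kernel headers:

#include <stdio.h>

#define ERR_PTR(e)	((void *)(long)(e))
#define IS_ERR(p)	((unsigned long)(p) >= (unsigned long)-4095L)

struct regulator { const char *name; };

static int regulator_enable(struct regulator *r)
{
	printf("enable %s\n", r->name);
	return 0;
}

/* Optional supply: an error pointer means "absent", so enabling
 * it is a successful no-op instead of a crash or a failure.
 */
static int power_enable(struct regulator *reg, int enable)
{
	if (IS_ERR(reg))
		return 0;
	return enable ? regulator_enable(reg) : 0;
}

int main(void)
{
	struct regulator vdd = { "vdd" };

	power_enable(&vdd, 1);			/* real supply: enabled */
	power_enable(ERR_PTR(-19), 1);		/* -ENODEV: silent no-op */
	return 0;
}
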
static void mcp251x_open_clean(struct net_device *net)
{
struct mcp251x_priv *priv = netdev_priv(net);
struct spi_device *spi = priv->spi;
- struct mcp251x_platform_data *pdata = spi->dev.platform_data;
free_irq(spi->irq, priv);
mcp251x_hw_sleep(spi);
- if (pdata->transceiver_enable)
- pdata->transceiver_enable(0);
+ mcp251x_power_enable(priv->transceiver, 0);
close_candev(net);
}
@@ -684,7 +693,6 @@ static int mcp251x_stop(struct net_device *net)
{
struct mcp251x_priv *priv = netdev_priv(net);
struct spi_device *spi = priv->spi;
- struct mcp251x_platform_data *pdata = spi->dev.platform_data;
close_candev(net);
@@ -704,8 +712,7 @@ static int mcp251x_stop(struct net_device *net)
mcp251x_hw_sleep(spi);
- if (pdata->transceiver_enable)
- pdata->transceiver_enable(0);
+ mcp251x_power_enable(priv->transceiver, 0);
priv->can.state = CAN_STATE_STOPPED;
@@ -928,8 +935,7 @@ static int mcp251x_open(struct net_device *net)
{
struct mcp251x_priv *priv = netdev_priv(net);
struct spi_device *spi = priv->spi;
- struct mcp251x_platform_data *pdata = spi->dev.platform_data;
- unsigned long flags;
+ unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_FALLING;
int ret;
ret = open_candev(net);
@@ -939,25 +945,17 @@ static int mcp251x_open(struct net_device *net)
}
mutex_lock(&priv->mcp_lock);
- if (pdata->transceiver_enable)
- pdata->transceiver_enable(1);
+ mcp251x_power_enable(priv->transceiver, 1);
priv->force_quit = 0;
priv->tx_skb = NULL;
priv->tx_len = 0;
- flags = IRQF_ONESHOT;
- if (pdata->irq_flags)
- flags |= pdata->irq_flags;
- else
- flags |= IRQF_TRIGGER_FALLING;
-
ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
flags, DEVICE_NAME, priv);
if (ret) {
dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
- if (pdata->transceiver_enable)
- pdata->transceiver_enable(0);
+ mcp251x_power_enable(priv->transceiver, 0);
close_candev(net);
goto open_unlock;
}
@@ -1026,6 +1024,19 @@ static int mcp251x_can_probe(struct spi_device *spi)
CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
priv->model = spi_get_device_id(spi)->driver_data;
priv->net = net;
+
+ priv->power = devm_regulator_get(&spi->dev, "vdd");
+ priv->transceiver = devm_regulator_get(&spi->dev, "xceiver");
+ if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
+ (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) {
+ ret = -EPROBE_DEFER;
+ goto error_power;
+ }
+
+ ret = mcp251x_power_enable(priv->power, 1);
+ if (ret)
+ goto error_power;
+
spi_set_drvdata(spi, priv);
priv->spi = spi;
@@ -1068,30 +1079,24 @@ static int mcp251x_can_probe(struct spi_device *spi)
}
}
- if (pdata->power_enable)
- pdata->power_enable(1);
-
- /* Call out to platform specific setup */
- if (pdata->board_specific_setup)
- pdata->board_specific_setup(spi);
-
SET_NETDEV_DEV(net, &spi->dev);
/* Configure the SPI bus */
- spi->mode = SPI_MODE_0;
+ spi->mode = spi->mode ? : SPI_MODE_0;
+ if (mcp251x_is_2510(spi))
+ spi->max_speed_hz = spi->max_speed_hz ? : 5 * 1000 * 1000;
+ else
+ spi->max_speed_hz = spi->max_speed_hz ? : 10 * 1000 * 1000;
spi->bits_per_word = 8;
spi_setup(spi);
/* Here is OK to not lock the MCP, no one knows about it yet */
if (!mcp251x_hw_probe(spi)) {
- dev_info(&spi->dev, "Probe failed\n");
+ ret = -ENODEV;
goto error_probe;
}
mcp251x_hw_sleep(spi);
- if (pdata->transceiver_enable)
- pdata->transceiver_enable(0);
-
ret = register_candev(net);
if (ret)
goto error_probe;
@@ -1109,13 +1114,13 @@ error_rx_buf:
if (!mcp251x_enable_dma)
kfree(priv->spi_tx_buf);
error_tx_buf:
- free_candev(net);
if (mcp251x_enable_dma)
dma_free_coherent(&spi->dev, PAGE_SIZE,
priv->spi_tx_buf, priv->spi_tx_dma);
+ mcp251x_power_enable(priv->power, 0);
+error_power:
+ free_candev(net);
error_alloc:
- if (pdata->power_enable)
- pdata->power_enable(0);
dev_err(&spi->dev, "probe failed\n");
error_out:
return ret;
@@ -1123,12 +1128,10 @@ error_out:
static int mcp251x_can_remove(struct spi_device *spi)
{
- struct mcp251x_platform_data *pdata = spi->dev.platform_data;
struct mcp251x_priv *priv = spi_get_drvdata(spi);
struct net_device *net = priv->net;
unregister_candev(net);
- free_candev(net);
if (mcp251x_enable_dma) {
dma_free_coherent(&spi->dev, PAGE_SIZE,
@@ -1138,8 +1141,9 @@ static int mcp251x_can_remove(struct spi_device *spi)
kfree(priv->spi_rx_buf);
}
- if (pdata->power_enable)
- pdata->power_enable(0);
+ mcp251x_power_enable(priv->power, 0);
+
+ free_candev(net);
return 0;
}
@@ -1149,7 +1153,6 @@ static int mcp251x_can_remove(struct spi_device *spi)
static int mcp251x_can_suspend(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
- struct mcp251x_platform_data *pdata = spi->dev.platform_data;
struct mcp251x_priv *priv = spi_get_drvdata(spi);
struct net_device *net = priv->net;
@@ -1163,15 +1166,14 @@ static int mcp251x_can_suspend(struct device *dev)
netif_device_detach(net);
mcp251x_hw_sleep(spi);
- if (pdata->transceiver_enable)
- pdata->transceiver_enable(0);
+ mcp251x_power_enable(priv->transceiver, 0);
priv->after_suspend = AFTER_SUSPEND_UP;
} else {
priv->after_suspend = AFTER_SUSPEND_DOWN;
}
- if (pdata->power_enable) {
- pdata->power_enable(0);
+ if (!IS_ERR(priv->power)) {
+ regulator_disable(priv->power);
priv->after_suspend |= AFTER_SUSPEND_POWER;
}
@@ -1181,16 +1183,14 @@ static int mcp251x_can_suspend(struct device *dev)
static int mcp251x_can_resume(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
- struct mcp251x_platform_data *pdata = spi->dev.platform_data;
struct mcp251x_priv *priv = spi_get_drvdata(spi);
if (priv->after_suspend & AFTER_SUSPEND_POWER) {
- pdata->power_enable(1);
+ mcp251x_power_enable(priv->power, 1);
queue_work(priv->wq, &priv->restart_work);
} else {
if (priv->after_suspend & AFTER_SUSPEND_UP) {
- if (pdata->transceiver_enable)
- pdata->transceiver_enable(1);
+ mcp251x_power_enable(priv->transceiver, 1);
queue_work(priv->wq, &priv->restart_work);
} else {
priv->after_suspend = 0;
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 5b0ee8ef5885..e59b3a392af6 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -40,6 +40,7 @@ struct mpc5xxx_can_data {
unsigned int type;
u32 (*get_clock)(struct platform_device *ofdev, const char *clock_name,
int *mscan_clksrc);
+ void (*put_clock)(struct platform_device *ofdev);
};
#ifdef CONFIG_PPC_MPC52xx
@@ -148,7 +149,10 @@ static u32 mpc512x_can_get_clock(struct platform_device *ofdev,
goto exit_put;
}
- /* Determine the MSCAN device index from the physical address */
+ /* Determine the MSCAN device index from the peripheral's
+ * physical address. Register address offsets against the
+ * IMMR base are: 0x1300, 0x1380, 0x2300, 0x2380
+ */
pval = of_get_property(ofdev->dev.of_node, "reg", &plen);
BUG_ON(!pval || plen < sizeof(*pval));
clockidx = (*pval & 0x80) ? 1 : 0;
@@ -177,7 +181,7 @@ static u32 mpc512x_can_get_clock(struct platform_device *ofdev,
clockdiv = 1;
if (!clock_name || !strcmp(clock_name, "sys")) {
- sys_clk = clk_get(&ofdev->dev, "sys_clk");
+ sys_clk = devm_clk_get(&ofdev->dev, "sys_clk");
if (IS_ERR(sys_clk)) {
dev_err(&ofdev->dev, "couldn't get sys_clk\n");
goto exit_unmap;
@@ -200,7 +204,7 @@ static u32 mpc512x_can_get_clock(struct platform_device *ofdev,
}
if (clocksrc < 0) {
- ref_clk = clk_get(&ofdev->dev, "ref_clk");
+ ref_clk = devm_clk_get(&ofdev->dev, "ref_clk");
if (IS_ERR(ref_clk)) {
dev_err(&ofdev->dev, "couldn't get ref_clk\n");
goto exit_unmap;
@@ -277,6 +281,8 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
dev = alloc_mscandev();
if (!dev)
goto exit_dispose_irq;
+ platform_set_drvdata(ofdev, dev);
+ SET_NETDEV_DEV(dev, &ofdev->dev);
priv = netdev_priv(dev);
priv->reg_base = base;
@@ -293,8 +299,6 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
goto exit_free_mscan;
}
- SET_NETDEV_DEV(dev, &ofdev->dev);
-
err = register_mscandev(dev, mscan_clksrc);
if (err) {
dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
@@ -302,8 +306,6 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
goto exit_free_mscan;
}
- platform_set_drvdata(ofdev, dev);
-
dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n",
priv->reg_base, dev->irq, priv->can.clock.freq);
@@ -321,10 +323,17 @@ exit_unmap_mem:
static int mpc5xxx_can_remove(struct platform_device *ofdev)
{
+ const struct of_device_id *match;
+ const struct mpc5xxx_can_data *data;
struct net_device *dev = platform_get_drvdata(ofdev);
struct mscan_priv *priv = netdev_priv(dev);
+ match = of_match_device(mpc5xxx_can_table, &ofdev->dev);
+ data = match ? match->data : NULL;
+
unregister_mscandev(dev);
+ if (data && data->put_clock)
+ data->put_clock(ofdev);
iounmap(priv->reg_base);
irq_dispose_mapping(dev->irq);
free_candev(dev);
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index e6b40954e204..a955ec8c4b97 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -573,10 +573,21 @@ static int mscan_open(struct net_device *dev)
struct mscan_priv *priv = netdev_priv(dev);
struct mscan_regs __iomem *regs = priv->reg_base;
+ if (priv->clk_ipg) {
+ ret = clk_prepare_enable(priv->clk_ipg);
+ if (ret)
+ goto exit_retcode;
+ }
+ if (priv->clk_can) {
+ ret = clk_prepare_enable(priv->clk_can);
+ if (ret)
+ goto exit_dis_ipg_clock;
+ }
+
/* common open */
ret = open_candev(dev);
if (ret)
- return ret;
+ goto exit_dis_can_clock;
napi_enable(&priv->napi);
@@ -604,6 +615,13 @@ exit_free_irq:
exit_napi_disable:
napi_disable(&priv->napi);
close_candev(dev);
+exit_dis_can_clock:
+ if (priv->clk_can)
+ clk_disable_unprepare(priv->clk_can);
+exit_dis_ipg_clock:
+ if (priv->clk_ipg)
+ clk_disable_unprepare(priv->clk_ipg);
+exit_retcode:
return ret;
}
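
The new error path above follows the usual kernel goto-ladder discipline. A self-contained sketch of the same ordering rules (hypothetical function):

#include <linux/clk.h>

/* Unwind in reverse order: each label undoes only the steps that
 * completed before the failure.
 */
static int demo_open_clocks(struct clk *ipg, struct clk *can)
{
	int ret;

	ret = clk_prepare_enable(ipg);
	if (ret)
		return ret;

	ret = clk_prepare_enable(can);
	if (ret)
		goto err_dis_ipg;

	return 0;

err_dis_ipg:
	clk_disable_unprepare(ipg);
	return ret;
}
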
@@ -621,6 +639,11 @@ static int mscan_close(struct net_device *dev)
close_candev(dev);
free_irq(dev->irq, dev);
+ if (priv->clk_can)
+ clk_disable_unprepare(priv->clk_can);
+ if (priv->clk_ipg)
+ clk_disable_unprepare(priv->clk_ipg);
+
return 0;
}
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
index af2ed8baf0a3..9c24d60a23b1 100644
--- a/drivers/net/can/mscan/mscan.h
+++ b/drivers/net/can/mscan/mscan.h
@@ -21,6 +21,7 @@
#ifndef __MSCAN_H__
#define __MSCAN_H__
+#include <linux/clk.h>
#include <linux/types.h>
/* MSCAN control register 0 (CANCTL0) bits */
@@ -283,6 +284,8 @@ struct mscan_priv {
unsigned int type; /* MSCAN type variants */
unsigned long flags;
void __iomem *reg_base; /* ioremap'ed address to registers */
+ struct clk *clk_ipg; /* clock for registers */
+ struct clk *clk_can; /* clock for bitrates */
u8 shadow_statflg;
u8 shadow_canrier;
u8 cur_pri;
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index a5f91e1e8fe3..becef25fa194 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -148,7 +148,7 @@ config PCMCIA_PCNET
config NE_H8300
tristate "NE2000 compatible support for H8/300"
- depends on H8300
+ depends on H8300H_AKI3068NET || H8300H_H8MAX
---help---
Say Y here if you want to use the NE2000 compatible
controller on the Renesas H8/300 processor.
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index e1d26433d619..f92f001551da 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -707,7 +707,7 @@ static int ax_init_dev(struct net_device *dev)
#ifdef CONFIG_AX88796_93CX6
if (ax->plat->flags & AXFLG_HAS_93CX6) {
- unsigned char mac_addr[6];
+ unsigned char mac_addr[ETH_ALEN];
struct eeprom_93cx6 eeprom;
eeprom.data = ei_local;
@@ -719,7 +719,7 @@ static int ax_init_dev(struct net_device *dev)
(__le16 __force *)mac_addr,
sizeof(mac_addr) >> 1);
- memcpy(dev->dev_addr, mac_addr, 6);
+ memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
}
#endif
if (ax->plat->wordlength == 2) {
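
The ETH_ALEN conversions in these hunks are behavior-neutral but self-documenting; a one-line sketch (demo_* name is illustrative):

#include <linux/if_ether.h>
#include <linux/string.h>

/* ETH_ALEN (== 6) replaces the bare magic number for MAC buffers. */
static void demo_copy_mac(unsigned char *dst, const unsigned char *src)
{
	memcpy(dst, src, ETH_ALEN);
}
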
@@ -840,7 +840,7 @@ static int ax_probe(struct platform_device *pdev)
ei_local = netdev_priv(dev);
ax = to_ax_dev(dev);
- ax->plat = pdev->dev.platform_data;
+ ax->plat = dev_get_platdata(&pdev->dev);
platform_set_drvdata(pdev, dev);
ei_local->rxcr_base = ax->plat->rcr_val;
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 2037080c504d..506b0248c400 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -90,6 +90,7 @@ source "drivers/net/ethernet/marvell/Kconfig"
source "drivers/net/ethernet/mellanox/Kconfig"
source "drivers/net/ethernet/micrel/Kconfig"
source "drivers/net/ethernet/microchip/Kconfig"
+source "drivers/net/ethernet/moxa/Kconfig"
source "drivers/net/ethernet/myricom/Kconfig"
config FEALNX
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 390bd0bfaa27..c0b8789952e7 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
+obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/
obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
obj-$(CONFIG_FEALNX) += fealnx.o
obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index e904b3838dcc..e66684a438f5 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -1647,12 +1647,12 @@ static int bfin_mac_probe(struct platform_device *pdev)
setup_mac_addr(ndev->dev_addr);
- if (!pdev->dev.platform_data) {
+ if (!dev_get_platdata(&pdev->dev)) {
dev_err(&pdev->dev, "Cannot get platform device bfin_mii_bus!\n");
rc = -ENODEV;
goto out_err_probe_mac;
}
- pd = pdev->dev.platform_data;
+ pd = dev_get_platdata(&pdev->dev);
lp->mii_bus = platform_get_drvdata(pd);
if (!lp->mii_bus) {
dev_err(&pdev->dev, "Cannot get mii_bus!\n");
@@ -1660,7 +1660,7 @@ static int bfin_mac_probe(struct platform_device *pdev)
goto out_err_probe_mac;
}
lp->mii_bus->priv = ndev;
- mii_bus_data = pd->dev.platform_data;
+ mii_bus_data = dev_get_platdata(&pd->dev);
rc = mii_probe(ndev, mii_bus_data->phy_mode);
if (rc) {
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 7ff4b30d55ea..e06694555144 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1464,18 +1464,18 @@ static int greth_of_probe(struct platform_device *ofdev)
}
/* Allocate TX descriptor ring in coherent memory */
- greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024,
- &greth->tx_bd_base_phys,
- GFP_KERNEL | __GFP_ZERO);
+ greth->tx_bd_base = dma_zalloc_coherent(greth->dev, 1024,
+ &greth->tx_bd_base_phys,
+ GFP_KERNEL);
if (!greth->tx_bd_base) {
err = -ENOMEM;
goto error3;
}
/* Allocate RX descriptor ring in coherent memory */
- greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024,
- &greth->rx_bd_base_phys,
- GFP_KERNEL | __GFP_ZERO);
+ greth->rx_bd_base = dma_zalloc_coherent(greth->dev, 1024,
+ &greth->rx_bd_base_phys,
+ GFP_KERNEL);
if (!greth->rx_bd_base) {
err = -ENOMEM;
goto error4;
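
The conversion here is mechanical: dma_zalloc_coherent() folds __GFP_ZERO into dma_alloc_coherent(). A sketch of the equivalence (demo_* name is illustrative):

#include <linux/dma-mapping.h>

static void *demo_alloc_ring(struct device *dev, size_t size,
			     dma_addr_t *handle)
{
	/* Equivalent to:
	 *   dma_alloc_coherent(dev, size, handle, GFP_KERNEL | __GFP_ZERO);
	 */
	return dma_zalloc_coherent(dev, size, handle, GFP_KERNEL);
}
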
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index ceb45bc963a9..91d52b495848 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1131,7 +1131,7 @@ static int au1000_probe(struct platform_device *pdev)
writel(0, aup->enable);
aup->mac_enabled = 0;
- pd = pdev->dev.platform_data;
+ pd = dev_get_platdata(&pdev->dev);
if (!pd) {
dev_info(&pdev->dev, "no platform_data passed,"
" PHY search on MAC0\n");
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index ed2130727643..2d8e28819779 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1521,7 +1521,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
char *chipname;
struct net_device *dev;
const struct pcnet32_access *a = NULL;
- u8 promaddr[6];
+ u8 promaddr[ETH_ALEN];
int ret = -ENODEV;
/* reset the chip */
@@ -1665,10 +1665,10 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
}
/* read PROM address and compare with CSR address */
- for (i = 0; i < 6; i++)
+ for (i = 0; i < ETH_ALEN; i++)
promaddr[i] = inb(ioaddr + i);
- if (memcmp(promaddr, dev->dev_addr, 6) ||
+ if (memcmp(promaddr, dev->dev_addr, ETH_ALEN) ||
!is_valid_ether_addr(dev->dev_addr)) {
if (is_valid_ether_addr(promaddr)) {
if (pcnet32_debug & NETIF_MSG_PROBE) {
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 55d79cb53a79..9e1601487263 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -149,8 +149,6 @@ static void arc_emac_tx_clean(struct net_device *ndev)
struct sk_buff *skb = tx_buff->skb;
unsigned int info = le32_to_cpu(txbd->info);
- *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
-
if ((info & FOR_EMAC) || !txbd->data)
break;
@@ -180,6 +178,8 @@ static void arc_emac_tx_clean(struct net_device *ndev)
txbd->data = 0;
txbd->info = 0;
+ *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
+
if (netif_queue_stopped(ndev))
netif_wake_queue(ndev);
}
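
The two moved lines fix an ordering bug: the consumer index must advance only after the descriptor is confirmed back in software ownership. An illustrative sketch of the rule (hypothetical helper, not the driver's actual code):

/* Advance the dirty (consumer) index only after the descriptor has
 * been reclaimed; bailing out while hardware still owns it must leave
 * the index untouched.
 */
static unsigned int demo_clean_one(unsigned int dirty, unsigned int ring_len,
				   bool hw_owned)
{
	if (hw_owned)
		return dirty;			/* EMAC still owns the BD */

	/* ... unmap buffer, free skb, clear descriptor ... */

	return (dirty + 1) % ring_len;
}
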
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 52c96036dcc4..2fa5b86f139d 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -130,7 +130,7 @@ config BNX2X_SRIOV
config BGMAC
tristate "BCMA bus GBit core support"
- depends on BCMA_HOST_SOC && HAS_DMA
+ depends on BCMA_HOST_SOC && HAS_DMA && BCM47XX
select PHYLIB
---help---
This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus.
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index b1bcd4ba4744..8ac48fbf8a66 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -948,8 +948,7 @@ static int bcm_enet_open(struct net_device *dev)
/* allocate rx dma ring */
size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma,
- GFP_KERNEL | __GFP_ZERO);
+ p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
if (!p) {
ret = -ENOMEM;
goto out_freeirq_tx;
@@ -960,8 +959,7 @@ static int bcm_enet_open(struct net_device *dev)
/* allocate tx dma ring */
size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma,
- GFP_KERNEL | __GFP_ZERO);
+ p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
if (!p) {
ret = -ENOMEM;
goto out_free_rx_ring;
@@ -1747,11 +1745,10 @@ static int bcm_enet_probe(struct platform_device *pdev)
if (!bcm_enet_shared_base[0])
return -ENODEV;
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
- if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx)
+ if (!res_irq || !res_irq_rx || !res_irq_tx)
return -ENODEV;
ret = 0;
@@ -1767,9 +1764,10 @@ static int bcm_enet_probe(struct platform_device *pdev)
if (ret)
goto out;
- priv->base = devm_request_and_ioremap(&pdev->dev, res_mem);
- if (priv->base == NULL) {
- ret = -ENOMEM;
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
goto out;
}
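
devm_ioremap_resource() reports failure through the ERR_PTR convention rather than NULL, which is why the caller switches to IS_ERR()/PTR_ERR(). A minimal sketch (demo_* name is illustrative):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *demo_map_regs(struct platform_device *pdev, int *ret)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		*ret = PTR_ERR(base);	/* encoded errno, not NULL */
	return base;
}
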
@@ -1800,7 +1798,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
priv->rx_ring_size = BCMENET_DEF_RX_DESC;
priv->tx_ring_size = BCMENET_DEF_TX_DESC;
- pd = pdev->dev.platform_data;
+ pd = dev_get_platdata(&pdev->dev);
if (pd) {
memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
priv->has_phy = pd->has_phy;
@@ -1964,7 +1962,7 @@ static int bcm_enet_remove(struct platform_device *pdev)
} else {
struct bcm63xx_enet_platform_data *pd;
- pd = pdev->dev.platform_data;
+ pd = dev_get_platdata(&pdev->dev);
if (pd && pd->mii_config)
pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
bcm_enet_mdio_write_mii);
@@ -2742,7 +2740,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
priv->tx_ring_size = BCMENET_DEF_TX_DESC;
priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
- pd = pdev->dev.platform_data;
+ pd = dev_get_platdata(&pdev->dev);
if (pd) {
memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
memcpy(priv->used_ports, pd->used_ports,
@@ -2836,7 +2834,6 @@ static int bcm_enetsw_remove(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
- platform_set_drvdata(pdev, NULL);
free_netdev(dev);
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 6a2de1d79ff6..e838a3f74b69 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -1,6 +1,6 @@
/* bnx2.c: Broadcom NX2 network driver.
*
- * Copyright (c) 2004-2011 Broadcom Corporation
+ * Copyright (c) 2004-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -58,8 +58,8 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define DRV_MODULE_VERSION "2.2.3"
-#define DRV_MODULE_RELDATE "June 27, 2012"
+#define DRV_MODULE_VERSION "2.2.4"
+#define DRV_MODULE_RELDATE "Aug 05, 2013"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
@@ -853,9 +853,8 @@ bnx2_alloc_mem(struct bnx2 *bp)
bp->status_stats_size = status_blk_size +
sizeof(struct statistics_block);
- status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
- &bp->status_blk_mapping,
- GFP_KERNEL | __GFP_ZERO);
+ status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
+ &bp->status_blk_mapping, GFP_KERNEL);
if (status_blk == NULL)
goto alloc_mem_err;
@@ -3908,136 +3907,121 @@ init_cpu_err:
return rc;
}
-static int
-bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
+static void
+bnx2_setup_wol(struct bnx2 *bp)
{
- u16 pmcsr;
+ int i;
+ u32 val, wol_msg;
- pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
+ if (bp->wol) {
+ u32 advertising;
+ u8 autoneg;
- switch (state) {
- case PCI_D0: {
- u32 val;
+ autoneg = bp->autoneg;
+ advertising = bp->advertising;
- pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
- (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
- PCI_PM_CTRL_PME_STATUS);
+ if (bp->phy_port == PORT_TP) {
+ bp->autoneg = AUTONEG_SPEED;
+ bp->advertising = ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_Autoneg;
+ }
- if (pmcsr & PCI_PM_CTRL_STATE_MASK)
- /* delay required during transition out of D3hot */
- msleep(20);
+ spin_lock_bh(&bp->phy_lock);
+ bnx2_setup_phy(bp, bp->phy_port);
+ spin_unlock_bh(&bp->phy_lock);
- val = BNX2_RD(bp, BNX2_EMAC_MODE);
- val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
- val &= ~BNX2_EMAC_MODE_MPKT;
- BNX2_WR(bp, BNX2_EMAC_MODE, val);
+ bp->autoneg = autoneg;
+ bp->advertising = advertising;
- val = BNX2_RD(bp, BNX2_RPM_CONFIG);
- val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
- BNX2_WR(bp, BNX2_RPM_CONFIG, val);
- break;
- }
- case PCI_D3hot: {
- int i;
- u32 val, wol_msg;
-
- if (bp->wol) {
- u32 advertising;
- u8 autoneg;
-
- autoneg = bp->autoneg;
- advertising = bp->advertising;
-
- if (bp->phy_port == PORT_TP) {
- bp->autoneg = AUTONEG_SPEED;
- bp->advertising = ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full |
- ADVERTISED_Autoneg;
- }
+ bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
- spin_lock_bh(&bp->phy_lock);
- bnx2_setup_phy(bp, bp->phy_port);
- spin_unlock_bh(&bp->phy_lock);
+ val = BNX2_RD(bp, BNX2_EMAC_MODE);
- bp->autoneg = autoneg;
- bp->advertising = advertising;
+ /* Enable port mode. */
+ val &= ~BNX2_EMAC_MODE_PORT;
+ val |= BNX2_EMAC_MODE_MPKT_RCVD |
+ BNX2_EMAC_MODE_ACPI_RCVD |
+ BNX2_EMAC_MODE_MPKT;
+ if (bp->phy_port == PORT_TP) {
+ val |= BNX2_EMAC_MODE_PORT_MII;
+ } else {
+ val |= BNX2_EMAC_MODE_PORT_GMII;
+ if (bp->line_speed == SPEED_2500)
+ val |= BNX2_EMAC_MODE_25G_MODE;
+ }
- bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
+ BNX2_WR(bp, BNX2_EMAC_MODE, val);
- val = BNX2_RD(bp, BNX2_EMAC_MODE);
+ /* receive all multicast */
+ for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
+ BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
+ 0xffffffff);
+ }
+ BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
- /* Enable port mode. */
- val &= ~BNX2_EMAC_MODE_PORT;
- val |= BNX2_EMAC_MODE_MPKT_RCVD |
- BNX2_EMAC_MODE_ACPI_RCVD |
- BNX2_EMAC_MODE_MPKT;
- if (bp->phy_port == PORT_TP)
- val |= BNX2_EMAC_MODE_PORT_MII;
- else {
- val |= BNX2_EMAC_MODE_PORT_GMII;
- if (bp->line_speed == SPEED_2500)
- val |= BNX2_EMAC_MODE_25G_MODE;
- }
+ val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
+ BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
+ BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
+ BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
- BNX2_WR(bp, BNX2_EMAC_MODE, val);
+ /* Need to enable EMAC and RPM for WOL. */
+ BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
+ BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
+ BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
+ BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
- /* receive all multicast */
- for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
- BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
- 0xffffffff);
- }
- BNX2_WR(bp, BNX2_EMAC_RX_MODE,
- BNX2_EMAC_RX_MODE_SORT_MODE);
+ val = BNX2_RD(bp, BNX2_RPM_CONFIG);
+ val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
+ BNX2_WR(bp, BNX2_RPM_CONFIG, val);
- val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
- BNX2_RPM_SORT_USER0_MC_EN;
- BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
- BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
- BNX2_WR(bp, BNX2_RPM_SORT_USER0, val |
- BNX2_RPM_SORT_USER0_ENA);
+ wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
+ } else {
+ wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
+ }
- /* Need to enable EMAC and RPM for WOL. */
- BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
- BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
- BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
- BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
+ if (!(bp->flags & BNX2_FLAG_NO_WOL))
+ bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0);
- val = BNX2_RD(bp, BNX2_RPM_CONFIG);
- val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
- BNX2_WR(bp, BNX2_RPM_CONFIG, val);
+}
- wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
- }
- else {
- wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
- }
+static int
+bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
+{
+ switch (state) {
+ case PCI_D0: {
+ u32 val;
+
+ pci_enable_wake(bp->pdev, PCI_D0, false);
+ pci_set_power_state(bp->pdev, PCI_D0);
- if (!(bp->flags & BNX2_FLAG_NO_WOL))
- bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
- 1, 0);
+ val = BNX2_RD(bp, BNX2_EMAC_MODE);
+ val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
+ val &= ~BNX2_EMAC_MODE_MPKT;
+ BNX2_WR(bp, BNX2_EMAC_MODE, val);
- pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+ val = BNX2_RD(bp, BNX2_RPM_CONFIG);
+ val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
+ BNX2_WR(bp, BNX2_RPM_CONFIG, val);
+ break;
+ }
+ case PCI_D3hot: {
+ bnx2_setup_wol(bp);
+ pci_wake_from_d3(bp->pdev, bp->wol);
if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
(BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
if (bp->wol)
- pmcsr |= 3;
- }
- else {
- pmcsr |= 3;
- }
- if (bp->wol) {
- pmcsr |= PCI_PM_CTRL_PME_ENABLE;
+ pci_set_power_state(bp->pdev, PCI_D3hot);
+ } else {
+ pci_set_power_state(bp->pdev, PCI_D3hot);
}
- pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
- pmcsr);
/* No more memory access after this point until
* device is brought back to D0.
*/
- udelay(50);
break;
}
default:
@@ -6317,7 +6301,6 @@ bnx2_open(struct net_device *dev)
netif_carrier_off(dev);
- bnx2_set_power_state(bp, PCI_D0);
bnx2_disable_int(bp);
rc = bnx2_setup_int_mode(bp, disable_msi);
@@ -6724,7 +6707,6 @@ bnx2_close(struct net_device *dev)
bnx2_del_napi(bp);
bp->link_up = 0;
netif_carrier_off(bp->dev);
- bnx2_set_power_state(bp, PCI_D3hot);
return 0;
}
@@ -7081,6 +7063,9 @@ bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
else {
bp->wol = 0;
}
+
+ device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
+
return 0;
}
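
A sketch of the wakeup bookkeeping bnx2_set_wol() now performs: the helpers tell the PM core whether this device may wake the system, which is also what surfaces in sysfs (illustrative wrapper, not driver code):

#include <linux/device.h>

static void demo_sync_wol(struct device *dev, bool capable, bool enabled)
{
	if (!capable)
		device_set_wakeup_capable(dev, false);
	else
		device_set_wakeup_enable(dev, enabled);
}
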
@@ -7156,9 +7141,6 @@ bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
struct bnx2 *bp = netdev_priv(dev);
int rc;
- if (!netif_running(dev))
- return -EAGAIN;
-
/* parameters already validated in ethtool_get_eeprom */
rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
@@ -7173,9 +7155,6 @@ bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
struct bnx2 *bp = netdev_priv(dev);
int rc;
- if (!netif_running(dev))
- return -EAGAIN;
-
/* parameters already validated in ethtool_set_eeprom */
rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
@@ -7535,8 +7514,6 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
struct bnx2 *bp = netdev_priv(dev);
- bnx2_set_power_state(bp, PCI_D0);
-
memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
if (etest->flags & ETH_TEST_FL_OFFLINE) {
int i;
@@ -7585,8 +7562,6 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
etest->flags |= ETH_TEST_FL_FAILED;
}
- if (!netif_running(bp->dev))
- bnx2_set_power_state(bp, PCI_D3hot);
}
static void
@@ -7658,8 +7633,6 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
switch (state) {
case ETHTOOL_ID_ACTIVE:
- bnx2_set_power_state(bp, PCI_D0);
-
bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
return 1; /* cycle on/off once per second */
@@ -7680,9 +7653,6 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
case ETHTOOL_ID_INACTIVE:
BNX2_WR(bp, BNX2_EMAC_LED, 0);
BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
-
- if (!netif_running(dev))
- bnx2_set_power_state(bp, PCI_D3hot);
break;
}
@@ -8130,8 +8100,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
goto err_out_release;
}
- bnx2_set_power_state(bp, PCI_D0);
-
/* Configure byte swap and enable write to the reg_window registers.
* Rely on CPU to do target byte swapping on big endian systems
* The chip's target access swapping will not swap all accesses
@@ -8170,13 +8138,13 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
- if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
+ if (pdev->msix_cap)
bp->flags |= BNX2_FLAG_MSIX_CAP;
}
if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
- if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
+ if (pdev->msi_cap)
bp->flags |= BNX2_FLAG_MSI_CAP;
}
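
Both branches above lean on the PCI core caching capability offsets in struct pci_dev at enumeration time; a trivial sketch of the equivalence (demo_* name is illustrative):

#include <linux/pci.h>

static bool demo_has_msix(struct pci_dev *pdev)
{
	/* same result as pci_find_capability(pdev, PCI_CAP_ID_MSIX) != 0 */
	return pdev->msix_cap != 0;
}
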
@@ -8369,6 +8337,11 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->wol = 0;
}
+ if (bp->flags & BNX2_FLAG_NO_WOL)
+ device_set_wakeup_capable(&bp->pdev->dev, false);
+ else
+ device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
+
if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
bp->tx_quick_cons_trip_int =
bp->tx_quick_cons_trip;
@@ -8609,46 +8582,52 @@ bnx2_remove_one(struct pci_dev *pdev)
}
static int
-bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
+bnx2_suspend(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2 *bp = netdev_priv(dev);
- /* PCI register 4 needs to be saved whether netif_running() or not.
- * MSI address and data need to be saved if using MSI and
- * netif_running().
- */
- pci_save_state(pdev);
- if (!netif_running(dev))
- return 0;
-
- cancel_work_sync(&bp->reset_task);
- bnx2_netif_stop(bp, true);
- netif_device_detach(dev);
- del_timer_sync(&bp->timer);
- bnx2_shutdown_chip(bp);
- bnx2_free_skbs(bp);
- bnx2_set_power_state(bp, pci_choose_state(pdev, state));
+ if (netif_running(dev)) {
+ cancel_work_sync(&bp->reset_task);
+ bnx2_netif_stop(bp, true);
+ netif_device_detach(dev);
+ del_timer_sync(&bp->timer);
+ bnx2_shutdown_chip(bp);
+ __bnx2_free_irq(bp);
+ bnx2_free_skbs(bp);
+ }
+ bnx2_setup_wol(bp);
return 0;
}
static int
-bnx2_resume(struct pci_dev *pdev)
+bnx2_resume(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2 *bp = netdev_priv(dev);
- pci_restore_state(pdev);
if (!netif_running(dev))
return 0;
bnx2_set_power_state(bp, PCI_D0);
netif_device_attach(dev);
+ bnx2_request_irq(bp);
bnx2_init_nic(bp, 1);
bnx2_netif_start(bp, true);
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
+#define BNX2_PM_OPS (&bnx2_pm_ops)
+
+#else
+
+#define BNX2_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
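
This is the standard dev_pm_ops migration: SIMPLE_DEV_PM_OPS() wires the callback pair into the system-sleep slots, and the driver publishes them via .driver.pm instead of the legacy .suspend/.resume hooks. A skeletal sketch (demo_* names are hypothetical):

#include <linux/pm.h>

static int demo_suspend(struct device *dev)
{
	/* quiesce the device */
	return 0;
}

static int demo_resume(struct device *dev)
{
	/* bring the device back up */
	return 0;
}

static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

/* ...and in the driver definition: .driver.pm = &demo_pm_ops, */
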
/**
* bnx2_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
@@ -8694,24 +8673,28 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2 *bp = netdev_priv(dev);
- pci_ers_result_t result;
- int err;
+ pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
+ int err = 0;
rtnl_lock();
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev,
"Cannot re-enable PCI device after reset\n");
- result = PCI_ERS_RESULT_DISCONNECT;
} else {
pci_set_master(pdev);
pci_restore_state(pdev);
pci_save_state(pdev);
- if (netif_running(dev)) {
- bnx2_set_power_state(bp, PCI_D0);
- bnx2_init_nic(bp, 1);
- }
- result = PCI_ERS_RESULT_RECOVERED;
+ if (netif_running(dev))
+ err = bnx2_init_nic(bp, 1);
+
+ if (!err)
+ result = PCI_ERS_RESULT_RECOVERED;
+ }
+
+ if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
+ bnx2_napi_enable(bp);
+ dev_close(dev);
}
rtnl_unlock();
@@ -8748,6 +8731,28 @@ static void bnx2_io_resume(struct pci_dev *pdev)
rtnl_unlock();
}
+static void bnx2_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2 *bp;
+
+ if (!dev)
+ return;
+
+ bp = netdev_priv(dev);
+ if (!bp)
+ return;
+
+ rtnl_lock();
+ if (netif_running(dev))
+ dev_close(bp->dev);
+
+ if (system_state == SYSTEM_POWER_OFF)
+ bnx2_set_power_state(bp, PCI_D3hot);
+
+ rtnl_unlock();
+}
+
static const struct pci_error_handlers bnx2_err_handler = {
.error_detected = bnx2_io_error_detected,
.slot_reset = bnx2_io_slot_reset,
@@ -8759,9 +8764,9 @@ static struct pci_driver bnx2_pci_driver = {
.id_table = bnx2_pci_tbl,
.probe = bnx2_init_one,
.remove = bnx2_remove_one,
- .suspend = bnx2_suspend,
- .resume = bnx2_resume,
+ .driver.pm = BNX2_PM_OPS,
.err_handler = &bnx2_err_handler,
+ .shutdown = bnx2_shutdown,
};
module_pci_driver(bnx2_pci_driver);
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index 172efbecfea2..18cb2d23e56b 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -1,6 +1,6 @@
/* bnx2.h: Broadcom NX2 network driver.
*
- * Copyright (c) 2004-2011 Broadcom Corporation
+ * Copyright (c) 2004-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 00b88cbfde25..0c338026ce01 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -825,15 +825,13 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
#define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */
-#define BNX2X_DB_SHIFT 7 /* 128 bytes*/
+#define BNX2X_DB_SHIFT 3 /* 8 bytes */
#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT)
#error "Min DB doorbell stride is 8"
#endif
-#define DPM_TRIGER_TYPE 0x40
#define DOORBELL(bp, cid, val) \
do { \
- writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \
- DPM_TRIGER_TYPE); \
+ writel((u32)(val), bp->doorbells + (bp->db_size * (cid))); \
} while (0)
/* TX CSUM helpers */
@@ -1100,13 +1098,27 @@ struct bnx2x_port {
extern struct workqueue_struct *bnx2x_wq;
#define BNX2X_MAX_NUM_OF_VFS 64
-#define BNX2X_VF_CID_WND 0
+#define BNX2X_VF_CID_WND 4 /* log num of queues per VF. HW config. */
#define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND)
-#define BNX2X_CLIENTS_PER_VF 1
-#define BNX2X_FIRST_VF_CID 256
+
+/* We need to reserve doorbell addresses for all VF and queue combinations */
#define BNX2X_VF_CIDS (BNX2X_MAX_NUM_OF_VFS * BNX2X_CIDS_PER_VF)
+
+/* The doorbell is configured to have the same number of CIDs for PFs and for
+ * VFs. For this reason the PF CID zone is as large as the VF zone.
+ */
+#define BNX2X_FIRST_VF_CID BNX2X_VF_CIDS
+#define BNX2X_MAX_NUM_VF_QUEUES 64
#define BNX2X_VF_ID_INVALID 0xFF
+/* The number of VF CIDs multiplied by the number of bytes reserved for
+ * each CID must not exceed the size of the VF doorbell.
+ */
+#define BNX2X_VF_BAR_SIZE 512
+#if (BNX2X_VF_BAR_SIZE < BNX2X_CIDS_PER_VF * (1 << BNX2X_DB_SHIFT))
+#error "VF doorbell bar size is 512"
+#endif
+
/*
* The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
* control by the number of fast-path status blocks supported by the
@@ -1331,7 +1343,7 @@ enum {
BNX2X_SP_RTNL_ENABLE_SRIOV,
BNX2X_SP_RTNL_VFPF_MCAST,
BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
- BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
+ BNX2X_SP_RTNL_RX_MODE,
BNX2X_SP_RTNL_HYPERVISOR_VLAN,
BNX2X_SP_RTNL_TX_STOP,
BNX2X_SP_RTNL_TX_RESUME,
@@ -1650,10 +1662,10 @@ struct bnx2x {
dma_addr_t fw_stats_data_mapping;
int fw_stats_data_sz;
- /* For max 196 cids (64*3 + non-eth), 32KB ILT page size and 1KB
+ /* For max 1024 cids (VF RSS), 32KB ILT page size and 1KB
* context size we need 8 ILT entries.
*/
-#define ILT_MAX_L2_LINES 8
+#define ILT_MAX_L2_LINES 32
struct hw_context context[ILT_MAX_L2_LINES];
struct bnx2x_ilt *ilt;
@@ -1869,7 +1881,7 @@ extern int num_queues;
#define FUNC_FLG_TPA 0x0008
#define FUNC_FLG_SPQ 0x0010
#define FUNC_FLG_LEADING 0x0020 /* PF only */
-
+#define FUNC_FLG_LEADING_STATS 0x0040
struct bnx2x_func_init_params {
/* dma */
dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */
@@ -2069,9 +2081,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
bool is_pf);
-#define BNX2X_ILT_ZALLOC(x, y, size) \
- x = dma_alloc_coherent(&bp->pdev->dev, size, y, \
- GFP_KERNEL | __GFP_ZERO)
+#define BNX2X_ILT_ZALLOC(x, y, size) \
+ x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL)
#define BNX2X_ILT_FREE(x, y, size) \
do { \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 0cc26110868d..2361bf236ce3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1948,7 +1948,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
}
}
-static int bnx2x_init_rss_pf(struct bnx2x *bp)
+static int bnx2x_init_rss(struct bnx2x *bp)
{
int i;
u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
@@ -1972,8 +1972,8 @@ static int bnx2x_init_rss_pf(struct bnx2x *bp)
return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
}
-int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
- bool config_hash)
+int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
+ bool config_hash, bool enable)
{
struct bnx2x_config_rss_params params = {NULL};
@@ -1988,17 +1988,21 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
- __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
-
- /* RSS configuration */
- __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
- __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
- __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
- __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
- if (rss_obj->udp_rss_v4)
- __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
- if (rss_obj->udp_rss_v6)
- __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
+ if (enable) {
+ __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
+
+ /* RSS configuration */
+ __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
+ if (rss_obj->udp_rss_v4)
+ __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
+ if (rss_obj->udp_rss_v6)
+ __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
+ } else {
+ __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
+ }
/* Hash bits */
params.rss_result_mask = MULTI_MASK;
@@ -2007,11 +2011,14 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
if (config_hash) {
/* RSS keys */
- prandom_bytes(params.rss_key, sizeof(params.rss_key));
+ prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
}
- return bnx2x_config_rss(bp, &params);
+ if (IS_PF(bp))
+ return bnx2x_config_rss(bp, &params);
+ else
+ return bnx2x_vfpf_config_rss(bp, &params);
}
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
@@ -2066,7 +2073,11 @@ void bnx2x_squeeze_objects(struct bnx2x *bp)
rparam.mcast_obj = &bp->mcast_obj;
__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
- /* Add a DEL command... */
+ /* Add a DEL command... - Since we're doing a driver cleanup only,
+ * we take a lock surrounding both the initial send and the CONTs,
+ * as we don't want a true completion to disrupt us in the middle.
+ */
+ netif_addr_lock_bh(bp->dev);
rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
if (rc < 0)
BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
@@ -2078,11 +2089,13 @@ void bnx2x_squeeze_objects(struct bnx2x *bp)
if (rc < 0) {
BNX2X_ERR("Failed to clean multi-cast object: %d\n",
rc);
+ netif_addr_unlock_bh(bp->dev);
return;
}
rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
}
+ netif_addr_unlock_bh(bp->dev);
}
#ifndef BNX2X_STOP_ON_ERROR
@@ -2438,9 +2451,7 @@ int bnx2x_load_cnic(struct bnx2x *bp)
}
/* Initialize Rx filter. */
- netif_addr_lock_bh(bp->dev);
- bnx2x_set_rx_mode(bp->dev);
- netif_addr_unlock_bh(bp->dev);
+ bnx2x_set_rx_mode_inner(bp);
/* re-read iscsi info */
bnx2x_get_iscsi_info(bp);
@@ -2647,38 +2658,32 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* initialize FW coalescing state machines in RAM */
bnx2x_update_coalesce(bp);
+ }
- /* setup the leading queue */
- rc = bnx2x_setup_leading(bp);
- if (rc) {
- BNX2X_ERR("Setup leading failed!\n");
- LOAD_ERROR_EXIT(bp, load_error3);
- }
-
- /* set up the rest of the queues */
- for_each_nondefault_eth_queue(bp, i) {
- rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
- if (rc) {
- BNX2X_ERR("Queue setup failed\n");
- LOAD_ERROR_EXIT(bp, load_error3);
- }
- }
+ /* setup the leading queue */
+ rc = bnx2x_setup_leading(bp);
+ if (rc) {
+ BNX2X_ERR("Setup leading failed!\n");
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
- /* setup rss */
- rc = bnx2x_init_rss_pf(bp);
+ /* set up the rest of the queues */
+ for_each_nondefault_eth_queue(bp, i) {
+ if (IS_PF(bp))
+ rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
+ else /* VF */
+ rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
if (rc) {
- BNX2X_ERR("PF RSS init failed\n");
+ BNX2X_ERR("Queue %d setup failed\n", i);
LOAD_ERROR_EXIT(bp, load_error3);
}
+ }
- } else { /* vf */
- for_each_eth_queue(bp, i) {
- rc = bnx2x_vfpf_setup_q(bp, i);
- if (rc) {
- BNX2X_ERR("Queue setup failed\n");
- LOAD_ERROR_EXIT(bp, load_error3);
- }
- }
+ /* setup rss */
+ rc = bnx2x_init_rss(bp);
+ if (rc) {
+ BNX2X_ERR("PF RSS init failed\n");
+ LOAD_ERROR_EXIT(bp, load_error3);
}
/* Now when Clients are configured we are ready to work */
@@ -2710,9 +2715,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Start fast path */
/* Initialize Rx filter. */
- netif_addr_lock_bh(bp->dev);
- bnx2x_set_rx_mode(bp->dev);
- netif_addr_unlock_bh(bp->dev);
+ bnx2x_set_rx_mode_inner(bp);
/* Start the Tx */
switch (load_mode) {
@@ -4789,6 +4792,11 @@ int bnx2x_resume(struct pci_dev *pdev)
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
u32 cid)
{
+ if (!cxt) {
+ BNX2X_ERR("bad context pointer %p\n", cxt);
+ return;
+ }
+
/* ustorm cxt validation */
cxt->ustorm_ag_context.cdu_usage =
CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index c07a6d054cfe..da8fcaa74495 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -51,8 +51,7 @@ extern int int_mode;
#define BNX2X_PCI_ALLOC(x, y, size) \
do { \
- x = dma_alloc_coherent(&bp->pdev->dev, size, y, \
- GFP_KERNEL | __GFP_ZERO); \
+ x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
if (x == NULL) \
goto alloc_mem_err; \
DP(NETIF_MSG_HW, "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
@@ -106,9 +105,10 @@ void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link);
* @rss_obj: RSS object to use
* @ind_table: indirection table to configure
* @config_hash: re-configure RSS hash keys configuration
+ * @enable: enabled or disabled configuration
*/
-int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
- bool config_hash);
+int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
+ bool config_hash, bool enable);
/**
* bnx2x__init_func_obj - init function object
@@ -418,6 +418,7 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);
* netif_addr_lock_bh()
*/
void bnx2x_set_rx_mode(struct net_device *dev);
+void bnx2x_set_rx_mode_inner(struct bnx2x *bp);
/**
* bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
@@ -980,7 +981,7 @@ static inline int func_by_vn(struct bnx2x *bp, int vn)
static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash)
{
- return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, config_hash);
+ return bnx2x_rss(bp, &bp->rss_conf_obj, config_hash, true);
}
/**
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index c5f225101684..2612e3c715d4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -3281,14 +3281,14 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
DP(BNX2X_MSG_ETHTOOL,
"rss re-configured, UDP 4-tupple %s\n",
udp_rss_requested ? "enabled" : "disabled");
- return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
+ return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
} else if ((info->flow_type == UDP_V6_FLOW) &&
(bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
DP(BNX2X_MSG_ETHTOOL,
"rss re-configured, UDP 4-tupple %s\n",
udp_rss_requested ? "enabled" : "disabled");
- return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
+ return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
}
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 9d64b988ab34..664568420c9b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6501,12 +6501,13 @@ static int bnx2x_link_initialize(struct link_params *params,
struct bnx2x_phy *phy = &params->phy[INT_PHY];
if (vars->line_speed == SPEED_AUTO_NEG &&
(CHIP_IS_E1x(bp) ||
- CHIP_IS_E2(bp)))
+ CHIP_IS_E2(bp))) {
bnx2x_set_parallel_detection(phy, params);
if (params->phy[INT_PHY].config_init)
params->phy[INT_PHY].config_init(phy,
params,
vars);
+ }
}
/* Init external phy*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 815f2dea6337..634a793c1c46 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -6893,7 +6893,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
- REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
+
if (!CHIP_REV_IS_SLOW(bp))
/* enable hw interrupt from doorbell Q */
REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
@@ -8063,7 +8063,10 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
int bnx2x_setup_leading(struct bnx2x *bp)
{
- return bnx2x_setup_queue(bp, &bp->fp[0], 1);
+ if (IS_PF(bp))
+ return bnx2x_setup_queue(bp, &bp->fp[0], true);
+ else /* VF */
+ return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true);
}
/**
@@ -8077,8 +8080,10 @@ int bnx2x_set_int_mode(struct bnx2x *bp)
{
int rc = 0;
- if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX)
+ if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) {
+ BNX2X_ERR("VF not loaded since interrupt mode not msix\n");
return -EINVAL;
+ }
switch (int_mode) {
case BNX2X_INT_MODE_MSIX:
@@ -9647,11 +9652,9 @@ sp_rtnl_not_reset:
}
}
- if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
- &bp->sp_rtnl_state)) {
- DP(BNX2X_MSG_SP,
- "sending set storm rx mode vf pf channel message from rtnl sp-task\n");
- bnx2x_vfpf_storm_rx_mode(bp);
+ if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
+ DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n");
+ bnx2x_set_rx_mode_inner(bp);
}
if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
@@ -11649,9 +11652,11 @@ static int bnx2x_init_bp(struct bnx2x *bp)
* second status block for the L2 queue, and a third status block for
* CNIC if supported.
*/
- if (CNIC_SUPPORT(bp))
+ if (IS_VF(bp))
+ bp->min_msix_vec_cnt = 1;
+ else if (CNIC_SUPPORT(bp))
bp->min_msix_vec_cnt = 3;
- else
+ else /* PF w/o cnic */
bp->min_msix_vec_cnt = 2;
BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
@@ -11868,34 +11873,48 @@ static int bnx2x_set_mc_list(struct bnx2x *bp)
void bnx2x_set_rx_mode(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
- u32 rx_mode = BNX2X_RX_MODE_NORMAL;
if (bp->state != BNX2X_STATE_OPEN) {
DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
return;
+ } else {
+ /* Schedule an SP task to handle rest of change */
+ DP(NETIF_MSG_IFUP, "Scheduling an Rx mode change\n");
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
+}
+
+void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
+{
+ u32 rx_mode = BNX2X_RX_MODE_NORMAL;
DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
- if (dev->flags & IFF_PROMISC)
+ netif_addr_lock_bh(bp->dev);
+
+ if (bp->dev->flags & IFF_PROMISC) {
rx_mode = BNX2X_RX_MODE_PROMISC;
- else if ((dev->flags & IFF_ALLMULTI) ||
- ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
- CHIP_IS_E1(bp)))
+ } else if ((bp->dev->flags & IFF_ALLMULTI) ||
+ ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
+ CHIP_IS_E1(bp))) {
rx_mode = BNX2X_RX_MODE_ALLMULTI;
- else {
+ } else {
if (IS_PF(bp)) {
/* some multicasts */
if (bnx2x_set_mc_list(bp) < 0)
rx_mode = BNX2X_RX_MODE_ALLMULTI;
+ /* release bh lock, as bnx2x_set_uc_list might sleep */
+ netif_addr_unlock_bh(bp->dev);
if (bnx2x_set_uc_list(bp) < 0)
rx_mode = BNX2X_RX_MODE_PROMISC;
+ netif_addr_lock_bh(bp->dev);
} else {
/* configuring mcast to a vf involves sleeping (when we
- * wait for the pf's response). Since this function is
- * called from non sleepable context we must schedule
- * a work item for this purpose
+ * wait for the pf's response).
*/
smp_mb__before_clear_bit();
set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
@@ -11913,22 +11932,20 @@ void bnx2x_set_rx_mode(struct net_device *dev)
/* Schedule the rx_mode command */
if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
+ netif_addr_unlock_bh(bp->dev);
return;
}
if (IS_PF(bp)) {
bnx2x_set_storm_rx_mode(bp);
+ netif_addr_unlock_bh(bp->dev);
} else {
- /* configuring rx mode to storms in a vf involves sleeping (when
- * we wait for the pf's response). Since this function is
- * called from non sleepable context we must schedule
- * a work item for this purpose
+ /* VF will need to request the PF to make this change, and so
+ * the VF needs to release the bottom-half lock prior to the
+ * request (as it will likely require sleep on the VF side)
*/
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
- &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ netif_addr_unlock_bh(bp->dev);
+ bnx2x_vfpf_storm_rx_mode(bp);
}
}
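
The rx-mode rework above keeps ndo_set_rx_mode() atomic-safe by merely flagging a request and punting the sleepable part to the sp-rtnl work item. A hedged sketch of that deferral pattern (demo_* names are illustrative; the barrier helpers are the pre-3.18 spellings this tree uses):

#include <linux/bitops.h>
#include <linux/workqueue.h>

#define DEMO_RX_MODE_PENDING	0	/* hypothetical state bit */

/* Callable from BH/atomic context: record the request and kick the
 * worker; the worker itself may sleep while talking to the firmware.
 */
static void demo_request_rx_mode(unsigned long *state,
				 struct delayed_work *task)
{
	smp_mb__before_clear_bit();
	set_bit(DEMO_RX_MODE_PENDING, state);
	smp_mb__after_clear_bit();
	schedule_delayed_work(task, 0);
}
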
@@ -12550,19 +12567,16 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
* @dev: pci device
*
*/
-static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
- int cnic_cnt, bool is_vf)
+static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
{
- int pos, index;
+ int index;
u16 control = 0;
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
-
/*
* If MSI-X is not supported - return number of SBs needed to support
* one fast path queue: one FP queue + SB for CNIC
*/
- if (!pos) {
+ if (!pdev->msix_cap) {
dev_info(&pdev->dev, "no msix capability found\n");
return 1 + cnic_cnt;
}
@@ -12575,11 +12589,11 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
* without the default SB.
* For VFs there is no default SB, then we return (index+1).
*/
- pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
+ pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control);
index = control & PCI_MSIX_FLAGS_QSIZE;
- return is_vf ? index + 1 : index;
+ return index;
}
static int set_max_cos_est(int chip_id)
@@ -12659,10 +12673,13 @@ static int bnx2x_init_one(struct pci_dev *pdev,
is_vf = set_is_vf(ent->driver_data);
cnic_cnt = is_vf ? 0 : 1;
- max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt, is_vf);
+ max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
+
+ /* add another SB for VF as it has no default SB */
+ max_non_def_sbs += is_vf ? 1 : 0;
/* Maximum number of RSS queues: one IGU SB goes to CNIC */
- rss_count = is_vf ? 1 : max_non_def_sbs - cnic_cnt;
+ rss_count = max_non_def_sbs - cnic_cnt;
if (rss_count < 1)
return -EINVAL;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 8e627b886d7b..5ecf267dc4cc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -6335,6 +6335,7 @@
#define PCI_ID_VAL2 0x438
#define PCI_ID_VAL3 0x43c
+#define GRC_CONFIG_REG_VF_MSIX_CONTROL 0x61C
#define GRC_CONFIG_REG_PF_INIT_VF 0x624
#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xf
/* First VF_NUM for PF is encoded in this register.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 8f03c984550f..9fbeee522d2c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -159,16 +159,6 @@ static inline void __bnx2x_exe_queue_reset_pending(
}
}
-static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
- struct bnx2x_exe_queue_obj *o)
-{
- spin_lock_bh(&o->lock);
-
- __bnx2x_exe_queue_reset_pending(bp, o);
-
- spin_unlock_bh(&o->lock);
-}
-
/**
* bnx2x_exe_queue_step - execute one execution chunk atomically
*
@@ -176,7 +166,7 @@ static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
* @o: queue
* @ramrod_flags: flags
*
- * (Atomicity is ensured using the exe_queue->lock).
+ * (Should be called while holding the exe_queue->lock).
*/
static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
struct bnx2x_exe_queue_obj *o,
@@ -187,8 +177,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
memset(&spacer, 0, sizeof(spacer));
- spin_lock_bh(&o->lock);
-
/* Next step should not be performed until the current is finished,
* unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
* properly clear object internals without sending any command to the FW
@@ -200,7 +188,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
__bnx2x_exe_queue_reset_pending(bp, o);
} else {
- spin_unlock_bh(&o->lock);
return 1;
}
}
@@ -228,10 +215,8 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
}
/* Sanity check */
- if (!cur_len) {
- spin_unlock_bh(&o->lock);
+ if (!cur_len)
return 0;
- }
rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
if (rc < 0)
@@ -245,7 +230,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
*/
__bnx2x_exe_queue_reset_pending(bp, o);
- spin_unlock_bh(&o->lock);
return rc;
}
@@ -432,12 +416,219 @@ static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
return true;
}
+/**
+ * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Non-blocking implementation; should be called under execution
+ * queue lock.
+ */
+static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ if (o->head_reader) {
+ DP(BNX2X_MSG_SP, "vlan_mac_lock writer - There are readers; Busy\n");
+ return -EBUSY;
+ }
+
+ DP(BNX2X_MSG_SP, "vlan_mac_lock writer - Taken\n");
+ return 0;
+}
+
+/**
+ * __bnx2x_vlan_mac_h_exec_pending - execute a step that was pended earlier
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Should be called under execution queue lock; notice it might release
+ * and reclaim it during its run.
+ */
+static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ int rc;
+ unsigned long ramrod_flags = o->saved_ramrod_flags;
+
+ DP(BNX2X_MSG_SP, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
+ ramrod_flags);
+ o->head_exe_request = false;
+ o->saved_ramrod_flags = 0;
+ rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags);
+ if (rc != 0) {
+ BNX2X_ERR("execution of pending commands failed with rc %d\n",
+ rc);
+#ifdef BNX2X_STOP_ON_ERROR
+ bnx2x_panic();
+#endif
+ }
+}
+
+/**
+ * __bnx2x_vlan_mac_h_pend - Pend an execution step which couldn't run
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ * @ramrod_flags: ramrod flags of missed execution
+ *
+ * @details Should be called under execution queue lock.
+ */
+static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ unsigned long ramrod_flags)
+{
+ o->head_exe_request = true;
+ o->saved_ramrod_flags = ramrod_flags;
+ DP(BNX2X_MSG_SP, "Placing pending execution with ramrod flags %lu\n",
+ ramrod_flags);
+}
+
+/**
+ * __bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Should be called under execution queue lock. Notice if a pending
+ * execution exists, it would perform it - possibly releasing and
+ * reclaiming the execution queue lock.
+ */
+static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ /* It's possible a new pending execution was added since this writer
+ * executed. If so, execute again. [Ad infinitum]
+ */
+ while (o->head_exe_request) {
+ DP(BNX2X_MSG_SP, "vlan_mac_lock - writer release encountered a pending request\n");
+ __bnx2x_vlan_mac_h_exec_pending(bp, o);
+ }
+}
+
+/**
+ * bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Notice if a pending execution exists, it would perform it -
+ * possibly releasing and reclaiming the execution queue lock.
+ */
+void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ spin_lock_bh(&o->exe_queue.lock);
+ __bnx2x_vlan_mac_h_write_unlock(bp, o);
+ spin_unlock_bh(&o->exe_queue.lock);
+}
+
+/**
+ * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Should be called under the execution queue lock. May sleep. May
+ * release and reclaim execution queue lock during its run.
+ */
+static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ /* If we got here, we're holding lock --> no WRITER exists */
+ o->head_reader++;
+ DP(BNX2X_MSG_SP, "vlan_mac_lock - locked reader - number %d\n",
+ o->head_reader);
+
+ return 0;
+}
+
+/**
+ * bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details May sleep. Claims and releases execution queue lock during its run.
+ */
+int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ int rc;
+
+ spin_lock_bh(&o->exe_queue.lock);
+ rc = __bnx2x_vlan_mac_h_read_lock(bp, o);
+ spin_unlock_bh(&o->exe_queue.lock);
+
+ return rc;
+}
+
+/**
+ * __bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Should be called under execution queue lock. Notice if a pending
+ * execution exists, it would be performed if this was the last
+ * reader, possibly releasing and reclaiming the execution queue lock.
+ */
+static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ if (!o->head_reader) {
+ BNX2X_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
+#ifdef BNX2X_STOP_ON_ERROR
+ bnx2x_panic();
+#endif
+ } else {
+ o->head_reader--;
+ DP(BNX2X_MSG_SP, "vlan_mac_lock - decreased readers to %d\n",
+ o->head_reader);
+ }
+
+ /* It's possible a new pending execution was added, and that this reader
+ * was last - if so we need to execute the command.
+ */
+ if (!o->head_reader && o->head_exe_request) {
+ DP(BNX2X_MSG_SP, "vlan_mac_lock - reader release encountered a pending request\n");
+
+ /* Writer release will do the trick */
+ __bnx2x_vlan_mac_h_write_unlock(bp, o);
+ }
+}
+
+/**
+ * bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Notice if a pending execution exists, it would be performed if this
+ * was the last reader. Claims and releases the execution queue lock
+ * during its run.
+ */
+void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ spin_lock_bh(&o->exe_queue.lock);
+ __bnx2x_vlan_mac_h_read_unlock(bp, o);
+ spin_unlock_bh(&o->exe_queue.lock);
+}
+
static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
int n, u8 *base, u8 stride, u8 size)
{
struct bnx2x_vlan_mac_registry_elem *pos;
u8 *next = base;
int counter = 0;
+ int read_lock;
+
+ DP(BNX2X_MSG_SP, "get_n_elements - taking vlan_mac_lock (reader)\n");
+ read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
+ if (read_lock != 0)
+ BNX2X_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");
/* traverse list */
list_for_each_entry(pos, &o->head, link) {
@@ -449,6 +640,12 @@ static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
next += stride + size;
}
}
+
+ if (read_lock == 0) {
+ DP(BNX2X_MSG_SP, "get_n_elements - releasing vlan_mac_lock (reader)\n");
+ bnx2x_vlan_mac_h_read_unlock(bp, o);
+ }
+
return counter * ETH_ALEN;
}
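
bnx2x_get_n_elements() above is the first consumer of the new lock; as I read it, the calling discipline reduces to the sketch below: readers nest under the exe-queue spinlock and bump a count, while a blocked writer parks its step and is replayed by the last unlock (demo_* name is illustrative):

static int demo_walk_registry(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o)
{
	int rc;

	rc = bnx2x_vlan_mac_h_read_lock(bp, o);
	if (rc)
		return rc;

	/* ... safe to traverse o->head here; writers are held off ... */

	bnx2x_vlan_mac_h_read_unlock(bp, o);	/* may replay a pended step */
	return 0;
}
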
@@ -1397,6 +1594,32 @@ static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
return -EBUSY;
}
+static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ unsigned long *ramrod_flags)
+{
+ int rc = 0;
+
+ spin_lock_bh(&o->exe_queue.lock);
+
+ DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n");
+ rc = __bnx2x_vlan_mac_h_write_trylock(bp, o);
+
+ if (rc != 0) {
+ __bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags);
+
+ /* The calling function should not differentiate between this case
+ * and the case in which there is already a pending ramrod
+ */
+ rc = 1;
+ } else {
+ rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+ }
+ spin_unlock_bh(&o->exe_queue.lock);
+
+ return rc;
+}
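
__bnx2x_vlan_mac_execute_step() folds the writer trylock and the pend path into a single call so every caller gets identical semantics: when the head list is busy, the ramrod flags are saved and the step is deferred, reported as rc == 1 exactly like an already-pending ramrod. A standalone model of that decision, under the assumption that the trylock fails whenever readers hold the list or a request is already pended:

#include <stdio.h>

struct exec_state {
        int head_reader;              /* readers currently walking the list */
        int head_exe_request;         /* a step is already pended */
        unsigned long saved_ramrod_flags;
};

/* returns 0 when the step ran now, 1 when it was pended for later */
static int model_execute_step(struct exec_state *s, unsigned long flags)
{
        if (s->head_reader || s->head_exe_request) {  /* write trylock fails */
                s->head_exe_request = 1;
                s->saved_ramrod_flags = flags;
                return 1;
        }
        printf("running exe queue step, flags %#lx\n", flags);
        return 0;
}

int main(void)
{
        struct exec_state s = { 1, 0, 0 };               /* one reader active */

        printf("rc=%d\n", model_execute_step(&s, 0x4));  /* pended: rc=1 */
        s.head_reader = 0;
        s.head_exe_request = 0;
        printf("rc=%d\n", model_execute_step(&s, 0x4));  /* runs: rc=0 */
        return 0;
}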
+
/**
* bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
*
@@ -1414,19 +1637,27 @@ static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
struct bnx2x_raw_obj *r = &o->raw;
int rc;
+ /* Clearing the pending list & raw state should be made
+ * atomically (as execution flow assumes they represent the same).
+ */
+ spin_lock_bh(&o->exe_queue.lock);
+
/* Reset pending list */
- bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
+ __bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
/* Clear pending */
r->clear_pending(r);
+ spin_unlock_bh(&o->exe_queue.lock);
+
/* If ramrod failed this is most likely a SW bug */
if (cqe->message.error)
return -EINVAL;
/* Run the next bulk of pending commands if requested */
if (test_bit(RAMROD_CONT, ramrod_flags)) {
- rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+ rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags);
+
if (rc < 0)
return rc;
}
@@ -1719,9 +1950,8 @@ static inline int bnx2x_vlan_mac_push_new_cmd(
* @p: vlan/mac ramrod parameters
*
*/
-int bnx2x_config_vlan_mac(
- struct bnx2x *bp,
- struct bnx2x_vlan_mac_ramrod_params *p)
+int bnx2x_config_vlan_mac(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_ramrod_params *p)
{
int rc = 0;
struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
@@ -1752,7 +1982,8 @@ int bnx2x_config_vlan_mac(
/* Execute commands if required */
if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
- rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+ rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj,
+ &p->ramrod_flags);
if (rc < 0)
return rc;
}
@@ -1775,8 +2006,9 @@ int bnx2x_config_vlan_mac(
return rc;
/* Make a next step */
- rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
- ramrod_flags);
+ rc = __bnx2x_vlan_mac_execute_step(bp,
+ p->vlan_mac_obj,
+ &p->ramrod_flags);
if (rc < 0)
return rc;
}
@@ -1806,10 +2038,11 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
unsigned long *ramrod_flags)
{
struct bnx2x_vlan_mac_registry_elem *pos = NULL;
- int rc = 0;
struct bnx2x_vlan_mac_ramrod_params p;
struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
+ int read_lock;
+ int rc = 0;
/* Clear pending commands first */
@@ -1844,6 +2077,11 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
__clear_bit(RAMROD_EXEC, &p.ramrod_flags);
__clear_bit(RAMROD_CONT, &p.ramrod_flags);
+ DP(BNX2X_MSG_SP, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
+ read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
+ if (read_lock != 0)
+ return read_lock;
+
list_for_each_entry(pos, &o->head, link) {
if (pos->vlan_mac_flags == *vlan_mac_flags) {
p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
@@ -1851,11 +2089,15 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
rc = bnx2x_config_vlan_mac(bp, &p);
if (rc < 0) {
BNX2X_ERR("Failed to add a new DEL command\n");
+ bnx2x_vlan_mac_h_read_unlock(bp, o);
return rc;
}
}
}
+ DP(BNX2X_MSG_SP, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
+ bnx2x_vlan_mac_h_read_unlock(bp, o);
+
p.ramrod_flags = *ramrod_flags;
__set_bit(RAMROD_CONT, &p.ramrod_flags);
@@ -1887,6 +2129,9 @@ static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
struct bnx2x_credit_pool_obj *vlans_pool)
{
INIT_LIST_HEAD(&o->head);
+ o->head_reader = 0;
+ o->head_exe_request = false;
+ o->saved_ramrod_flags = 0;
o->macs_pool = macs_pool;
o->vlans_pool = vlans_pool;
@@ -4171,6 +4416,16 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp,
rss_obj->config_rss = bnx2x_setup_rss;
}
+int validate_vlan_mac(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *vlan_mac)
+{
+ if (!vlan_mac->get_n_elements) {
+ BNX2X_ERR("vlan mac object was not intialized\n");
+ return -EINVAL;
+ }
+ return 0;
+}
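
validate_vlan_mac() guards against ndo and mailbox paths racing VF object initialization by probing a function pointer that is only set once the object has been initialized. The guard in isolation (illustrative names; -EINVAL written out as -22 to stay standalone):

#include <stdio.h>

struct vlan_mac_obj {
        /* non-NULL only after the object was initialized */
        int (*get_n_elements)(void);
};

static int validate(const struct vlan_mac_obj *o)
{
        if (!o->get_n_elements)
                return -22;   /* -EINVAL: object not yet initialized */
        return 0;
}

int main(void)
{
        struct vlan_mac_obj o = { 0 };

        printf("before init: %d\n", validate(&o));   /* -22 */
        return 0;
}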
+
/********************** Queue state object ***********************************/
/**
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 798dfe996733..658f4e33abf9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -285,6 +285,12 @@ struct bnx2x_vlan_mac_obj {
* entries.
*/
struct list_head head;
+ /* Implement a simple reader/writer lock on the head list.
+ * All these fields should only be accessed under the exe_queue lock.
+ */
+ u8 head_reader; /* Num. of readers accessing head list */
+ bool head_exe_request; /* Pending execution request. */
+ unsigned long saved_ramrod_flags; /* Ramrods of pending execution */
/* TODO: Add its initialization in the init functions */
struct bnx2x_exe_queue_obj exe_queue;
@@ -1302,8 +1308,16 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
struct bnx2x_credit_pool_obj *macs_pool,
struct bnx2x_credit_pool_obj *vlans_pool);
+int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o);
+void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o);
+int bnx2x_vlan_mac_h_write_lock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o);
+void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o);
int bnx2x_config_vlan_mac(struct bnx2x *bp,
- struct bnx2x_vlan_mac_ramrod_params *p);
+ struct bnx2x_vlan_mac_ramrod_params *p);
int bnx2x_vlan_mac_move(struct bnx2x *bp,
struct bnx2x_vlan_mac_ramrod_params *p,
@@ -1393,4 +1407,6 @@ int bnx2x_config_rss(struct bnx2x *bp,
void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
u8 *ind_table);
+int validate_vlan_mac(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *vlan_mac);
#endif /* BNX2X_SP_VERBS */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index e8706e19f96f..b26eb83069b6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -170,6 +170,11 @@ enum bnx2x_vfop_qteardown_state {
BNX2X_VFOP_QTEARDOWN_DONE
};
+enum bnx2x_vfop_rss_state {
+ BNX2X_VFOP_RSS_CONFIG,
+ BNX2X_VFOP_RSS_DONE
+};
+
#define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0)
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -265,11 +270,6 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
- if (vfq_is_leading(q)) {
- __set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags);
- __set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
- }
-
/* Setup-op rx parameters */
if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;
@@ -398,7 +398,11 @@ static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
BNX2X_Q_LOGICAL_STATE_STOPPED) {
DP(BNX2X_MSG_IOV,
"Entered qdtor but queue was already stopped. Aborting gracefully\n");
- goto op_done;
+
+ /* next state */
+ vfop->state = BNX2X_VFOP_QDTOR_DONE;
+
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
}
/* next state */
@@ -432,8 +436,10 @@ op_err:
op_done:
case BNX2X_VFOP_QDTOR_DONE:
/* invalidate the context */
- qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
- qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
+ if (qdtor->cxt) {
+ qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
+ qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
+ }
bnx2x_vfop_end(bp, vf, vfop);
return;
default:
@@ -465,7 +471,8 @@ static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
cmd->block);
}
- DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop.\n", vf->abs_vfid);
+ DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop. rc %d\n",
+ vf->abs_vfid, vfop->rc);
return -ENOMEM;
}
@@ -474,10 +481,18 @@ bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
if (vf) {
+ /* the first igu entry belonging to VFs of this PF */
+ if (!BP_VFDB(bp)->first_vf_igu_entry)
+ BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;
+
+ /* the first igu entry belonging to this VF */
if (!vf_sb_count(vf))
vf->igu_base_id = igu_sb_id;
+
++vf_sb_count(vf);
+ ++vf->sb_count;
}
+ BP_VFDB(bp)->vf_sbs_pool++;
}
/* VFOP MAC/VLAN helpers */
@@ -491,12 +506,20 @@ static inline void bnx2x_vfop_credit(struct bnx2x *bp,
* and a valid credit counter
*/
if (!vfop->rc && args->credit) {
- int cnt = 0;
struct list_head *pos;
+ int read_lock;
+ int cnt = 0;
+
+ read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
+ if (read_lock)
+ DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
list_for_each(pos, &obj->head)
cnt++;
+ if (!read_lock)
+ bnx2x_vlan_mac_h_read_unlock(bp, obj);
+
atomic_set(args->credit, cnt);
}
}
@@ -692,6 +715,7 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
int qid, bool drv_only)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ int rc;
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
@@ -711,6 +735,9 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
/* set object */
+ rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
+ if (rc)
+ return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
/* set extra args */
@@ -731,6 +758,7 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
int qid, bool drv_only)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ int rc;
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
@@ -753,6 +781,9 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
/* set object */
+ rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
+ if (rc)
+ return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
/* set extra args */
@@ -773,6 +804,7 @@ int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
int qid, u16 vid, bool add)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ int rc;
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
@@ -793,6 +825,9 @@ int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
ramrod->user_req.u.vlan.vlan = vid;
/* set object */
+ rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
+ if (rc)
+ return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
/* set extra args */
@@ -812,6 +847,7 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
int qid, bool drv_only)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ int rc;
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
@@ -831,6 +867,9 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
/* set object */
+ rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
+ if (rc)
+ return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
/* set extra args */
@@ -851,6 +890,7 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
int qid, bool drv_only)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ int rc;
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
@@ -870,6 +910,9 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
/* set object */
+ rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
+ if (rc)
+ return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
/* set extra args */
@@ -980,21 +1023,25 @@ static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
case BNX2X_VFOP_QFLR_CLR_VLAN:
/* vlan-clear-all: driver-only, don't consume credit */
vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
- vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true);
+ if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)))
+ vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid,
+ true);
if (vfop->rc)
goto op_err;
- return;
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
case BNX2X_VFOP_QFLR_CLR_MAC:
/* mac-clear-all: driver only consume credit */
vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
- vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true);
+ if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)))
+ vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid,
+ true);
DP(BNX2X_MSG_IOV,
"VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d",
vf->abs_vfid, vfop->rc);
if (vfop->rc)
goto op_err;
- return;
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
case BNX2X_VFOP_QFLR_TERMINATE:
qstate = &vfop->op_p->qctor.qstate;
@@ -1291,10 +1338,13 @@ int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ /* for non-leading queues skip directly to the qdown state */
if (vfop) {
vfop->args.qx.qid = qid;
- bnx2x_vfop_opset(BNX2X_VFOP_QTEARDOWN_RXMODE,
- bnx2x_vfop_qdown, cmd->done);
+ bnx2x_vfop_opset(qid == LEADING_IDX ?
+ BNX2X_VFOP_QTEARDOWN_RXMODE :
+ BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown,
+ cmd->done);
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
cmd->block);
}
@@ -1447,15 +1497,16 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
* both known
*/
static void
-bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
+bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
+ struct vf_pf_resc_request *resc = &vf->alloc_resc;
u16 vlan_count = 0;
/* will be set only during VF-ACQUIRE */
resc->num_rxqs = 0;
resc->num_txqs = 0;
- /* no credit calculcis for macs (just yet) */
+ /* no credit calculations for macs (just yet) */
resc->num_mac_filters = 1;
/* divvy up vlan rules */
@@ -1467,13 +1518,14 @@ bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
resc->num_mc_filters = 0;
/* num_sbs already set */
+ resc->num_sbs = vf->sb_count;
}
/* FLR routines: */
static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
/* reset the state variables */
- bnx2x_iov_static_resc(bp, &vf->alloc_resc);
+ bnx2x_iov_static_resc(bp, vf);
vf->state = VF_FREE;
}
@@ -1693,8 +1745,7 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)
/* The VF doorbell size 0 - *B, 4 - 128B. We set it here to match
* the PF doorbell size although the two are independent.
*/
- REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
- BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);
+ REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);
/* No security checks for now -
* configure single rule (out of 16) mask = 0x1, value = 0x0,
@@ -1761,7 +1812,7 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
int sb_id;
u32 val;
- u8 fid;
+ u8 fid, current_pf = 0;
/* IGU in normal mode - read CAM */
for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
@@ -1769,16 +1820,18 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
continue;
fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
- if (!(fid & IGU_FID_ENCODE_IS_PF))
+ if (fid & IGU_FID_ENCODE_IS_PF)
+ current_pf = fid & IGU_FID_PF_NUM_MASK;
+ else if (current_pf == BP_ABS_FUNC(bp))
bnx2x_vf_set_igu_info(bp, sb_id,
(fid & IGU_FID_VF_NUM_MASK));
-
DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
(fid & IGU_FID_VF_NUM_MASK)), sb_id,
GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
}
+ DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
}
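
The rework above walks the IGU CAM once, remembering which PF the current run of entries belongs to and claiming only the VF entries that fall under this PF. A standalone illustration of the per-entry decode; the mask and bit values below are placeholders for the sketch, the real ones are the IGU_FID_* definitions in bnx2x_reg.h:

#include <stdint.h>
#include <stdio.h>

#define FID_IS_PF    (1u << 6)  /* placeholder for IGU_FID_ENCODE_IS_PF */
#define FID_PF_MASK  0x07u      /* placeholder for IGU_FID_PF_NUM_MASK */
#define FID_VF_MASK  0x3fu      /* placeholder for IGU_FID_VF_NUM_MASK */

int main(void)
{
        /* PF 1's range holds VFs 5 and 6; PF 2's range holds VF 9 */
        uint8_t entries[] = { FID_IS_PF | 1, 5, 6, FID_IS_PF | 2, 9 };
        unsigned int this_pf = 1, current_pf = 0;

        for (unsigned int i = 0; i < sizeof(entries); i++) {
                uint8_t fid = entries[i];

                if (fid & FID_IS_PF)
                        current_pf = fid & FID_PF_MASK;  /* new PF range */
                else if (current_pf == this_pf)
                        printf("sb %u -> VF %u of this PF\n",
                               i, fid & FID_VF_MASK);
        }
        return 0;   /* prints sb 1 -> VF 5, sb 2 -> VF 6; VF 9 is skipped */
}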
static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
@@ -1844,23 +1897,11 @@ static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
return 0;
}
-static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
-{
- int i;
- u8 queue_count = 0;
-
- if (IS_SRIOV(bp))
- for_each_vf(bp, i)
- queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);
-
- return queue_count;
-}
-
/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
- int num_vfs_param)
+ int num_vfs_param)
{
- int err, i, qcount;
+ int err, i;
struct bnx2x_sriov *iov;
struct pci_dev *dev = bp->pdev;
@@ -1958,12 +1999,13 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
bnx2x_get_vf_igu_cam_info(bp);
- /* get the total queue count and allocate the global queue arrays */
- qcount = bnx2x_iov_get_max_queue_count(bp);
-
/* allocate the queue arrays for all VFs */
- bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
- GFP_KERNEL);
+ bp->vfdb->vfqs = kzalloc(
+ BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
+ GFP_KERNEL);
+
+ DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs);
+
if (!bp->vfdb->vfqs) {
BNX2X_ERR("failed to allocate vf queue array\n");
err = -ENOMEM;
@@ -2084,49 +2126,14 @@ static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
q_type);
DP(BNX2X_MSG_IOV,
- "initialized vf %d's queue object. func id set to %d\n",
- vf->abs_vfid, q->sp_obj.func_id);
-
- /* mac/vlan objects are per queue, but only those
- * that belong to the leading queue are initialized
- */
- if (vfq_is_leading(q)) {
- /* mac */
- bnx2x_init_mac_obj(bp, &q->mac_obj,
- cl_id, q->cid, func_id,
- bnx2x_vf_sp(bp, vf, mac_rdata),
- bnx2x_vf_sp_map(bp, vf, mac_rdata),
- BNX2X_FILTER_MAC_PENDING,
- &vf->filter_state,
- BNX2X_OBJ_TYPE_RX_TX,
- &bp->macs_pool);
- /* vlan */
- bnx2x_init_vlan_obj(bp, &q->vlan_obj,
- cl_id, q->cid, func_id,
- bnx2x_vf_sp(bp, vf, vlan_rdata),
- bnx2x_vf_sp_map(bp, vf, vlan_rdata),
- BNX2X_FILTER_VLAN_PENDING,
- &vf->filter_state,
- BNX2X_OBJ_TYPE_RX_TX,
- &bp->vlans_pool);
-
- /* mcast */
- bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
- q->cid, func_id, func_id,
- bnx2x_vf_sp(bp, vf, mcast_rdata),
- bnx2x_vf_sp_map(bp, vf, mcast_rdata),
- BNX2X_FILTER_MCAST_PENDING,
- &vf->filter_state,
- BNX2X_OBJ_TYPE_RX_TX);
-
- vf->leading_rss = cl_id;
- }
+ "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
+ vf->abs_vfid, q->sp_obj.func_id, q->cid);
}
/* called by bnx2x_nic_load */
int bnx2x_iov_nic_init(struct bnx2x *bp)
{
- int vfid, qcount, i;
+ int vfid;
if (!IS_SRIOV(bp)) {
DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
@@ -2155,7 +2162,7 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
/* init statically provisioned resources */
- bnx2x_iov_static_resc(bp, &vf->alloc_resc);
+ bnx2x_iov_static_resc(bp, vf);
/* queues are initialized during VF-ACQUIRE */
@@ -2191,13 +2198,12 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
}
/* Final VF init */
- qcount = 0;
- for_each_vf(bp, i) {
- struct bnx2x_virtf *vf = BP_VF(bp, i);
+ for_each_vf(bp, vfid) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vfid);
/* fill in the BDF and bars */
- vf->bus = bnx2x_vf_bus(bp, i);
- vf->devfn = bnx2x_vf_devfn(bp, i);
+ vf->bus = bnx2x_vf_bus(bp, vfid);
+ vf->devfn = bnx2x_vf_devfn(bp, vfid);
bnx2x_vf_set_bars(bp, vf);
DP(BNX2X_MSG_IOV,
@@ -2206,10 +2212,6 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
(unsigned)vf->bars[0].bar, vf->bars[0].size,
(unsigned)vf->bars[1].bar, vf->bars[1].size,
(unsigned)vf->bars[2].bar, vf->bars[2].size);
-
- /* set local queue arrays */
- vf->vfqs = &bp->vfdb->vfqs[qcount];
- qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
}
return 0;
@@ -2515,6 +2517,9 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
for_each_vfq(vf, j) {
struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
+ dma_addr_t q_stats_addr =
+ vf->fw_stat_map + j * vf->stats_stride;
+
/* collect stats from active queues only */
if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
BNX2X_Q_LOGICAL_STATE_STOPPED)
@@ -2522,13 +2527,13 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
/* create stats query entry for this queue */
cur_query_entry->kind = STATS_TYPE_QUEUE;
- cur_query_entry->index = vfq_cl_id(vf, rxq);
+ cur_query_entry->index = vfq_stat_id(vf, rxq);
cur_query_entry->funcID =
cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
cur_query_entry->address.hi =
- cpu_to_le32(U64_HI(vf->fw_stat_map));
+ cpu_to_le32(U64_HI(q_stats_addr));
cur_query_entry->address.lo =
- cpu_to_le32(U64_LO(vf->fw_stat_map));
+ cpu_to_le32(U64_LO(q_stats_addr));
DP(BNX2X_MSG_IOV,
"added address %x %x for vf %d queue %d client %d\n",
cur_query_entry->address.hi,
@@ -2537,6 +2542,10 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
cur_query_entry++;
cur_data_offset += sizeof(struct per_queue_stats);
stats_count++;
+
+ /* all stats are coalesced to the leading queue */
+ if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
+ break;
}
}
bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
@@ -2555,6 +2564,11 @@ void bnx2x_iov_sp_task(struct bnx2x *bp)
for_each_vf(bp, i) {
struct bnx2x_virtf *vf = BP_VF(bp, i);
+ if (!vf) {
+ BNX2X_ERR("VF was null! skipping...\n");
+ continue;
+ }
+
if (!list_empty(&vf->op_list_head) &&
atomic_read(&vf->op_in_progress)) {
DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
@@ -2702,7 +2716,7 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_queue *q = vfq_get(vf, i);
if (!q) {
- DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i);
+ BNX2X_ERR("q number %d was not allocated\n", i);
return -EINVAL;
}
@@ -2930,6 +2944,43 @@ op_done:
bnx2x_vfop_end(bp, vf, vfop);
}
+static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ enum bnx2x_vfop_rss_state state;
+
+ if (!vfop) {
+ BNX2X_ERR("vfop was null\n");
+ return;
+ }
+
+ state = vfop->state;
+ bnx2x_vfop_reset_wq(vf);
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ switch (state) {
+ case BNX2X_VFOP_RSS_CONFIG:
+ /* next state */
+ vfop->state = BNX2X_VFOP_RSS_DONE;
+ bnx2x_config_rss(bp, &vfop->op_p->rss);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+op_err:
+ BNX2X_ERR("RSS error: rc %d\n", vfop->rc);
+op_done:
+ case BNX2X_VFOP_RSS_DONE:
+ bnx2x_vfop_end(bp, vf, vfop);
+ return;
+ default:
+ bnx2x_vfop_default(state);
+ }
+op_pending:
+ return;
+}
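
bnx2x_vfop_rss() follows the driver's vfop convention: bnx2x_vfop_finalize() is a macro that jumps to the op_err/op_done/op_pending labels interleaved with the switch cases, so a synchronously completed command falls straight into the next state's case. The same two-state machine modeled standalone, with an explicit loop in place of the label jumps (names are illustrative):

#include <stdio.h>

enum rss_state { RSS_CONFIG, RSS_DONE };

int main(void)
{
        enum rss_state state = RSS_CONFIG;
        int rc = 0, running = 1;

        while (running) {
                switch (state) {
                case RSS_CONFIG:
                        printf("issuing RSS config ramrod\n");
                        state = RSS_DONE;  /* next state */
                        break;             /* VFOP_DONE: continue at once */
                case RSS_DONE:
                        printf("op done, rc=%d\n", rc);
                        running = 0;       /* bnx2x_vfop_end() */
                        break;
                }
        }
        return 0;
}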
+
int bnx2x_vfop_release_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd)
@@ -2944,6 +2995,21 @@ int bnx2x_vfop_release_cmd(struct bnx2x *bp,
return -ENOMEM;
}
+int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss,
+ cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
/* VF release ~ VF close + VF release-resources
* Release is the ultimate SW shutdown and is called whenever an
* irrecoverable error is encountered.
@@ -2955,6 +3021,8 @@ void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
.block = block,
};
int rc;
+
+ DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
@@ -2983,6 +3051,12 @@ static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
enum channel_tlvs tlv)
{
+ /* we don't lock the channel for unsupported tlvs */
+ if (!bnx2x_tlv_supported(tlv)) {
+ BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
+ return;
+ }
+
/* lock the channel */
mutex_lock(&vf->op_mutex);
@@ -2997,19 +3071,32 @@ void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
enum channel_tlvs expected_tlv)
{
+ enum channel_tlvs current_tlv;
+
+ if (!vf) {
+ BNX2X_ERR("VF was %p\n", vf);
+ return;
+ }
+
+ current_tlv = vf->op_current;
+
+ /* we don't unlock the channel for unsupported tlvs */
+ if (!bnx2x_tlv_supported(expected_tlv))
+ return;
+
WARN(expected_tlv != vf->op_current,
"lock mismatch: expected %d found %d", expected_tlv,
vf->op_current);
+ /* clear the locking op */
+ vf->op_current = CHANNEL_TLV_NONE;
+
/* unlock the channel */
mutex_unlock(&vf->op_mutex);
/* log the unlock */
DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
vf->abs_vfid, vf->op_current);
-
- /* record the locking op */
- vf->op_current = CHANNEL_TLV_NONE;
}
int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
@@ -3040,11 +3127,77 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
return bnx2x_enable_sriov(bp);
}
}
+#define IGU_ENTRY_SIZE 4
int bnx2x_enable_sriov(struct bnx2x *bp)
{
int rc = 0, req_vfs = bp->requested_nr_virtfn;
+ int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
+ u32 igu_entry, address;
+ u16 num_vf_queues;
+
+ if (req_vfs == 0)
+ return 0;
+
+ first_vf = bp->vfdb->sriov.first_vf_in_pf;
+
+ /* statically distribute vf sb pool between VFs */
+ num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
+ BP_VFDB(bp)->vf_sbs_pool / req_vfs);
+
+ /* zero previous values learned from igu cam */
+ for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+
+ vf->sb_count = 0;
+ vf_sb_count(BP_VF(bp, vf_idx)) = 0;
+ }
+ bp->vfdb->vf_sbs_pool = 0;
+
+ /* prepare IGU cam */
+ sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
+ address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
+ for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
+ for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
+ igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
+ vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
+ IGU_REG_MAPPING_MEMORY_VALID;
+ DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
+ sb_idx, vf_idx);
+ REG_WR(bp, address, igu_entry);
+ sb_idx++;
+ address += IGU_ENTRY_SIZE;
+ }
+ }
+
+ /* Reinitialize vf database according to igu cam */
+ bnx2x_get_vf_igu_cam_info(bp);
+
+ DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
+ BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);
+
+ qcount = 0;
+ for_each_vf(bp, vf_idx) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+
+ /* set local queue arrays */
+ vf->vfqs = &bp->vfdb->vfqs[qcount];
+ qcount += vf_sb_count(vf);
+ }
+ /* prepare msix vectors in VF configuration space */
+ for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
+ REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
+ num_vf_queues);
+ }
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+ /* enable sriov. This will probe all the VFs, and consequently cause
+ * the "acquire" messages to appear on the VF PF channel.
+ */
+ DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
+ pci_disable_sriov(bp->pdev);
rc = pci_enable_sriov(bp->pdev, req_vfs);
if (rc) {
BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
@@ -3072,9 +3225,8 @@ void bnx2x_disable_sriov(struct bnx2x *bp)
pci_disable_sriov(bp->pdev);
}
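
For reference, the CAM programming loop in bnx2x_enable_sriov() above composes each entry from the VF function id, the MSI-X vector index, and a valid bit. A standalone sketch of the composition; the shift and valid-bit values below are placeholders, the real ones are the IGU_REG_MAPPING_MEMORY_* definitions:

#include <stdint.h>
#include <stdio.h>

#define FID_SHIFT   0u          /* placeholder for ..._FID_SHIFT */
#define VEC_SHIFT   7u          /* placeholder for ..._VECTOR_SHIFT */
#define VALID_BIT   (1u << 13)  /* placeholder for ..._VALID */

static uint32_t igu_entry(uint32_t vf_idx, uint32_t vector)
{
        return vf_idx << FID_SHIFT | vector << VEC_SHIFT | VALID_BIT;
}

int main(void)
{
        /* two queues for VF 3 -> two consecutive CAM words */
        for (uint32_t vec = 0; vec < 2; vec++)
                printf("entry %u: 0x%08x\n", (unsigned int)vec,
                       (unsigned int)igu_entry(3, vec));
        return 0;
}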
-static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
- struct bnx2x_virtf **vf,
- struct pf_vf_bulletin_content **bulletin)
+int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, struct bnx2x_virtf **vf,
+ struct pf_vf_bulletin_content **bulletin)
{
if (bp->state != BNX2X_STATE_OPEN) {
BNX2X_ERR("vf ndo called though PF is down\n");
@@ -3097,7 +3249,13 @@ static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
*bulletin = BP_VF_BULLETIN(bp, vfidx);
if (!*vf) {
- BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n",
+ BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n",
+ vfidx);
+ return -EINVAL;
+ }
+
+ if (!(*vf)->vfqs) {
+ BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n",
vfidx);
return -EINVAL;
}
@@ -3125,8 +3283,8 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
if (rc)
return rc;
- mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
- vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
+ mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
+ vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
if (!mac_obj || !vlan_obj) {
BNX2X_ERR("VF partially initialized\n");
return -EINVAL;
@@ -3138,10 +3296,13 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
ivi->spoofchk = 1; /*always enabled */
if (vf->state == VF_ENABLED) {
/* mac and vlan are in vlan_mac objects */
- mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
- 0, ETH_ALEN);
- vlan_obj->get_n_elements(bp, vlan_obj, 1, (u8 *)&ivi->vlan,
- 0, VLAN_HLEN);
+ if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)))
+ mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
+ 0, ETH_ALEN);
+ if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj)))
+ vlan_obj->get_n_elements(bp, vlan_obj, 1,
+ (u8 *)&ivi->vlan, 0,
+ VLAN_HLEN);
} else {
/* mac */
if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
@@ -3209,14 +3370,18 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
return rc;
}
- /* is vf initialized and queue set up? */
q_logical_state =
- bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
+ bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
if (vf->state == VF_ENABLED &&
q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
/* configure the mac in device on this vf's queue */
unsigned long ramrod_flags = 0;
- struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
+ struct bnx2x_vlan_mac_obj *mac_obj =
+ &bnx2x_leading_vfq(vf, mac_obj);
+
+ rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
+ if (rc)
+ return rc;
/* must lock vfpf channel to protect against vf flows */
bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
@@ -3276,18 +3441,21 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
/* is vf initialized and queue set up? */
q_logical_state =
- bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
+ bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
if (vf->state == VF_ENABLED &&
q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
/* configure the vlan in device on this vf's queue */
unsigned long ramrod_flags = 0;
unsigned long vlan_mac_flags = 0;
struct bnx2x_vlan_mac_obj *vlan_obj =
- &bnx2x_vfq(vf, 0, vlan_obj);
+ &bnx2x_leading_vfq(vf, vlan_obj);
struct bnx2x_vlan_mac_ramrod_params ramrod_param;
struct bnx2x_queue_state_params q_params = {NULL};
struct bnx2x_queue_update_params *update_params;
+ rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
+ if (rc)
+ return rc;
memset(&ramrod_param, 0, sizeof(ramrod_param));
/* must lock vfpf channel to protect against vf flows */
@@ -3307,7 +3475,7 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
*/
__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
q_params.cmd = BNX2X_Q_CMD_UPDATE;
- q_params.q_obj = &bnx2x_vfq(vf, 0, sp_obj);
+ q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
update_params = &q_params.params.update;
__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
&update_params->update_flags);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index d143a7cdbbbe..2a8c1dc65d9c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -81,6 +81,7 @@ struct bnx2x_vf_queue {
u32 cid;
u16 index;
u16 sb_idx;
+ bool is_leading;
};
/* struct bnx2x_vfop_qctor_params - prepare queue construction parameters:
@@ -194,6 +195,7 @@ struct bnx2x_virtf {
#define VF_CFG_INT_SIMD 0x0008
#define VF_CACHE_LINE 0x0010
#define VF_CFG_VLAN 0x0020
+#define VF_CFG_STATS_COALESCE 0x0040
u8 state;
#define VF_FREE 0 /* VF ready to be acquired holds no resc */
@@ -213,6 +215,7 @@ struct bnx2x_virtf {
/* dma */
dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */
+ u16 stats_stride;
dma_addr_t spq_map;
dma_addr_t bulletin_map;
@@ -239,7 +242,10 @@ struct bnx2x_virtf {
u8 igu_base_id; /* base igu status block id */
struct bnx2x_vf_queue *vfqs;
-#define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var)
+#define LEADING_IDX 0
+#define bnx2x_vfq_is_leading(vfq) ((vfq)->index == LEADING_IDX)
+#define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var)
+#define bnx2x_leading_vfq(vf, var) ((vf)->vfqs[LEADING_IDX].var)
u8 index; /* index in the vf array */
u8 abs_vfid;
@@ -358,6 +364,10 @@ struct bnx2x_vf_sp {
struct client_init_ramrod_data init_data;
struct client_update_ramrod_data update_data;
} q_data;
+
+ union {
+ struct eth_rss_update_ramrod_data e2;
+ } rss_rdata;
};
struct hw_dma {
@@ -403,6 +413,10 @@ struct bnx2x_vfdb {
#define FLRD_VFS_DWORDS (BNX2X_MAX_NUM_OF_VFS / 32)
u32 flrd_vfs[FLRD_VFS_DWORDS];
+
+ /* the number of msix vectors belonging to this PF designated for VFs */
+ u16 vf_sbs_pool;
+ u16 first_vf_igu_entry;
};
/* queue access */
@@ -411,11 +425,6 @@ static inline struct bnx2x_vf_queue *vfq_get(struct bnx2x_virtf *vf, u8 index)
return &(vf->vfqs[index]);
}
-static inline bool vfq_is_leading(struct bnx2x_vf_queue *vfq)
-{
- return (vfq->index == 0);
-}
-
/* FW ids */
static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx)
{
@@ -434,7 +443,10 @@ static u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
{
- return vfq_cl_id(vf, q);
+ if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
+ return vf->leading_rss;
+ else
+ return vfq_cl_id(vf, q);
}
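
With VF_CFG_STATS_COALESCE set, every queue of a VF reports under the leading RSS client id, which is what lets bnx2x_iov_adjust_stats_req() earlier in this patch stop after a single query entry per VF. The selection in isolation (the constant mirrors the VF_CFG_STATS_COALESCE definition above):

#include <stdio.h>

#define CFG_STATS_COALESCE 0x0040  /* mirrors VF_CFG_STATS_COALESCE */

static int stat_id(int cfg_flags, int leading_rss, int queue_cl_id)
{
        if (cfg_flags & CFG_STATS_COALESCE)
                return leading_rss;   /* all queues share one stats id */
        return queue_cl_id;           /* legacy: per-queue client id */
}

int main(void)
{
        printf("q0: %d, q1: %d\n",
               stat_id(CFG_STATS_COALESCE, 10, 10),
               stat_id(CFG_STATS_COALESCE, 10, 11));  /* both print 10 */
        return 0;
}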
static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
@@ -691,6 +703,10 @@ int bnx2x_vfop_release_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd);
+int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd);
+
/* VF release ~ VF close + VF release-resources
*
* Release is the ultimate SW shutdown and is called whenever an
@@ -730,9 +746,12 @@ int bnx2x_vfpf_release(struct bnx2x *bp);
int bnx2x_vfpf_release(struct bnx2x *bp);
int bnx2x_vfpf_init(struct bnx2x *bp);
void bnx2x_vfpf_close_vf(struct bnx2x *bp);
-int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx);
+int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ bool is_leading);
int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set);
+int bnx2x_vfpf_config_rss(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *params);
int bnx2x_vfpf_set_mcast(struct net_device *dev);
int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp);
@@ -758,7 +777,7 @@ int bnx2x_enable_sriov(struct bnx2x *bp);
void bnx2x_disable_sriov(struct bnx2x *bp);
static inline int bnx2x_vf_headroom(struct bnx2x *bp)
{
- return bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF;
+ return bp->vfdb->sriov.nr_virtfn * BNX2X_CIDS_PER_VF;
}
void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
@@ -793,7 +812,7 @@ static inline int bnx2x_vfpf_acquire(struct bnx2x *bp,
static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; }
static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}
-static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) {return 0; }
+static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; }
static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; }
static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr,
u8 vf_qid, bool set) {return 0; }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 2088063151d6..6cfb88732452 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -257,17 +257,23 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
/* humble our request */
req->resc_request.num_txqs =
- bp->acquire_resp.resc.num_txqs;
+ min(req->resc_request.num_txqs,
+ bp->acquire_resp.resc.num_txqs);
req->resc_request.num_rxqs =
- bp->acquire_resp.resc.num_rxqs;
+ min(req->resc_request.num_rxqs,
+ bp->acquire_resp.resc.num_rxqs);
req->resc_request.num_sbs =
- bp->acquire_resp.resc.num_sbs;
+ min(req->resc_request.num_sbs,
+ bp->acquire_resp.resc.num_sbs);
req->resc_request.num_mac_filters =
- bp->acquire_resp.resc.num_mac_filters;
+ min(req->resc_request.num_mac_filters,
+ bp->acquire_resp.resc.num_mac_filters);
req->resc_request.num_vlan_filters =
- bp->acquire_resp.resc.num_vlan_filters;
+ min(req->resc_request.num_vlan_filters,
+ bp->acquire_resp.resc.num_vlan_filters);
req->resc_request.num_mc_filters =
- bp->acquire_resp.resc.num_mc_filters;
+ min(req->resc_request.num_mc_filters,
+ bp->acquire_resp.resc.num_mc_filters);
/* Clear response buffer */
memset(&bp->vf2pf_mbox->resp, 0,
@@ -293,7 +299,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
bp->common.flash_size = 0;
bp->flags |=
NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
- bp->igu_sb_cnt = 1;
+ bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
sizeof(bp->fw_ver));
@@ -373,6 +379,8 @@ int bnx2x_vfpf_init(struct bnx2x *bp)
req->stats_addr = bp->fw_stats_data_mapping +
offsetof(struct bnx2x_fw_stats_data, queue_stats);
+ req->stats_stride = sizeof(struct per_queue_stats);
+
/* add list termination tlv */
bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -452,12 +460,60 @@ free_irq:
bnx2x_free_irq(bp);
}
+static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_queue *q)
+{
+ u8 cl_id = vfq_cl_id(vf, q);
+ u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
+
+ /* mac */
+ bnx2x_init_mac_obj(bp, &q->mac_obj,
+ cl_id, q->cid, func_id,
+ bnx2x_vf_sp(bp, vf, mac_rdata),
+ bnx2x_vf_sp_map(bp, vf, mac_rdata),
+ BNX2X_FILTER_MAC_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX,
+ &bp->macs_pool);
+ /* vlan */
+ bnx2x_init_vlan_obj(bp, &q->vlan_obj,
+ cl_id, q->cid, func_id,
+ bnx2x_vf_sp(bp, vf, vlan_rdata),
+ bnx2x_vf_sp_map(bp, vf, vlan_rdata),
+ BNX2X_FILTER_VLAN_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX,
+ &bp->vlans_pool);
+
+ /* mcast */
+ bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
+ q->cid, func_id, func_id,
+ bnx2x_vf_sp(bp, vf, mcast_rdata),
+ bnx2x_vf_sp_map(bp, vf, mcast_rdata),
+ BNX2X_FILTER_MCAST_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX);
+
+ /* rss */
+ bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid,
+ func_id, func_id,
+ bnx2x_vf_sp(bp, vf, rss_rdata),
+ bnx2x_vf_sp_map(bp, vf, rss_rdata),
+ BNX2X_FILTER_RSS_CONF_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX);
+
+ vf->leading_rss = cl_id;
+ q->is_leading = true;
+}
+
/* ask the pf to open a queue for the vf */
-int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
+int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ bool is_leading)
{
struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
- struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
+ u8 fp_idx = fp->index;
u16 tpa_agg_size = 0, flags = 0;
int rc;
@@ -473,6 +529,9 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
tpa_agg_size = TPA_AGG_SIZE;
}
+ if (is_leading)
+ flags |= VFPF_QUEUE_FLG_LEADING_RSS;
+
/* calculate queue flags */
flags |= VFPF_QUEUE_FLG_STATS;
flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
@@ -646,6 +705,71 @@ out:
return 0;
}
+/* request the PF to configure the RSS table for the VF's queues */
+int bnx2x_vfpf_config_rss(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *params)
+{
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss;
+ int rc = 0;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS,
+ sizeof(*req));
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ memcpy(req->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
+ memcpy(req->rss_key, params->rss_key, sizeof(params->rss_key));
+ req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
+ req->rss_key_size = T_ETH_RSS_KEY;
+ req->rss_result_mask = params->rss_result_mask;
+
+ /* flags handled individually for backward/forward compatibility */
+ if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED))
+ req->rss_flags |= VFPF_RSS_MODE_DISABLED;
+ if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR))
+ req->rss_flags |= VFPF_RSS_MODE_REGULAR;
+ if (params->rss_flags & (1 << BNX2X_RSS_SET_SRCH))
+ req->rss_flags |= VFPF_RSS_SET_SRCH;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV4))
+ req->rss_flags |= VFPF_RSS_IPV4;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV4_TCP))
+ req->rss_flags |= VFPF_RSS_IPV4_TCP;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV4_UDP))
+ req->rss_flags |= VFPF_RSS_IPV4_UDP;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV6))
+ req->rss_flags |= VFPF_RSS_IPV6;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV6_TCP))
+ req->rss_flags |= VFPF_RSS_IPV6_TCP;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV6_UDP))
+ req->rss_flags |= VFPF_RSS_IPV6_UDP;
+
+ DP(BNX2X_MSG_IOV, "rss flags %x\n", req->rss_flags);
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ /* send message to pf */
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc) {
+ BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
+ goto out;
+ }
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("failed to send rss message to PF over Vf PF channel %d\n",
+ resp->hdr.status);
+ rc = -EINVAL;
+ }
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+ return rc;
+}
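
The translation above deliberately maps each driver-internal BNX2X_RSS_* bit onto its wire-format VFPF_RSS_* bit one by one, so either side can renumber its internal enum without breaking the channel. A standalone model; the internal bit positions are assumptions for this sketch, while the wire values mirror the VFPF_RSS_* definitions added later in the patch:

#include <stdint.h>
#include <stdio.h>

/* internal enum bit positions: assumed for illustration only */
enum { RSS_MODE_REGULAR = 1, RSS_IPV4 = 3, RSS_IPV4_TCP = 4 };

/* wire bits, as in the vfpf_rss_tlv definitions below */
#define WIRE_RSS_MODE_REGULAR (1u << 1)
#define WIRE_RSS_IPV4         (1u << 3)
#define WIRE_RSS_IPV4_TCP     (1u << 4)

static uint32_t rss_flags_to_wire(unsigned long internal)
{
        uint32_t wire = 0;

        if (internal & (1ul << RSS_MODE_REGULAR))
                wire |= WIRE_RSS_MODE_REGULAR;
        if (internal & (1ul << RSS_IPV4))
                wire |= WIRE_RSS_IPV4;
        if (internal & (1ul << RSS_IPV4_TCP))
                wire |= WIRE_RSS_IPV4_TCP;
        return wire;
}

int main(void)
{
        unsigned long in = 1ul << RSS_MODE_REGULAR | 1ul << RSS_IPV4;

        printf("wire rss_flags = 0x%x\n",
               (unsigned int)rss_flags_to_wire(in));
        return 0;
}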
+
int bnx2x_vfpf_set_mcast(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -948,7 +1072,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
/* fill in pfdev info */
resp->pfdev_info.chip_num = bp->common.chip_id;
- resp->pfdev_info.db_size = (1 << BNX2X_DB_SHIFT);
+ resp->pfdev_info.db_size = bp->db_size;
resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
/* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
@@ -1054,8 +1178,13 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
/* record ghost addresses from vf message */
vf->spq_map = init->spq_addr;
vf->fw_stat_map = init->stats_addr;
+ vf->stats_stride = init->stats_stride;
vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
+ /* set VF multiqueue statistics collection mode */
+ if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
+ vf->cfg_flags |= VF_CFG_STATS_COALESCE;
+
/* response */
bnx2x_vf_mbx_resp(bp, vf);
}
@@ -1080,6 +1209,8 @@ static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
__set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
__set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS)
+ __set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags);
/* outer vlan removal is set according to PF's multi function mode */
if (IS_MF_SD(bp))
@@ -1113,6 +1244,9 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_queue_init_params *init_p;
struct bnx2x_queue_setup_params *setup_p;
+ if (bnx2x_vfq_is_leading(q))
+ bnx2x_leading_vfq_init(bp, vf, q);
+
/* re-init the VF operation context */
memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor));
setup_p = &vf->op_params.qctor.prep_qsetup;
@@ -1552,6 +1686,68 @@ static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
bnx2x_vf_mbx_resp(bp, vf);
}
+static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vf_mbx_resp,
+ .block = false,
+ };
+ struct bnx2x_config_rss_params *vf_op_params = &vf->op_params.rss;
+ struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
+
+ if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
+ rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
+ BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
+ vf->index);
+ vf->op_rc = -EINVAL;
+ goto mbx_resp;
+ }
+
+ /* set vfop params according to rss tlv */
+ memcpy(vf_op_params->ind_table, rss_tlv->ind_table,
+ T_ETH_INDIRECTION_TABLE_SIZE);
+ memcpy(vf_op_params->rss_key, rss_tlv->rss_key,
+ sizeof(rss_tlv->rss_key));
+ vf_op_params->rss_obj = &vf->rss_conf_obj;
+ vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
+
+ /* flags handled individually for backward/forward compatibility */
+ if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
+ __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
+ __set_bit(BNX2X_RSS_MODE_REGULAR, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
+ __set_bit(BNX2X_RSS_SET_SRCH, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
+ __set_bit(BNX2X_RSS_IPV4, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
+ __set_bit(BNX2X_RSS_IPV4_TCP, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
+ __set_bit(BNX2X_RSS_IPV4_UDP, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
+ __set_bit(BNX2X_RSS_IPV6, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
+ __set_bit(BNX2X_RSS_IPV6_TCP, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
+ __set_bit(BNX2X_RSS_IPV6_UDP, &vf_op_params->rss_flags);
+
+ if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
+ rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
+ (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
+ rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
+ BNX2X_ERR("about to hit a FW assert. aborting...\n");
+ vf->op_rc = -EINVAL;
+ goto mbx_resp;
+ }
+
+ vf->op_rc = bnx2x_vfop_rss_cmd(bp, vf, &cmd);
+
+mbx_resp:
+ if (vf->op_rc)
+ bnx2x_vf_mbx_resp(bp, vf);
+}
+
/* dispatch request */
static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
@@ -1588,6 +1784,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
case CHANNEL_TLV_RELEASE:
bnx2x_vf_mbx_release_vf(bp, vf, mbx);
break;
+ case CHANNEL_TLV_UPDATE_RSS:
+ bnx2x_vf_mbx_update_rss(bp, vf, mbx);
+ break;
}
} else {
@@ -1607,7 +1806,7 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
/* test whether we can respond to the VF (do we have an address
* for it?)
*/
- if (vf->state == VF_ACQUIRED) {
+ if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
/* mbx_resp uses the op_rc of the VF */
vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index f3ad174a3a63..1179fe06d0c7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -51,6 +51,7 @@ struct hw_sb_info {
#define VFPF_QUEUE_FLG_COS 0x0080
#define VFPF_QUEUE_FLG_HC 0x0100
#define VFPF_QUEUE_FLG_DHC 0x0200
+#define VFPF_QUEUE_FLG_LEADING_RSS 0x0400
#define VFPF_QUEUE_DROP_IP_CS_ERR (1 << 0)
#define VFPF_QUEUE_DROP_TCP_CS_ERR (1 << 1)
@@ -131,6 +132,27 @@ struct vfpf_q_op_tlv {
u8 padding[3];
};
+/* receive side scaling tlv */
+struct vfpf_rss_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u32 rss_flags;
+#define VFPF_RSS_MODE_DISABLED (1 << 0)
+#define VFPF_RSS_MODE_REGULAR (1 << 1)
+#define VFPF_RSS_SET_SRCH (1 << 2)
+#define VFPF_RSS_IPV4 (1 << 3)
+#define VFPF_RSS_IPV4_TCP (1 << 4)
+#define VFPF_RSS_IPV4_UDP (1 << 5)
+#define VFPF_RSS_IPV6 (1 << 6)
+#define VFPF_RSS_IPV6_TCP (1 << 7)
+#define VFPF_RSS_IPV6_UDP (1 << 8)
+ u8 rss_result_mask;
+ u8 ind_table_size;
+ u8 rss_key_size;
+ u8 padding;
+ u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+ u32 rss_key[T_ETH_RSS_KEY]; /* hash values */
+};
+
/* acquire response tlv - carries the allocated resources */
struct pfvf_acquire_resp_tlv {
struct pfvf_tlv hdr;
@@ -166,12 +188,20 @@ struct pfvf_acquire_resp_tlv {
} resc;
};
+#define VFPF_INIT_FLG_STATS_COALESCE (1 << 0) /* when set the VFs queues
+ * stats will be coalesced on
+ * the leading RSS queue
+ */
+
/* Init VF */
struct vfpf_init_tlv {
struct vfpf_first_tlv first_tlv;
aligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF]; /* vf_sb based */
aligned_u64 spq_addr;
aligned_u64 stats_addr;
+ u16 stats_stride;
+ u32 flags;
+ u32 padding[2];
};
/* Setup Queue */
@@ -293,13 +323,14 @@ union vfpf_tlvs {
struct vfpf_q_op_tlv q_op;
struct vfpf_setup_q_tlv setup_q;
struct vfpf_set_q_filters_tlv set_q_filters;
- struct vfpf_release_tlv release;
- struct channel_list_end_tlv list_end;
+ struct vfpf_release_tlv release;
+ struct vfpf_rss_tlv update_rss;
+ struct channel_list_end_tlv list_end;
struct tlv_buffer_size tlv_buf_size;
};
union pfvf_tlvs {
- struct pfvf_general_resp_tlv general_resp;
+ struct pfvf_general_resp_tlv general_resp;
struct pfvf_acquire_resp_tlv acquire_resp;
struct channel_list_end_tlv list_end;
struct tlv_buffer_size tlv_buf_size;
@@ -355,14 +386,18 @@ enum channel_tlvs {
CHANNEL_TLV_INIT,
CHANNEL_TLV_SETUP_Q,
CHANNEL_TLV_SET_Q_FILTERS,
+ CHANNEL_TLV_ACTIVATE_Q,
+ CHANNEL_TLV_DEACTIVATE_Q,
CHANNEL_TLV_TEARDOWN_Q,
CHANNEL_TLV_CLOSE,
CHANNEL_TLV_RELEASE,
+ CHANNEL_TLV_UPDATE_RSS_DEPRECATED,
CHANNEL_TLV_PF_RELEASE_VF,
CHANNEL_TLV_LIST_END,
CHANNEL_TLV_FLR,
CHANNEL_TLV_PF_SET_MAC,
CHANNEL_TLV_PF_SET_VLAN,
+ CHANNEL_TLV_UPDATE_RSS,
CHANNEL_TLV_MAX
};
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index d78d4cf140ed..8142480d9770 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -1,6 +1,6 @@
/* cnic.c: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2012 Broadcom Corporation
+ * Copyright (c) 2006-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -1184,6 +1184,7 @@ error:
static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
int ctx_blk_size = cp->ethdev->ctx_blk_size;
int total_mem, blks, i;
@@ -1201,7 +1202,7 @@ static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
cp->ctx_blks = blks;
cp->ctx_blk_size = ctx_blk_size;
- if (!BNX2X_CHIP_IS_57710(cp->chip_id))
+ if (!CHIP_IS_E1(bp))
cp->ctx_align = 0;
else
cp->ctx_align = ctx_blk_size;
@@ -1231,6 +1232,7 @@ static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
struct cnic_eth_dev *ethdev = cp->ethdev;
u32 start_cid = ethdev->starting_cid;
int i, j, n, ret, pages;
@@ -1240,7 +1242,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
cp->iscsi_start_cid = start_cid;
cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+ if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
cp->max_cid_space += dev->max_fcoe_conn;
cp->fcoe_init_cid = ethdev->fcoe_init_cid;
if (!cp->fcoe_init_cid)
@@ -1288,7 +1290,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
if (ret)
goto error;
- if (CNIC_SUPPORTS_FCOE(cp)) {
+ if (CNIC_SUPPORTS_FCOE(bp)) {
ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
if (ret)
goto error;
@@ -1382,6 +1384,7 @@ static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
u32 type, union l5cm_specific_data *l5_data)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
struct l5cm_spe kwqe;
struct kwqe_16 *kwq[1];
u16 type_16;
@@ -1389,10 +1392,10 @@ static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
kwqe.hdr.conn_and_cmd_data =
cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
- BNX2X_HW_CID(cp, cid)));
+ BNX2X_HW_CID(bp, cid)));
type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
- type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
+ type_16 |= (bp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
SPE_HDR_FUNCTION_ID;
kwqe.hdr.type = cpu_to_le16(type_16);
@@ -1427,13 +1430,34 @@ static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
rcu_read_unlock();
}
+static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps,
+ int en_tcp_dack)
+{
+ struct bnx2x *bp = netdev_priv(dev->netdev);
+ u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
+ u16 tstorm_flags = 0;
+
+ if (time_stamps) {
+ xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
+ tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
+ }
+ if (en_tcp_dack)
+ tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN;
+
+ CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+ XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), xstorm_flags);
+
+ CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
+ TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), tstorm_flags);
+}
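
cnic_bnx2x_set_tcp_options() replaces the timestamp-only helper removed further down and folds both TCP knobs into the per-storm flag words in a single pass. The flag folding modeled standalone; the flag values are placeholders for the XSTORM/TSTORM_L5CM_TCP_FLAGS_* definitions:

#include <stdint.h>
#include <stdio.h>

#define X_WND_SCL_EN (1u << 0)  /* placeholder flag values */
#define X_TS_ENABLED (1u << 1)
#define T_TS_ENABLED (1u << 0)
#define T_DACK_EN    (1u << 1)

int main(void)
{
        int time_stamps = 1, en_tcp_dack = 1;
        uint8_t  xstorm_flags = X_WND_SCL_EN;  /* window scaling always on */
        uint16_t tstorm_flags = 0;

        if (time_stamps) {
                xstorm_flags |= X_TS_ENABLED;
                tstorm_flags |= T_TS_ENABLED;
        }
        if (en_tcp_dack)
                tstorm_flags |= T_DACK_EN;

        printf("xstorm=0x%02x tstorm=0x%04x\n", xstorm_flags, tstorm_flags);
        return 0;
}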
+
static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct cnic_local *cp = dev->cnic_priv;
struct bnx2x *bp = netdev_priv(dev->netdev);
struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
int hq_bds, pages;
- u32 pfid = cp->pfid;
+ u32 pfid = bp->pfid;
cp->num_iscsi_tasks = req1->num_tasks_per_conn;
cp->num_ccells = req1->num_ccells_per_conn;
@@ -1506,15 +1530,18 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
hq_bds);
+ cnic_bnx2x_set_tcp_options(dev,
+ req1->flags & ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE,
+ req1->flags & ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE);
+
return 0;
}
static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
- struct cnic_local *cp = dev->cnic_priv;
struct bnx2x *bp = netdev_priv(dev->netdev);
- u32 pfid = cp->pfid;
+ u32 pfid = bp->pfid;
struct iscsi_kcqe kcqe;
struct kcqe *cqes[1];
@@ -1653,6 +1680,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
u32 num)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
struct iscsi_kwqe_conn_offload1 *req1 =
(struct iscsi_kwqe_conn_offload1 *) wqes[0];
struct iscsi_kwqe_conn_offload2 *req2 =
@@ -1661,11 +1689,11 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
struct cnic_iscsi *iscsi = ctx->proto.iscsi;
u32 cid = ctx->cid;
- u32 hw_cid = BNX2X_HW_CID(cp, cid);
+ u32 hw_cid = BNX2X_HW_CID(bp, cid);
struct iscsi_context *ictx;
struct regpair context_addr;
int i, j, n = 2, n_max;
- u8 port = CNIC_PORT(cp);
+ u8 port = BP_PORT(bp);
ctx->ctx_flags = 0;
if (!req2->num_additional_wqes)
@@ -1719,8 +1747,8 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
ETH_P_8021Q;
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
- cp->port_mode == CHIP_2_PORT_MODE) {
+ if (BNX2X_CHIP_IS_E2_PLUS(bp) &&
+ bp->common.chip_port_mode == CHIP_2_PORT_MODE) {
port = 0;
}
@@ -1841,6 +1869,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
struct iscsi_kwqe_conn_offload1 *req1;
struct iscsi_kwqe_conn_offload2 *req2;
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
struct cnic_context *ctx;
struct iscsi_kcqe kcqe;
struct kcqe *cqes[1];
@@ -1894,7 +1923,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
}
kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
- kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);
+ kcqe.iscsi_conn_context_id = BNX2X_HW_CID(bp, cp->ctx_tbl[l5_cid].cid);
done:
cqes[0] = (struct kcqe *) &kcqe;
@@ -1930,6 +1959,7 @@ static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
union l5cm_specific_data l5_data;
int ret;
@@ -1938,7 +1968,7 @@ static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
init_waitqueue_head(&ctx->waitq);
ctx->wait_cond = 0;
memset(&l5_data, 0, sizeof(l5_data));
- hw_cid = BNX2X_HW_CID(cp, ctx->cid);
+ hw_cid = BNX2X_HW_CID(bp, ctx->cid);
ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
hw_cid, NONE_CONNECTION_TYPE, &l5_data);
@@ -2035,9 +2065,6 @@ static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
xstorm_buf->pseudo_header_checksum =
swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));
- if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
- tstorm_buf->params |=
- L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
if (kwqe3->ka_timeout) {
tstorm_buf->ka_enable = 1;
tstorm_buf->ka_timeout = kwqe3->ka_timeout;
@@ -2049,9 +2076,8 @@ static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
- struct cnic_local *cp = dev->cnic_priv;
struct bnx2x *bp = netdev_priv(dev->netdev);
- u32 pfid = cp->pfid;
+ u32 pfid = bp->pfid;
u8 *mac = dev->mac_addr;
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
@@ -2084,25 +2110,6 @@ static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
mac[0]);
}
-static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
-{
- struct cnic_local *cp = dev->cnic_priv;
- struct bnx2x *bp = netdev_priv(dev->netdev);
- u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
- u16 tstorm_flags = 0;
-
- if (tcp_ts) {
- xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
- tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
- }
-
- CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
- XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);
-
- CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
- TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
-}
-
static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
u32 num, int *work)
{
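
The timestamp-only cnic_bnx2x_set_tcp_timestamp() removed above is superseded by cnic_bnx2x_set_tcp_options(), which the new call sites (cnic_bnx2x_iscsi_init1() and cnic_cm_init_bnx2x_hw()) pass separate timestamp and delayed-ACK enables. Its exact body is not part of this excerpt; a plausible reconstruction, inferred from the deleted helper, the call signatures, and the TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN bit introduced later in this patch's cnic_defs.h hunk:

	static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps,
					       int en_tcp_dack)
	{
		struct bnx2x *bp = netdev_priv(dev->netdev);
		u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
		u16 tstorm_flags = 0;

		if (time_stamps) {
			xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
			tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		}
		if (en_tcp_dack)
			tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN;

		/* Same storm-memory writes as the deleted helper, keyed off
		 * bp->pfid now that cp->pfid is gone.
		 */
		CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
			 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), xstorm_flags);
		CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
			  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), tstorm_flags);
	}
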
@@ -2176,10 +2183,7 @@ static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);
CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
- XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);
-
- cnic_bnx2x_set_tcp_timestamp(dev,
- kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);
+ XSTORM_ISCSI_LOCAL_VLAN_OFFSET(bp->pfid), csk->vlan_id);
ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
@@ -2248,11 +2252,12 @@ static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
struct fcoe_stat_ramrod_params *fcoe_stat;
union l5cm_specific_data l5_data;
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
int ret;
u32 cid;
req = (struct fcoe_kwqe_stat *) kwqe;
- cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+ cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
if (!fcoe_stat)
@@ -2271,6 +2276,7 @@ static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
{
int ret;
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
u32 cid;
struct fcoe_init_ramrod_params *fcoe_init;
struct fcoe_kwqe_init1 *req1;
@@ -2315,7 +2321,7 @@ static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
cp->kcq2.sw_prod_idx = 0;
- cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+ cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
FCOE_CONNECTION_TYPE, &l5_data);
*work = 3;
@@ -2328,6 +2334,7 @@ static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
int ret = 0;
u32 cid = -1, l5_cid;
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
struct fcoe_kwqe_conn_offload1 *req1;
struct fcoe_kwqe_conn_offload2 *req2;
struct fcoe_kwqe_conn_offload3 *req3;
@@ -2370,7 +2377,7 @@ static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
if (fctx) {
- u32 hw_cid = BNX2X_HW_CID(cp, cid);
+ u32 hw_cid = BNX2X_HW_CID(bp, cid);
u32 val;
val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
@@ -2394,7 +2401,7 @@ static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
- cid = BNX2X_HW_CID(cp, cid);
+ cid = BNX2X_HW_CID(bp, cid);
ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
FCOE_CONNECTION_TYPE, &l5_data);
if (!ret)
@@ -2552,13 +2559,14 @@ static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
struct fcoe_kwqe_destroy *req;
union l5cm_specific_data l5_data;
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
int ret;
u32 cid;
cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
req = (struct fcoe_kwqe_destroy *) kwqe;
- cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+ cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
memset(&l5_data, 0, sizeof(l5_data));
ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
@@ -2715,7 +2723,7 @@ static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
struct kwqe *wqes[], u32 num_wqes)
{
- struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
int i, work, ret;
u32 opcode;
struct kwqe *kwqe;
@@ -2723,7 +2731,7 @@ static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
return -EAGAIN; /* bnx2 is down */
- if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
+ if (!BNX2X_CHIP_IS_E2_PLUS(bp))
return -EINVAL;
for (i = 0; i < num_wqes; ) {
@@ -3039,8 +3047,8 @@ static irqreturn_t cnic_irq(int irq, void *dev_instance)
static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
u16 index, u8 op, u8 update)
{
- struct cnic_local *cp = dev->cnic_priv;
- u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
+ struct bnx2x *bp = netdev_priv(dev->netdev);
+ u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp) * 32 +
COMMAND_REG_INT_ACK);
struct igu_ack_register igu_ack;
@@ -3603,6 +3611,7 @@ static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
csk1->rcv_buf = DEF_RCV_BUF;
csk1->snd_buf = DEF_SND_BUF;
csk1->seed = DEF_SEED;
+ csk1->tcp_flags = 0;
*csk = csk1;
return 0;
@@ -4020,15 +4029,18 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
cnic_cm_upcall(cp, csk, opcode);
break;
- case L5CM_RAMROD_CMD_ID_CLOSE:
- if (l4kcqe->status != 0) {
- netdev_warn(dev->netdev, "RAMROD CLOSE compl with "
- "status 0x%x\n", l4kcqe->status);
+ case L5CM_RAMROD_CMD_ID_CLOSE: {
+ struct iscsi_kcqe *l5kcqe = (struct iscsi_kcqe *) kcqe;
+
+ if (l4kcqe->status != 0 || l5kcqe->completion_status != 0) {
+ netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n",
+ l4kcqe->status, l5kcqe->completion_status);
opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
/* Fall through */
} else {
break;
}
+ }
case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
case L4_KCQE_OPCODE_VALUE_RESET_COMP:
@@ -4213,13 +4225,12 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
{
- struct cnic_local *cp = dev->cnic_priv;
struct bnx2x *bp = netdev_priv(dev->netdev);
- u32 pfid = cp->pfid;
- u32 port = CNIC_PORT(cp);
+ u32 pfid = bp->pfid;
+ u32 port = BP_PORT(bp);
cnic_init_bnx2x_mac(dev);
- cnic_bnx2x_set_tcp_timestamp(dev, 1);
+ cnic_bnx2x_set_tcp_options(dev, 0, 1);
CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
@@ -4897,6 +4908,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
struct client_init_ramrod_data *data)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
struct cnic_uio_dev *udev = cp->udev;
union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
dma_addr_t buf_map, ring_map = udev->l2_ring_map;
@@ -4925,7 +4937,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS;
start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
+ if (BNX2X_CHIP_IS_E2_PLUS(bp))
pbd_e2->parsing_data = (UNICAST_ADDRESS <<
ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
else
@@ -4962,6 +4974,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
struct client_init_ramrod_data *data)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
struct cnic_uio_dev *udev = cp->udev;
struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
BNX2_PAGE_SIZE);
@@ -4970,7 +4983,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
int i;
u32 cli = cp->ethdev->iscsi_l2_client_id;
- int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
+ int cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
u32 val;
dma_addr_t ring_map = udev->l2_ring_map;
@@ -4979,7 +4992,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
data->general.activate_flg = 1;
data->general.sp_client_id = cli;
data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
- data->general.func_id = cp->pfid;
+ data->general.func_id = bp->pfid;
for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
dma_addr_t buf_map;
@@ -5029,13 +5042,13 @@ static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct bnx2x *bp = netdev_priv(dev->netdev);
- u32 pfid = cp->pfid;
+ u32 pfid = bp->pfid;
cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
cp->kcq1.sw_prod_idx = 0;
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+ if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
cp->kcq1.hw_prod_idx_ptr =
@@ -5051,7 +5064,7 @@ static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
&sb->sb.running_index[SM_RX_ID];
}
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+ if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
@@ -5073,12 +5086,10 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
u32 pfid;
dev->stats_addr = ethdev->addr_drv_info_to_mcp;
- cp->port_mode = bp->common.chip_port_mode;
- cp->pfid = bp->pfid;
cp->func = bp->pf_num;
func = CNIC_FUNC(cp);
- pfid = cp->pfid;
+ pfid = bp->pfid;
ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
cp->iscsi_start_cid, 0);
@@ -5086,7 +5097,7 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
if (ret)
return -ENOMEM;
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+ if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
cp->fcoe_start_cid, 0);
@@ -5168,12 +5179,12 @@ static void cnic_init_rings(struct cnic_dev *dev)
rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
barrier();
- cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
+ cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
off = BAR_USTRORM_INTMEM +
- (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ?
+ (BNX2X_CHIP_IS_E2_PLUS(bp) ?
USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
- USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));
+ USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), cli));
for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
@@ -5271,6 +5282,13 @@ static int cnic_register_netdev(struct cnic_dev *dev)
if (err)
netdev_err(dev->netdev, "register_cnic failed\n");
+ /* Read the iSCSI config again. On some bnx2x devices, the iSCSI config
+ * can change after the firmware is downloaded.
+ */
+ dev->max_iscsi_conn = ethdev->max_iscsi_conn;
+ if (ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
+ dev->max_iscsi_conn = 0;
+
return err;
}
@@ -5353,7 +5371,7 @@ static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
cnic_free_irq(dev);
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+ if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
idx_off = offsetof(struct hc_status_block_e2, index_values) +
(hc_index * sizeof(u16));
@@ -5370,7 +5388,7 @@ static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
*cp->kcq1.hw_prod_idx_ptr = 0;
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
- CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
+ CSTORM_ISCSI_EQ_CONS_OFFSET(bp->pfid, 0), 0);
CNIC_WR16(dev, cp->kcq1.io_addr, 0);
cnic_free_resc(dev);
}
@@ -5544,7 +5562,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
- if (CNIC_SUPPORTS_FCOE(cp)) {
+ if (CNIC_SUPPORTS_FCOE(bp)) {
cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges;
}
@@ -5564,7 +5582,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
cp->stop_cm = cnic_cm_stop_bnx2x_hw;
cp->enable_int = cnic_enable_bnx2x_int;
cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+ if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
cp->ack_int = cnic_ack_bnx2x_e2_msix;
cp->arm_int = cnic_arm_bnx2x_e2_msix;
} else {
@@ -5628,7 +5646,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
dev = cnic_from_netdev(netdev);
- if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) {
+ if (!dev && event == NETDEV_REGISTER) {
/* Check for the hot-plug device */
dev = is_cnic_dev(netdev);
if (dev) {
@@ -5644,7 +5662,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
else if (event == NETDEV_UNREGISTER)
cnic_ulp_exit(dev);
- if (event == NETDEV_UP || (new_dev && netif_running(netdev))) {
+ if (event == NETDEV_UP) {
if (cnic_register_netdev(dev) != 0) {
cnic_put(dev);
goto done;
@@ -5693,21 +5711,8 @@ static struct notifier_block cnic_netdev_notifier = {
static void cnic_release(void)
{
- struct cnic_dev *dev;
struct cnic_uio_dev *udev;
- while (!list_empty(&cnic_dev_list)) {
- dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
- if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
- cnic_ulp_stop(dev);
- cnic_stop_hw(dev);
- }
-
- cnic_ulp_exit(dev);
- cnic_unregister_netdev(dev);
- list_del_init(&dev->list);
- cnic_free_dev(dev);
- }
while (!list_empty(&cnic_udev_list)) {
udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
list);
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index 62c670619ae6..0121a5d55192 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -1,6 +1,6 @@
/* cnic.h: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2011 Broadcom Corporation
+ * Copyright (c) 2006-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -303,8 +303,6 @@ struct cnic_local {
u32 chip_id;
int func;
- u32 pfid;
- u8 port_mode;
u32 shmem_base;
@@ -364,47 +362,7 @@ struct bnx2x_bd_chain_next {
#define BNX2X_FCOE_L5_CID_BASE MAX_ISCSI_TBL_SZ
-#define BNX2X_CHIP_NUM_57710 0x164e
-#define BNX2X_CHIP_NUM_57711 0x164f
-#define BNX2X_CHIP_NUM_57711E 0x1650
-#define BNX2X_CHIP_NUM_57712 0x1662
-#define BNX2X_CHIP_NUM_57712E 0x1663
-#define BNX2X_CHIP_NUM_57713 0x1651
-#define BNX2X_CHIP_NUM_57713E 0x1652
-#define BNX2X_CHIP_NUM_57800 0x168a
-#define BNX2X_CHIP_NUM_57810 0x168e
-#define BNX2X_CHIP_NUM_57840 0x168d
-
-#define BNX2X_CHIP_NUM(x) (x >> 16)
-#define BNX2X_CHIP_IS_57710(x) \
- (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57710)
-#define BNX2X_CHIP_IS_57711(x) \
- (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711)
-#define BNX2X_CHIP_IS_57711E(x) \
- (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711E)
-#define BNX2X_CHIP_IS_E1H(x) \
- (BNX2X_CHIP_IS_57711(x) || BNX2X_CHIP_IS_57711E(x))
-#define BNX2X_CHIP_IS_57712(x) \
- (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712)
-#define BNX2X_CHIP_IS_57712E(x) \
- (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712E)
-#define BNX2X_CHIP_IS_57713(x) \
- (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713)
-#define BNX2X_CHIP_IS_57713E(x) \
- (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713E)
-#define BNX2X_CHIP_IS_57800(x) \
- (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57800)
-#define BNX2X_CHIP_IS_57810(x) \
- (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57810)
-#define BNX2X_CHIP_IS_57840(x) \
- (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57840)
-#define BNX2X_CHIP_IS_E2(x) \
- (BNX2X_CHIP_IS_57712(x) || BNX2X_CHIP_IS_57712E(x) || \
- BNX2X_CHIP_IS_57713(x) || BNX2X_CHIP_IS_57713E(x))
-#define BNX2X_CHIP_IS_E3(x) \
- (BNX2X_CHIP_IS_57800(x) || BNX2X_CHIP_IS_57810(x) || \
- BNX2X_CHIP_IS_57840(x))
-#define BNX2X_CHIP_IS_E2_PLUS(x) (BNX2X_CHIP_IS_E2(x) || BNX2X_CHIP_IS_E3(x))
+#define BNX2X_CHIP_IS_E2_PLUS(bp) (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
#define BNX2X_RX_DESC_CNT (BNX2_PAGE_SIZE / \
sizeof(struct eth_rx_bd))
@@ -439,31 +397,26 @@ struct bnx2x_bd_chain_next {
#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H
#endif
-#define CNIC_PORT(cp) ((cp)->pfid & 1)
#define CNIC_FUNC(cp) ((cp)->func)
-#define CNIC_PATH(cp) (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? \
- 0 : (CNIC_FUNC(cp) & 1))
-#define CNIC_E1HVN(cp) ((cp)->pfid >> 1)
-#define BNX2X_HW_CID(cp, x) ((CNIC_PORT(cp) << 23) | \
- (CNIC_E1HVN(cp) << 17) | (x))
+#define BNX2X_HW_CID(bp, x) ((BP_PORT(bp) << 23) | \
+ (BP_VN(bp) << 17) | (x))
#define BNX2X_SW_CID(x) (x & 0x1ffff)
-#define BNX2X_CL_QZONE_ID(cp, cli) \
- (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? cli : \
- cli + (CNIC_PORT(cp) * ETH_MAX_RX_CLIENTS_E1H))
+#define BNX2X_CL_QZONE_ID(bp, cli) \
+ (BNX2X_CHIP_IS_E2_PLUS(bp) ? cli : \
+ cli + (BP_PORT(bp) * ETH_MAX_RX_CLIENTS_E1H))
#ifndef MAX_STAT_COUNTER_ID
#define MAX_STAT_COUNTER_ID \
- (BNX2X_CHIP_IS_E1H((cp)->chip_id) ? MAX_STAT_COUNTER_ID_E1H : \
- ((BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id)) ? MAX_STAT_COUNTER_ID_E2 :\
+ (CHIP_IS_E1H(bp) ? MAX_STAT_COUNTER_ID_E1H : \
+ ((BNX2X_CHIP_IS_E2_PLUS(bp)) ? MAX_STAT_COUNTER_ID_E2 : \
MAX_STAT_COUNTER_ID_E1))
#endif
-#define CNIC_SUPPORTS_FCOE(cp) \
- (BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id) && \
- !((cp)->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
+#define CNIC_SUPPORTS_FCOE(cp) \
+ (BNX2X_CHIP_IS_E2_PLUS(bp) && !NO_FCOE(bp))
#define CNIC_RAMROD_TMO (HZ / 4)
diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h
index ede3db35d757..95a8e4b11c9f 100644
--- a/drivers/net/ethernet/broadcom/cnic_defs.h
+++ b/drivers/net/ethernet/broadcom/cnic_defs.h
@@ -1,7 +1,7 @@
/* cnic.c: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2012 Broadcom Corporation
+ * Copyright (c) 2006-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -5400,8 +5400,8 @@ struct tstorm_l5cm_tcp_flags {
u16 flags;
#define TSTORM_L5CM_TCP_FLAGS_VLAN_ID (0xFFF<<0)
#define TSTORM_L5CM_TCP_FLAGS_VLAN_ID_SHIFT 0
-#define TSTORM_L5CM_TCP_FLAGS_RSRV0 (0x1<<12)
-#define TSTORM_L5CM_TCP_FLAGS_RSRV0_SHIFT 12
+#define TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN (0x1<<12)
+#define TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_SHIFT 12
#define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED (0x1<<13)
#define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED_SHIFT 13
#define TSTORM_L5CM_TCP_FLAGS_RSRV1 (0x3<<14)
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index ec9bb9ad4bb3..0658b43e148c 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -1,6 +1,6 @@
/* cnic_if.h: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2012 Broadcom Corporation
+ * Copyright (c) 2006-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -14,8 +14,8 @@
#include "bnx2x/bnx2x_mfw_req.h"
-#define CNIC_MODULE_VERSION "2.5.16"
-#define CNIC_MODULE_RELDATE "Dec 05, 2012"
+#define CNIC_MODULE_VERSION "2.5.18"
+#define CNIC_MODULE_RELDATE "Sept 01, 2013"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
@@ -238,8 +238,8 @@ struct cnic_sock {
u16 src_port;
u16 dst_port;
u16 vlan_id;
- unsigned char old_ha[6];
- unsigned char ha[6];
+ unsigned char old_ha[ETH_ALEN];
+ unsigned char ha[ETH_ALEN];
u32 mtu;
u32 cid;
u32 l5_cid;
@@ -308,7 +308,7 @@ struct cnic_dev {
#define CNIC_F_BNX2_CLASS 3
#define CNIC_F_BNX2X_CLASS 4
atomic_t ref_count;
- u8 mac_addr[6];
+ u8 mac_addr[ETH_ALEN];
int max_iscsi_conn;
int max_fcoe_conn;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 0da2214ef1b9..5701f3d1a169 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 132
+#define TG3_MIN_NUM 133
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "May 21, 2013"
+#define DRV_MODULE_RELDATE "Jul 29, 2013"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -3030,6 +3030,19 @@ static bool tg3_phy_power_bug(struct tg3 *tp)
return false;
}
+static bool tg3_phy_led_bug(struct tg3 *tp)
+{
+ switch (tg3_asic_rev(tp)) {
+ case ASIC_REV_5719:
+ if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
+ !tp->pci_fn)
+ return true;
+ return false;
+ }
+
+ return false;
+}
+
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
u32 val;
@@ -3077,8 +3090,9 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
}
return;
} else if (do_low_power) {
- tg3_writephy(tp, MII_TG3_EXT_CTRL,
- MII_TG3_EXT_CTRL_FORCE_LED_OFF);
+ if (!tg3_phy_led_bug(tp))
+ tg3_writephy(tp, MII_TG3_EXT_CTRL,
+ MII_TG3_EXT_CTRL_FORCE_LED_OFF);
val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
@@ -4226,8 +4240,6 @@ static int tg3_power_down_prepare(struct tg3 *tp)
static void tg3_power_down(struct tg3 *tp)
{
- tg3_power_down_prepare(tp);
-
pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
pci_set_power_state(tp->pdev, PCI_D3hot);
}
@@ -6095,10 +6107,12 @@ static u64 tg3_refclk_read(struct tg3 *tp)
/* tp->lock must be held */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
- tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
+ u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
+
+ tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
- tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
+ tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
}
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
@@ -6214,6 +6228,59 @@ static int tg3_ptp_settime(struct ptp_clock_info *ptp,
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
+ struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
+ u32 clock_ctl;
+ int rval = 0;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ if (rq->perout.index != 0)
+ return -EINVAL;
+
+ tg3_full_lock(tp, 0);
+ clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
+ clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
+
+ if (on) {
+ u64 nsec;
+
+ nsec = rq->perout.start.sec * 1000000000ULL +
+ rq->perout.start.nsec;
+
+ if (rq->perout.period.sec || rq->perout.period.nsec) {
+ netdev_warn(tp->dev,
+ "Device supports only a one-shot timesync output, period must be 0\n");
+ rval = -EINVAL;
+ goto err_out;
+ }
+
+ if (nsec & (1ULL << 63)) {
+ netdev_warn(tp->dev,
+ "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
+ rval = -EINVAL;
+ goto err_out;
+ }
+
+ tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
+ tw32(TG3_EAV_WATCHDOG0_MSB,
+ TG3_EAV_WATCHDOG0_EN |
+ ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
+
+ tw32(TG3_EAV_REF_CLCK_CTL,
+ clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
+ } else {
+ tw32(TG3_EAV_WATCHDOG0_MSB, 0);
+ tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
+ }
+
+err_out:
+ tg3_full_unlock(tp);
+ return rval;
+
+ default:
+ break;
+ }
+
return -EOPNOTSUPP;
}
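
The new PTP_CLK_REQ_PEROUT branch above accepts only channel 0 and rejects any nonzero period, i.e. the watchdog can arm a single one-shot pulse at an absolute PHC time. A minimal userspace sketch of arming it through the standard PTP character device; the device path, helper name, and error handling are illustrative, not part of this patch:

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/ptp_clock.h>

	/* Arm a one-shot timesync pulse at absolute PHC time sec.nsec.
	 * Returns the open fd (kept open by the caller) or -1 on error.
	 */
	static int arm_one_shot(const char *ptp_dev, long long sec, unsigned int nsec)
	{
		struct ptp_perout_request req;
		int fd = open(ptp_dev, O_RDWR);

		if (fd < 0)
			return -1;

		memset(&req, 0, sizeof(req));
		req.index = 0;		/* tg3 now advertises n_per_out = 1 */
		req.start.sec = sec;	/* must fit in 63 bits, per the check above */
		req.start.nsec = nsec;
		/* req.period stays zero: the driver returns -EINVAL otherwise */

		if (ioctl(fd, PTP_PEROUT_REQUEST, &req) < 0) {
			close(fd);
			return -1;
		}
		return fd;
	}

For example, arm_one_shot("/dev/ptp0", t + 2, 0) would fire two seconds after PHC time t.
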
@@ -6223,7 +6290,7 @@ static const struct ptp_clock_info tg3_ptp_caps = {
.max_adj = 250000000,
.n_alarm = 0,
.n_ext_ts = 0,
- .n_per_out = 0,
+ .n_per_out = 1,
.pps = 0,
.adjfreq = tg3_ptp_adjfreq,
.adjtime = tg3_ptp_adjtime,
@@ -8538,10 +8605,10 @@ static int tg3_mem_rx_acquire(struct tg3 *tp)
if (!i && tg3_flag(tp, ENABLE_RSS))
continue;
- tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
- TG3_RX_RCB_RING_BYTES(tp),
- &tnapi->rx_rcb_mapping,
- GFP_KERNEL | __GFP_ZERO);
+ tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
+ TG3_RX_RCB_RING_BYTES(tp),
+ &tnapi->rx_rcb_mapping,
+ GFP_KERNEL);
if (!tnapi->rx_rcb)
goto err_out;
}
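
The dma_alloc_coherent(... | __GFP_ZERO) conversions in this hunk and the two that follow are behavior-preserving: dma_zalloc_coherent() is the zeroing wrapper from <linux/dma-mapping.h>, essentially:

	static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
						dma_addr_t *dma_handle, gfp_t flag)
	{
		return dma_alloc_coherent(dev, size, dma_handle, flag | __GFP_ZERO);
	}
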
@@ -8590,10 +8657,9 @@ static int tg3_alloc_consistent(struct tg3 *tp)
{
int i;
- tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
- sizeof(struct tg3_hw_stats),
- &tp->stats_mapping,
- GFP_KERNEL | __GFP_ZERO);
+ tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
+ sizeof(struct tg3_hw_stats),
+ &tp->stats_mapping, GFP_KERNEL);
if (!tp->hw_stats)
goto err_out;
@@ -8601,10 +8667,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
struct tg3_napi *tnapi = &tp->napi[i];
struct tg3_hw_status *sblk;
- tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
- TG3_HW_STATUS_SIZE,
- &tnapi->status_mapping,
- GFP_KERNEL | __GFP_ZERO);
+ tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
+ TG3_HW_STATUS_SIZE,
+ &tnapi->status_mapping,
+ GFP_KERNEL);
if (!tnapi->hw_status)
goto err_out;
@@ -10367,6 +10433,9 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
if (tg3_flag(tp, 5755_PLUS))
tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
+ if (tg3_asic_rev(tp) == ASIC_REV_5762)
+ tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
+
if (tg3_flag(tp, ENABLE_RSS))
tp->rx_mode |= RX_MODE_RSS_ENABLE |
RX_MODE_RSS_ITBL_HASH_BITS_7 |
@@ -11502,7 +11571,7 @@ static int tg3_close(struct net_device *dev)
memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
- tg3_power_down(tp);
+ tg3_power_down_prepare(tp);
tg3_carrier_off(tp);
@@ -11724,9 +11793,6 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
if (tg3_flag(tp, NO_NVRAM))
return -EINVAL;
- if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
- return -EAGAIN;
-
offset = eeprom->offset;
len = eeprom->len;
eeprom->len = 0;
@@ -11784,9 +11850,6 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
u8 *buf;
__be32 start, end;
- if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
- return -EAGAIN;
-
if (tg3_flag(tp, NO_NVRAM) ||
eeprom->magic != TG3_EEPROM_MAGIC)
return -EINVAL;
@@ -13515,7 +13578,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
tg3_phy_start(tp);
}
if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
- tg3_power_down(tp);
+ tg3_power_down_prepare(tp);
}
@@ -15917,7 +15980,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
*/
if (tg3_flag(tp, 5780_CLASS)) {
tg3_flag_set(tp, 40BIT_DMA_BUG);
- tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
+ tp->msi_cap = tp->pdev->msi_cap;
} else {
struct pci_dev *bridge = NULL;
@@ -17547,11 +17610,6 @@ static int tg3_init_one(struct pci_dev *pdev,
tg3_asic_rev(tp) == ASIC_REV_5762)
tg3_flag_set(tp, PTP_CAPABLE);
- if (tg3_flag(tp, 5717_PLUS)) {
- /* Resume a low-power mode */
- tg3_frob_aux_power(tp, false);
- }
-
tg3_timer_init(tp);
tg3_carrier_off(tp);
@@ -17755,6 +17813,23 @@ out:
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
+static void tg3_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct tg3 *tp = netdev_priv(dev);
+
+ rtnl_lock();
+ netif_device_detach(dev);
+
+ if (netif_running(dev))
+ dev_close(dev);
+
+ if (system_state == SYSTEM_POWER_OFF)
+ tg3_power_down(tp);
+
+ rtnl_unlock();
+}
+
/**
* tg3_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
@@ -17914,6 +17989,7 @@ static struct pci_driver tg3_driver = {
.remove = tg3_remove_one,
.err_handler = &tg3_err_handler,
.driver.pm = &tg3_pm_ops,
+ .shutdown = tg3_shutdown,
};
module_pci_driver(tg3_driver);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index cd63d1189aae..ddb8be1298ea 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -532,6 +532,7 @@
#define RX_MODE_RSS_ITBL_HASH_BITS_7 0x00700000
#define RX_MODE_RSS_ENABLE 0x00800000
#define RX_MODE_IPV6_CSUM_ENABLE 0x01000000
+#define RX_MODE_IPV4_FRAG_FIX 0x02000000
#define MAC_RX_STATUS 0x0000046c
#define RX_STATUS_REMOTE_TX_XOFFED 0x00000001
#define RX_STATUS_XOFF_RCVD 0x00000002
@@ -1818,12 +1819,21 @@
#define TG3_EAV_REF_CLCK_CTL 0x00006908
#define TG3_EAV_REF_CLCK_CTL_STOP 0x00000002
#define TG3_EAV_REF_CLCK_CTL_RESUME 0x00000004
+#define TG3_EAV_CTL_TSYNC_GPIO_MASK (0x3 << 16)
+#define TG3_EAV_CTL_TSYNC_WDOG0 (1 << 17)
+
+#define TG3_EAV_WATCHDOG0_LSB 0x00006918
+#define TG3_EAV_WATCHDOG0_MSB 0x0000691c
+#define TG3_EAV_WATCHDOG0_EN (1 << 31)
+#define TG3_EAV_WATCHDOG_MSB_MASK 0x7fffffff
+
#define TG3_EAV_REF_CLK_CORRECT_CTL 0x00006928
#define TG3_EAV_REF_CLK_CORRECT_EN (1 << 31)
#define TG3_EAV_REF_CLK_CORRECT_NEG (1 << 30)
#define TG3_EAV_REF_CLK_CORRECT_MASK 0xffffff
-/* 0x690c --> 0x7000 unused */
+
+/* 0x692c --> 0x7000 unused */
/* NVRAM Control registers */
#define NVRAM_CMD 0x00007000
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 57cd1bff59f1..3c07064b2bc4 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -1419,7 +1419,7 @@ static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
bna_bfi_rx_enet_start(rx);
}
-void
+static void
bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
{
}
@@ -1472,7 +1472,7 @@ static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
bna_rxf_start(&rx->rxf);
}
-void
+static void
bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
{
}
@@ -1528,7 +1528,7 @@ bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
}
}
-void
+static void
bna_rx_sm_started_entry(struct bna_rx *rx)
{
struct bna_rxp *rxp;
@@ -1593,12 +1593,12 @@ static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
}
}
-void
+static void
bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
{
}
-void
+static void
bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
{
switch (event) {
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h
index c37f706d9992..43405f654b4a 100644
--- a/drivers/net/ethernet/brocade/bna/cna.h
+++ b/drivers/net/ethernet/brocade/bna/cna.h
@@ -37,8 +37,8 @@
extern char bfa_version[];
-#define CNA_FW_FILE_CT "ctfw-3.2.1.0.bin"
-#define CNA_FW_FILE_CT2 "ct2fw-3.2.1.0.bin"
+#define CNA_FW_FILE_CT "ctfw-3.2.1.1.bin"
+#define CNA_FW_FILE_CT2 "ct2fw-3.2.1.1.bin"
#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */
#pragma pack(1)
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index bb5d63fb2e6d..ce75de9bae9e 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -304,7 +304,7 @@ MODULE_DEVICE_TABLE(of, at91ether_dt_ids);
/* Detect MAC & PHY and perform ethernet interface initialization */
static int __init at91ether_probe(struct platform_device *pdev)
{
- struct macb_platform_data *board_data = pdev->dev.platform_data;
+ struct macb_platform_data *board_data = dev_get_platdata(&pdev->dev);
struct resource *regs;
struct net_device *dev;
struct phy_device *phydev;
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index e866608d7d91..92578690f6de 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -27,6 +27,7 @@
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/pinctrl/consumer.h>
@@ -124,7 +125,7 @@ void macb_get_hwaddr(struct macb *bp)
u8 addr[6];
int i;
- pdata = bp->pdev->dev.platform_data;
+ pdata = dev_get_platdata(&bp->pdev->dev);
/* Check all 4 address registers for a valid address */
for (i = 0; i < 4; i++) {
@@ -275,7 +276,7 @@ static int macb_mii_probe(struct net_device *dev)
phydev = phy_find_first(bp->mii_bus);
if (!phydev) {
netdev_err(dev, "no PHY found\n");
- return -1;
+ return -ENXIO;
}
pdata = dev_get_platdata(&bp->pdev->dev);
@@ -314,6 +315,7 @@ static int macb_mii_probe(struct net_device *dev)
int macb_mii_init(struct macb *bp)
{
struct macb_platform_data *pdata;
+ struct device_node *np;
int err = -ENXIO, i;
/* Enable management port */
@@ -333,10 +335,7 @@ int macb_mii_init(struct macb *bp)
bp->pdev->name, bp->pdev->id);
bp->mii_bus->priv = bp;
bp->mii_bus->parent = &bp->dev->dev;
- pdata = bp->pdev->dev.platform_data;
-
- if (pdata)
- bp->mii_bus->phy_mask = pdata->phy_mask;
+ pdata = dev_get_platdata(&bp->pdev->dev);
bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
if (!bp->mii_bus->irq) {
@@ -344,17 +343,45 @@ int macb_mii_init(struct macb *bp)
goto err_out_free_mdiobus;
}
- for (i = 0; i < PHY_MAX_ADDR; i++)
- bp->mii_bus->irq[i] = PHY_POLL;
-
dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
- if (mdiobus_register(bp->mii_bus))
+ np = bp->pdev->dev.of_node;
+ if (np) {
+ /* try dt phy registration */
+ err = of_mdiobus_register(bp->mii_bus, np);
+
+ /* fall back to standard PHY registration if no PHY was
+ found during DT PHY registration */
+ if (!err && !phy_find_first(bp->mii_bus)) {
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ struct phy_device *phydev;
+
+ phydev = mdiobus_scan(bp->mii_bus, i);
+ if (IS_ERR(phydev)) {
+ err = PTR_ERR(phydev);
+ break;
+ }
+ }
+
+ if (err)
+ goto err_out_unregister_bus;
+ }
+ } else {
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ bp->mii_bus->irq[i] = PHY_POLL;
+
+ if (pdata)
+ bp->mii_bus->phy_mask = pdata->phy_mask;
+
+ err = mdiobus_register(bp->mii_bus);
+ }
+
+ if (err)
goto err_out_free_mdio_irq;
- if (macb_mii_probe(bp->dev) != 0) {
+ err = macb_mii_probe(bp->dev);
+ if (err)
goto err_out_unregister_bus;
- }
return 0;
@@ -1824,7 +1851,7 @@ static int __init macb_probe(struct platform_device *pdev)
err = of_get_phy_mode(pdev->dev.of_node);
if (err < 0) {
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (pdata && pdata->is_rmii)
bp->phy_interface = PHY_INTERFACE_MODE_RMII;
else
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 7cb148c495c9..78d6d6b970e1 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -353,11 +353,9 @@ struct xgmac_extra_stats {
/* Receive errors */
unsigned long rx_watchdog;
unsigned long rx_da_filter_fail;
- unsigned long rx_sa_filter_fail;
unsigned long rx_payload_error;
unsigned long rx_ip_header_error;
/* Tx/Rx IRQ errors */
- unsigned long tx_undeflow;
unsigned long tx_process_stopped;
unsigned long rx_buf_unav;
unsigned long rx_process_stopped;
@@ -393,6 +391,7 @@ struct xgmac_priv {
char rx_pause;
char tx_pause;
int wolopts;
+ struct work_struct tx_timeout_work;
};
/* XGMAC Configuration Settings */
@@ -409,6 +408,9 @@ struct xgmac_priv {
#define dma_ring_space(h, t, s) CIRC_SPACE(h, t, s)
#define dma_ring_cnt(h, t, s) CIRC_CNT(h, t, s)
+#define tx_dma_ring_space(p) \
+ dma_ring_space((p)->tx_head, (p)->tx_tail, DMA_TX_RING_SZ)
+
/* XGMAC Descriptor Access Helpers */
static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
{
@@ -421,7 +423,7 @@ static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
{
- u32 len = cpu_to_le32(p->flags);
+ u32 len = le32_to_cpu(p->buf_size);
return (len & DESC_BUFFER1_SZ_MASK) +
((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
}
@@ -464,11 +466,23 @@ static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
p->flags = cpu_to_le32(tmpflags);
}
+static inline void desc_clear_tx_owner(struct xgmac_dma_desc *p)
+{
+ u32 tmpflags = le32_to_cpu(p->flags);
+ tmpflags &= TXDESC_END_RING;
+ p->flags = cpu_to_le32(tmpflags);
+}
+
static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
{
return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
}
+static inline int desc_get_tx_fs(struct xgmac_dma_desc *p)
+{
+ return le32_to_cpu(p->flags) & TXDESC_FIRST_SEG;
+}
+
static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
{
return le32_to_cpu(p->buf1_addr);
@@ -609,10 +623,15 @@ static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
{
u32 data;
- data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
- writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
- data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
- writel(data, ioaddr + XGMAC_ADDR_LOW(num));
+ if (addr) {
+ data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
+ writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
+ data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ writel(data, ioaddr + XGMAC_ADDR_LOW(num));
+ } else {
+ writel(0, ioaddr + XGMAC_ADDR_HIGH(num));
+ writel(0, ioaddr + XGMAC_ADDR_LOW(num));
+ }
}
static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
@@ -683,9 +702,14 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
if (unlikely(skb == NULL))
break;
- priv->rx_skbuff[entry] = skb;
paddr = dma_map_single(priv->device, skb->data,
- bufsz, DMA_FROM_DEVICE);
+ priv->dma_buf_sz - NET_IP_ALIGN,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(priv->device, paddr)) {
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ priv->rx_skbuff[entry] = skb;
desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
}
@@ -782,20 +806,21 @@ static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
return;
for (i = 0; i < DMA_RX_RING_SZ; i++) {
- if (priv->rx_skbuff[i] == NULL)
+ struct sk_buff *skb = priv->rx_skbuff[i];
+ if (skb == NULL)
continue;
p = priv->dma_rx + i;
dma_unmap_single(priv->device, desc_get_buf_addr(p),
- priv->dma_buf_sz, DMA_FROM_DEVICE);
- dev_kfree_skb_any(priv->rx_skbuff[i]);
+ priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
priv->rx_skbuff[i] = NULL;
}
}
static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
{
- int i, f;
+ int i;
struct xgmac_dma_desc *p;
if (!priv->tx_skbuff)
@@ -806,16 +831,15 @@ static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
continue;
p = priv->dma_tx + i;
- dma_unmap_single(priv->device, desc_get_buf_addr(p),
- desc_get_buf_len(p), DMA_TO_DEVICE);
-
- for (f = 0; f < skb_shinfo(priv->tx_skbuff[i])->nr_frags; f++) {
- p = priv->dma_tx + i++;
+ if (desc_get_tx_fs(p))
+ dma_unmap_single(priv->device, desc_get_buf_addr(p),
+ desc_get_buf_len(p), DMA_TO_DEVICE);
+ else
dma_unmap_page(priv->device, desc_get_buf_addr(p),
desc_get_buf_len(p), DMA_TO_DEVICE);
- }
- dev_kfree_skb_any(priv->tx_skbuff[i]);
+ if (desc_get_tx_ls(p))
+ dev_kfree_skb_any(priv->tx_skbuff[i]);
priv->tx_skbuff[i] = NULL;
}
}
@@ -852,8 +876,6 @@ static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
*/
static void xgmac_tx_complete(struct xgmac_priv *priv)
{
- int i;
-
while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
unsigned int entry = priv->tx_tail;
struct sk_buff *skb = priv->tx_skbuff[entry];
@@ -863,55 +885,45 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
if (desc_get_owner(p))
break;
- /* Verify tx error by looking at the last segment */
- if (desc_get_tx_ls(p))
- desc_get_tx_status(priv, p);
-
netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
priv->tx_head, priv->tx_tail);
- dma_unmap_single(priv->device, desc_get_buf_addr(p),
- desc_get_buf_len(p), DMA_TO_DEVICE);
-
- priv->tx_skbuff[entry] = NULL;
- priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
-
- if (!skb) {
- continue;
- }
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- entry = priv->tx_tail = dma_ring_incr(priv->tx_tail,
- DMA_TX_RING_SZ);
- p = priv->dma_tx + priv->tx_tail;
-
+ if (desc_get_tx_fs(p))
+ dma_unmap_single(priv->device, desc_get_buf_addr(p),
+ desc_get_buf_len(p), DMA_TO_DEVICE);
+ else
dma_unmap_page(priv->device, desc_get_buf_addr(p),
desc_get_buf_len(p), DMA_TO_DEVICE);
+
+ /* Check tx error on the last segment */
+ if (desc_get_tx_ls(p)) {
+ desc_get_tx_status(priv, p);
+ dev_kfree_skb(skb);
}
- dev_kfree_skb(skb);
+ priv->tx_skbuff[entry] = NULL;
+ priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
}
- if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
- MAX_SKB_FRAGS)
+ /* Ensure tx_tail is visible to xgmac_xmit */
+ smp_mb();
+ if (unlikely(netif_queue_stopped(priv->dev) &&
+ (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)))
netif_wake_queue(priv->dev);
}
-/**
- * xgmac_tx_err:
- * @priv: pointer to the private device structure
- * Description: it cleans the descriptors and restarts the transmission
- * in case of errors.
- */
-static void xgmac_tx_err(struct xgmac_priv *priv)
+static void xgmac_tx_timeout_work(struct work_struct *work)
{
- u32 reg, value, inten;
+ u32 reg, value;
+ struct xgmac_priv *priv =
+ container_of(work, struct xgmac_priv, tx_timeout_work);
- netif_stop_queue(priv->dev);
+ napi_disable(&priv->napi);
- inten = readl(priv->base + XGMAC_DMA_INTR_ENA);
writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+ netif_tx_lock(priv->dev);
+
reg = readl(priv->base + XGMAC_DMA_CONTROL);
writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
do {
@@ -927,9 +939,15 @@ static void xgmac_tx_err(struct xgmac_priv *priv)
writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
priv->base + XGMAC_DMA_STATUS);
- writel(inten, priv->base + XGMAC_DMA_INTR_ENA);
+ netif_tx_unlock(priv->dev);
netif_wake_queue(priv->dev);
+
+ napi_enable(&priv->napi);
+
+ /* Enable interrupts */
+ writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_STATUS);
+ writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
}
static int xgmac_hw_init(struct net_device *dev)
@@ -957,9 +975,7 @@ static int xgmac_hw_init(struct net_device *dev)
DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
- /* Enable interrupts */
- writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
- writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+ writel(0, ioaddr + XGMAC_DMA_INTR_ENA);
/* Mask power mgt interrupt */
writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT);
@@ -1027,6 +1043,10 @@ static int xgmac_open(struct net_device *dev)
napi_enable(&priv->napi);
netif_start_queue(dev);
+ /* Enable interrupts */
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+
return 0;
}
@@ -1087,7 +1107,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
if (dma_mapping_error(priv->device, paddr)) {
dev_kfree_skb(skb);
- return -EIO;
+ return NETDEV_TX_OK;
}
priv->tx_skbuff[entry] = skb;
desc_set_buf_addr_and_size(desc, paddr, len);
@@ -1099,14 +1119,12 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
paddr = skb_frag_dma_map(priv->device, frag, 0, len,
DMA_TO_DEVICE);
- if (dma_mapping_error(priv->device, paddr)) {
- dev_kfree_skb(skb);
- return -EIO;
- }
+ if (dma_mapping_error(priv->device, paddr))
+ goto dma_err;
entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
desc = priv->dma_tx + entry;
- priv->tx_skbuff[entry] = NULL;
+ priv->tx_skbuff[entry] = skb;
desc_set_buf_addr_and_size(desc, paddr, len);
if (i < (nfrags - 1))
@@ -1124,13 +1142,35 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
wmb();
desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);
+ writel(1, priv->base + XGMAC_DMA_TX_POLL);
+
priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
- writel(1, priv->base + XGMAC_DMA_TX_POLL);
- if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
- MAX_SKB_FRAGS)
+ /* Ensure tx_head update is visible to tx completion */
+ smp_mb();
+ if (unlikely(tx_dma_ring_space(priv) <= MAX_SKB_FRAGS)) {
netif_stop_queue(dev);
+ /* Ensure netif_stop_queue is visible to tx completion */
+ smp_mb();
+ if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)
+ netif_start_queue(dev);
+ }
+ return NETDEV_TX_OK;
+dma_err:
+ entry = priv->tx_head;
+ for ( ; i > 0; i--) {
+ entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
+ desc = priv->dma_tx + entry;
+ priv->tx_skbuff[entry] = NULL;
+ dma_unmap_page(priv->device, desc_get_buf_addr(desc),
+ desc_get_buf_len(desc), DMA_TO_DEVICE);
+ desc_clear_tx_owner(desc);
+ }
+ desc = first;
+ dma_unmap_single(priv->device, desc_get_buf_addr(desc),
+ desc_get_buf_len(desc), DMA_TO_DEVICE);
+ dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
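
The two smp_mb() calls added here pair with the one added in xgmac_tx_complete() earlier in this file's diff to close a lost-wakeup race: if the completion path reads the stopped flag before the producer's netif_stop_queue() becomes visible (or the producer re-checks ring space against a stale tx_tail), the queue can stop permanently even though descriptors just became free. Condensed from the two hunks, the pairing is:

	/* producer (xgmac_xmit) */
	priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
	smp_mb();				/* publish head before checking space */
	if (tx_dma_ring_space(priv) <= MAX_SKB_FRAGS) {
		netif_stop_queue(dev);
		smp_mb();			/* stop visible before the re-check */
		if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)
			netif_start_queue(dev);	/* completion freed space meanwhile */
	}

	/* consumer (xgmac_tx_complete, after the completion loop) */
	smp_mb();				/* publish tail before reading stopped flag */
	if (unlikely(netif_queue_stopped(priv->dev) &&
		     tx_dma_ring_space(priv) > MAX_SKB_FRAGS))
		netif_wake_queue(priv->dev);
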
@@ -1174,7 +1214,7 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
skb_put(skb, frame_len);
dma_unmap_single(priv->device, desc_get_buf_addr(p),
- frame_len, DMA_FROM_DEVICE);
+ priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
skb->protocol = eth_type_trans(skb, priv->dev);
skb->ip_summed = ip_checksum;
@@ -1225,9 +1265,7 @@ static int xgmac_poll(struct napi_struct *napi, int budget)
static void xgmac_tx_timeout(struct net_device *dev)
{
struct xgmac_priv *priv = netdev_priv(dev);
-
- /* Clear Tx resources and restart transmitting again */
- xgmac_tx_err(priv);
+ schedule_work(&priv->tx_timeout_work);
}
/**
@@ -1286,6 +1324,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
use_hash = true;
value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
+ } else {
+ use_hash = false;
}
netdev_for_each_mc_addr(ha, dev) {
if (use_hash) {
@@ -1302,6 +1342,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
}
out:
+ for (i = reg; i < XGMAC_MAX_FILTER_ADDR; i++)
+ xgmac_set_mac_addr(ioaddr, NULL, i);
for (i = 0; i < XGMAC_NUM_HASH; i++)
writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
@@ -1366,7 +1408,6 @@ static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
{
u32 intr_status;
- bool tx_err = false;
struct net_device *dev = (struct net_device *)dev_id;
struct xgmac_priv *priv = netdev_priv(dev);
struct xgmac_extra_stats *x = &priv->xstats;
@@ -1396,16 +1437,12 @@ static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
if (intr_status & DMA_STATUS_TPS) {
netdev_err(priv->dev, "transmit process stopped\n");
x->tx_process_stopped++;
- tx_err = true;
+ schedule_work(&priv->tx_timeout_work);
}
if (intr_status & DMA_STATUS_FBI) {
netdev_err(priv->dev, "fatal bus error\n");
x->fatal_bus_error++;
- tx_err = true;
}
-
- if (tx_err)
- xgmac_tx_err(priv);
}
/* TX/RX NORMAL interrupts */
@@ -1569,7 +1606,6 @@ static const struct xgmac_stats xgmac_gstrings_stats[] = {
XGMAC_STAT(rx_payload_error),
XGMAC_STAT(rx_ip_header_error),
XGMAC_STAT(rx_da_filter_fail),
- XGMAC_STAT(rx_sa_filter_fail),
XGMAC_STAT(fatal_bus_error),
XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
@@ -1708,6 +1744,7 @@ static int xgmac_probe(struct platform_device *pdev)
ndev->netdev_ops = &xgmac_netdev_ops;
SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
spin_lock_init(&priv->stats_lock);
+ INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work);
priv->device = &pdev->dev;
priv->dev = ndev;
@@ -1759,7 +1796,7 @@ static int xgmac_probe(struct platform_device *pdev)
if (device_can_wakeup(priv->device))
priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
- ndev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
+ ndev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA;
if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 038df4b96139..79ac77cf62d9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3982,7 +3982,6 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this,
struct inet6_ifaddr *ifa = data;
struct net_device *event_dev;
int ret = NOTIFY_DONE;
- int cnt;
struct bonding *bond = netdev_priv(ifa->idev->dev);
struct slave *slave;
struct pci_dev *first_pdev = NULL;
@@ -3996,7 +3995,7 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this,
* in all of them only once.
*/
read_lock(&bond->lock);
- bond_for_each_slave(bond, slave, cnt) {
+ bond_for_each_slave(bond, slave) {
if (!first_pdev) {
ret = clip_add(slave->dev, ifa, event);
/* If clip_add is success then only initialize
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index e3d4ec836f8b..ec88de4ac162 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -814,7 +814,7 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
if (pdev == NULL)
return -ENODEV;
- data = pdev->dev.platform_data;
+ data = dev_get_platdata(&pdev->dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
diff --git a/drivers/net/ethernet/cisco/enic/Makefile b/drivers/net/ethernet/cisco/enic/Makefile
index 9d4974bba247..239e1e46545d 100644
--- a/drivers/net/ethernet/cisco/enic/Makefile
+++ b/drivers/net/ethernet/cisco/enic/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_ENIC) := enic.o
enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
- enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o
+ enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o \
+ enic_ethtool.o enic_api.o
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index afe9b1662b8c..e9f7c656ddda 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -32,12 +32,12 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "2.1.1.39"
-#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc"
+#define DRV_VERSION "2.1.1.50"
+#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
-#define ENIC_WQ_MAX 1
+#define ENIC_WQ_MAX 8
#define ENIC_RQ_MAX 8
#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
@@ -96,6 +96,7 @@ struct enic {
#ifdef CONFIG_PCI_IOV
u16 num_vfs;
#endif
+ spinlock_t enic_api_lock;
struct enic_port_profile *pp;
/* work queue cache line section */
@@ -127,9 +128,57 @@ static inline struct device *enic_get_dev(struct enic *enic)
return &(enic->pdev->dev);
}
+static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
+{
+ return rq;
+}
+
+static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
+{
+ return enic->rq_count + wq;
+}
+
+static inline unsigned int enic_legacy_io_intr(void)
+{
+ return 0;
+}
+
+static inline unsigned int enic_legacy_err_intr(void)
+{
+ return 1;
+}
+
+static inline unsigned int enic_legacy_notify_intr(void)
+{
+ return 2;
+}
+
+static inline unsigned int enic_msix_rq_intr(struct enic *enic,
+ unsigned int rq)
+{
+ return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
+}
+
+static inline unsigned int enic_msix_wq_intr(struct enic *enic,
+ unsigned int wq)
+{
+ return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
+}
+
+static inline unsigned int enic_msix_err_intr(struct enic *enic)
+{
+ return enic->rq_count + enic->wq_count;
+}
+
+static inline unsigned int enic_msix_notify_intr(struct enic *enic)
+{
+ return enic->rq_count + enic->wq_count + 1;
+}
+
void enic_reset_addr_lists(struct enic *enic);
int enic_sriov_enabled(struct enic *enic);
int enic_is_valid_vf(struct enic *enic, int vf);
int enic_is_dynamic(struct enic *enic);
+void enic_set_ethtool_ops(struct net_device *netdev);
#endif /* _ENIC_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/enic_api.c b/drivers/net/ethernet/cisco/enic/enic_api.c
new file mode 100644
index 000000000000..e13efbdaa2ed
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_api.c
@@ -0,0 +1,48 @@
+/**
+ * Copyright 2013 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+
+#include "vnic_dev.h"
+#include "vnic_devcmd.h"
+
+#include "enic_res.h"
+#include "enic.h"
+#include "enic_api.h"
+
+int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf,
+ enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
+{
+ int err;
+ struct enic *enic = netdev_priv(netdev);
+ struct vnic_dev *vdev = enic->vdev;
+
+ spin_lock(&enic->enic_api_lock);
+ spin_lock(&enic->devcmd_lock);
+
+ vnic_dev_cmd_proxy_by_index_start(vdev, vf);
+ err = vnic_dev_cmd(vdev, cmd, a0, a1, wait);
+ vnic_dev_cmd_proxy_end(vdev);
+
+ spin_unlock(&enic->devcmd_lock);
+ spin_unlock(&enic->enic_api_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(enic_api_devcmd_proxy_by_index);
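
enic_api_devcmd_proxy_by_index() gives a dependent module a serialized path for proxying firmware devcmds on behalf of a VF, with enic_api_lock guarding against the PF's own devcmd traffic. A hypothetical caller sketch; the wrapper name, command argument, and wait value are illustrative (wait is passed straight through to vnic_dev_cmd()):

	#include "enic_api.h"

	/* Hypothetical consumer: issue devcmd @cmd for VF @vf on the PF netdev. */
	static int example_vf_devcmd(struct net_device *netdev, int vf,
				     enum vnic_devcmd_cmd cmd, u64 arg)
	{
		u64 a0 = arg, a1 = 0;

		return enic_api_devcmd_proxy_by_index(netdev, vf, cmd, &a0, &a1,
						      1000 /* wait count */);
	}
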
diff --git a/drivers/net/ethernet/cisco/enic/enic_api.h b/drivers/net/ethernet/cisco/enic/enic_api.h
new file mode 100644
index 000000000000..6b9f9255af28
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_api.h
@@ -0,0 +1,30 @@
+/**
+ * Copyright 2013 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __ENIC_API_H__
+#define __ENIC_API_H__
+
+#include <linux/netdevice.h>
+
+#include "vnic_dev.h"
+#include "vnic_devcmd.h"
+
+int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf,
+ enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait);
+
+#endif
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h
index 08bded051b93..129b14a4efb0 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.h
@@ -20,6 +20,7 @@
#define _ENIC_DEV_H_
#include "vnic_dev.h"
+#include "vnic_vic.h"
/*
* Calls the devcmd function given by argument vnicdevcmdfn.
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
new file mode 100644
index 000000000000..47e3562f4866
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -0,0 +1,257 @@
+/**
+ * Copyright 2013 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "enic_res.h"
+#include "enic.h"
+#include "enic_dev.h"
+
+struct enic_stat {
+ char name[ETH_GSTRING_LEN];
+ unsigned int index;
+};
+
+#define ENIC_TX_STAT(stat) { \
+ .name = #stat, \
+ .index = offsetof(struct vnic_tx_stats, stat) / sizeof(u64) \
+}
+
+#define ENIC_RX_STAT(stat) { \
+ .name = #stat, \
+ .index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \
+}
+
+static const struct enic_stat enic_tx_stats[] = {
+ ENIC_TX_STAT(tx_frames_ok),
+ ENIC_TX_STAT(tx_unicast_frames_ok),
+ ENIC_TX_STAT(tx_multicast_frames_ok),
+ ENIC_TX_STAT(tx_broadcast_frames_ok),
+ ENIC_TX_STAT(tx_bytes_ok),
+ ENIC_TX_STAT(tx_unicast_bytes_ok),
+ ENIC_TX_STAT(tx_multicast_bytes_ok),
+ ENIC_TX_STAT(tx_broadcast_bytes_ok),
+ ENIC_TX_STAT(tx_drops),
+ ENIC_TX_STAT(tx_errors),
+ ENIC_TX_STAT(tx_tso),
+};
+
+static const struct enic_stat enic_rx_stats[] = {
+ ENIC_RX_STAT(rx_frames_ok),
+ ENIC_RX_STAT(rx_frames_total),
+ ENIC_RX_STAT(rx_unicast_frames_ok),
+ ENIC_RX_STAT(rx_multicast_frames_ok),
+ ENIC_RX_STAT(rx_broadcast_frames_ok),
+ ENIC_RX_STAT(rx_bytes_ok),
+ ENIC_RX_STAT(rx_unicast_bytes_ok),
+ ENIC_RX_STAT(rx_multicast_bytes_ok),
+ ENIC_RX_STAT(rx_broadcast_bytes_ok),
+ ENIC_RX_STAT(rx_drop),
+ ENIC_RX_STAT(rx_no_bufs),
+ ENIC_RX_STAT(rx_errors),
+ ENIC_RX_STAT(rx_rss),
+ ENIC_RX_STAT(rx_crc_errors),
+ ENIC_RX_STAT(rx_frames_64),
+ ENIC_RX_STAT(rx_frames_127),
+ ENIC_RX_STAT(rx_frames_255),
+ ENIC_RX_STAT(rx_frames_511),
+ ENIC_RX_STAT(rx_frames_1023),
+ ENIC_RX_STAT(rx_frames_1518),
+ ENIC_RX_STAT(rx_frames_to_max),
+};
+
+static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
+static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
+
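
The two macros above encode each counter as an index into the stats structure viewed as a flat u64 array — valid only because every field in vnic_tx_stats/vnic_rx_stats is a u64 — which is what lets enic_get_ethtool_stats() below copy counters with a single array lookup. A userspace-compilable restatement of the trick (the demo struct and values are made up):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_stats {		/* stand-in for vnic_tx_stats */
	uint64_t frames_ok;
	uint64_t bytes_ok;
	uint64_t drops;
};

int main(void)
{
	struct demo_stats s = { 10, 2000, 3 };
	/* same computation as ENIC_TX_STAT(): byte offset -> u64 index */
	size_t idx = offsetof(struct demo_stats, drops) / sizeof(uint64_t);

	printf("drops = %llu\n",
	       (unsigned long long)((uint64_t *)&s)[idx]);	/* prints 3 */
	return 0;
}
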
+static int enic_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+ ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
+ ecmd->port = PORT_FIBRE;
+ ecmd->transceiver = XCVR_EXTERNAL;
+
+ if (netif_carrier_ok(netdev)) {
+ ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
+ ecmd->duplex = DUPLEX_FULL;
+ } else {
+ ethtool_cmd_speed_set(ecmd, -1);
+ ecmd->duplex = -1;
+ }
+
+ ecmd->autoneg = AUTONEG_DISABLE;
+
+ return 0;
+}
+
+static void enic_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct enic *enic = netdev_priv(netdev);
+ struct vnic_devcmd_fw_info *fw_info;
+
+ enic_dev_fw_info(enic, &fw_info);
+
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, fw_info->fw_version,
+ sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
+ sizeof(drvinfo->bus_info));
+}
+
+static void enic_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ unsigned int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < enic_n_tx_stats; i++) {
+ memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < enic_n_rx_stats; i++) {
+ memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static int enic_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return enic_n_tx_stats + enic_n_rx_stats;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void enic_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct enic *enic = netdev_priv(netdev);
+ struct vnic_stats *vstats;
+ unsigned int i;
+
+ enic_dev_stats_dump(enic, &vstats);
+
+ for (i = 0; i < enic_n_tx_stats; i++)
+ *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
+ for (i = 0; i < enic_n_rx_stats; i++)
+ *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
+}
+
+static u32 enic_get_msglevel(struct net_device *netdev)
+{
+ struct enic *enic = netdev_priv(netdev);
+ return enic->msg_enable;
+}
+
+static void enic_set_msglevel(struct net_device *netdev, u32 value)
+{
+ struct enic *enic = netdev_priv(netdev);
+ enic->msg_enable = value;
+}
+
+static int enic_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
+ ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
+
+ return 0;
+}
+
+static int enic_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct enic *enic = netdev_priv(netdev);
+ u32 tx_coalesce_usecs;
+ u32 rx_coalesce_usecs;
+ unsigned int i, intr;
+
+ tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
+ vnic_dev_get_intr_coal_timer_max(enic->vdev));
+ rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
+ vnic_dev_get_intr_coal_timer_max(enic->vdev));
+
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ if (tx_coalesce_usecs != rx_coalesce_usecs)
+ return -EINVAL;
+
+ intr = enic_legacy_io_intr();
+ vnic_intr_coalescing_timer_set(&enic->intr[intr],
+ tx_coalesce_usecs);
+ break;
+ case VNIC_DEV_INTR_MODE_MSI:
+ if (tx_coalesce_usecs != rx_coalesce_usecs)
+ return -EINVAL;
+
+ vnic_intr_coalescing_timer_set(&enic->intr[0],
+ tx_coalesce_usecs);
+ break;
+ case VNIC_DEV_INTR_MODE_MSIX:
+ for (i = 0; i < enic->wq_count; i++) {
+ intr = enic_msix_wq_intr(enic, i);
+ vnic_intr_coalescing_timer_set(&enic->intr[intr],
+ tx_coalesce_usecs);
+ }
+
+ for (i = 0; i < enic->rq_count; i++) {
+ intr = enic_msix_rq_intr(enic, i);
+ vnic_intr_coalescing_timer_set(&enic->intr[intr],
+ rx_coalesce_usecs);
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ enic->tx_coalesce_usecs = tx_coalesce_usecs;
+ enic->rx_coalesce_usecs = rx_coalesce_usecs;
+
+ return 0;
+}
+
+static const struct ethtool_ops enic_ethtool_ops = {
+ .get_settings = enic_get_settings,
+ .get_drvinfo = enic_get_drvinfo,
+ .get_msglevel = enic_get_msglevel,
+ .set_msglevel = enic_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_strings = enic_get_strings,
+ .get_sset_count = enic_get_sset_count,
+ .get_ethtool_stats = enic_get_ethtool_stats,
+ .get_coalesce = enic_get_coalesce,
+ .set_coalesce = enic_set_coalesce,
+};
+
+void enic_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &enic_ethtool_ops);
+}
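
The wrapper lets enic_ethtool_ops stay static to this new file while enic_main.c keeps a one-line call at probe time. For reference, SET_ETHTOOL_OPS() in kernels of this vintage is nothing more than a pointer assignment (the macro was later removed in favour of assigning the field directly):

/* From <linux/ethtool.h> of this era (shown for context):
 *
 *   #define SET_ETHTOOL_OPS(netdev, ops) ((netdev)->ethtool_ops = (ops))
 */
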
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 992ec2ee64d9..7b756cf9474a 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -31,7 +31,6 @@
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
-#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
@@ -73,57 +72,6 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);
-struct enic_stat {
- char name[ETH_GSTRING_LEN];
- unsigned int offset;
-};
-
-#define ENIC_TX_STAT(stat) \
- { .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 }
-#define ENIC_RX_STAT(stat) \
- { .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 }
-
-static const struct enic_stat enic_tx_stats[] = {
- ENIC_TX_STAT(tx_frames_ok),
- ENIC_TX_STAT(tx_unicast_frames_ok),
- ENIC_TX_STAT(tx_multicast_frames_ok),
- ENIC_TX_STAT(tx_broadcast_frames_ok),
- ENIC_TX_STAT(tx_bytes_ok),
- ENIC_TX_STAT(tx_unicast_bytes_ok),
- ENIC_TX_STAT(tx_multicast_bytes_ok),
- ENIC_TX_STAT(tx_broadcast_bytes_ok),
- ENIC_TX_STAT(tx_drops),
- ENIC_TX_STAT(tx_errors),
- ENIC_TX_STAT(tx_tso),
-};
-
-static const struct enic_stat enic_rx_stats[] = {
- ENIC_RX_STAT(rx_frames_ok),
- ENIC_RX_STAT(rx_frames_total),
- ENIC_RX_STAT(rx_unicast_frames_ok),
- ENIC_RX_STAT(rx_multicast_frames_ok),
- ENIC_RX_STAT(rx_broadcast_frames_ok),
- ENIC_RX_STAT(rx_bytes_ok),
- ENIC_RX_STAT(rx_unicast_bytes_ok),
- ENIC_RX_STAT(rx_multicast_bytes_ok),
- ENIC_RX_STAT(rx_broadcast_bytes_ok),
- ENIC_RX_STAT(rx_drop),
- ENIC_RX_STAT(rx_no_bufs),
- ENIC_RX_STAT(rx_errors),
- ENIC_RX_STAT(rx_rss),
- ENIC_RX_STAT(rx_crc_errors),
- ENIC_RX_STAT(rx_frames_64),
- ENIC_RX_STAT(rx_frames_127),
- ENIC_RX_STAT(rx_frames_255),
- ENIC_RX_STAT(rx_frames_511),
- ENIC_RX_STAT(rx_frames_1023),
- ENIC_RX_STAT(rx_frames_1518),
- ENIC_RX_STAT(rx_frames_to_max),
-};
-
-static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
-static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
-
int enic_is_dynamic(struct enic *enic)
{
return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
@@ -148,222 +96,6 @@ int enic_is_valid_vf(struct enic *enic, int vf)
#endif
}
-static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
-{
- return rq;
-}
-
-static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
-{
- return enic->rq_count + wq;
-}
-
-static inline unsigned int enic_legacy_io_intr(void)
-{
- return 0;
-}
-
-static inline unsigned int enic_legacy_err_intr(void)
-{
- return 1;
-}
-
-static inline unsigned int enic_legacy_notify_intr(void)
-{
- return 2;
-}
-
-static inline unsigned int enic_msix_rq_intr(struct enic *enic, unsigned int rq)
-{
- return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
-}
-
-static inline unsigned int enic_msix_wq_intr(struct enic *enic, unsigned int wq)
-{
- return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
-}
-
-static inline unsigned int enic_msix_err_intr(struct enic *enic)
-{
- return enic->rq_count + enic->wq_count;
-}
-
-static inline unsigned int enic_msix_notify_intr(struct enic *enic)
-{
- return enic->rq_count + enic->wq_count + 1;
-}
-
-static int enic_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
-{
- struct enic *enic = netdev_priv(netdev);
-
- ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
- ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
- ecmd->transceiver = XCVR_EXTERNAL;
-
- if (netif_carrier_ok(netdev)) {
- ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
- ecmd->duplex = DUPLEX_FULL;
- } else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
- }
-
- ecmd->autoneg = AUTONEG_DISABLE;
-
- return 0;
-}
-
-static void enic_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct enic *enic = netdev_priv(netdev);
- struct vnic_devcmd_fw_info *fw_info;
-
- enic_dev_fw_info(enic, &fw_info);
-
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, fw_info->fw_version,
- sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
- sizeof(drvinfo->bus_info));
-}
-
-static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
-{
- unsigned int i;
-
- switch (stringset) {
- case ETH_SS_STATS:
- for (i = 0; i < enic_n_tx_stats; i++) {
- memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
- for (i = 0; i < enic_n_rx_stats; i++) {
- memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
- break;
- }
-}
-
-static int enic_get_sset_count(struct net_device *netdev, int sset)
-{
- switch (sset) {
- case ETH_SS_STATS:
- return enic_n_tx_stats + enic_n_rx_stats;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void enic_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct enic *enic = netdev_priv(netdev);
- struct vnic_stats *vstats;
- unsigned int i;
-
- enic_dev_stats_dump(enic, &vstats);
-
- for (i = 0; i < enic_n_tx_stats; i++)
- *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
- for (i = 0; i < enic_n_rx_stats; i++)
- *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
-}
-
-static u32 enic_get_msglevel(struct net_device *netdev)
-{
- struct enic *enic = netdev_priv(netdev);
- return enic->msg_enable;
-}
-
-static void enic_set_msglevel(struct net_device *netdev, u32 value)
-{
- struct enic *enic = netdev_priv(netdev);
- enic->msg_enable = value;
-}
-
-static int enic_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ecmd)
-{
- struct enic *enic = netdev_priv(netdev);
-
- ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
- ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
-
- return 0;
-}
-
-static int enic_set_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ecmd)
-{
- struct enic *enic = netdev_priv(netdev);
- u32 tx_coalesce_usecs;
- u32 rx_coalesce_usecs;
- unsigned int i, intr;
-
- tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
- vnic_dev_get_intr_coal_timer_max(enic->vdev));
- rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
- vnic_dev_get_intr_coal_timer_max(enic->vdev));
-
- switch (vnic_dev_get_intr_mode(enic->vdev)) {
- case VNIC_DEV_INTR_MODE_INTX:
- if (tx_coalesce_usecs != rx_coalesce_usecs)
- return -EINVAL;
-
- intr = enic_legacy_io_intr();
- vnic_intr_coalescing_timer_set(&enic->intr[intr],
- tx_coalesce_usecs);
- break;
- case VNIC_DEV_INTR_MODE_MSI:
- if (tx_coalesce_usecs != rx_coalesce_usecs)
- return -EINVAL;
-
- vnic_intr_coalescing_timer_set(&enic->intr[0],
- tx_coalesce_usecs);
- break;
- case VNIC_DEV_INTR_MODE_MSIX:
- for (i = 0; i < enic->wq_count; i++) {
- intr = enic_msix_wq_intr(enic, i);
- vnic_intr_coalescing_timer_set(&enic->intr[intr],
- tx_coalesce_usecs);
- }
-
- for (i = 0; i < enic->rq_count; i++) {
- intr = enic_msix_rq_intr(enic, i);
- vnic_intr_coalescing_timer_set(&enic->intr[intr],
- rx_coalesce_usecs);
- }
-
- break;
- default:
- break;
- }
-
- enic->tx_coalesce_usecs = tx_coalesce_usecs;
- enic->rx_coalesce_usecs = rx_coalesce_usecs;
-
- return 0;
-}
-
-static const struct ethtool_ops enic_ethtool_ops = {
- .get_settings = enic_get_settings,
- .get_drvinfo = enic_get_drvinfo,
- .get_msglevel = enic_get_msglevel,
- .set_msglevel = enic_set_msglevel,
- .get_link = ethtool_op_get_link,
- .get_strings = enic_get_strings,
- .get_sset_count = enic_get_sset_count,
- .get_ethtool_stats = enic_get_ethtool_stats,
- .get_coalesce = enic_get_coalesce,
- .set_coalesce = enic_set_coalesce,
-};
-
static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
struct enic *enic = vnic_dev_priv(wq->vdev);
@@ -396,10 +128,10 @@ static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
completed_index, enic_wq_free_buf,
opaque);
- if (netif_queue_stopped(enic->netdev) &&
+ if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
vnic_wq_desc_avail(&enic->wq[q_number]) >=
(MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
- netif_wake_queue(enic->netdev);
+ netif_wake_subqueue(enic->netdev, q_number);
spin_unlock(&enic->wq_lock[q_number]);
@@ -560,10 +292,15 @@ static irqreturn_t enic_isr_msix_rq(int irq, void *data)
static irqreturn_t enic_isr_msix_wq(int irq, void *data)
{
struct enic *enic = data;
- unsigned int cq = enic_cq_wq(enic, 0);
- unsigned int intr = enic_msix_wq_intr(enic, 0);
+ unsigned int cq;
+ unsigned int intr;
unsigned int wq_work_to_do = -1; /* no limit */
unsigned int wq_work_done;
+ unsigned int wq_irq;
+
+ wq_irq = (u32)irq - enic->msix_entry[enic_msix_wq_intr(enic, 0)].vector;
+ cq = enic_cq_wq(enic, wq_irq);
+ intr = enic_msix_wq_intr(enic, wq_irq);
wq_work_done = vnic_cq_service(&enic->cq[cq],
wq_work_to_do, enic_wq_service, NULL);
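
The rewritten WQ ISR no longer hardcodes queue 0; it recovers the queue index from the IRQ number itself. A worked example of the arithmetic, with illustrative vector numbers:

/* Suppose WQs 0..3 were attached to MSI-X vectors 40..43 in order:
 *
 *   irq    = 42                       (vector that just fired)
 *   base   = msix_entry[enic_msix_wq_intr(enic, 0)].vector = 40
 *   wq_irq = 42 - 40 = 2              => service CQ/intr of WQ 2
 *
 * This relies on the WQ vectors being contiguous and in queue order,
 * which is how the driver requests them.
 */
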
@@ -779,14 +516,18 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct enic *enic = netdev_priv(netdev);
- struct vnic_wq *wq = &enic->wq[0];
+ struct vnic_wq *wq;
unsigned long flags;
+ unsigned int txq_map;
if (skb->len <= 0) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
+ txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
+ wq = &enic->wq[txq_map];
+
/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
* which is very likely. In the off chance it's going to take
 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
@@ -799,23 +540,23 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- spin_lock_irqsave(&enic->wq_lock[0], flags);
+ spin_lock_irqsave(&enic->wq_lock[txq_map], flags);
if (vnic_wq_desc_avail(wq) <
skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
- netif_stop_queue(netdev);
+ netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
/* This is a hard error, log it */
netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
- spin_unlock_irqrestore(&enic->wq_lock[0], flags);
+ spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
return NETDEV_TX_BUSY;
}
enic_queue_wq_skb(enic, wq, skb);
if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
- netif_stop_queue(netdev);
+ netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
- spin_unlock_irqrestore(&enic->wq_lock[0], flags);
+ spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
return NETDEV_TX_OK;
}
@@ -1293,6 +1034,14 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
skb_put(skb, bytes_written);
skb->protocol = eth_type_trans(skb, netdev);
+ skb_record_rx_queue(skb, q_number);
+ if (netdev->features & NETIF_F_RXHASH) {
+ skb->rxhash = rss_hash;
+ if (rss_type & (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
+ NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
+ NIC_CFG_RSS_HASH_TYPE_TCP_IPV4))
+ skb->l4_rxhash = true;
+ }
if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
skb->csum = htons(checksum);
@@ -1637,7 +1386,7 @@ static int enic_open(struct net_device *netdev)
enic_set_rx_mode(netdev);
- netif_wake_queue(netdev);
+ netif_tx_wake_all_queues(netdev);
for (i = 0; i < enic->rq_count; i++)
napi_enable(&enic->napi[i]);
@@ -2001,6 +1750,7 @@ static void enic_reset(struct work_struct *work)
rtnl_lock();
+ spin_lock(&enic->enic_api_lock);
enic_dev_hang_notify(enic);
enic_stop(enic->netdev);
enic_dev_hang_reset(enic);
@@ -2009,6 +1759,8 @@ static void enic_reset(struct work_struct *work)
enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic);
enic_open(enic->netdev);
+ spin_unlock(&enic->enic_api_lock);
+ call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
rtnl_unlock();
}
@@ -2297,7 +2049,8 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* instance data is initialized to zero.
*/
- netdev = alloc_etherdev(sizeof(struct enic));
+ netdev = alloc_etherdev_mqs(sizeof(struct enic),
+ ENIC_RQ_MAX, ENIC_WQ_MAX);
if (!netdev)
return -ENOMEM;
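
alloc_etherdev_mqs() sizes the netdev for the maximum queue counts up front; the driver then trims to the counts actually provisioned with netif_set_real_num_tx_queues()/netif_set_real_num_rx_queues() later in this probe path. A minimal sketch of the idiom (my_priv, MY_MAX_* and the count variables are placeholders):

struct net_device *dev;
int err;

dev = alloc_etherdev_mqs(sizeof(struct my_priv), MY_MAX_TXQ, MY_MAX_RXQ);
if (!dev)
	return -ENOMEM;

/* ... probe the hardware, learn how many queues really exist ... */

err = netif_set_real_num_tx_queues(dev, tx_count);
if (err)
	goto err_free_netdev;
err = netif_set_real_num_rx_queues(dev, rx_count);
if (err)
	goto err_free_netdev;
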
@@ -2327,11 +2080,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
/* Query PCI controller on system for DMA addressing
- * limitation for the device. Try 40-bit first, and
+ * limitation for the device. Try 64-bit first, and
 * fall back to 32-bit.
*/
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
@@ -2345,10 +2098,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_release_regions;
}
} else {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
dev_err(dev, "Unable to obtain %u-bit DMA "
- "for consistent allocations, aborting\n", 40);
+ "for consistent allocations, aborting\n", 64);
goto err_out_release_regions;
}
using_dac = 1;
@@ -2421,6 +2174,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
spin_lock_init(&enic->devcmd_lock);
+ spin_lock_init(&enic->enic_api_lock);
/*
* Set ingress vlan rewrite mode before vnic initialization
@@ -2462,6 +2216,9 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_dev_close;
}
+ netif_set_real_num_tx_queues(netdev, enic->wq_count);
+ netif_set_real_num_rx_queues(netdev, enic->rq_count);
+
/* Setup notification timer, HW reset task, and wq locks
*/
@@ -2496,7 +2253,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &enic_netdev_ops;
netdev->watchdog_timeo = 2 * HZ;
- netdev->ethtool_ops = &enic_ethtool_ops;
+ enic_set_ethtool_ops(netdev);
netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
if (ENIC_SETTING(enic, LOOP)) {
@@ -2510,6 +2267,8 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ENIC_SETTING(enic, TSO))
netdev->hw_features |= NETIF_F_TSO |
NETIF_F_TSO6 | NETIF_F_TSO_ECN;
+ if (ENIC_SETTING(enic, RSS))
+ netdev->hw_features |= NETIF_F_RXHASH;
if (ENIC_SETTING(enic, RXCSUM))
netdev->hw_features |= NETIF_F_RXCSUM;
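
Taken together, the enic_main.c hunks above convert the TX path from one global queue to per-queue state: per-queue locks, per-queue stop/wake, and a queue chosen from the skb's mapping. Condensed into one place as a reading aid (this mirrors the patch's own code, it is not new logic):

static netdev_tx_t xmit_sketch(struct sk_buff *skb, struct net_device *dev)
{
	struct enic *enic = netdev_priv(dev);
	unsigned int q = skb_get_queue_mapping(skb) % enic->wq_count;
	struct vnic_wq *wq = &enic->wq[q];
	unsigned long flags;

	spin_lock_irqsave(&enic->wq_lock[q], flags);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		/* ring full: stop only this queue, have the stack retry */
		netif_tx_stop_queue(netdev_get_tx_queue(dev, q));
		spin_unlock_irqrestore(&enic->wq_lock[q], flags);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	/* stop early if a worst-case send could no longer fit */
	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_tx_stop_queue(netdev_get_tx_queue(dev, q));

	spin_unlock_irqrestore(&enic->wq_lock[q], flags);
	return NETDEV_TX_OK;
}
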
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.h b/drivers/net/ethernet/cisco/enic/enic_res.h
index 25be2734c3fe..69f60afd6577 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.h
+++ b/drivers/net/ethernet/cisco/enic/enic_res.h
@@ -47,6 +47,9 @@ static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
int offload_mode, int cq_entry, int sop, int eop, int loopback)
{
struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
+ u8 desc_skip_cnt = 1;
+ u8 compressed_send = 0;
+ u64 wrid = 0;
wq_enet_desc_enc(desc,
(u64)dma_addr | VNIC_PADDR_TARGET,
@@ -59,7 +62,8 @@ static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
(u16)vlan_tag,
(u8)loopback);
- vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
+ vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt,
+ (u8)cq_entry, compressed_send, wrid);
}
static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq,
@@ -120,6 +124,7 @@ static inline void enic_queue_rq_desc(struct vnic_rq *rq,
dma_addr_t dma_addr, unsigned int len)
{
struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
+ u64 wrid = 0;
u8 type = os_buf_index ?
RQ_ENET_TYPE_NOT_SOP : RQ_ENET_TYPE_ONLY_SOP;
@@ -127,7 +132,7 @@ static inline void enic_queue_rq_desc(struct vnic_rq *rq,
(u64)dma_addr | VNIC_PADDR_TARGET,
type, (u16)len);
- vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len);
+ vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len, wrid);
}
struct enic;
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 97455c573db5..69dd92598b7e 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -175,6 +175,7 @@ unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
{
return vdev->res[type].count;
}
+EXPORT_SYMBOL(vnic_dev_get_res_count);
void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
unsigned int index)
@@ -193,6 +194,7 @@ void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
return (char __iomem *)vdev->res[type].vaddr;
}
}
+EXPORT_SYMBOL(vnic_dev_get_res);
static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
unsigned int desc_count, unsigned int desc_size)
@@ -942,6 +944,7 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
kfree(vdev);
}
}
+EXPORT_SYMBOL(vnic_dev_unregister);
struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
@@ -969,6 +972,13 @@ err_out:
vnic_dev_unregister(vdev);
return NULL;
}
+EXPORT_SYMBOL(vnic_dev_register);
+
+struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev)
+{
+ return vdev->pdev;
+}
+EXPORT_SYMBOL(vnic_dev_get_pdev);
int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
{
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index f3d9b79ba77e..e670029862a1 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -127,6 +127,7 @@ int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
unsigned int num_bars);
+struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev);
int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len);
int vnic_dev_enable2(struct vnic_dev *vdev, int active);
int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status);
diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
index 23d555255cf8..b9a0d78fd639 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
@@ -281,11 +281,25 @@ enum vnic_devcmd_cmd {
* 0 if no VIF-CONFIG-INFO TLV was ever received. */
CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44),
+ /* INT13 API: (u64)a0=paddr to vnic_int13_params struct
+ * (u32)a1=INT13_CMD_xxx
+ */
+ CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45),
+
+ /* Set default vlan:
+ * in: (u16)a0=new default vlan
+ * (u16)a1=zero for overriding vlan with param a0,
+ * non-zero for resetting vlan to the default
+ * out: (u16)a0=old default vlan
+ */
+ CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46),
+
/* init_prov_info2:
* Variant of CMD_INIT_PROV_INFO, where it will not try to enable
* the vnic until CMD_ENABLE2 is issued.
* (u64)a0=paddr of vnic_devcmd_provinfo
- * (u32)a1=sizeof provision info */
+ * (u32)a1=sizeof provision info
+ */
CMD_INIT_PROV_INFO2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 47),
/* enable2:
@@ -339,16 +353,57 @@ enum vnic_devcmd_cmd {
CMD_INTR_COAL_CONVERT = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 50),
/*
- * cmd_set_mac_addr
- * set mac address
+ * Set the predefined mac address as default
* in:
* (u48)a0 = mac addr
- *
*/
CMD_SET_MAC_ADDR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 55),
+
+ /* Update the provisioning info of the given VIF
+ * (u64)a0=paddr of vnic_devcmd_provinfo
+ * (u32)a1=sizeof provision info
+ */
+ CMD_PROV_INFO_UPDATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 56),
+
+ /* Add a filter.
+ * in: (u64) a0= filter address
+ * (u32) a1= size of filter
+ * out: (u32) a0=filter identifier
+ */
+ CMD_ADD_FILTER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 58),
+
+ /* Delete a filter.
+ * in: (u32) a0=filter identifier
+ */
+ CMD_DEL_FILTER = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 59),
+
+ /* Enable a Queue Pair in User space NIC
+ * in: (u32) a0=Queue Pair number
+ * (u32) a1= command
+ */
+ CMD_QP_ENABLE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 60),
+
+ /* Disable a Queue Pair in User space NIC
+ * in: (u32) a0=Queue Pair number
+ * (u32) a1= command
+ */
+ CMD_QP_DISABLE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 61),
+
+ /* Stats dump Queue Pair in User space NIC
+ * in: (u32) a0=Queue Pair number
+ * (u64) a1=host buffer addr for status dump
+ * (u32) a2=length of the buffer
+ */
+ CMD_QP_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 62),
+
+ /* Clear stats for Queue Pair in User space NIC
+ * in: (u32) a0=Queue Pair number
+ */
+ CMD_QP_STATS_CLEAR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 63),
};
/* CMD_ENABLE2 flags */
+#define CMD_ENABLE2_STANDBY 0x0
#define CMD_ENABLE2_ACTIVE 0x1
/* flags for CMD_OPEN */
@@ -364,6 +419,9 @@ enum vnic_devcmd_cmd {
#define CMD_PFILTER_PROMISCUOUS 0x08
#define CMD_PFILTER_ALL_MULTICAST 0x10
+/* Commands for CMD_QP_ENABLE/CMD_QP_DISABLE */
+#define CMD_QP_RQWQ 0x0
+
/* rewrite modes for CMD_IG_VLAN_REWRITE_MODE */
#define IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK 0
#define IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN 1
@@ -390,6 +448,7 @@ enum vnic_devcmd_error {
ERR_EMAXRES = 10,
ERR_ENOTSUPPORTED = 11,
ERR_EINPROGRESS = 12,
+ ERR_MAX
};
/*
@@ -435,6 +494,115 @@ struct vnic_devcmd_provinfo {
u8 data[0];
};
+/* These are used in flags field of different filters to denote
+ * valid fields used.
+ */
+#define FILTER_FIELD_VALID(fld) (1 << (fld - 1))
+
+#define FILTER_FIELDS_USNIC ( \
+ FILTER_FIELD_VALID(1) | \
+ FILTER_FIELD_VALID(2) | \
+ FILTER_FIELD_VALID(3) | \
+ FILTER_FIELD_VALID(4))
+
+#define FILTER_FIELDS_IPV4_5TUPLE ( \
+ FILTER_FIELD_VALID(1) | \
+ FILTER_FIELD_VALID(2) | \
+ FILTER_FIELD_VALID(3) | \
+ FILTER_FIELD_VALID(4) | \
+ FILTER_FIELD_VALID(5))
+
+#define FILTER_FIELDS_MAC_VLAN ( \
+ FILTER_FIELD_VALID(1) | \
+ FILTER_FIELD_VALID(2))
+
+#define FILTER_FIELD_USNIC_VLAN FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_USNIC_ETHTYPE FILTER_FIELD_VALID(2)
+#define FILTER_FIELD_USNIC_PROTO FILTER_FIELD_VALID(3)
+#define FILTER_FIELD_USNIC_ID FILTER_FIELD_VALID(4)
+
+struct filter_usnic_id {
+ u32 flags;
+ u16 vlan;
+ u16 ethtype;
+ u8 proto_version;
+ u32 usnic_id;
+} __packed;
+
+#define FILTER_FIELD_5TUP_PROTO FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_5TUP_SRC_AD FILTER_FIELD_VALID(2)
+#define FILTER_FIELD_5TUP_DST_AD FILTER_FIELD_VALID(3)
+#define FILTER_FIELD_5TUP_SRC_PT FILTER_FIELD_VALID(4)
+#define FILTER_FIELD_5TUP_DST_PT FILTER_FIELD_VALID(5)
+
+/* Enums for the protocol field. */
+enum protocol_e {
+ PROTO_UDP = 0,
+ PROTO_TCP = 1,
+};
+
+struct filter_ipv4_5tuple {
+ u32 flags;
+ u32 protocol;
+ u32 src_addr;
+ u32 dst_addr;
+ u16 src_port;
+ u16 dst_port;
+} __packed;
+
+#define FILTER_FIELD_VMQ_VLAN FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_VMQ_MAC FILTER_FIELD_VALID(2)
+
+struct filter_mac_vlan {
+ u32 flags;
+ u16 vlan;
+ u8 mac_addr[6];
+} __packed;
+
+/* Specifies the filter_action type. */
+enum {
+ FILTER_ACTION_RQ_STEERING = 0,
+ FILTER_ACTION_MAX
+};
+
+struct filter_action {
+ u32 type;
+ union {
+ u32 rq_idx;
+ } u;
+} __packed;
+
+/* Specifies the filter type. */
+enum filter_type {
+ FILTER_USNIC_ID = 0,
+ FILTER_IPV4_5TUPLE = 1,
+ FILTER_MAC_VLAN = 2,
+ FILTER_MAX
+};
+
+struct filter {
+ u32 type;
+ union {
+ struct filter_usnic_id usnic;
+ struct filter_ipv4_5tuple ipv4;
+ struct filter_mac_vlan mac_vlan;
+ } u;
+} __packed;
+
+enum {
+ CLSF_TLV_FILTER = 0,
+ CLSF_TLV_ACTION = 1,
+};
+
+/* Maximum size of buffer to CMD_ADD_FILTER */
+#define FILTER_MAX_BUF_SIZE 100
+
+struct filter_tlv {
+ u_int32_t type;
+ u_int32_t length;
+ u_int32_t val[0];
+};
+
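
The filter and action structs above travel to firmware as TLVs inside the buffer handed to CMD_ADD_FILTER ((u64)a0 = buffer address, (u32)a1 = size). A hedged sketch of one plausible layout — the two TLVs packed back to back — follows; only the types and constants come from this header, the exact marshalling convention is an assumption:

static size_t fill_filter_buf(void *buf, const struct filter *flt,
			      const struct filter_action *act)
{
	struct filter_tlv *tlv = buf;

	tlv->type = CLSF_TLV_FILTER;
	tlv->length = sizeof(*flt);
	memcpy(tlv->val, flt, sizeof(*flt));

	/* next TLV starts right after the first one's payload */
	tlv = (struct filter_tlv *)((char *)tlv->val + sizeof(*flt));
	tlv->type = CLSF_TLV_ACTION;
	tlv->length = sizeof(*act);
	memcpy(tlv->val, act, sizeof(*act));

	/* total must not exceed FILTER_MAX_BUF_SIZE (100) */
	return 2 * sizeof(struct filter_tlv) + sizeof(*flt) + sizeof(*act);
}
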
/*
* Writing cmd register causes STAT_BUSY to get set in status register.
* When cmd completes, STAT_BUSY will be cleared.
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index 7e1488fc8ab2..36a2ed606c91 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -30,12 +30,9 @@
static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
{
struct vnic_rq_buf *buf;
- struct vnic_dev *vdev;
unsigned int i, j, count = rq->ring.desc_count;
unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
- vdev = rq->vdev;
-
for (i = 0; i < blks; i++) {
rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC);
if (!rq->bufs[i])
@@ -141,7 +138,7 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
{
- u32 fetch_index;
+ u32 fetch_index = 0;
/* Use current fetch_index as the ring starting point */
fetch_index = ioread32(&rq->ctrl->fetch_index);
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.h b/drivers/net/ethernet/cisco/enic/vnic_rq.h
index 2056586f4d4b..ee7bc95af278 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.h
@@ -72,6 +72,7 @@ struct vnic_rq_buf {
unsigned int len;
unsigned int index;
void *desc;
+ uint64_t wr_id;
};
struct vnic_rq {
@@ -110,7 +111,8 @@ static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
static inline void vnic_rq_post(struct vnic_rq *rq,
void *os_buf, unsigned int os_buf_index,
- dma_addr_t dma_addr, unsigned int len)
+ dma_addr_t dma_addr, unsigned int len,
+ uint64_t wrid)
{
struct vnic_rq_buf *buf = rq->to_use;
@@ -118,6 +120,7 @@ static inline void vnic_rq_post(struct vnic_rq *rq,
buf->os_buf_index = os_buf_index;
buf->dma_addr = dma_addr;
buf->len = len;
+ buf->wr_id = wrid;
buf = buf->next;
rq->to_use = buf;
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c
index 5e0d7a2be9bc..3e6b8d54dafc 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c
@@ -30,12 +30,9 @@
static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
{
struct vnic_wq_buf *buf;
- struct vnic_dev *vdev;
unsigned int i, j, count = wq->ring.desc_count;
unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
- vdev = wq->vdev;
-
for (i = 0; i < blks; i++) {
wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC);
if (!wq->bufs[i])
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h
index 7dd937ac11c2..2c6c70804a39 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
@@ -58,6 +58,10 @@ struct vnic_wq_buf {
unsigned int index;
int sop;
void *desc;
+ uint64_t wr_id; /* Cookie */
+ uint8_t cq_entry; /* Gets completion event from hw */
+ uint8_t desc_skip_cnt; /* Num descs to occupy */
+ uint8_t compressed_send; /* Both hdr and payload in one desc */
};
/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
@@ -102,14 +106,20 @@ static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
static inline void vnic_wq_post(struct vnic_wq *wq,
void *os_buf, dma_addr_t dma_addr,
- unsigned int len, int sop, int eop)
+ unsigned int len, int sop, int eop,
+ uint8_t desc_skip_cnt, uint8_t cq_entry,
+ uint8_t compressed_send, uint64_t wrid)
{
struct vnic_wq_buf *buf = wq->to_use;
buf->sop = sop;
+ buf->cq_entry = cq_entry;
+ buf->compressed_send = compressed_send;
+ buf->desc_skip_cnt = desc_skip_cnt;
buf->os_buf = eop ? os_buf : NULL;
buf->dma_addr = dma_addr;
buf->len = len;
+ buf->wr_id = wrid;
buf = buf->next;
if (eop) {
@@ -123,7 +133,7 @@ static inline void vnic_wq_post(struct vnic_wq *wq,
}
wq->to_use = buf;
- wq->ring.desc_avail--;
+ wq->ring.desc_avail -= desc_skip_cnt;
}
static inline void vnic_wq_service(struct vnic_wq *wq,
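
vnic_wq_post() now charges desc_skip_cnt descriptors per post rather than a flat one. A worked example of the new accounting:

/* With ring.desc_count = 64 and a caller posting buffers that each
 * occupy two descriptor slots (desc_skip_cnt = 2), availability now
 * drops 64 -> 62 -> 60 ... per post instead of by one.  The enic
 * paths in this patch still pass desc_skip_cnt = 1, so their
 * behaviour is unchanged; the wider signature (wr_id, cq_entry,
 * compressed_send) exists for the exported-symbol consumers.
 */
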
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index a13b312b50f2..5f5896e522d2 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1384,7 +1384,7 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
static int
dm9000_probe(struct platform_device *pdev)
{
- struct dm9000_plat_data *pdata = pdev->dev.platform_data;
+ struct dm9000_plat_data *pdata = dev_get_platdata(&pdev->dev);
struct board_info *db; /* Point a board information structure */
struct net_device *ndev;
const unsigned char *mac_src;
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 4c830030fb06..2db6c573cec7 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -2319,7 +2319,7 @@ static void de4x5_pci_remove(struct pci_dev *pdev)
struct net_device *dev;
u_long iobase;
- dev = dev_get_drvdata(&pdev->dev);
+ dev = pci_get_drvdata(pdev);
iobase = dev->base_addr;
unregister_netdev (dev);
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index c94152f1c6be..4e8cfa2ac803 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1304,7 +1304,9 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct tulip_private *tp;
/* See note below on the multiport cards. */
- static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'};
+ static unsigned char last_phys_addr[ETH_ALEN] = {
+ 0x00, 'L', 'i', 'n', 'u', 'x'
+ };
static int last_irq;
static int multiport_cnt; /* For four-port boards w/one EEPROM */
int i, irq;
@@ -1627,8 +1629,8 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->dev_addr[i] = last_phys_addr[i] + 1;
#if defined(CONFIG_SPARC)
addr = of_get_property(dp, "local-mac-address", &len);
- if (addr && len == 6)
- memcpy(dev->dev_addr, addr, 6);
+ if (addr && len == ETH_ALEN)
+ memcpy(dev->dev_addr, addr, ETH_ALEN);
#endif
#if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */
if (last_irq)
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 50d9c6315930..bf3bf6f22c99 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -469,6 +469,17 @@ static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
}
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void sundance_poll_controller(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ disable_irq(np->pci_dev->irq);
+ intr_handler(np->pci_dev->irq, dev);
+ enable_irq(np->pci_dev->irq);
+}
+#endif
+
static const struct net_device_ops netdev_ops = {
.ndo_open = netdev_open,
.ndo_stop = netdev_close,
@@ -480,6 +491,9 @@ static const struct net_device_ops netdev_ops = {
.ndo_change_mtu = change_mtu,
.ndo_set_mac_address = sundance_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = sundance_poll_controller,
+#endif
};
static int sundance_probe1(struct pci_dev *pdev,
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index c827b1b6b1ce..ace5050dba38 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -34,7 +34,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "4.6.62.0u"
+#define DRV_VER "4.9.134.0u"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
@@ -99,14 +99,18 @@ static inline char *nic_name(struct pci_dev *pdev)
#define MCC_Q_LEN 128 /* total size not to exceed 8 pages */
#define MCC_CQ_LEN 256
-#define BE3_MAX_RSS_QS 8
#define BE2_MAX_RSS_QS 4
-#define MAX_RSS_QS BE3_MAX_RSS_QS
-#define MAX_RX_QS (MAX_RSS_QS + 1) /* RSS qs + 1 def Rx */
+#define BE3_MAX_RSS_QS 16
+#define BE3_MAX_TX_QS 16
+#define BE3_MAX_EVT_QS 16
+
+#define MAX_RX_QS 32
+#define MAX_EVT_QS 32
+#define MAX_TX_QS 32
-#define MAX_TX_QS 8
#define MAX_ROCE_EQS 5
-#define MAX_MSIX_VECTORS (MAX_RSS_QS + MAX_ROCE_EQS) /* RSS qs + RoCE */
+#define MAX_MSIX_VECTORS 32
+#define MIN_MSIX_VECTORS 1
#define BE_TX_BUDGET 256
#define BE_NAPI_WEIGHT 64
#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
@@ -189,6 +193,7 @@ struct be_eq_obj {
u32 cur_eqd; /* in usecs */
u8 idx; /* array index */
+ u8 msix_idx;
u16 tx_budget;
u16 spurious_intr;
struct napi_struct napi;
@@ -352,6 +357,18 @@ struct phy_info {
u32 supported;
};
+struct be_resources {
+ u16 max_vfs; /* Total VFs "really" supported by FW/HW */
+ u16 max_mcast_mac;
+ u16 max_tx_qs;
+ u16 max_rss_qs;
+ u16 max_rx_qs;
+ u16 max_uc_mac; /* Max UC MACs programmable */
+ u16 max_vlans; /* Number of vlans supported */
+ u16 max_evt_qs;
+ u32 if_cap_flags;
+};
+
struct be_adapter {
struct pci_dev *pdev;
struct net_device *netdev;
@@ -369,18 +386,19 @@ struct be_adapter {
spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
spinlock_t mcc_cq_lock;
- u32 num_msix_vec;
- u32 num_evt_qs;
- struct be_eq_obj eq_obj[MAX_MSIX_VECTORS];
+ u16 cfg_num_qs; /* configured via set-channels */
+ u16 num_evt_qs;
+ u16 num_msix_vec;
+ struct be_eq_obj eq_obj[MAX_EVT_QS];
struct msix_entry msix_entries[MAX_MSIX_VECTORS];
bool isr_registered;
/* TX Rings */
- u32 num_tx_qs;
+ u16 num_tx_qs;
struct be_tx_obj tx_obj[MAX_TX_QS];
/* Rx rings */
- u32 num_rx_qs;
+ u16 num_rx_qs;
struct be_rx_obj rx_obj[MAX_RX_QS];
u32 big_page_size; /* Compounded page size shared by rx wrbs */
@@ -430,8 +448,8 @@ struct be_adapter {
u32 flash_status;
struct completion flash_compl;
- u32 num_vfs; /* Number of VFs provisioned by PF driver */
- u32 dev_num_vfs; /* Number of VFs supported by HW */
+ struct be_resources res; /* resources available for the func */
+ u16 num_vfs; /* Number of VFs provisioned by PF */
u8 virtfn;
struct be_vf_cfg *vf_cfg;
bool be3_native;
@@ -446,21 +464,13 @@ struct be_adapter {
u16 qnq_vid;
u32 msg_enable;
int be_get_temp_freq;
- u16 max_mcast_mac;
- u16 max_tx_queues;
- u16 max_rss_queues;
- u16 max_rx_queues;
- u16 max_pmac_cnt;
- u16 max_vlans;
- u16 max_event_queues;
- u32 if_cap_flags;
u8 pf_number;
u64 rss_flags;
};
#define be_physfn(adapter) (!adapter->virtfn)
#define sriov_enabled(adapter) (adapter->num_vfs > 0)
-#define sriov_want(adapter) (adapter->dev_num_vfs && num_vfs && \
+#define sriov_want(adapter) (be_max_vfs(adapter) && num_vfs && \
be_physfn(adapter))
#define for_all_vfs(adapter, vf_cfg, i) \
for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
@@ -469,6 +479,26 @@ struct be_adapter {
#define ON 1
#define OFF 0
+#define be_max_vlans(adapter) (adapter->res.max_vlans)
+#define be_max_uc(adapter) (adapter->res.max_uc_mac)
+#define be_max_mc(adapter) (adapter->res.max_mcast_mac)
+#define be_max_vfs(adapter) (adapter->res.max_vfs)
+#define be_max_rss(adapter) (adapter->res.max_rss_qs)
+#define be_max_txqs(adapter) (adapter->res.max_tx_qs)
+#define be_max_prio_txqs(adapter) (adapter->res.max_prio_tx_qs)
+#define be_max_rxqs(adapter) (adapter->res.max_rx_qs)
+#define be_max_eqs(adapter) (adapter->res.max_evt_qs)
+#define be_if_cap_flags(adapter) (adapter->res.if_cap_flags)
+
+static inline u16 be_max_qs(struct be_adapter *adapter)
+{
+ /* If no RSS, need at least the one default RXQ */
+ u16 num = max_t(u16, be_max_rss(adapter), 1);
+
+ num = min(num, be_max_eqs(adapter));
+ return min_t(u16, num, num_online_cpus());
+}
+
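
be_max_qs() clamps the RSS capability against the event-queue budget and the CPU count. A userspace restatement with sample numbers (the helper names are made up):

#include <stdio.h>

static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }
static unsigned max_u(unsigned a, unsigned b) { return a > b ? a : b; }

static unsigned be_max_qs_demo(unsigned max_rss, unsigned max_eqs,
			       unsigned online_cpus)
{
	unsigned num = max_u(max_rss, 1);	/* keep the one default RXQ */

	num = min_u(num, max_eqs);
	return min_u(num, online_cpus);
}

int main(void)
{
	printf("%u\n", be_max_qs_demo(8, 6, 4));	/* 8 -> 6 -> 4 */
	printf("%u\n", be_max_qs_demo(0, 6, 4));	/* no RSS: still 1 */
	return 0;
}
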
#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \
adapter->pdev->device == OC_DEVICE_ID4)
@@ -672,6 +702,8 @@ extern int be_load_fw(struct be_adapter *adapter, u8 *func);
extern bool be_is_wol_supported(struct be_adapter *adapter);
extern bool be_pause_supported(struct be_adapter *adapter);
extern u32 be_get_fw_log_level(struct be_adapter *adapter);
+int be_update_queues(struct be_adapter *adapter);
+int be_poll(struct napi_struct *napi, int budget);
/*
* internal function to initialize-cleanup roce device.
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 8ec5d74ad44d..1ab5dab11eff 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -258,7 +258,8 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
(struct be_async_event_grp5_pvid_state *)evt);
break;
default:
- dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
+ dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n",
+ event_type);
break;
}
}
@@ -279,7 +280,8 @@ static void be_async_dbg_evt_process(struct be_adapter *adapter,
adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
break;
default:
- dev_warn(&adapter->pdev->dev, "Unknown debug event\n");
+ dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
+ event_type);
break;
}
}
@@ -631,6 +633,12 @@ static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
return &wrb->payload.sgl[0];
}
+static inline void fill_wrb_tags(struct be_mcc_wrb *wrb,
+ unsigned long addr)
+{
+ wrb->tag0 = addr & 0xFFFFFFFF;
+ wrb->tag1 = upper_32_bits(addr);
+}
/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
@@ -639,17 +647,12 @@ static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
{
struct be_sge *sge;
- unsigned long addr = (unsigned long)req_hdr;
- u64 req_addr = addr;
req_hdr->opcode = opcode;
req_hdr->subsystem = subsystem;
req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
req_hdr->version = 0;
-
- wrb->tag0 = req_addr & 0xFFFFFFFF;
- wrb->tag1 = upper_32_bits(req_addr);
-
+ fill_wrb_tags(wrb, (ulong) req_hdr);
wrb->payload_length = cmd_len;
if (mem) {
wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
@@ -676,31 +679,6 @@ static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
}
}
-/* Converts interrupt delay in microseconds to multiplier value */
-static u32 eq_delay_to_mult(u32 usec_delay)
-{
-#define MAX_INTR_RATE 651042
- const u32 round = 10;
- u32 multiplier;
-
- if (usec_delay == 0)
- multiplier = 0;
- else {
- u32 interrupt_rate = 1000000 / usec_delay;
- /* Max delay, corresponding to the lowest interrupt rate */
- if (interrupt_rate == 0)
- multiplier = 1023;
- else {
- multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
- multiplier /= interrupt_rate;
- /* Round the multiplier to the closest value.*/
- multiplier = (multiplier + round/2) / round;
- multiplier = min(multiplier, (u32)1023);
- }
- }
- return multiplier;
-}
-
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
@@ -728,6 +706,78 @@ static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
return wrb;
}
+static bool use_mcc(struct be_adapter *adapter)
+{
+ return adapter->mcc_obj.q.created;
+}
+
+/* Must be used only in process context */
+static int be_cmd_lock(struct be_adapter *adapter)
+{
+ if (use_mcc(adapter)) {
+ spin_lock_bh(&adapter->mcc_lock);
+ return 0;
+ } else {
+ return mutex_lock_interruptible(&adapter->mbox_lock);
+ }
+}
+
+/* Must be used only in process context */
+static void be_cmd_unlock(struct be_adapter *adapter)
+{
+ if (use_mcc(adapter))
+ spin_unlock_bh(&adapter->mcc_lock);
+ else
+ return mutex_unlock(&adapter->mbox_lock);
+}
+
+static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
+ struct be_mcc_wrb *wrb)
+{
+ struct be_mcc_wrb *dest_wrb;
+
+ if (use_mcc(adapter)) {
+ dest_wrb = wrb_from_mccq(adapter);
+ if (!dest_wrb)
+ return NULL;
+ } else {
+ dest_wrb = wrb_from_mbox(adapter);
+ }
+
+ memcpy(dest_wrb, wrb, sizeof(*wrb));
+ if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
+ fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));
+
+ return dest_wrb;
+}
+
+/* Must be used only in process context */
+static int be_cmd_notify_wait(struct be_adapter *adapter,
+ struct be_mcc_wrb *wrb)
+{
+ struct be_mcc_wrb *dest_wrb;
+ int status;
+
+ status = be_cmd_lock(adapter);
+ if (status)
+ return status;
+
+ dest_wrb = be_cmd_copy(adapter, wrb);
+ if (!dest_wrb)
+ return -EBUSY;
+
+ if (use_mcc(adapter))
+ status = be_mcc_notify_wait(adapter);
+ else
+ status = be_mbox_notify_wait(adapter);
+
+ if (!status)
+ memcpy(wrb, dest_wrb, sizeof(*wrb));
+
+ be_cmd_unlock(adapter);
+ return status;
+}
+
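
The helper trio above lets a command be composed in a WRB on the stack and then routed to the MCCQ once it exists, or to the mailbox during early init. The be_cmd_txq_create() conversion below is the canonical user; the general shape looks like this (the req/resp type and opcode names are illustrative):

struct be_mcc_wrb wrb = {0};
struct be_cmd_req_foo *req = embedded_payload(&wrb);
int status;

be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_FOO,
		       sizeof(*req), &wrb, NULL);
/* ... fill in req ... */

status = be_cmd_notify_wait(adapter, &wrb);	/* copies wrb in and back out */
if (!status) {
	struct be_cmd_resp_foo *resp = embedded_payload(&wrb);
	/* ... consume resp ... */
}
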
/* Tell fw we're about to start firing cmds by writing a
* special pattern across the wrb hdr; uses mbox
*/
@@ -788,13 +838,12 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
return status;
}
-int be_cmd_eq_create(struct be_adapter *adapter,
- struct be_queue_info *eq, int eq_delay)
+int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_eq_create *req;
- struct be_dma_mem *q_mem = &eq->dma_mem;
- int status;
+ struct be_dma_mem *q_mem = &eqo->q.dma_mem;
+ int status, ver = 0;
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -805,15 +854,18 @@ int be_cmd_eq_create(struct be_adapter *adapter,
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);
+ /* Support for EQ_CREATEv2 is available only from SH-R onwards */
+ if (!(BEx_chip(adapter) || lancer_chip(adapter)))
+ ver = 2;
+
+ req->hdr.version = ver;
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
/* 4byte eqe*/
AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
AMAP_SET_BITS(struct amap_eq_context, count, req->context,
- __ilog2_u32(eq->len/256));
- AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
- eq_delay_to_mult(eq_delay));
+ __ilog2_u32(eqo->q.len / 256));
be_dws_cpu_to_le(req->context, sizeof(req->context));
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@@ -821,8 +873,10 @@ int be_cmd_eq_create(struct be_adapter *adapter,
status = be_mbox_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
- eq->id = le16_to_cpu(resp->eq_id);
- eq->created = true;
+ eqo->q.id = le16_to_cpu(resp->eq_id);
+ eqo->msix_idx =
+ (ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
+ eqo->q.created = true;
}
mutex_unlock(&adapter->mbox_lock);
@@ -1010,9 +1064,9 @@ static u32 be_encoded_q_len(int q_len)
return len_encoded;
}
-int be_cmd_mccq_ext_create(struct be_adapter *adapter,
- struct be_queue_info *mccq,
- struct be_queue_info *cq)
+static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_mcc_ext_create *req;
@@ -1068,9 +1122,9 @@ int be_cmd_mccq_ext_create(struct be_adapter *adapter,
return status;
}
-int be_cmd_mccq_org_create(struct be_adapter *adapter,
- struct be_queue_info *mccq,
- struct be_queue_info *cq)
+static int be_cmd_mccq_org_create(struct be_adapter *adapter,
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_mcc_create *req;
@@ -1128,25 +1182,16 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
{
- struct be_mcc_wrb *wrb;
+ struct be_mcc_wrb wrb = {0};
struct be_cmd_req_eth_tx_create *req;
struct be_queue_info *txq = &txo->q;
struct be_queue_info *cq = &txo->cq;
struct be_dma_mem *q_mem = &txq->dma_mem;
int status, ver = 0;
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
-
- req = embedded_payload(wrb);
-
+ req = embedded_payload(&wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL);
+ OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
if (lancer_chip(adapter)) {
req->hdr.version = 1;
@@ -1164,12 +1209,11 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
req->cq_id = cpu_to_le16(cq->id);
req->queue_size = be_encoded_q_len(txq->len);
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
-
ver = req->hdr.version;
- status = be_mcc_notify_wait(adapter);
+ status = be_cmd_notify_wait(adapter, &wrb);
if (!status) {
- struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
+ struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);
txq->id = le16_to_cpu(resp->cid);
if (ver == 2)
txo->db_offset = le32_to_cpu(resp->db_offset);
@@ -1178,9 +1222,6 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
txq->created = true;
}
-err:
- spin_unlock_bh(&adapter->mcc_lock);
-
return status;
}
@@ -1309,40 +1350,32 @@ err:
}
/* Create an rx filtering policy configuration on an i/f
- * Uses MCCQ
+ * Will use MBOX only if MCCQ has not been created.
*/
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
u32 *if_handle, u32 domain)
{
- struct be_mcc_wrb *wrb;
+ struct be_mcc_wrb wrb = {0};
struct be_cmd_req_if_create *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- req = embedded_payload(wrb);
-
+ req = embedded_payload(&wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), &wrb, NULL);
req->hdr.domain = domain;
req->capability_flags = cpu_to_le32(cap_flags);
req->enable_flags = cpu_to_le32(en_flags);
-
req->pmac_invalid = true;
- status = be_mcc_notify_wait(adapter);
+ status = be_cmd_notify_wait(adapter, &wrb);
if (!status) {
- struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
+ struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);
*if_handle = le32_to_cpu(resp->interface_id);
- }
-err:
- spin_unlock_bh(&adapter->mcc_lock);
+ /* Hack to retrieve VF's pmac-id on BE3 */
+ if (BE3_chip(adapter) && !be_physfn(adapter))
+ adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
+ }
return status;
}
@@ -1460,6 +1493,12 @@ static int be_mac_to_link_speed(int mac_speed)
return 1000;
case PHY_LINK_SPEED_10GBPS:
return 10000;
+ case PHY_LINK_SPEED_20GBPS:
+ return 20000;
+ case PHY_LINK_SPEED_25GBPS:
+ return 25000;
+ case PHY_LINK_SPEED_40GBPS:
+ return 40000;
}
return 0;
}
@@ -1520,7 +1559,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_cntl_addnl_attribs *req;
- int status;
+ int status = 0;
spin_lock_bh(&adapter->mcc_lock);
@@ -1785,8 +1824,7 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
*/
req->if_flags_mask |=
cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
- adapter->if_cap_flags);
-
+ be_if_cap_flags(adapter));
req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
netdev_for_each_mc_addr(ha, adapter->netdev)
memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
@@ -2444,6 +2482,12 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
le16_to_cpu(resp_phy_info->fixed_speeds_supported);
adapter->phy.misc_params =
le32_to_cpu(resp_phy_info->misc_params);
+
+ if (BE2_chip(adapter)) {
+ adapter->phy.fixed_speeds_supported =
+ BE_SUPPORTED_SPEED_10GBPS |
+ BE_SUPPORTED_SPEED_1GBPS;
+ }
}
pci_free_consistent(adapter->pdev, cmd.size,
cmd.va, cmd.dma);
@@ -2606,9 +2650,44 @@ err:
return status;
}
-/* Uses synchronous MCCQ */
+/* Set privilege(s) for a function */
+int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
+ u32 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_fn_privileges *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
+ wrb, NULL);
+ req->hdr.domain = domain;
+ if (lancer_chip(adapter))
+ req->privileges_lancer = cpu_to_le32(privileges);
+ else
+ req->privileges = cpu_to_le32(privileges);
+
+ status = be_mcc_notify_wait(adapter);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* pmac_id_valid: true => pmac_id is supplied and the matching MAC address
+ * is returned.
+ * pmac_id_valid: false => an active pmac_id or a MAC address is requested;
+ * if an active pmac_id is found it is returned and *pmac_id_valid is set
+ * to true on return, else the first MAC address is returned.
+ */
int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
- bool *pmac_id_active, u32 *pmac_id, u8 domain)
+ bool *pmac_id_valid, u32 *pmac_id, u8 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_mac_list *req;
@@ -2644,12 +2723,25 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
req->hdr.domain = domain;
req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
- req->perm_override = 1;
+ if (*pmac_id_valid) {
+ req->mac_id = cpu_to_le32(*pmac_id);
+ req->iface_id = cpu_to_le16(adapter->if_handle);
+ req->perm_override = 0;
+ } else {
+ req->perm_override = 1;
+ }
status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_get_mac_list *resp =
get_mac_list_cmd.va;
+
+ if (*pmac_id_valid) {
+ memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
+ ETH_ALEN);
+ goto out;
+ }
+
mac_count = resp->true_mac_count + resp->pseudo_mac_count;
/* Mac list returned could contain one or more active mac_ids
 * or one or more true or pseudo permanent mac addresses.
@@ -2667,14 +2759,14 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
* is 6 bytes
*/
if (mac_addr_size == sizeof(u32)) {
- *pmac_id_active = true;
+ *pmac_id_valid = true;
mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
*pmac_id = le32_to_cpu(mac_id);
goto out;
}
}
/* If no active mac_id found, return first mac addr */
- *pmac_id_active = false;
+ *pmac_id_valid = false;
memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
ETH_ALEN);
}
@@ -2686,6 +2778,41 @@ out:
return status;
}
+int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac)
+{
+ bool active = true;
+
+ if (BEx_chip(adapter))
+ return be_cmd_mac_addr_query(adapter, mac, false,
+ adapter->if_handle, curr_pmac_id);
+ else
+ /* Fetch the MAC address using pmac_id */
+ return be_cmd_get_mac_from_list(adapter, mac, &active,
+ &curr_pmac_id, 0);
+}
+
+int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
+{
+ int status;
+ bool pmac_valid = false;
+
+ memset(mac, 0, ETH_ALEN);
+
+ if (BEx_chip(adapter)) {
+ if (be_physfn(adapter))
+ status = be_cmd_mac_addr_query(adapter, mac, true, 0,
+ 0);
+ else
+ status = be_cmd_mac_addr_query(adapter, mac, false,
+ adapter->if_handle, 0);
+ } else {
+ status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
+ NULL, 0);
+ }
+
+ return status;
+}
+
/* Uses synchronous MCCQ */
int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
u8 mac_count, u32 domain)
@@ -2729,8 +2856,27 @@ err:
return status;
}
+/* Wrapper to delete any active MACs and provision the new mac.
+ * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
+ * current list are active.
+ */
+int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
+{
+ bool active_mac = false;
+ u8 old_mac[ETH_ALEN];
+ u32 pmac_id;
+ int status;
+
+ status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
+ &pmac_id, dom);
+ if (!status && active_mac)
+ be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
+
+ return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
+}
+
int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
- u32 domain, u16 intf_id)
+ u32 domain, u16 intf_id, u16 hsw_mode)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_set_hsw_config *req;
@@ -2757,6 +2903,13 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
}
+ if (!BEx_chip(adapter) && hsw_mode) {
+ AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
+ ctxt, adapter->hba_port_num);
+ AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
+ AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
+ ctxt, hsw_mode);
+ }
be_dws_cpu_to_le(req->context, sizeof(req->context));
status = be_mcc_notify_wait(adapter);
@@ -2768,7 +2921,7 @@ err:
/* Get Hyper switch config */
int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
- u32 domain, u16 intf_id)
+ u32 domain, u16 intf_id, u8 *mode)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_hsw_config *req;
@@ -2791,9 +2944,15 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);
req->hdr.domain = domain;
- AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt,
- intf_id);
+ AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
+ ctxt, intf_id);
AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
+
+ if (!BEx_chip(adapter)) {
+ AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
+ ctxt, adapter->hba_port_num);
+ AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
+ }
be_dws_cpu_to_le(req->context, sizeof(req->context));
status = be_mcc_notify_wait(adapter);
@@ -2804,7 +2963,11 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
sizeof(resp->context));
vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
pvid, &resp->context);
- *pvid = le16_to_cpu(vid);
+ if (pvid)
+ *pvid = le16_to_cpu(vid);
+ if (mode)
+ *mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
+ port_fwd_type, &resp->context);
}
err:
@@ -2967,30 +3130,63 @@ err:
return status;
}
-static struct be_nic_resource_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
- u32 max_buf_size)
+static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count)
{
- struct be_nic_resource_desc *desc = (struct be_nic_resource_desc *)buf;
+ struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
int i;
for (i = 0; i < desc_count; i++) {
- desc->desc_len = desc->desc_len ? : RESOURCE_DESC_SIZE;
- if (((void *)desc + desc->desc_len) >
- (void *)(buf + max_buf_size))
- return NULL;
+ if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
+ hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1)
+ return (struct be_nic_res_desc *)hdr;
- if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
- desc->desc_type == NIC_RESOURCE_DESC_TYPE_V1)
- return desc;
-
- desc = (void *)desc + desc->desc_len;
+ hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
+ hdr = (void *)hdr + hdr->desc_len;
}
+ return NULL;
+}
+static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
+ u32 desc_count)
+{
+ struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
+ struct be_pcie_res_desc *pcie;
+ int i;
+
+ for (i = 0; i < desc_count; i++) {
+ if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
+ hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) {
+ pcie = (struct be_pcie_res_desc *)hdr;
+ if (pcie->pf_num == devfn)
+ return pcie;
+ }
+
+ hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
+ hdr = (void *)hdr + hdr->desc_len;
+ }
return NULL;
}
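
be_get_nic_desc() and the new be_get_pcie_desc() both walk a firmware-returned buffer of variable-length descriptors, each led by a {type, len} header, and fall back to a default length when the firmware reports zero. A standalone sketch of that walk, with invented types and sizes:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DESC_TYPE_NIC    0x41
#define DEFAULT_DESC_LEN 72   /* used when a descriptor reports len == 0 */

struct desc_hdr {
    uint8_t type;
    uint8_t len;    /* total descriptor length, header included */
};

/* Return the first descriptor of the wanted type, or NULL. */
static const struct desc_hdr *find_desc(const uint8_t *buf, unsigned count,
                                        uint8_t want)
{
    const struct desc_hdr *hdr = (const struct desc_hdr *)buf;

    for (unsigned i = 0; i < count; i++) {
        if (hdr->type == want)
            return hdr;
        /* Tolerate firmware that leaves len zero. */
        unsigned len = hdr->len ? hdr->len : DEFAULT_DESC_LEN;
        hdr = (const struct desc_hdr *)((const uint8_t *)hdr + len);
    }
    return NULL;
}

int main(void)
{
    uint8_t buf[256];

    memset(buf, 0, sizeof(buf));
    buf[0] = 0x40; buf[1] = 8;              /* some other descriptor */
    buf[8] = DESC_TYPE_NIC; buf[9] = 16;    /* the one we want */

    const struct desc_hdr *d = find_desc(buf, 2, DESC_TYPE_NIC);
    printf("found: %s\n", d ? "yes" : "no");
    return 0;
}
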
+static void be_copy_nic_desc(struct be_resources *res,
+ struct be_nic_res_desc *desc)
+{
+ res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
+ res->max_vlans = le16_to_cpu(desc->vlan_count);
+ res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
+ res->max_tx_qs = le16_to_cpu(desc->txq_count);
+ res->max_rss_qs = le16_to_cpu(desc->rssq_count);
+ res->max_rx_qs = le16_to_cpu(desc->rq_count);
+ res->max_evt_qs = le16_to_cpu(desc->eq_count);
+ /* Clear flags that the driver is not interested in */
+ res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
+ BE_IF_CAP_FLAGS_WANT;
+ /* Need 1 RXQ as the default RXQ */
+ if (res->max_rss_qs && res->max_rss_qs == res->max_rx_qs)
+ res->max_rss_qs -= 1;
+}
+
/* Uses Mbox */
-int be_cmd_get_func_config(struct be_adapter *adapter)
+int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_func_config *req;
@@ -3029,28 +3225,16 @@ int be_cmd_get_func_config(struct be_adapter *adapter)
if (!status) {
struct be_cmd_resp_get_func_config *resp = cmd.va;
u32 desc_count = le32_to_cpu(resp->desc_count);
- struct be_nic_resource_desc *desc;
+ struct be_nic_res_desc *desc;
- desc = be_get_nic_desc(resp->func_param, desc_count,
- sizeof(resp->func_param));
+ desc = be_get_nic_desc(resp->func_param, desc_count);
if (!desc) {
status = -EINVAL;
goto err;
}
adapter->pf_number = desc->pf_num;
- adapter->max_pmac_cnt = le16_to_cpu(desc->unicast_mac_count);
- adapter->max_vlans = le16_to_cpu(desc->vlan_count);
- adapter->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
- adapter->max_tx_queues = le16_to_cpu(desc->txq_count);
- adapter->max_rss_queues = le16_to_cpu(desc->rssq_count);
- adapter->max_rx_queues = le16_to_cpu(desc->rq_count);
-
- adapter->max_event_queues = le16_to_cpu(desc->eq_count);
- adapter->if_cap_flags = le32_to_cpu(desc->cap_flags);
-
- /* Clear flags that driver is not interested in */
- adapter->if_cap_flags &= BE_IF_CAP_FLAGS_WANT;
+ be_copy_nic_desc(res, desc);
}
err:
mutex_unlock(&adapter->mbox_lock);
@@ -3060,8 +3244,8 @@ err:
}
/* Uses mbox */
-int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
- u8 domain, struct be_dma_mem *cmd)
+static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
+ u8 domain, struct be_dma_mem *cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_profile_config *req;
@@ -3088,8 +3272,8 @@ int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
}
/* Uses sync mcc */
-int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
- u8 domain, struct be_dma_mem *cmd)
+static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
+ u8 domain, struct be_dma_mem *cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_profile_config *req;
@@ -3121,54 +3305,51 @@ err:
}
/* Uses sync MCC if the MCCQ is already created; otherwise uses mbox */
-int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
- u16 *txq_count, u8 domain)
+int be_cmd_get_profile_config(struct be_adapter *adapter,
+ struct be_resources *res, u8 domain)
{
+ struct be_cmd_resp_get_profile_config *resp;
+ struct be_pcie_res_desc *pcie;
+ struct be_nic_res_desc *nic;
struct be_queue_info *mccq = &adapter->mcc_obj.q;
struct be_dma_mem cmd;
+ u32 desc_count;
int status;
memset(&cmd, 0, sizeof(struct be_dma_mem));
- if (!lancer_chip(adapter))
- cmd.size = sizeof(struct be_cmd_resp_get_profile_config_v1);
- else
- cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
- &cmd.dma);
- if (!cmd.va) {
- dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+ cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+ if (!cmd.va)
return -ENOMEM;
- }
if (!mccq->created)
status = be_cmd_get_profile_config_mbox(adapter, domain, &cmd);
else
status = be_cmd_get_profile_config_mccq(adapter, domain, &cmd);
- if (!status) {
- struct be_cmd_resp_get_profile_config *resp = cmd.va;
- u32 desc_count = le32_to_cpu(resp->desc_count);
- struct be_nic_resource_desc *desc;
+ if (status)
+ goto err;
- desc = be_get_nic_desc(resp->func_param, desc_count,
- sizeof(resp->func_param));
+ resp = cmd.va;
+ desc_count = le32_to_cpu(resp->desc_count);
+
+ pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
+ desc_count);
+ if (pcie)
+ res->max_vfs = le16_to_cpu(pcie->num_vfs);
+
+ nic = be_get_nic_desc(resp->func_param, desc_count);
+ if (nic)
+ be_copy_nic_desc(res, nic);
- if (!desc) {
- status = -EINVAL;
- goto err;
- }
- if (cap_flags)
- *cap_flags = le32_to_cpu(desc->cap_flags);
- if (txq_count)
- *txq_count = le32_to_cpu(desc->txq_count);
- }
err:
if (cmd.va)
- pci_free_consistent(adapter->pdev, cmd.size,
- cmd.va, cmd.dma);
+ pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
return status;
}
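
The reworked be_cmd_get_profile_config() follows the usual shape of a DMA-backed command: allocate a coherent buffer, issue the command, parse the response on success, and free the buffer on every exit path. A hedged user-space analogue, with malloc standing in for pci_alloc_consistent():

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct resp { unsigned desc_count; unsigned char func_param[64]; };

/* Stand-in for issuing the mailbox/MCC command into 'buf'. */
static int issue_cmd(void *buf, size_t size)
{
    memset(buf, 0, size);
    ((struct resp *)buf)->desc_count = 2;
    return 0;
}

static int get_profile_config(unsigned *desc_count)
{
    struct resp *resp;
    int status;

    resp = malloc(sizeof(*resp));   /* pci_alloc_consistent() in the driver */
    if (!resp)
        return -ENOMEM;

    status = issue_cmd(resp, sizeof(*resp));
    if (status)
        goto err;

    *desc_count = resp->desc_count; /* parse while the buffer is live */
err:
    free(resp);                     /* freed on success and failure alike */
    return status;
}

int main(void)
{
    unsigned n = 0;

    if (!get_profile_config(&n))
        printf("%u descriptors\n", n);
    return 0;
}
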
-/* Uses sync mcc */
+/* Currently only Lancer uses this command, and it supports version 0 only.
+ * Uses sync MCC.
+ */
int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
u8 domain)
{
@@ -3189,12 +3370,10 @@ int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
wrb, NULL);
-
req->hdr.domain = domain;
req->desc_count = cpu_to_le32(1);
-
- req->nic_desc.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
- req->nic_desc.desc_len = RESOURCE_DESC_SIZE;
+ req->nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
+ req->nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
req->nic_desc.pf_num = adapter->pf_number;
req->nic_desc.vf_num = domain;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 1b3b9e886412..d026226db88c 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -202,6 +202,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
#define OPCODE_COMMON_GET_PORT_NAME 77
#define OPCODE_COMMON_SET_INTERRUPT_ENABLE 89
+#define OPCODE_COMMON_SET_FN_PRIVILEGES 100
#define OPCODE_COMMON_GET_PHY_DETAILS 102
#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
@@ -306,7 +307,7 @@ struct be_cmd_req_eq_create {
struct be_cmd_resp_eq_create {
struct be_cmd_resp_hdr resp_hdr;
u16 eq_id; /* sword */
- u16 rsvd0; /* sword */
+ u16 msix_idx; /* available only in v2 */
} __packed;
/******************** Mac query ***************************/
@@ -965,7 +966,10 @@ enum {
PHY_LINK_SPEED_10MBPS = 0x1,
PHY_LINK_SPEED_100MBPS = 0x2,
PHY_LINK_SPEED_1GBPS = 0x3,
- PHY_LINK_SPEED_10GBPS = 0x4
+ PHY_LINK_SPEED_10GBPS = 0x4,
+ PHY_LINK_SPEED_20GBPS = 0x5,
+ PHY_LINK_SPEED_25GBPS = 0x6,
+ PHY_LINK_SPEED_40GBPS = 0x7
};
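
Decoding one of these firmware speed codes into Mbps is a plain table lookup. A hypothetical decoder over the values above (treating index 0 as no link, which is an assumption):

#include <stdio.h>

/* Mbps per firmware speed code; indices mirror the enum above,
 * with index 0 assumed to mean "no link".
 */
static const int phy_speed_mbps[] = {
    0, 10, 100, 1000, 10000, 20000, 25000, 40000,
};

static int decode_speed(unsigned code)
{
    if (code >= sizeof(phy_speed_mbps) / sizeof(phy_speed_mbps[0]))
        return -1;      /* unknown code from newer firmware */
    return phy_speed_mbps[code];
}

int main(void)
{
    printf("code 0x7 -> %d Mbps\n", decode_speed(0x7));
    return 0;
}
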
struct be_cmd_resp_link_status {
@@ -1480,6 +1484,11 @@ struct be_cmd_resp_get_fn_privileges {
u32 privilege_mask;
};
+struct be_cmd_req_set_fn_privileges {
+ struct be_cmd_req_hdr hdr;
+ u32 privileges; /* Used by BE3, SH-R */
+ u32 privileges_lancer; /* Used by Lancer */
+};
/******************** GET/SET_MACLIST **************************/
#define BE_MAX_MAC 64
@@ -1524,12 +1533,17 @@ struct be_cmd_req_set_mac_list {
} __packed;
/*********************** HSW Config ***********************/
+#define PORT_FWD_TYPE_VEPA 0x3
+#define PORT_FWD_TYPE_VEB 0x2
+
struct amap_set_hsw_context {
u8 interface_id[16];
u8 rsvd0[14];
u8 pvid_valid;
- u8 rsvd1;
- u8 rsvd2[16];
+ u8 pport;
+ u8 rsvd1[6];
+ u8 port_fwd_type[3];
+ u8 rsvd2[7];
u8 pvid[16];
u8 rsvd3[32];
u8 rsvd4[32];
@@ -1554,7 +1568,9 @@ struct amap_get_hsw_req_context {
} __packed;
struct amap_get_hsw_resp_context {
- u8 rsvd1[16];
+ u8 rsvd0[6];
+ u8 port_fwd_type[3];
+ u8 rsvd1[7];
u8 pvid[16];
u8 rsvd2[32];
u8 rsvd3[32];
@@ -1709,11 +1725,13 @@ struct be_cmd_req_set_ext_fat_caps {
struct be_fat_conf_params set_params;
};
-#define RESOURCE_DESC_SIZE 88
+#define RESOURCE_DESC_SIZE_V0 72
+#define RESOURCE_DESC_SIZE_V1 88
+#define PCIE_RESOURCE_DESC_TYPE_V0 0x40
#define NIC_RESOURCE_DESC_TYPE_V0 0x41
+#define PCIE_RESOURCE_DESC_TYPE_V1 0x50
#define NIC_RESOURCE_DESC_TYPE_V1 0x51
-#define MAX_RESOURCE_DESC 4
-#define MAX_RESOURCE_DESC_V1 32
+#define MAX_RESOURCE_DESC 264
/* QOS unit number */
#define QUN 4
@@ -1722,9 +1740,30 @@ struct be_cmd_req_set_ext_fat_caps {
/* No save */
#define NOSV 7
-struct be_nic_resource_desc {
+struct be_res_desc_hdr {
u8 desc_type;
u8 desc_len;
+} __packed;
+
+struct be_pcie_res_desc {
+ struct be_res_desc_hdr hdr;
+ u8 rsvd0;
+ u8 flags;
+ u16 rsvd1;
+ u8 pf_num;
+ u8 rsvd2;
+ u32 rsvd3;
+ u8 sriov_state;
+ u8 pf_state;
+ u8 pf_type;
+ u8 rsvd4;
+ u16 num_vfs;
+ u16 rsvd5;
+ u32 rsvd6[17];
+} __packed;
+
+struct be_nic_res_desc {
+ struct be_res_desc_hdr hdr;
u8 rsvd1;
u8 flags;
u8 vf_num;
@@ -1753,7 +1792,7 @@ struct be_nic_resource_desc {
u8 wol_param;
u16 rsvd7;
u32 rsvd8[3];
-};
+} __packed;
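
Since these descriptors are parsed straight out of a firmware byte stream, they are now declared __packed, and their sizes must track RESOURCE_DESC_SIZE_V0/V1. A compile-time check in the same spirit, mirroring be_pcie_res_desc in user-space types (illustrating the technique, not driver code):

#include <stdint.h>

struct res_desc_hdr {
    uint8_t desc_type;
    uint8_t desc_len;
} __attribute__((packed));

struct pcie_res_desc {
    struct res_desc_hdr hdr;
    uint8_t rsvd0;
    uint8_t flags;
    uint16_t rsvd1;
    uint8_t pf_num;
    uint8_t rsvd2;
    uint32_t rsvd3;
    uint8_t sriov_state;
    uint8_t pf_state;
    uint8_t pf_type;
    uint8_t rsvd4;
    uint16_t num_vfs;
    uint16_t rsvd5;
    uint32_t rsvd6[17];
} __attribute__((packed));

/* The V1 descriptor is expected to be exactly 88 bytes on the wire. */
_Static_assert(sizeof(struct pcie_res_desc) == 88, "pcie desc size drifted");

int main(void) { return 0; }
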
struct be_cmd_req_get_func_config {
struct be_cmd_req_hdr hdr;
@@ -1762,7 +1801,7 @@ struct be_cmd_req_get_func_config {
struct be_cmd_resp_get_func_config {
struct be_cmd_resp_hdr hdr;
u32 desc_count;
- u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE];
+ u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE_V1];
};
#define ACTIVE_PROFILE_TYPE 0x2
@@ -1774,26 +1813,20 @@ struct be_cmd_req_get_profile_config {
};
struct be_cmd_resp_get_profile_config {
- struct be_cmd_req_hdr hdr;
- u32 desc_count;
- u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE];
-};
-
-struct be_cmd_resp_get_profile_config_v1 {
- struct be_cmd_req_hdr hdr;
+ struct be_cmd_resp_hdr hdr;
u32 desc_count;
- u8 func_param[MAX_RESOURCE_DESC_V1 * RESOURCE_DESC_SIZE];
+ u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE_V1];
};
struct be_cmd_req_set_profile_config {
struct be_cmd_req_hdr hdr;
u32 rsvd;
u32 desc_count;
- struct be_nic_resource_desc nic_desc;
+ struct be_nic_res_desc nic_desc;
};
struct be_cmd_resp_set_profile_config {
- struct be_cmd_req_hdr hdr;
+ struct be_cmd_resp_hdr hdr;
};
struct be_cmd_enable_disable_vf {
@@ -1842,8 +1875,7 @@ extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
u32 en_flags, u32 *if_handle, u32 domain);
extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle,
u32 domain);
-extern int be_cmd_eq_create(struct be_adapter *adapter,
- struct be_queue_info *eq, int eq_delay);
+extern int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo);
extern int be_cmd_cq_create(struct be_adapter *adapter,
struct be_queue_info *cq, struct be_queue_info *eq,
bool no_delay, int num_cqe_dma_coalesce);
@@ -1927,15 +1959,22 @@ extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
extern int be_cmd_get_fn_privileges(struct be_adapter *adapter,
u32 *privilege, u32 domain);
+extern int be_cmd_set_fn_privileges(struct be_adapter *adapter,
+ u32 privileges, u32 vf_num);
extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
bool *pmac_id_active, u32 *pmac_id,
u8 domain);
+extern int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id,
+ u8 *mac);
+extern int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac);
extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
u8 mac_count, u32 domain);
+extern int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id,
+ u32 dom);
extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
- u32 domain, u16 intf_id);
+ u32 domain, u16 intf_id, u16 hsw_mode);
extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
- u32 domain, u16 intf_id);
+ u32 domain, u16 intf_id, u8 *mode);
extern int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
extern int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
struct be_dma_mem *cmd);
@@ -1948,10 +1987,10 @@ extern int lancer_initiate_dump(struct be_adapter *adapter);
extern bool dump_present(struct be_adapter *adapter);
extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
-extern int be_cmd_get_func_config(struct be_adapter *adapter);
-extern int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
- u16 *txq_count, u8 domain);
-
+int be_cmd_get_func_config(struct be_adapter *adapter,
+ struct be_resources *res);
+int be_cmd_get_profile_config(struct be_adapter *adapter,
+ struct be_resources *res, u8 domain);
extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
u8 domain);
extern int be_cmd_get_if_id(struct be_adapter *adapter,
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 4f8c941217cc..b440a1fac77b 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1119,6 +1119,29 @@ static int be_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
return status;
}
+static void be_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ ch->combined_count = adapter->num_evt_qs;
+ ch->max_combined = be_max_qs(adapter);
+}
+
+static int be_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ if (ch->rx_count || ch->tx_count || ch->other_count ||
+ !ch->combined_count || ch->combined_count > be_max_qs(adapter))
+ return -EINVAL;
+
+ adapter->cfg_num_qs = ch->combined_count;
+
+ return be_update_queues(adapter);
+}
+
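
The two handlers above wire the driver into the standard ethtool channels interface; rx/tx/other counts are rejected because the hardware only supports combined queues. Once merged, the combined queue count can be inspected and resized at runtime, e.g. (interface name illustrative):

    ethtool --show-channels eth0
    ethtool --set-channels eth0 combined 4
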
const struct ethtool_ops be_ethtool_ops = {
.get_settings = be_get_settings,
.get_drvinfo = be_get_drvinfo,
@@ -1145,4 +1168,6 @@ const struct ethtool_ops be_ethtool_ops = {
.self_test = be_self_test,
.get_rxnfc = be_get_rxnfc,
.set_rxnfc = be_set_rxnfc,
+ .get_channels = be_get_channels,
+ .set_channels = be_set_channels
};
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 3d91a5ec61a4..3224d28cdad4 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -21,6 +21,7 @@
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
+#include <linux/if_bridge.h>
MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -145,8 +146,8 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
q->len = len;
q->entry_size = entry_size;
mem->size = len * entry_size;
- mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
- GFP_KERNEL | __GFP_ZERO);
+ mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
+ GFP_KERNEL);
if (!mem->va)
return -ENOMEM;
return 0;
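
Several call sites in this patch move from dma_alloc_coherent(..., GFP_KERNEL | __GFP_ZERO) to dma_zalloc_coherent(), which at this point in the kernel was a thin zeroing wrapper. Conceptually (a user-space sketch of the equivalence, not the kernel's definition):

#include <stdlib.h>
#include <string.h>

/* User-space stand-in: alloc_coherent models dma_alloc_coherent(). */
static void *alloc_coherent(size_t size)
{
    return malloc(size);
}

/* dma_zalloc_coherent() is just the allocating call plus zeroing; in
 * the kernel this is done by ORing __GFP_ZERO into the gfp flags.
 */
static void *zalloc_coherent(size_t size)
{
    void *va = alloc_coherent(size);

    if (va)
        memset(va, 0, size);
    return va;
}

int main(void)
{
    void *p = zalloc_coherent(64);

    free(p);
    return 0;
}
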
@@ -247,54 +248,54 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ struct device *dev = &adapter->pdev->dev;
struct sockaddr *addr = p;
- int status = 0;
- u8 current_mac[ETH_ALEN];
- u32 pmac_id = adapter->pmac_id[0];
- bool active_mac = true;
+ int status;
+ u8 mac[ETH_ALEN];
+ u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- /* For BE VF, MAC address is already activated by PF.
- * Hence only operation left is updating netdev->devaddr.
- * Update it if user is passing the same MAC which was used
- * during configuring VF MAC from PF(Hypervisor).
+ /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
+ * privilege or if PF did not provision the new MAC address.
+ * On BE3, this cmd will always fail if the VF doesn't have the
+ * FILTMGMT privilege. This failure is OK only if the PF has already
+ * programmed the MAC for the VF.
*/
- if (!lancer_chip(adapter) && !be_physfn(adapter)) {
- status = be_cmd_mac_addr_query(adapter, current_mac,
- false, adapter->if_handle, 0);
- if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
- goto done;
- else
- goto err;
- }
+ status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
+ adapter->if_handle, &adapter->pmac_id[0], 0);
+ if (!status) {
+ curr_pmac_id = adapter->pmac_id[0];
- if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
- goto done;
+ /* Delete the old programmed MAC. This call may fail if the
+ * old MAC was already deleted by the PF driver.
+ */
+ if (adapter->pmac_id[0] != old_pmac_id)
+ be_cmd_pmac_del(adapter, adapter->if_handle,
+ old_pmac_id, 0);
+ }
- /* For Lancer check if any MAC is active.
- * If active, get its mac id.
+ /* Decide whether the new MAC was successfully activated only
+ * after querying the FW
*/
- if (lancer_chip(adapter) && !be_physfn(adapter))
- be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
- &pmac_id, 0);
-
- status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
- adapter->if_handle,
- &adapter->pmac_id[0], 0);
-
+ status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
if (status)
goto err;
- if (active_mac)
- be_cmd_pmac_del(adapter, adapter->if_handle,
- pmac_id, 0);
-done:
+ /* The MAC change did not happen, either due to lack of privilege
+ * or because the PF did not pre-provision the new MAC.
+ */
+ if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
+ status = -EPERM;
+ goto err;
+ }
+
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ dev_info(dev, "MAC address changed to %pM\n", mac);
return 0;
err:
- dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
+ dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
return status;
}
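
The rewritten be_mac_addr_set() replaces the per-chip special cases with one optimistic sequence: add the new MAC, drop the old entry if a fresh pmac_id was issued, then trust only what a firmware query reports back before committing to netdev. A compressed user-space model of that decision flow (helper names invented):

#include <errno.h>
#include <stdio.h>
#include <string.h>

static unsigned char fw_mac[6]; /* what the firmware ends up with */

static int fw_add_mac(const unsigned char *mac)
{
    memcpy(fw_mac, mac, 6);     /* pretend the add succeeded */
    return 0;
}

static int fw_query_active_mac(unsigned char *mac)
{
    memcpy(mac, fw_mac, 6);
    return 0;
}

static int set_mac_addr(unsigned char *dev_addr, const unsigned char *new_mac)
{
    unsigned char mac[6];

    /* Optimistically program the new MAC; failure here is tolerated
     * because the PF may already have provisioned it for us.
     */
    fw_add_mac(new_mac);

    /* Trust only what the firmware reports back. */
    if (fw_query_active_mac(mac))
        return -EIO;
    if (memcmp(new_mac, mac, 6))
        return -EPERM;  /* the change silently didn't take effect */

    memcpy(dev_addr, new_mac, 6);
    return 0;
}

int main(void)
{
    unsigned char dev_addr[6] = { 0 };
    unsigned char want[6] = { 0x02, 0, 0, 0, 0, 1 };

    printf("status %d\n", set_mac_addr(dev_addr, want));
    return 0;
}
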
@@ -472,7 +473,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
ACCESS_ONCE(*acc) = newacc;
}
-void populate_erx_stats(struct be_adapter *adapter,
+static void populate_erx_stats(struct be_adapter *adapter,
struct be_rx_obj *rxo,
u32 erx_stat)
{
@@ -1001,7 +1002,7 @@ static int be_vid_config(struct be_adapter *adapter)
if (adapter->promiscuous)
return 0;
- if (adapter->vlans_added > adapter->max_vlans)
+ if (adapter->vlans_added > be_max_vlans(adapter))
goto set_vlan_promisc;
/* Construct VLAN Table to give to HW */
@@ -1042,7 +1043,7 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
goto ret;
adapter->vlan_tag[vid] = 1;
- if (adapter->vlans_added <= (adapter->max_vlans + 1))
+ if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
status = be_vid_config(adapter);
if (!status)
@@ -1068,7 +1069,7 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
goto ret;
adapter->vlan_tag[vid] = 0;
- if (adapter->vlans_added <= adapter->max_vlans)
+ if (adapter->vlans_added <= be_max_vlans(adapter))
status = be_vid_config(adapter);
if (!status)
@@ -1101,7 +1102,7 @@ static void be_set_rx_mode(struct net_device *netdev)
/* Enable multicast promisc if num configured exceeds what we support */
if (netdev->flags & IFF_ALLMULTI ||
- netdev_mc_count(netdev) > adapter->max_mcast_mac) {
+ netdev_mc_count(netdev) > be_max_mc(adapter)) {
be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
goto done;
}
@@ -1115,7 +1116,7 @@ static void be_set_rx_mode(struct net_device *netdev)
adapter->pmac_id[i], 0);
}
- if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
+ if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
adapter->promiscuous = true;
goto done;
@@ -1146,9 +1147,6 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
struct be_adapter *adapter = netdev_priv(netdev);
struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
int status;
- bool active_mac = false;
- u32 pmac_id;
- u8 old_mac[ETH_ALEN];
if (!sriov_enabled(adapter))
return -EPERM;
@@ -1156,20 +1154,15 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
return -EINVAL;
- if (lancer_chip(adapter)) {
- status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
- &pmac_id, vf + 1);
- if (!status && active_mac)
- be_cmd_pmac_del(adapter, vf_cfg->if_handle,
- pmac_id, vf + 1);
-
- status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
- } else {
- status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
- vf_cfg->pmac_id, vf + 1);
+ if (BEx_chip(adapter)) {
+ be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
+ vf + 1);
status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
&vf_cfg->pmac_id, vf + 1);
+ } else {
+ status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
+ vf + 1);
}
if (status)
@@ -1220,14 +1213,14 @@ static int be_set_vf_vlan(struct net_device *netdev,
adapter->vf_cfg[vf].vlan_tag = vlan;
status = be_cmd_set_hsw_config(adapter, vlan,
- vf + 1, adapter->vf_cfg[vf].if_handle);
+ vf + 1, adapter->vf_cfg[vf].if_handle, 0);
}
} else {
/* Reset Transparent Vlan Tagging. */
adapter->vf_cfg[vf].vlan_tag = 0;
vlan = adapter->vf_cfg[vf].def_vid;
status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
- adapter->vf_cfg[vf].if_handle);
+ adapter->vf_cfg[vf].if_handle, 0);
}
@@ -1490,8 +1483,9 @@ static void be_rx_compl_process(struct be_rx_obj *rxo,
}
/* Process the RX completion indicated by rxcp when GRO is enabled */
-void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
- struct be_rx_compl_info *rxcp)
+static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
+ struct napi_struct *napi,
+ struct be_rx_compl_info *rxcp)
{
struct be_adapter *adapter = rxo->adapter;
struct be_rx_page_info *page_info;
@@ -1920,6 +1914,7 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
if (eqo->q.created) {
be_eq_clean(eqo);
be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
+ netif_napi_del(&eqo->napi);
}
be_queue_free(adapter, &eqo->q);
}
@@ -1931,9 +1926,12 @@ static int be_evt_queues_create(struct be_adapter *adapter)
struct be_eq_obj *eqo;
int i, rc;
- adapter->num_evt_qs = num_irqs(adapter);
+ adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
+ adapter->cfg_num_qs);
for_all_evt_queues(adapter, eqo, i) {
+ netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
+ BE_NAPI_WEIGHT);
eqo->adapter = adapter;
eqo->tx_budget = BE_TX_BUDGET;
eqo->idx = i;
@@ -1946,7 +1944,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
if (rc)
return rc;
- rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
+ rc = be_cmd_eq_create(adapter, eqo);
if (rc)
return rc;
}
@@ -2020,31 +2018,13 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
}
}
-static int be_num_txqs_want(struct be_adapter *adapter)
-{
- if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
- be_is_mc(adapter) ||
- (!lancer_chip(adapter) && !be_physfn(adapter)) ||
- BE2_chip(adapter))
- return 1;
- else
- return adapter->max_tx_queues;
-}
-
-static int be_tx_cqs_create(struct be_adapter *adapter)
+static int be_tx_qs_create(struct be_adapter *adapter)
{
struct be_queue_info *cq, *eq;
- int status;
struct be_tx_obj *txo;
- u8 i;
+ int status, i;
- adapter->num_tx_qs = be_num_txqs_want(adapter);
- if (adapter->num_tx_qs != MAX_TX_QS) {
- rtnl_lock();
- netif_set_real_num_tx_queues(adapter->netdev,
- adapter->num_tx_qs);
- rtnl_unlock();
- }
+ adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
for_all_tx_queues(adapter, txo, i) {
cq = &txo->cq;
@@ -2060,16 +2040,7 @@ static int be_tx_cqs_create(struct be_adapter *adapter)
status = be_cmd_cq_create(adapter, cq, eq, false, 3);
if (status)
return status;
- }
- return 0;
-}
-static int be_tx_qs_create(struct be_adapter *adapter)
-{
- struct be_tx_obj *txo;
- int i, status;
-
- for_all_tx_queues(adapter, txo, i) {
status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
sizeof(struct be_eth_wrb));
if (status)
@@ -2105,17 +2076,14 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
struct be_rx_obj *rxo;
int rc, i;
- /* We'll create as many RSS rings as there are irqs.
- * But when there's only one irq there's no use creating RSS rings
+ /* We can create as many RSS rings as there are EQs. */
+ adapter->num_rx_qs = adapter->num_evt_qs;
+
+ /* We'll use RSS only if at least 2 RSS rings are supported.
+ * When RSS is used, we'll need a default RXQ for non-IP traffic.
*/
- adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
- num_irqs(adapter) + 1 : 1;
- if (adapter->num_rx_qs != MAX_RX_QS) {
- rtnl_lock();
- netif_set_real_num_rx_queues(adapter->netdev,
- adapter->num_rx_qs);
- rtnl_unlock();
- }
+ if (adapter->num_rx_qs > 1)
+ adapter->num_rx_qs++;
adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
for_all_rx_queues(adapter, rxo, i) {
@@ -2379,38 +2347,24 @@ static void be_msix_disable(struct be_adapter *adapter)
if (msix_enabled(adapter)) {
pci_disable_msix(adapter->pdev);
adapter->num_msix_vec = 0;
+ adapter->num_msix_roce_vec = 0;
}
}
-static uint be_num_rss_want(struct be_adapter *adapter)
-{
- u32 num = 0;
-
- if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
- (lancer_chip(adapter) ||
- (!sriov_want(adapter) && be_physfn(adapter)))) {
- num = adapter->max_rss_queues;
- num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
- }
- return num;
-}
-
static int be_msix_enable(struct be_adapter *adapter)
{
-#define BE_MIN_MSIX_VECTORS 1
- int i, status, num_vec, num_roce_vec = 0;
+ int i, status, num_vec;
struct device *dev = &adapter->pdev->dev;
- /* If RSS queues are not used, need a vec for default RX Q */
- num_vec = min(be_num_rss_want(adapter), num_online_cpus());
- if (be_roce_supported(adapter)) {
- num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
- (num_online_cpus() + 1));
- num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
- num_vec += num_roce_vec;
- num_vec = min(num_vec, MAX_MSIX_VECTORS);
- }
- num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
+ /* If RoCE is supported, program the max number of NIC vectors that
+ * may be configured via set-channels, along with vectors needed for
+ * RoCE. Else, just program the number we'll use initially.
+ */
+ if (be_roce_supported(adapter))
+ num_vec = min_t(int, 2 * be_max_eqs(adapter),
+ 2 * num_online_cpus());
+ else
+ num_vec = adapter->cfg_num_qs;
for (i = 0; i < num_vec; i++)
adapter->msix_entries[i].entry = i;
@@ -2418,7 +2372,7 @@ static int be_msix_enable(struct be_adapter *adapter)
status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
if (status == 0) {
goto done;
- } else if (status >= BE_MIN_MSIX_VECTORS) {
+ } else if (status >= MIN_MSIX_VECTORS) {
num_vec = status;
status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
num_vec);
@@ -2427,30 +2381,29 @@ static int be_msix_enable(struct be_adapter *adapter)
}
dev_warn(dev, "MSIx enable failed\n");
+
/* INTx is not supported in VFs, so fail probe if enable_msix fails */
if (!be_physfn(adapter))
return status;
return 0;
done:
- if (be_roce_supported(adapter)) {
- if (num_vec > num_roce_vec) {
- adapter->num_msix_vec = num_vec - num_roce_vec;
- adapter->num_msix_roce_vec =
- num_vec - adapter->num_msix_vec;
- } else {
- adapter->num_msix_vec = num_vec;
- adapter->num_msix_roce_vec = 0;
- }
- } else
- adapter->num_msix_vec = num_vec;
- dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
+ if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
+ adapter->num_msix_roce_vec = num_vec / 2;
+ dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
+ adapter->num_msix_roce_vec);
+ }
+
+ adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
+
+ dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
+ adapter->num_msix_vec);
return 0;
}
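
When RoCE is present, the driver now asks for 2 * min(max EQs, CPUs) vectors and splits whatever pci_enable_msix() actually granted down the middle, instead of tracking a separate RoCE request. A small model of the split arithmetic (MIN_MSIX_VECTORS assumed to be 1, matching the old BE_MIN_MSIX_VECTORS):

#include <stdio.h>

#define MIN_MSIX_VECTORS 1  /* assumed, as with the old BE_MIN_MSIX_VECTORS */

struct split { int nic; int roce; };

/* Given the vector count the PCI core actually granted, divide it
 * between NIC and RoCE the way be_msix_enable() now does.
 */
static struct split split_vectors(int granted, int roce_supported)
{
    struct split s = { granted, 0 };

    if (roce_supported && granted > MIN_MSIX_VECTORS)
        s.roce = granted / 2;
    s.nic = granted - s.roce;
    return s;
}

int main(void)
{
    struct split s = split_vectors(9, 1);

    printf("nic %d, roce %d\n", s.nic, s.roce); /* nic 5, roce 4 */
    return 0;
}
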
static inline int be_msix_vec_get(struct be_adapter *adapter,
struct be_eq_obj *eqo)
{
- return adapter->msix_entries[eqo->idx].vector;
+ return adapter->msix_entries[eqo->msix_idx].vector;
}
static int be_msix_register(struct be_adapter *adapter)
@@ -2690,8 +2643,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
memset(mac, 0, ETH_ALEN);
cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
- cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_KERNEL | __GFP_ZERO);
+ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_KERNEL);
if (cmd.va == NULL)
return -1;
@@ -2735,13 +2688,13 @@ static int be_vf_eth_addr_config(struct be_adapter *adapter)
be_vf_eth_addr_generate(adapter, mac);
for_all_vfs(adapter, vf_cfg, vf) {
- if (lancer_chip(adapter)) {
- status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
- } else {
+ if (BEx_chip(adapter))
status = be_cmd_pmac_add(adapter, mac,
vf_cfg->if_handle,
&vf_cfg->pmac_id, vf + 1);
- }
+ else
+ status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
+ vf + 1);
if (status)
dev_err(&adapter->pdev->dev,
@@ -2759,7 +2712,7 @@ static int be_vfs_mac_query(struct be_adapter *adapter)
int status, vf;
u8 mac[ETH_ALEN];
struct be_vf_cfg *vf_cfg;
- bool active;
+ bool active = false;
for_all_vfs(adapter, vf_cfg, vf) {
be_cmd_get_mac_from_list(adapter, mac, &active,
@@ -2788,11 +2741,12 @@ static void be_vf_clear(struct be_adapter *adapter)
pci_disable_sriov(adapter->pdev);
for_all_vfs(adapter, vf_cfg, vf) {
- if (lancer_chip(adapter))
- be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
- else
+ if (BEx_chip(adapter))
be_cmd_pmac_del(adapter, vf_cfg->if_handle,
vf_cfg->pmac_id, vf + 1);
+ else
+ be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
+ vf + 1);
be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
}
@@ -2801,28 +2755,40 @@ done:
adapter->num_vfs = 0;
}
-static int be_clear(struct be_adapter *adapter)
+static void be_clear_queues(struct be_adapter *adapter)
{
- int i = 1;
+ be_mcc_queues_destroy(adapter);
+ be_rx_cqs_destroy(adapter);
+ be_tx_queues_destroy(adapter);
+ be_evt_queues_destroy(adapter);
+}
+static void be_cancel_worker(struct be_adapter *adapter)
+{
if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
cancel_delayed_work_sync(&adapter->work);
adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
}
+}
+
+static int be_clear(struct be_adapter *adapter)
+{
+ int i;
+
+ be_cancel_worker(adapter);
if (sriov_enabled(adapter))
be_vf_clear(adapter);
- for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
+ /* delete the primary mac along with the uc-mac list */
+ for (i = 0; i < (adapter->uc_macs + 1); i++)
be_cmd_pmac_del(adapter, adapter->if_handle,
- adapter->pmac_id[i], 0);
+ adapter->pmac_id[i], 0);
+ adapter->uc_macs = 0;
be_cmd_if_destroy(adapter, adapter->if_handle, 0);
- be_mcc_queues_destroy(adapter);
- be_rx_cqs_destroy(adapter);
- be_tx_queues_destroy(adapter);
- be_evt_queues_destroy(adapter);
+ be_clear_queues(adapter);
kfree(adapter->pmac_id);
adapter->pmac_id = NULL;
@@ -2833,6 +2799,7 @@ static int be_clear(struct be_adapter *adapter)
static int be_vfs_if_create(struct be_adapter *adapter)
{
+ struct be_resources res = {0};
struct be_vf_cfg *vf_cfg;
u32 cap_flags, en_flags, vf;
int status;
@@ -2841,9 +2808,12 @@ static int be_vfs_if_create(struct be_adapter *adapter)
BE_IF_FLAGS_MULTICAST;
for_all_vfs(adapter, vf_cfg, vf) {
- if (!BE3_chip(adapter))
- be_cmd_get_profile_config(adapter, &cap_flags,
- NULL, vf + 1);
+ if (!BE3_chip(adapter)) {
+ status = be_cmd_get_profile_config(adapter, &res,
+ vf + 1);
+ if (!status)
+ cap_flags = res.if_cap_flags;
+ }
/* If a FW profile exists, then cap_flags are updated */
en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
@@ -2880,6 +2850,7 @@ static int be_vf_setup(struct be_adapter *adapter)
u16 def_vlan, lnk_speed;
int status, old_vfs, vf;
struct device *dev = &adapter->pdev->dev;
+ u32 privileges;
old_vfs = pci_num_vf(adapter->pdev);
if (old_vfs) {
@@ -2888,10 +2859,10 @@ static int be_vf_setup(struct be_adapter *adapter)
dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
adapter->num_vfs = old_vfs;
} else {
- if (num_vfs > adapter->dev_num_vfs)
+ if (num_vfs > be_max_vfs(adapter))
dev_info(dev, "Device supports %d VFs and not %d\n",
- adapter->dev_num_vfs, num_vfs);
- adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
+ be_max_vfs(adapter), num_vfs);
+ adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
if (!adapter->num_vfs)
return 0;
}
@@ -2923,6 +2894,18 @@ static int be_vf_setup(struct be_adapter *adapter)
}
for_all_vfs(adapter, vf_cfg, vf) {
+ /* Allow VFs to program MAC/VLAN filters */
+ status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
+ if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
+ status = be_cmd_set_fn_privileges(adapter,
+ privileges |
+ BE_PRIV_FILTMGMT,
+ vf + 1);
+ if (!status)
+ dev_info(dev, "VF%d has FILTMGMT privilege\n",
+ vf);
+ }
+
/* BE3 FW, by default, caps VF TX-rate to 100mbps.
* Allow full available bandwidth
*/
@@ -2935,7 +2918,7 @@ static int be_vf_setup(struct be_adapter *adapter)
vf_cfg->tx_rate = lnk_speed;
status = be_cmd_get_hsw_config(adapter, &def_vlan,
- vf + 1, vf_cfg->if_handle);
+ vf + 1, vf_cfg->if_handle, NULL);
if (status)
goto err;
vf_cfg->def_vid = def_vlan;
@@ -2958,6 +2941,51 @@ err:
return status;
}
+/* On BE2/BE3 the FW does not report the supported resource limits */
+static void BEx_get_resources(struct be_adapter *adapter,
+ struct be_resources *res)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ bool use_sriov = false;
+
+ if (BE3_chip(adapter) && be_physfn(adapter)) {
+ int max_vfs;
+
+ max_vfs = pci_sriov_get_totalvfs(pdev);
+ res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
+ use_sriov = res->max_vfs && num_vfs;
+ }
+
+ if (be_physfn(adapter))
+ res->max_uc_mac = BE_UC_PMAC_COUNT;
+ else
+ res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
+
+ if (adapter->function_mode & FLEX10_MODE)
+ res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
+ else
+ res->max_vlans = BE_NUM_VLANS_SUPPORTED;
+ res->max_mcast_mac = BE_MAX_MC;
+
+ if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
+ !be_physfn(adapter))
+ res->max_tx_qs = 1;
+ else
+ res->max_tx_qs = BE3_MAX_TX_QS;
+
+ if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
+ !use_sriov && be_physfn(adapter))
+ res->max_rss_qs = (adapter->be3_native) ?
+ BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
+ res->max_rx_qs = res->max_rss_qs + 1;
+
+ res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;
+
+ res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
+ if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
+ res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
+}
+
static void be_setup_init(struct be_adapter *adapter)
{
adapter->vlan_prio_bmap = 0xff;
@@ -2971,118 +2999,56 @@ static void be_setup_init(struct be_adapter *adapter)
adapter->cmd_privileges = MIN_PRIVILEGES;
}
-static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
- bool *active_mac, u32 *pmac_id)
+static int be_get_resources(struct be_adapter *adapter)
{
- int status = 0;
-
- if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
- memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
- if (!lancer_chip(adapter) && !be_physfn(adapter))
- *active_mac = true;
- else
- *active_mac = false;
+ struct device *dev = &adapter->pdev->dev;
+ struct be_resources res = {0};
+ int status;
- return status;
+ if (BEx_chip(adapter)) {
+ BEx_get_resources(adapter, &res);
+ adapter->res = res;
}
- if (lancer_chip(adapter)) {
- status = be_cmd_get_mac_from_list(adapter, mac,
- active_mac, pmac_id, 0);
- if (*active_mac) {
- status = be_cmd_mac_addr_query(adapter, mac, false,
- if_handle, *pmac_id);
- }
- } else if (be_physfn(adapter)) {
- /* For BE3, for PF get permanent MAC */
- status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
- *active_mac = false;
- } else {
- /* For BE3, for VF get soft MAC assigned by PF*/
- status = be_cmd_mac_addr_query(adapter, mac, false,
- if_handle, 0);
- *active_mac = true;
+ /* For BE3 only check if FW suggests a different max-txqs value */
+ if (BE3_chip(adapter)) {
+ status = be_cmd_get_profile_config(adapter, &res, 0);
+ if (!status && res.max_tx_qs)
+ adapter->res.max_tx_qs =
+ min(adapter->res.max_tx_qs, res.max_tx_qs);
}
- return status;
-}
-
-static void be_get_resources(struct be_adapter *adapter)
-{
- u16 dev_num_vfs;
- int pos, status;
- bool profile_present = false;
- u16 txq_count = 0;
+ /* For Lancer, SH etc. read per-function resource limits from FW.
+ * GET_FUNC_CONFIG returns per-function guaranteed limits.
+ * GET_PROFILE_CONFIG returns PCI-E related limits and PF-pool limits.
+ */
if (!BEx_chip(adapter)) {
- status = be_cmd_get_func_config(adapter);
- if (!status)
- profile_present = true;
- } else if (BE3_chip(adapter) && be_physfn(adapter)) {
- be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
- }
-
- if (profile_present) {
- /* Sanity fixes for Lancer */
- adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
- BE_UC_PMAC_COUNT);
- adapter->max_vlans = min_t(u16, adapter->max_vlans,
- BE_NUM_VLANS_SUPPORTED);
- adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
- BE_MAX_MC);
- adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
- MAX_TX_QS);
- adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
- BE3_MAX_RSS_QS);
- adapter->max_event_queues = min_t(u16,
- adapter->max_event_queues,
- BE3_MAX_RSS_QS);
-
- if (adapter->max_rss_queues &&
- adapter->max_rss_queues == adapter->max_rx_queues)
- adapter->max_rss_queues -= 1;
-
- if (adapter->max_event_queues < adapter->max_rss_queues)
- adapter->max_rss_queues = adapter->max_event_queues;
-
- } else {
- if (be_physfn(adapter))
- adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
- else
- adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
-
- if (adapter->function_mode & FLEX10_MODE)
- adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
- else
- adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
+ status = be_cmd_get_func_config(adapter, &res);
+ if (status)
+ return status;
- adapter->max_mcast_mac = BE_MAX_MC;
- adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
- adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
- MAX_TX_QS);
- adapter->max_rss_queues = (adapter->be3_native) ?
- BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
- adapter->max_event_queues = BE3_MAX_RSS_QS;
+ /* If RoCE may be enabled, stash away half the EQs for RoCE */
+ if (be_roce_supported(adapter))
+ res.max_evt_qs /= 2;
+ adapter->res = res;
- adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
- BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST |
- BE_IF_FLAGS_PASS_L3L4_ERRORS |
- BE_IF_FLAGS_MCAST_PROMISCUOUS |
- BE_IF_FLAGS_VLAN_PROMISCUOUS |
- BE_IF_FLAGS_PROMISCUOUS;
+ if (be_physfn(adapter)) {
+ status = be_cmd_get_profile_config(adapter, &res, 0);
+ if (status)
+ return status;
+ adapter->res.max_vfs = res.max_vfs;
+ }
- if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
- adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
+ dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
+ be_max_txqs(adapter), be_max_rxqs(adapter),
+ be_max_rss(adapter), be_max_eqs(adapter),
+ be_max_vfs(adapter));
+ dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
+ be_max_uc(adapter), be_max_mc(adapter),
+ be_max_vlans(adapter));
}
- pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
- if (pos) {
- pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
- &dev_num_vfs);
- if (BE3_chip(adapter))
- dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
- adapter->dev_num_vfs = dev_num_vfs;
- }
+ return 0;
}
/* Routine to query per function resource limits */
@@ -3095,100 +3061,171 @@ static int be_get_config(struct be_adapter *adapter)
&adapter->function_caps,
&adapter->asic_rev);
if (status)
- goto err;
+ return status;
- be_get_resources(adapter);
+ status = be_get_resources(adapter);
+ if (status)
+ return status;
/* primary mac needs 1 pmac entry */
- adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
- sizeof(u32), GFP_KERNEL);
- if (!adapter->pmac_id) {
- status = -ENOMEM;
- goto err;
- }
+ adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
+ GFP_KERNEL);
+ if (!adapter->pmac_id)
+ return -ENOMEM;
-err:
- return status;
+ /* Sanitize cfg_num_qs based on HW and platform limits */
+ adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
+
+ return 0;
}
-static int be_setup(struct be_adapter *adapter)
+static int be_mac_setup(struct be_adapter *adapter)
{
- struct device *dev = &adapter->pdev->dev;
- u32 en_flags;
- u32 tx_fc, rx_fc;
- int status;
u8 mac[ETH_ALEN];
- bool active_mac;
+ int status;
- be_setup_init(adapter);
+ if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
+ status = be_cmd_get_perm_mac(adapter, mac);
+ if (status)
+ return status;
- if (!lancer_chip(adapter))
- be_cmd_req_native_mode(adapter);
+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+ } else {
+ /* Maybe the HW was reset; dev_addr must be re-programmed */
+ memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
+ }
- status = be_get_config(adapter);
+ /* On BE3 VFs this cmd may fail due to lack of privilege.
+ * Ignore the failure; in that case the pmac_id is fetched
+ * in the IFACE_CREATE cmd.
+ */
+ be_cmd_pmac_add(adapter, mac, adapter->if_handle,
+ &adapter->pmac_id[0], 0);
+ return 0;
+}
+
+static void be_schedule_worker(struct be_adapter *adapter)
+{
+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
+ adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
+}
+
+static int be_setup_queues(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int status;
+
+ status = be_evt_queues_create(adapter);
if (status)
goto err;
- status = be_msix_enable(adapter);
+ status = be_tx_qs_create(adapter);
if (status)
goto err;
- status = be_evt_queues_create(adapter);
+ status = be_rx_cqs_create(adapter);
if (status)
goto err;
- status = be_tx_cqs_create(adapter);
+ status = be_mcc_queues_create(adapter);
if (status)
goto err;
- status = be_rx_cqs_create(adapter);
+ status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
if (status)
goto err;
- status = be_mcc_queues_create(adapter);
+ status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
if (status)
goto err;
- be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
- /* In UMC mode FW does not return right privileges.
- * Override with correct privilege equivalent to PF.
+ return 0;
+err:
+ dev_err(&adapter->pdev->dev, "queue_setup failed\n");
+ return status;
+}
+
+int be_update_queues(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int status;
+
+ if (netif_running(netdev))
+ be_close(netdev);
+
+ be_cancel_worker(adapter);
+
+ /* If any vectors have been shared with RoCE, we cannot re-program
+ * the MSIx table.
*/
- if (be_is_mc(adapter))
- adapter->cmd_privileges = MAX_PRIVILEGES;
+ if (!adapter->num_msix_roce_vec)
+ be_msix_disable(adapter);
- en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
+ be_clear_queues(adapter);
- if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
- en_flags |= BE_IF_FLAGS_RSS;
+ if (!msix_enabled(adapter)) {
+ status = be_msix_enable(adapter);
+ if (status)
+ return status;
+ }
- en_flags = en_flags & adapter->if_cap_flags;
+ status = be_setup_queues(adapter);
+ if (status)
+ return status;
- status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
- &adapter->if_handle, 0);
- if (status != 0)
+ be_schedule_worker(adapter);
+
+ if (netif_running(netdev))
+ status = be_open(netdev);
+
+ return status;
+}
+
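
be_update_queues() is the runtime path behind ethtool set-channels: quiesce, tear the queues down, resize the MSI-X allocation when RoCE is not sharing it, rebuild, and resume. The ordering is the point; a commented outline with stubs in place of the driver calls:

#include <stdbool.h>
#include <stdio.h>

static bool running = true, roce_sharing_vectors = false;

static void close_dev(void)       { puts("close"); }
static void cancel_worker(void)   { puts("cancel worker"); }
static void msix_disable(void)    { puts("msix disable"); }
static int  msix_enable(void)     { puts("msix enable"); return 0; }
static void clear_queues(void)    { puts("destroy queues"); }
static int  setup_queues(void)    { puts("create queues"); return 0; }
static void schedule_worker(void) { puts("reschedule worker"); }
static int  open_dev(void)        { puts("reopen"); return 0; }

/* Mirrors the ordering in be_update_queues(): everything that might
 * post to the queues is quiesced before they are destroyed, and the
 * MSI-X table is only reprogrammed when RoCE isn't sharing it.
 */
static int update_queues(void)
{
    int status;

    if (running)
        close_dev();
    cancel_worker();

    if (!roce_sharing_vectors)
        msix_disable();
    clear_queues();

    status = msix_enable();
    if (status)
        return status;
    status = setup_queues();
    if (status)
        return status;

    schedule_worker();
    return running ? open_dev() : 0;
}

int main(void) { return update_queues(); }
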
+static int be_setup(struct be_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+ u32 tx_fc, rx_fc, en_flags;
+ int status;
+
+ be_setup_init(adapter);
+
+ if (!lancer_chip(adapter))
+ be_cmd_req_native_mode(adapter);
+
+ status = be_get_config(adapter);
+ if (status)
goto err;
- memset(mac, 0, ETH_ALEN);
- active_mac = false;
- status = be_get_mac_addr(adapter, mac, adapter->if_handle,
- &active_mac, &adapter->pmac_id[0]);
- if (status != 0)
+ status = be_msix_enable(adapter);
+ if (status)
goto err;
- if (!active_mac) {
- status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
- &adapter->pmac_id[0], 0);
- if (status != 0)
- goto err;
- }
+ en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
+ if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
+ en_flags |= BE_IF_FLAGS_RSS;
+ en_flags = en_flags & be_if_cap_flags(adapter);
+ status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
+ &adapter->if_handle, 0);
+ if (status)
+ goto err;
- if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
- memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
- memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
- }
+ /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
+ rtnl_lock();
+ status = be_setup_queues(adapter);
+ rtnl_unlock();
+ if (status)
+ goto err;
- status = be_tx_qs_create(adapter);
+ be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
+ /* In UMC mode the FW does not return the right privileges.
+ * Override with the correct privilege, equivalent to a PF's.
+ */
+ if (be_is_mc(adapter))
+ adapter->cmd_privileges = MAX_PRIVILEGES;
+
+ status = be_mac_setup(adapter);
if (status)
goto err;
@@ -3205,8 +3242,8 @@ static int be_setup(struct be_adapter *adapter)
be_cmd_set_flow_control(adapter, adapter->tx_fc,
adapter->rx_fc);
- if (be_physfn(adapter)) {
- if (adapter->dev_num_vfs)
+ if (be_physfn(adapter) && num_vfs) {
+ if (be_max_vfs(adapter))
be_vf_setup(adapter);
else
dev_warn(dev, "device doesn't support SRIOV\n");
@@ -3216,8 +3253,7 @@ static int be_setup(struct be_adapter *adapter)
if (!status && be_pause_supported(adapter))
adapter->phy.fc_autoneg = 1;
- schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
- adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
+ be_schedule_worker(adapter);
return 0;
err:
be_clear(adapter);
@@ -3241,7 +3277,7 @@ static void be_netpoll(struct net_device *netdev)
#endif
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
-char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
+static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
static bool be_flash_redboot(struct be_adapter *adapter,
const u8 *p, u32 img_start, int image_size,
@@ -3298,7 +3334,7 @@ static bool is_comp_in_ufi(struct be_adapter *adapter,
}
-struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
+static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
int header_size,
const struct firmware *fw)
{
@@ -3760,6 +3796,74 @@ fw_exit:
return status;
}
+static int be_ndo_bridge_setlink(struct net_device *dev,
+ struct nlmsghdr *nlh)
+{
+ struct be_adapter *adapter = netdev_priv(dev);
+ struct nlattr *attr, *br_spec;
+ int rem;
+ int status = 0;
+ u16 mode = 0;
+
+ if (!sriov_enabled(adapter))
+ return -EOPNOTSUPP;
+
+ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+
+ nla_for_each_nested(attr, br_spec, rem) {
+ if (nla_type(attr) != IFLA_BRIDGE_MODE)
+ continue;
+
+ mode = nla_get_u16(attr);
+ if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
+ return -EINVAL;
+
+ status = be_cmd_set_hsw_config(adapter, 0, 0,
+ adapter->if_handle,
+ mode == BRIDGE_MODE_VEPA ?
+ PORT_FWD_TYPE_VEPA :
+ PORT_FWD_TYPE_VEB);
+ if (status)
+ goto err;
+
+ dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
+ mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+
+ return status;
+ }
+err:
+ dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
+ mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+
+ return status;
+}
+
+static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev,
+ u32 filter_mask)
+{
+ struct be_adapter *adapter = netdev_priv(dev);
+ int status = 0;
+ u8 hsw_mode;
+
+ if (!sriov_enabled(adapter))
+ return 0;
+
+ /* BE and Lancer chips support VEB mode only */
+ if (BEx_chip(adapter) || lancer_chip(adapter)) {
+ hsw_mode = PORT_FWD_TYPE_VEB;
+ } else {
+ status = be_cmd_get_hsw_config(adapter, NULL, 0,
+ adapter->if_handle, &hsw_mode);
+ if (status)
+ return 0;
+ }
+
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
+ hsw_mode == PORT_FWD_TYPE_VEPA ?
+ BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
+}
+
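
The two ndos above plug the adapter's embedded switch into the standard bridge-netlink plumbing, so its forwarding mode can be flipped and read back from user space with iproute2, e.g. (interface name illustrative):

    bridge link set dev eth0 hwmode vepa
    bridge link show dev eth0

VEPA forces VF-to-VF traffic out to the adjacent switch for policy enforcement; VEB keeps it switched locally in the adapter.
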
static const struct net_device_ops be_netdev_ops = {
.ndo_open = be_open,
.ndo_stop = be_close,
@@ -3778,13 +3882,13 @@ static const struct net_device_ops be_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = be_netpoll,
#endif
+ .ndo_bridge_setlink = be_ndo_bridge_setlink,
+ .ndo_bridge_getlink = be_ndo_bridge_getlink,
};
static void be_netdev_init(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
- struct be_eq_obj *eqo;
- int i;
netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
@@ -3807,9 +3911,6 @@ static void be_netdev_init(struct net_device *netdev)
netdev->netdev_ops = &be_netdev_ops;
SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
-
- for_all_evt_queues(adapter, eqo, i)
- netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
static void be_unmap_pci_bars(struct be_adapter *adapter)
@@ -3916,9 +4017,9 @@ static int be_ctrl_init(struct be_adapter *adapter)
memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
- rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
- &rx_filter->dma,
- GFP_KERNEL | __GFP_ZERO);
+ rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
+ rx_filter->size, &rx_filter->dma,
+ GFP_KERNEL);
if (rx_filter->va == NULL) {
status = -ENOMEM;
goto free_mbox;
@@ -3964,8 +4065,8 @@ static int be_stats_init(struct be_adapter *adapter)
/* BE3 and Skyhawk */
cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
- cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
- GFP_KERNEL | __GFP_ZERO);
+ cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
+ GFP_KERNEL);
if (cmd->va == NULL)
return -1;
return 0;
@@ -4072,6 +4173,7 @@ static int be_get_initial_config(struct be_adapter *adapter)
level = be_get_fw_log_level(adapter);
adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
+ adapter->cfg_num_qs = netif_get_num_default_rss_queues();
return 0;
}
@@ -4164,7 +4266,8 @@ static void be_worker(struct work_struct *work)
be_cmd_get_stats(adapter, &adapter->stats_cmd);
}
- if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
+ if (be_physfn(adapter) &&
+ MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
be_cmd_get_die_temperature(adapter);
for_all_rx_queues(adapter, rxo, i) {
@@ -4253,7 +4356,7 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
status = pci_enable_pcie_error_reporting(pdev);
if (status)
- dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
+ dev_info(&pdev->dev, "Could not use PCIe error reporting\n");
status = be_ctrl_init(adapter);
if (status)
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index f3d126dcc104..9cd5415fe017 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -60,7 +60,7 @@ static void _be_roce_dev_add(struct be_adapter *adapter)
*/
num_vec = adapter->num_msix_vec + adapter->num_msix_roce_vec;
dev_info.intr_mode = BE_INTERRUPT_MODE_MSIX;
- dev_info.msix.num_vectors = min(num_vec, MAX_ROCE_MSIX_VECTORS);
+ dev_info.msix.num_vectors = min(num_vec, MAX_MSIX_VECTORS);
/* provide start index of the vector,
* so in case of linear usage,
* it can use the base as starting point.
@@ -93,7 +93,7 @@ void be_roce_dev_add(struct be_adapter *adapter)
}
}
-void _be_roce_dev_remove(struct be_adapter *adapter)
+static void _be_roce_dev_remove(struct be_adapter *adapter)
{
if (ocrdma_drv && ocrdma_drv->remove && adapter->ocrdma_dev)
ocrdma_drv->remove(adapter->ocrdma_dev);
@@ -110,7 +110,7 @@ void be_roce_dev_remove(struct be_adapter *adapter)
}
}
-void _be_roce_dev_open(struct be_adapter *adapter)
+static void _be_roce_dev_open(struct be_adapter *adapter)
{
if (ocrdma_drv && adapter->ocrdma_dev &&
ocrdma_drv->state_change_handler)
@@ -126,7 +126,7 @@ void be_roce_dev_open(struct be_adapter *adapter)
}
}
-void _be_roce_dev_close(struct be_adapter *adapter)
+static void _be_roce_dev_close(struct be_adapter *adapter)
{
if (ocrdma_drv && adapter->ocrdma_dev &&
ocrdma_drv->state_change_handler)
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h
index 276572998463..2cd1129e19af 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.h
+++ b/drivers/net/ethernet/emulex/benet/be_roce.h
@@ -29,7 +29,7 @@ enum be_interrupt_mode {
BE_INTERRUPT_MODE_MSI = 2,
};
-#define MAX_ROCE_MSIX_VECTORS 16
+#define MAX_MSIX_VECTORS 32
struct be_dev_info {
u8 __iomem *db;
u64 unmapped_db;
@@ -45,7 +45,7 @@ struct be_dev_info {
struct {
int num_vectors;
int start_vector;
- u32 vector_list[MAX_ROCE_MSIX_VECTORS];
+ u32 vector_list[MAX_MSIX_VECTORS];
} msix;
};
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index cf579fb39bc5..4de8cfd149cf 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1030,8 +1030,8 @@ static int ethoc_probe(struct platform_device *pdev)
}
/* Allow the platform setup code to pass in a MAC address. */
- if (pdev->dev.platform_data) {
- struct ethoc_platform_data *pdata = pdev->dev.platform_data;
+ if (dev_get_platdata(&pdev->dev)) {
+ struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev);
memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
priv->phy_id = pdata->phy_id;
} else {
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 934e1ae279f0..212f44b3a773 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -778,10 +778,9 @@ static int ftgmac100_alloc_buffers(struct ftgmac100 *priv)
{
int i;
- priv->descs = dma_alloc_coherent(priv->dev,
- sizeof(struct ftgmac100_descs),
- &priv->descs_dma_addr,
- GFP_KERNEL | __GFP_ZERO);
+ priv->descs = dma_zalloc_coherent(priv->dev,
+ sizeof(struct ftgmac100_descs),
+ &priv->descs_dma_addr, GFP_KERNEL);
if (!priv->descs)
return -ENOMEM;
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 4658f4cc1969..8be5b40c0a12 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -732,10 +732,10 @@ static int ftmac100_alloc_buffers(struct ftmac100 *priv)
{
int i;
- priv->descs = dma_alloc_coherent(priv->dev,
- sizeof(struct ftmac100_descs),
- &priv->descs_dma_addr,
- GFP_KERNEL | __GFP_ZERO);
+ priv->descs = dma_zalloc_coherent(priv->dev,
+ sizeof(struct ftmac100_descs),
+ &priv->descs_dma_addr,
+ GFP_KERNEL);
if (!priv->descs)
return -ENOMEM;
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index ae236009f1a8..0120217a16dd 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -296,6 +296,9 @@ struct fec_enet_private {
/* The ring entries to be free()ed */
struct bufdesc *dirty_tx;
+ unsigned short tx_ring_size;
+ unsigned short rx_ring_size;
+
struct platform_device *pdev;
int opened;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index c610a2716be4..f9aacf5d8523 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -69,7 +69,6 @@ static void set_multicast_list(struct net_device *ndev);
#endif
#define DRIVER_NAME "fec"
-#define FEC_NAPI_WEIGHT 64
/* Pause frame field and FIFO threshold */
#define FEC_ENET_FCE (1 << 5)
@@ -239,22 +238,57 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
static int mii_cnt;
-static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, int is_ex)
+static inline
+struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
{
- struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
- if (is_ex)
- return (struct bufdesc *)(ex + 1);
+ struct bufdesc *new_bd = bdp + 1;
+ struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
+ struct bufdesc_ex *ex_base;
+ struct bufdesc *base;
+ int ring_size;
+
+ if (bdp >= fep->tx_bd_base) {
+ base = fep->tx_bd_base;
+ ring_size = fep->tx_ring_size;
+ ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
+ } else {
+ base = fep->rx_bd_base;
+ ring_size = fep->rx_ring_size;
+ ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
+ }
+
+ if (fep->bufdesc_ex)
+ return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ?
+ ex_base : ex_new_bd);
else
- return bdp + 1;
+ return (new_bd >= (base + ring_size)) ?
+ base : new_bd;
}
-static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, int is_ex)
+static inline
+struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
{
- struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
- if (is_ex)
- return (struct bufdesc *)(ex - 1);
+ struct bufdesc *new_bd = bdp - 1;
+ struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
+ struct bufdesc_ex *ex_base;
+ struct bufdesc *base;
+ int ring_size;
+
+ if (bdp >= fep->tx_bd_base) {
+ base = fep->tx_bd_base;
+ ring_size = fep->tx_ring_size;
+ ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
+ } else {
+ base = fep->rx_bd_base;
+ ring_size = fep->rx_ring_size;
+ ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
+ }
+
+ if (fep->bufdesc_ex)
+ return (struct bufdesc *)((ex_new_bd < ex_base) ?
+ (ex_new_bd + ring_size) : ex_new_bd);
else
- return bdp - 1;
+ return (new_bd < base) ? (new_bd + ring_size) : new_bd;
}
static void *swap_buffer(void *bufaddr, int len)
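
The rewritten helpers drop the per-descriptor wrap-bit test and instead wrap by comparing the candidate pointer against the ring base and size. The same idea on a hypothetical index-based ring, for illustration only:

struct demo_ring {
	int size;			/* number of descriptors */
};

/* advance, wrapping back to the base entry */
static int demo_ring_next(const struct demo_ring *r, int idx)
{
	return (idx + 1 >= r->size) ? 0 : idx + 1;
}

/* step back, wrapping to the last entry */
static int demo_ring_prev(const struct demo_ring *r, int idx)
{
	return (idx == 0) ? r->size - 1 : idx - 1;
}
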
@@ -380,7 +414,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
}
- bdp_pre = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+ bdp_pre = fec_enet_get_prevdesc(bdp, fep);
if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
!(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
fep->delay_work.trig_tx = true;
@@ -389,10 +423,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
/* If this was the last BD in the ring, start at the beginning again. */
- if (status & BD_ENET_TX_WRAP)
- bdp = fep->tx_bd_base;
- else
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_nextdesc(bdp, fep);
fep->cur_tx = bdp;
@@ -417,18 +448,18 @@ static void fec_enet_bd_init(struct net_device *dev)
/* Initialize the receive buffer descriptors. */
bdp = fep->rx_bd_base;
- for (i = 0; i < RX_RING_SIZE; i++) {
+ for (i = 0; i < fep->rx_ring_size; i++) {
/* Initialize the BD for every fragment in the page. */
if (bdp->cbd_bufaddr)
bdp->cbd_sc = BD_ENET_RX_EMPTY;
else
bdp->cbd_sc = 0;
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_nextdesc(bdp, fep);
}
/* Set the last buffer to wrap */
- bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_prevdesc(bdp, fep);
bdp->cbd_sc |= BD_SC_WRAP;
fep->cur_rx = fep->rx_bd_base;
@@ -436,7 +467,7 @@ static void fec_enet_bd_init(struct net_device *dev)
/* ...and the same for transmit */
bdp = fep->tx_bd_base;
fep->cur_tx = bdp;
- for (i = 0; i < TX_RING_SIZE; i++) {
+ for (i = 0; i < fep->tx_ring_size; i++) {
/* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = 0;
@@ -445,11 +476,11 @@ static void fec_enet_bd_init(struct net_device *dev)
fep->tx_skbuff[i] = NULL;
}
bdp->cbd_bufaddr = 0;
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_nextdesc(bdp, fep);
}
/* Set the last buffer to wrap */
- bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_prevdesc(bdp, fep);
bdp->cbd_sc |= BD_SC_WRAP;
fep->dirty_tx = bdp;
}
@@ -510,10 +541,10 @@ fec_restart(struct net_device *ndev, int duplex)
writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
if (fep->bufdesc_ex)
writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex)
- * RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
+ * fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
else
writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
- * RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
+ * fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
for (i = 0; i <= TX_RING_MOD_MASK; i++) {
@@ -727,10 +758,7 @@ fec_enet_tx(struct net_device *ndev)
bdp = fep->dirty_tx;
/* get next bdp of dirty_tx */
- if (bdp->cbd_sc & BD_ENET_TX_WRAP)
- bdp = fep->tx_bd_base;
- else
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_nextdesc(bdp, fep);
while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
@@ -800,10 +828,7 @@ fec_enet_tx(struct net_device *ndev)
fep->dirty_tx = bdp;
/* Update pointer to next buffer descriptor to be transmitted */
- if (status & BD_ENET_TX_WRAP)
- bdp = fep->tx_bd_base;
- else
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_nextdesc(bdp, fep);
/* Since we have freed up a buffer, the ring is no longer full
*/
@@ -993,10 +1018,8 @@ rx_processing_done:
}
/* Update BD pointer to next entry */
- if (status & BD_ENET_RX_WRAP)
- bdp = fep->rx_bd_base;
- else
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_nextdesc(bdp, fep);
+
/* Doing this here will keep the FEC running while we process
* incoming frames. On a heavily loaded network, we should be
* able to keep up at the expense of system resources.
@@ -1059,7 +1082,7 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
static void fec_get_mac(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
+ struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
unsigned char *iap, tmpaddr[ETH_ALEN];
/*
@@ -1099,10 +1122,10 @@ static void fec_get_mac(struct net_device *ndev)
* 4) FEC mac registers set by bootloader
*/
if (!is_valid_ether_addr(iap)) {
- *((unsigned long *) &tmpaddr[0]) =
- be32_to_cpu(readl(fep->hwp + FEC_ADDR_LOW));
- *((unsigned short *) &tmpaddr[4]) =
- be16_to_cpu(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
+ *((__be32 *) &tmpaddr[0]) =
+ cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
+ *((__be16 *) &tmpaddr[4]) =
+ cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
iap = &tmpaddr[0];
}
@@ -1662,7 +1685,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
struct bufdesc *bdp;
bdp = fep->rx_bd_base;
- for (i = 0; i < RX_RING_SIZE; i++) {
+ for (i = 0; i < fep->rx_ring_size; i++) {
skb = fep->rx_skbuff[i];
if (bdp->cbd_bufaddr)
@@ -1670,11 +1693,11 @@ static void fec_enet_free_buffers(struct net_device *ndev)
FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
if (skb)
dev_kfree_skb(skb);
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_nextdesc(bdp, fep);
}
bdp = fep->tx_bd_base;
- for (i = 0; i < TX_RING_SIZE; i++)
+ for (i = 0; i < fep->tx_ring_size; i++)
kfree(fep->tx_bounce[i]);
}
@@ -1686,7 +1709,7 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
struct bufdesc *bdp;
bdp = fep->rx_bd_base;
- for (i = 0; i < RX_RING_SIZE; i++) {
+ for (i = 0; i < fep->rx_ring_size; i++) {
skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
if (!skb) {
fec_enet_free_buffers(ndev);
@@ -1703,15 +1726,15 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
ebdp->cbd_esc = BD_ENET_RX_INT;
}
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_nextdesc(bdp, fep);
}
/* Set the last buffer to wrap. */
- bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_prevdesc(bdp, fep);
bdp->cbd_sc |= BD_SC_WRAP;
bdp = fep->tx_bd_base;
- for (i = 0; i < TX_RING_SIZE; i++) {
+ for (i = 0; i < fep->tx_ring_size; i++) {
fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
bdp->cbd_sc = 0;
@@ -1722,11 +1745,11 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
ebdp->cbd_esc = BD_ENET_TX_INT;
}
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_nextdesc(bdp, fep);
}
/* Set the last buffer to wrap. */
- bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_prevdesc(bdp, fep);
bdp->cbd_sc |= BD_SC_WRAP;
return 0;
@@ -1966,13 +1989,17 @@ static int fec_enet_init(struct net_device *ndev)
/* Get the Ethernet address */
fec_get_mac(ndev);
+ /* init the tx & rx ring size */
+ fep->tx_ring_size = TX_RING_SIZE;
+ fep->rx_ring_size = RX_RING_SIZE;
+
/* Set receive and transmit descriptor base. */
fep->rx_bd_base = cbd_base;
if (fep->bufdesc_ex)
fep->tx_bd_base = (struct bufdesc *)
- (((struct bufdesc_ex *)cbd_base) + RX_RING_SIZE);
+ (((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size);
else
- fep->tx_bd_base = cbd_base + RX_RING_SIZE;
+ fep->tx_bd_base = cbd_base + fep->rx_ring_size;
/* The FEC Ethernet specific entries in the device structure */
ndev->watchdog_timeo = TX_TIMEOUT;
@@ -1980,7 +2007,7 @@ static int fec_enet_init(struct net_device *ndev)
ndev->ethtool_ops = &fec_enet_ethtool_ops;
writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
- netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT);
+ netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN) {
/* enable hw VLAN support */
@@ -2055,10 +2082,6 @@ fec_probe(struct platform_device *pdev)
if (of_id)
pdev->id_entry = of_id->data;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r)
- return -ENXIO;
-
/* Init network device */
ndev = alloc_etherdev(sizeof(struct fec_enet_private));
if (!ndev)
@@ -2076,6 +2099,7 @@ fec_probe(struct platform_device *pdev)
fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
#endif
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
fep->hwp = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(fep->hwp)) {
ret = PTR_ERR(fep->hwp);
@@ -2091,7 +2115,7 @@ fec_probe(struct platform_device *pdev)
ret = of_get_phy_mode(pdev->dev.of_node);
if (ret < 0) {
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (pdata)
fep->phy_interface = pdata->phy;
else
@@ -2125,10 +2149,25 @@ fec_probe(struct platform_device *pdev)
fep->bufdesc_ex = 0;
}
- clk_prepare_enable(fep->clk_ahb);
- clk_prepare_enable(fep->clk_ipg);
- clk_prepare_enable(fep->clk_enet_out);
- clk_prepare_enable(fep->clk_ptp);
+ ret = clk_prepare_enable(fep->clk_ahb);
+ if (ret)
+ goto failed_clk;
+
+ ret = clk_prepare_enable(fep->clk_ipg);
+ if (ret)
+ goto failed_clk_ipg;
+
+ if (fep->clk_enet_out) {
+ ret = clk_prepare_enable(fep->clk_enet_out);
+ if (ret)
+ goto failed_clk_enet_out;
+ }
+
+ if (fep->clk_ptp) {
+ ret = clk_prepare_enable(fep->clk_ptp);
+ if (ret)
+ goto failed_clk_ptp;
+ }
fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
if (!IS_ERR(fep->reg_phy)) {
@@ -2159,14 +2198,10 @@ fec_probe(struct platform_device *pdev)
ret = irq;
goto failed_irq;
}
- ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
- if (ret) {
- while (--i >= 0) {
- irq = platform_get_irq(pdev, i);
- free_irq(irq, ndev);
- }
+ ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
+ IRQF_DISABLED, pdev->name, ndev);
+ if (ret)
goto failed_irq;
- }
}
ret = fec_enet_mii_init(pdev);
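
devm_request_irq() ties the IRQ's lifetime to the device, which is what lets the hand-rolled free_irq() loops in the error and remove paths be deleted further below. A hypothetical probe fragment showing the managed pattern (the demo_* names are placeholders):

static irqreturn_t demo_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	/* released automatically on probe failure or driver detach */
	return devm_request_irq(&pdev->dev, irq, demo_irq_handler, 0,
				pdev->name, NULL);
}
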
@@ -2190,19 +2225,19 @@ failed_register:
fec_enet_mii_remove(fep);
failed_mii_init:
failed_irq:
- for (i = 0; i < FEC_IRQ_NUM; i++) {
- irq = platform_get_irq(pdev, i);
- if (irq > 0)
- free_irq(irq, ndev);
- }
failed_init:
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
failed_regulator:
- clk_disable_unprepare(fep->clk_ahb);
+ if (fep->clk_ptp)
+ clk_disable_unprepare(fep->clk_ptp);
+failed_clk_ptp:
+ if (fep->clk_enet_out)
+ clk_disable_unprepare(fep->clk_enet_out);
+failed_clk_enet_out:
clk_disable_unprepare(fep->clk_ipg);
- clk_disable_unprepare(fep->clk_enet_out);
- clk_disable_unprepare(fep->clk_ptp);
+failed_clk_ipg:
+ clk_disable_unprepare(fep->clk_ahb);
failed_clk:
failed_ioremap:
free_netdev(ndev);
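
The reworked error path follows the usual goto-unwind idiom: each label undoes only the steps that succeeded before the failure, in reverse order. A minimal sketch with two hypothetical clocks:

static int demo_enable_clocks(struct clk *a, struct clk *b)
{
	int ret;

	ret = clk_prepare_enable(a);
	if (ret)
		return ret;

	ret = clk_prepare_enable(b);
	if (ret)
		goto err_disable_a;

	return 0;

err_disable_a:
	clk_disable_unprepare(a);
	return ret;
}
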
@@ -2215,25 +2250,21 @@ fec_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
- int i;
cancel_delayed_work_sync(&(fep->delay_work.delay_work));
unregister_netdev(ndev);
fec_enet_mii_remove(fep);
del_timer_sync(&fep->time_keep);
- for (i = 0; i < FEC_IRQ_NUM; i++) {
- int irq = platform_get_irq(pdev, i);
- if (irq > 0)
- free_irq(irq, ndev);
- }
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
- clk_disable_unprepare(fep->clk_ptp);
+ if (fep->clk_ptp)
+ clk_disable_unprepare(fep->clk_ptp);
if (fep->ptp_clock)
ptp_clock_unregister(fep->ptp_clock);
- clk_disable_unprepare(fep->clk_enet_out);
- clk_disable_unprepare(fep->clk_ahb);
+ if (fep->clk_enet_out)
+ clk_disable_unprepare(fep->clk_enet_out);
clk_disable_unprepare(fep->clk_ipg);
+ clk_disable_unprepare(fep->clk_ahb);
free_netdev(ndev);
return 0;
@@ -2250,9 +2281,12 @@ fec_suspend(struct device *dev)
fec_stop(ndev);
netif_device_detach(ndev);
}
- clk_disable_unprepare(fep->clk_enet_out);
- clk_disable_unprepare(fep->clk_ahb);
+ if (fep->clk_ptp)
+ clk_disable_unprepare(fep->clk_ptp);
+ if (fep->clk_enet_out)
+ clk_disable_unprepare(fep->clk_enet_out);
clk_disable_unprepare(fep->clk_ipg);
+ clk_disable_unprepare(fep->clk_ahb);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
@@ -2273,15 +2307,44 @@ fec_resume(struct device *dev)
return ret;
}
- clk_prepare_enable(fep->clk_enet_out);
- clk_prepare_enable(fep->clk_ahb);
- clk_prepare_enable(fep->clk_ipg);
+ ret = clk_prepare_enable(fep->clk_ahb);
+ if (ret)
+ goto failed_clk_ahb;
+
+ ret = clk_prepare_enable(fep->clk_ipg);
+ if (ret)
+ goto failed_clk_ipg;
+
+ if (fep->clk_enet_out) {
+ ret = clk_prepare_enable(fep->clk_enet_out);
+ if (ret)
+ goto failed_clk_enet_out;
+ }
+
+ if (fep->clk_ptp) {
+ ret = clk_prepare_enable(fep->clk_ptp);
+ if (ret)
+ goto failed_clk_ptp;
+ }
+
if (netif_running(ndev)) {
fec_restart(ndev, fep->full_duplex);
netif_device_attach(ndev);
}
return 0;
+
+failed_clk_ptp:
+ if (fep->clk_enet_out)
+ clk_disable_unprepare(fep->clk_enet_out);
+failed_clk_enet_out:
+ clk_disable_unprepare(fep->clk_ipg);
+failed_clk_ipg:
+ clk_disable_unprepare(fep->clk_ahb);
+failed_clk_ahb:
+ if (fep->reg_phy)
+ regulator_disable(fep->reg_phy);
+ return ret;
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
index 360a578c2bb7..e0528900db02 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
@@ -123,12 +123,10 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of)
static int mpc52xx_fec_mdio_remove(struct platform_device *of)
{
- struct device *dev = &of->dev;
- struct mii_bus *bus = dev_get_drvdata(dev);
+ struct mii_bus *bus = platform_get_drvdata(of);
struct mpc52xx_fec_mdio_priv *priv = bus->priv;
mdiobus_unregister(bus);
- dev_set_drvdata(dev, NULL);
iounmap(priv->regs);
kfree(priv);
mdiobus_free(bus);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 8de53a14a6f4..6b60582ce8cf 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -583,7 +583,6 @@ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
struct sk_buff *skb)
{
struct sk_buff *new_skb;
- struct fs_enet_private *fep = netdev_priv(dev);
/* Alloc new skb */
new_skb = netdev_alloc_skb(dev, skb->len + 4);
@@ -1000,6 +999,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
struct fs_enet_private *fep;
struct fs_platform_info *fpi;
const u32 *data;
+ struct clk *clk;
+ int err;
const u8 *mac_addr;
const char *phy_connection_type;
int privsize, len, ret = -ENODEV;
@@ -1037,6 +1038,20 @@ static int fs_enet_probe(struct platform_device *ofdev)
fpi->use_rmii = 1;
}
+ /* make clock lookup non-fatal (the driver is shared among platforms),
+ * but require enable to succeed when a clock was specified/found,
+ * keep a reference to the clock upon successful acquisition
+ */
+ clk = devm_clk_get(&ofdev->dev, "per");
+ if (!IS_ERR(clk)) {
+ err = clk_prepare_enable(clk);
+ if (err) {
+ ret = err;
+ goto out_free_fpi;
+ }
+ fpi->clk_per = clk;
+ }
+
privsize = sizeof(*fep) +
sizeof(struct sk_buff **) *
(fpi->rx_ring + fpi->tx_ring);
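
The lookup is deliberately tolerant: a missing "per" clock is not an error, but once a clock is found, enabling it must succeed. Later kernels grew devm_clk_get_optional() for exactly this case; a hedged sketch of the equivalent (that helper did not exist when this patch was written):

static int demo_get_per_clock(struct platform_device *ofdev,
			      struct clk **out)
{
	struct clk *clk = devm_clk_get_optional(&ofdev->dev, "per");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* a NULL (absent) clock is a no-op for the clk API */
	*out = clk;
	return clk_prepare_enable(clk);
}
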
@@ -1108,6 +1123,8 @@ out_free_dev:
free_netdev(ndev);
out_put:
of_node_put(fpi->phy_node);
+ if (fpi->clk_per)
+ clk_disable_unprepare(fpi->clk_per);
out_free_fpi:
kfree(fpi);
return ret;
@@ -1124,6 +1141,8 @@ static int fs_enet_remove(struct platform_device *ofdev)
fep->ops->cleanup_data(ndev);
dev_set_drvdata(fep->dev, NULL);
of_node_put(fep->fpi->phy_node);
+ if (fep->fpi->clk_per)
+ clk_disable_unprepare(fep->fpi->clk_per);
free_netdev(ndev);
return 0;
}
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index c93a05654b46..c4f65067cf7c 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -409,7 +409,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
priv->regs = priv->map + data->mii_offset;
new_bus->parent = &pdev->dev;
- dev_set_drvdata(&pdev->dev, new_bus);
+ platform_set_drvdata(pdev, new_bus);
if (data->get_tbipa) {
for_each_child_of_node(np, tbi) {
@@ -468,8 +468,6 @@ static int fsl_pq_mdio_remove(struct platform_device *pdev)
mdiobus_unregister(bus);
- dev_set_drvdata(device, NULL);
-
iounmap(priv->map);
mdiobus_free(bus);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 8d2db7b808b7..c4eaadeb572f 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -593,7 +593,6 @@ static int gfar_parse_group(struct device_node *np,
return -EINVAL;
}
- grp->grp_id = priv->num_grps;
grp->priv = priv;
spin_lock_init(&grp->grplock);
if (priv->mode == MQ_MG_MODE) {
@@ -1017,7 +1016,14 @@ static int gfar_probe(struct platform_device *ofdev)
/* We need to delay at least 3 TX clocks */
udelay(2);
- tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+ tempval = 0;
+ if (!priv->pause_aneg_en && priv->tx_pause_en)
+ tempval |= MACCFG1_TX_FLOW;
+ if (!priv->pause_aneg_en && priv->rx_pause_en)
+ tempval |= MACCFG1_RX_FLOW;
+ /* the soft reset bit is not self-resetting, so we need to
+ * clear it before resuming normal operation
+ */
gfar_write(&regs->maccfg1, tempval);
/* Initialize MACCFG2. */
@@ -1461,7 +1467,7 @@ static int init_phy(struct net_device *dev)
struct gfar_private *priv = netdev_priv(dev);
uint gigabit_support =
priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
- SUPPORTED_1000baseT_Full : 0;
+ GFAR_SUPPORTED_GBIT : 0;
phy_interface_t interface;
priv->oldlink = 0;
@@ -2052,6 +2058,24 @@ static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
return skip_txbd(bdp, 1, base, ring_size);
}
+/* eTSEC12: csum generation not supported for some fcb offsets */
+static inline bool gfar_csum_errata_12(struct gfar_private *priv,
+ unsigned long fcb_addr)
+{
+ return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
+ (fcb_addr % 0x20) > 0x18);
+}
+
+/* eTSEC76: csum generation for frames larger than 2500 may
+ * cause excess delays before start of transmission
+ */
+static inline bool gfar_csum_errata_76(struct gfar_private *priv,
+ unsigned int len)
+{
+ return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
+ (len > 2500));
+}
+
/* This is called by the kernel when a frame is ready for transmission.
* It is pointed to by the dev->hard_start_xmit function pointer
*/
@@ -2064,23 +2088,11 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct txfcb *fcb = NULL;
struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
u32 lstatus;
- int i, rq = 0, do_tstamp = 0;
+ int i, rq = 0;
+ int do_tstamp, do_csum, do_vlan;
u32 bufaddr;
unsigned long flags;
- unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN;
-
- /* TOE=1 frames larger than 2500 bytes may see excess delays
- * before start of transmission.
- */
- if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
- skb->ip_summed == CHECKSUM_PARTIAL &&
- skb->len > 2500)) {
- int ret;
-
- ret = skb_checksum_help(skb);
- if (ret)
- return ret;
- }
+ unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
rq = skb->queue_mapping;
tx_queue = priv->tx_queue[rq];
@@ -2088,21 +2100,23 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
base = tx_queue->tx_bd_base;
regs = tx_queue->grp->regs;
+ do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
+ do_vlan = vlan_tx_tag_present(skb);
+ do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en;
+
+ if (do_csum || do_vlan)
+ fcb_len = GMAC_FCB_LEN;
+
/* check if time stamp should be generated */
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
- priv->hwts_tx_en)) {
- do_tstamp = 1;
- fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
- }
+ if (unlikely(do_tstamp))
+ fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
/* make space for additional header when fcb is needed */
- if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
- vlan_tx_tag_present(skb) ||
- unlikely(do_tstamp)) &&
- (skb_headroom(skb) < fcb_length)) {
+ if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
struct sk_buff *skb_new;
- skb_new = skb_realloc_headroom(skb, fcb_length);
+ skb_new = skb_realloc_headroom(skb, fcb_len);
if (!skb_new) {
dev->stats.tx_errors++;
kfree_skb(skb);
@@ -2133,7 +2147,10 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Update transmit stats */
- tx_queue->stats.tx_bytes += skb->len;
+ bytes_sent = skb->len;
+ tx_queue->stats.tx_bytes += bytes_sent;
+ /* keep Tx bytes on wire for BQL accounting */
+ GFAR_CB(skb)->bytes_sent = bytes_sent;
tx_queue->stats.tx_packets++;
txbdp = txbdp_start = tx_queue->cur_tx;
@@ -2153,12 +2170,13 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
} else {
/* Place the fragment addresses and lengths into the TxBDs */
for (i = 0; i < nr_frags; i++) {
+ unsigned int frag_len;
/* Point at the next BD, wrapping as needed */
txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
- length = skb_shinfo(skb)->frags[i].size;
+ frag_len = skb_shinfo(skb)->frags[i].size;
- lstatus = txbdp->lstatus | length |
+ lstatus = txbdp->lstatus | frag_len |
BD_LFLAG(TXBD_READY);
/* Handle the last BD specially */
@@ -2168,7 +2186,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
bufaddr = skb_frag_dma_map(priv->dev,
&skb_shinfo(skb)->frags[i],
0,
- length,
+ frag_len,
DMA_TO_DEVICE);
/* set the TxBD length and buffer pointer */
@@ -2185,36 +2203,38 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
memset(skb->data, 0, GMAC_TXPAL_LEN);
}
- /* Set up checksumming */
- if (CHECKSUM_PARTIAL == skb->ip_summed) {
+ /* Add TxFCB if required */
+ if (fcb_len) {
fcb = gfar_add_fcb(skb);
- /* as specified by errata */
- if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) &&
- ((unsigned long)fcb % 0x20) > 0x18)) {
+ lstatus |= BD_LFLAG(TXBD_TOE);
+ }
+
+ /* Set up checksumming */
+ if (do_csum) {
+ gfar_tx_checksum(skb, fcb, fcb_len);
+
+ if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
+ unlikely(gfar_csum_errata_76(priv, skb->len))) {
__skb_pull(skb, GMAC_FCB_LEN);
skb_checksum_help(skb);
- } else {
- lstatus |= BD_LFLAG(TXBD_TOE);
- gfar_tx_checksum(skb, fcb, fcb_length);
+ if (do_vlan || do_tstamp) {
+ /* put back a new fcb for vlan/tstamp TOE */
+ fcb = gfar_add_fcb(skb);
+ } else {
+ /* Tx TOE not used */
+ lstatus &= ~(BD_LFLAG(TXBD_TOE));
+ fcb = NULL;
+ }
}
}
- if (vlan_tx_tag_present(skb)) {
- if (unlikely(NULL == fcb)) {
- fcb = gfar_add_fcb(skb);
- lstatus |= BD_LFLAG(TXBD_TOE);
- }
-
+ if (do_vlan)
gfar_tx_vlan(skb, fcb);
- }
/* Setup tx hardware time stamping if requested */
if (unlikely(do_tstamp)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- if (fcb == NULL)
- fcb = gfar_add_fcb(skb);
fcb->ptp = 1;
- lstatus |= BD_LFLAG(TXBD_TOE);
}
txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
@@ -2226,15 +2246,15 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
* the full frame length.
*/
if (unlikely(do_tstamp)) {
- txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;
+ txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len;
txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
- (skb_headlen(skb) - fcb_length);
+ (skb_headlen(skb) - fcb_len);
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
} else {
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
}
- netdev_tx_sent_queue(txq, skb->len);
+ netdev_tx_sent_queue(txq, bytes_sent);
/* We can work in parallel with gfar_clean_tx_ring(), except
* when modifying num_txbdfree. Note that we didn't grab the lock
@@ -2554,7 +2574,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
bdp = next_txbd(bdp, base, tx_ring_size);
}
- bytes_sent += skb->len;
+ bytes_sent += GFAR_CB(skb)->bytes_sent;
dev_kfree_skb_any(skb);
@@ -3014,6 +3034,41 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id)
return IRQ_HANDLED;
}
+static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
+{
+ struct phy_device *phydev = priv->phydev;
+ u32 val = 0;
+
+ if (!phydev->duplex)
+ return val;
+
+ if (!priv->pause_aneg_en) {
+ if (priv->tx_pause_en)
+ val |= MACCFG1_TX_FLOW;
+ if (priv->rx_pause_en)
+ val |= MACCFG1_RX_FLOW;
+ } else {
+ u16 lcl_adv, rmt_adv;
+ u8 flowctrl;
+ /* get link partner capabilities */
+ rmt_adv = 0;
+ if (phydev->pause)
+ rmt_adv = LPA_PAUSE_CAP;
+ if (phydev->asym_pause)
+ rmt_adv |= LPA_PAUSE_ASYM;
+
+ lcl_adv = mii_advertise_flowctrl(phydev->advertising);
+
+ flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+ if (flowctrl & FLOW_CTRL_TX)
+ val |= MACCFG1_TX_FLOW;
+ if (flowctrl & FLOW_CTRL_RX)
+ val |= MACCFG1_RX_FLOW;
+ }
+
+ return val;
+}
+
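
For a concrete feel of the resolution step: if the local side advertises symmetric and asymmetric pause while the partner advertises symmetric pause only, mii_resolve_flowctrl_fdx() resolves to pause in both directions. A worked example with the <linux/mii.h> constants (expected result per the resolution table, assuming full duplex):

u16 lcl_adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
u16 rmt_adv = LPA_PAUSE_CAP;
u8 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
/* expected: FLOW_CTRL_TX | FLOW_CTRL_RX (symmetric pause) */
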
/* Called every time the controller might need to be made
* aware of new link state. The PHY code conveys this
* information through variables in the phydev structure, and this
@@ -3032,6 +3087,7 @@ static void adjust_link(struct net_device *dev)
lock_tx_qs(priv);
if (phydev->link) {
+ u32 tempval1 = gfar_read(&regs->maccfg1);
u32 tempval = gfar_read(&regs->maccfg2);
u32 ecntrl = gfar_read(&regs->ecntrl);
@@ -3080,6 +3136,10 @@ static void adjust_link(struct net_device *dev)
priv->oldspeed = phydev->speed;
}
+ tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+ tempval1 |= gfar_get_flowctrl_cfg(priv);
+
+ gfar_write(&regs->maccfg1, tempval1);
gfar_write(&regs->maccfg2, tempval);
gfar_write(&regs->ecntrl, ecntrl);
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 04b552cd419d..04112b98ff5d 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -146,6 +146,10 @@ extern const char gfar_driver_version[];
| SUPPORTED_Autoneg \
| SUPPORTED_MII)
+#define GFAR_SUPPORTED_GBIT (SUPPORTED_1000baseT_Full \
+ | SUPPORTED_Pause \
+ | SUPPORTED_Asym_Pause)
+
/* TBI register addresses */
#define MII_TBICON 0x11
@@ -571,7 +575,7 @@ struct rxfcb {
};
struct gianfar_skb_cb {
- int alignamount;
+ unsigned int bytes_sent; /* bytes-on-wire (i.e. no FCB) */
};
#define GFAR_CB(skb) ((struct gianfar_skb_cb *)((skb)->cb))
@@ -1009,7 +1013,6 @@ struct gfar_irqinfo {
* @napi: the napi poll function
* @priv: back pointer to the priv structure
* @regs: the ioremapped register space for this group
- * @grp_id: group id for this group
* @irqinfo: TX/RX/ER irq data for this group
*/
@@ -1018,11 +1021,10 @@ struct gfar_priv_grp {
struct napi_struct napi;
struct gfar_private *priv;
struct gfar __iomem *regs;
- unsigned int grp_id;
+ unsigned int rstat;
unsigned long num_rx_queues;
unsigned long rx_bit_map;
/* cacheline 3 */
- unsigned int rstat;
unsigned int tstat;
unsigned long num_tx_queues;
unsigned long tx_bit_map;
@@ -1102,7 +1104,11 @@ struct gfar_private {
/* Wake-on-LAN enabled */
wol_en:1,
/* Enable priority based Tx scheduling in Hw */
- prio_sched_en:1;
+ prio_sched_en:1,
+ /* Flow control flags */
+ pause_aneg_en:1,
+ tx_pause_en:1,
+ rx_pause_en:1;
/* The total tx and rx ring size for the enabled queues */
unsigned int total_tx_ring_size;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 21cd88124ca9..d3d7ede27ef1 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -535,6 +535,78 @@ static int gfar_sringparam(struct net_device *dev,
return err;
}
+static void gfar_gpauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ epause->autoneg = !!priv->pause_aneg_en;
+ epause->rx_pause = !!priv->rx_pause_en;
+ epause->tx_pause = !!priv->tx_pause_en;
+}
+
+static int gfar_spauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 oldadv, newadv;
+
+ if (!(phydev->supported & SUPPORTED_Pause) ||
+ (!(phydev->supported & SUPPORTED_Asym_Pause) &&
+ (epause->rx_pause != epause->tx_pause)))
+ return -EINVAL;
+
+ priv->rx_pause_en = priv->tx_pause_en = 0;
+ if (epause->rx_pause) {
+ priv->rx_pause_en = 1;
+
+ if (epause->tx_pause) {
+ priv->tx_pause_en = 1;
+ /* FLOW_CTRL_RX & TX */
+ newadv = ADVERTISED_Pause;
+ } else /* FLOW_CTLR_RX */
+ newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+ } else if (epause->tx_pause) {
+ priv->tx_pause_en = 1;
+ /* FLOW_CTLR_TX */
+ newadv = ADVERTISED_Asym_Pause;
+ } else
+ newadv = 0;
+
+ if (epause->autoneg)
+ priv->pause_aneg_en = 1;
+ else
+ priv->pause_aneg_en = 0;
+
+ oldadv = phydev->advertising &
+ (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ if (oldadv != newadv) {
+ phydev->advertising &=
+ ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ phydev->advertising |= newadv;
+ if (phydev->autoneg)
+ /* inform link partner of our
+ * new flow ctrl settings
+ */
+ return phy_start_aneg(phydev);
+
+ if (!epause->autoneg) {
+ u32 tempval;
+ tempval = gfar_read(&regs->maccfg1);
+ tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+ if (priv->tx_pause_en)
+ tempval |= MACCFG1_TX_FLOW;
+ if (priv->rx_pause_en)
+ tempval |= MACCFG1_RX_FLOW;
+ gfar_write(&regs->maccfg1, tempval);
+ }
+ }
+
+ return 0;
+}
+
int gfar_set_features(struct net_device *dev, netdev_features_t features)
{
struct gfar_private *priv = netdev_priv(dev);
@@ -1806,6 +1878,8 @@ const struct ethtool_ops gfar_ethtool_ops = {
.set_coalesce = gfar_scoalesce,
.get_ringparam = gfar_gringparam,
.set_ringparam = gfar_sringparam,
+ .get_pauseparam = gfar_gpauseparam,
+ .set_pauseparam = gfar_spauseparam,
.get_strings = gfar_gstrings,
.get_sset_count = gfar_sset_count,
.get_ethtool_stats = gfar_fill_stats,
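
The rx/tx pause requests map onto the 802.3 advertisement bits in a slightly counter-intuitive way: receive-only pause has to advertise both bits. A hypothetical helper equivalent to the mapping inside gfar_spauseparam() above:

static u32 demo_pause_to_advertise(bool rx_pause, bool tx_pause)
{
	if (rx_pause && tx_pause)	/* symmetric pause */
		return ADVERTISED_Pause;
	if (rx_pause)			/* receive-only */
		return ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	if (tx_pause)			/* transmit-only */
		return ADVERTISED_Asym_Pause;
	return 0;
}
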
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 3c43dac894ec..5930c39672db 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3911,14 +3911,12 @@ static int ucc_geth_probe(struct platform_device* ofdev)
static int ucc_geth_remove(struct platform_device* ofdev)
{
- struct device *device = &ofdev->dev;
- struct net_device *dev = dev_get_drvdata(device);
+ struct net_device *dev = platform_get_drvdata(ofdev);
struct ucc_geth_private *ugeth = netdev_priv(dev);
unregister_netdev(dev);
free_netdev(dev);
ucc_geth_memclean(ugeth);
- dev_set_drvdata(device, NULL);
return 0;
}
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.h b/drivers/net/ethernet/i825xx/sun3_82586.h
index 93346f00486b..79aef681ac85 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.h
+++ b/drivers/net/ethernet/i825xx/sun3_82586.h
@@ -133,8 +133,8 @@ struct rfd_struct
unsigned char last; /* Bit15,Last Frame on List / Bit14,suspend */
unsigned short next; /* linkoffset to next RFD */
unsigned short rbd_offset; /* pointeroffset to RBD-buffer */
- unsigned char dest[6]; /* ethernet-address, destination */
- unsigned char source[6]; /* ethernet-address, source */
+ unsigned char dest[ETH_ALEN]; /* ethernet-address, destination */
+ unsigned char source[ETH_ALEN]; /* ethernet-address, source */
unsigned short length; /* 802.3 frame-length */
unsigned short zero_dummy; /* dummy */
};
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index d300a0c0eafc..6b5c7222342c 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2312,7 +2312,7 @@ static int emac_check_deps(struct emac_instance *dev,
if (deps[i].ofdev == NULL)
continue;
if (deps[i].drvdata == NULL)
- deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
+ deps[i].drvdata = platform_get_drvdata(deps[i].ofdev);
if (deps[i].drvdata != NULL)
there++;
}
@@ -2799,9 +2799,9 @@ static int emac_probe(struct platform_device *ofdev)
/* display more info about what's missing ? */
goto err_reg_unmap;
}
- dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
+ dev->mal = platform_get_drvdata(dev->mal_dev);
if (dev->mdio_dev != NULL)
- dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
+ dev->mdio_instance = platform_get_drvdata(dev->mdio_dev);
/* Register with MAL */
dev->commac.ops = &emac_commac_ops;
@@ -2892,7 +2892,7 @@ static int emac_probe(struct platform_device *ofdev)
* fully initialized
*/
wmb();
- dev_set_drvdata(&ofdev->dev, dev);
+ platform_set_drvdata(ofdev, dev);
/* There's a new kid in town ! Let's tell everybody */
wake_up_all(&emac_probe_wait);
@@ -2951,12 +2951,10 @@ static int emac_probe(struct platform_device *ofdev)
static int emac_remove(struct platform_device *ofdev)
{
- struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
+ struct emac_instance *dev = platform_get_drvdata(ofdev);
DBG(dev, "remove" NL);
- dev_set_drvdata(&ofdev->dev, NULL);
-
unregister_netdev(dev->ndev);
cancel_work_sync(&dev->reset_work);
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 856ea66c9223..dac564c25440 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -637,8 +637,8 @@ static int mal_probe(struct platform_device *ofdev)
bd_size = sizeof(struct mal_descriptor) *
(NUM_TX_BUFF * mal->num_tx_chans +
NUM_RX_BUFF * mal->num_rx_chans);
- mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
- GFP_KERNEL | __GFP_ZERO);
+ mal->bd_virt = dma_zalloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
+ GFP_KERNEL);
if (mal->bd_virt == NULL) {
err = -ENOMEM;
goto fail_unmap;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 70fd55968844..5d41aee69d16 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -106,7 +106,7 @@ struct ibmveth_stat ibmveth_stats[] = {
/* simple methods of getting data from the current rxq entry */
static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
{
- return adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off;
+ return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off);
}
static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
@@ -132,7 +132,7 @@ static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
- return adapter->rx_queue.queue_addr[adapter->rx_queue.index].length;
+ return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
}
static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 43a794fab9ff..84066bafe057 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -164,14 +164,26 @@ struct ibmveth_adapter {
u64 tx_send_failed;
};
+/*
+ * We pass struct ibmveth_buf_desc_fields to the hypervisor in registers,
+ * so we don't need to byteswap the two elements. However since we use
+ * a union (ibmveth_buf_desc) to convert from the struct to a u64 we
+ * do end up with endian specific ordering of the elements and that
+ * needs correcting.
+ */
struct ibmveth_buf_desc_fields {
+#ifdef __BIG_ENDIAN
+ u32 flags_len;
+ u32 address;
+#else
+ u32 address;
u32 flags_len;
+#endif
#define IBMVETH_BUF_VALID 0x80000000
#define IBMVETH_BUF_TOGGLE 0x40000000
#define IBMVETH_BUF_NO_CSUM 0x02000000
#define IBMVETH_BUF_CSUM_GOOD 0x01000000
#define IBMVETH_BUF_LEN_MASK 0x00FFFFFF
- u32 address;
};
union ibmveth_buf_desc {
@@ -180,7 +192,7 @@ union ibmveth_buf_desc {
};
struct ibmveth_rx_q_entry {
- u32 flags_off;
+ __be32 flags_off;
#define IBMVETH_RXQ_TOGGLE 0x80000000
#define IBMVETH_RXQ_TOGGLE_SHIFT 31
#define IBMVETH_RXQ_VALID 0x40000000
@@ -188,7 +200,8 @@ struct ibmveth_rx_q_entry {
#define IBMVETH_RXQ_CSUM_GOOD 0x01000000
#define IBMVETH_RXQ_OFF_MASK 0x0000FFFF
- u32 length;
+ __be32 length;
+ /* correlator is only used by the OS, no need to byte swap */
u64 correlator;
};
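
Typing the hypervisor-written fields as __be32 is more than documentation: sparse can then flag any access that forgets the byteswap. A minimal sketch of the accessor style the driver now uses:

/* raw value as the hypervisor wrote it, converted for the host CPU */
static inline u32 demo_rxq_length(const struct ibmveth_rx_q_entry *e)
{
	return be32_to_cpu(e->length);
}
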
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
index 1fde90b96685..bdf5023724e7 100644
--- a/drivers/net/ethernet/icplus/ipg.c
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -1004,7 +1004,7 @@ static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev)
/* Check to see if the NIC has been initialized via nic_open,
* before trying to read statistic registers.
*/
- if (!test_bit(__LINK_STATE_START, &dev->state))
+ if (!netif_running(dev))
return &sp->stats;
sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK);
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 5115ae76a5d1..ada6e210279f 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1175,15 +1175,12 @@ static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */
}
- netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
- "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
- c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
- netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
- "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
- c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
- netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
- "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
- c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n",
+ c + 0);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n",
+ c + 8);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n",
+ c + 16);
return 0;
}
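
The %*ph printk extension prints a small buffer as space-separated hex bytes, with the field width giving the byte count, replacing long runs of %02X specifiers (note the separator becomes a space rather than ':'). A usage sketch:

u8 buf[8] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x11, 0x22, 0x33 };

/* prints: cfg=de ad be ef 00 11 22 33 */
pr_debug("cfg=%8ph\n", buf);
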
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 82a967c95598..73a8aeefb92a 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1019,8 +1019,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
txdr->size = ALIGN(txdr->size, 4096);
- txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
- GFP_KERNEL | __GFP_ZERO);
+ txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+ GFP_KERNEL);
if (!txdr->desc) {
ret_val = 2;
goto err_nomem;
@@ -1077,8 +1077,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
}
rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
- rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
- GFP_KERNEL | __GFP_ZERO);
+ rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+ GFP_KERNEL);
if (!rxdr->desc) {
ret_val = 6;
goto err_nomem;
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 4c303e2a7cb3..8fed74e3fa53 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1011,6 +1011,11 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
/* Must release MDIO ownership and mutex after MAC reset. */
switch (hw->mac.type) {
+ case e1000_82573:
+ /* Release mutex only if the hw semaphore is acquired */
+ if (!ret_val)
+ e1000_put_hw_semaphore_82573(hw);
+ break;
case e1000_82574:
case e1000_82583:
/* Release mutex only if the hw semaphore is acquired */
@@ -2057,6 +2062,7 @@ const struct e1000_info e1000_82583_info = {
| FLAG_HAS_JUMBO_FRAMES
| FLAG_HAS_CTRLEXT_ON_LOAD,
.flags2 = FLAG2_DISABLE_ASPM_L0S
+ | FLAG2_DISABLE_ASPM_L1
| FLAG2_NO_DISABLE_RX,
.pba = 32,
.max_hw_frame_size = DEFAULT_JUMBO,
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index ffbc08f56c40..ad0edd11015d 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -90,9 +90,6 @@ struct e1000_info;
#define E1000_MNG_VLAN_NONE (-1)
-/* Number of packet split data buffers (not including the header buffer) */
-#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
-
#define DEFAULT_JUMBO 9234
/* Time to wait before putting the device into D3 if there's no link (in ms). */
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 59c22bf18701..a8633b8f0ac5 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -173,7 +173,7 @@ static int e1000_get_settings(struct net_device *netdev,
speed = adapter->link_speed;
ecmd->duplex = adapter->link_duplex - 1;
}
- } else {
+ } else if (!pm_runtime_suspended(netdev->dev.parent)) {
u32 status = er32(STATUS);
if (status & E1000_STATUS_LU) {
if (status & E1000_STATUS_SPEED_1000)
@@ -264,6 +264,9 @@ static int e1000_set_settings(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ int ret_val = 0;
+
+ pm_runtime_get_sync(netdev->dev.parent);
/* When SoL/IDER sessions are active, autoneg/speed/duplex
* cannot be changed
@@ -271,7 +274,8 @@ static int e1000_set_settings(struct net_device *netdev,
if (hw->phy.ops.check_reset_block &&
hw->phy.ops.check_reset_block(hw)) {
e_err("Cannot change link characteristics when SoL/IDER is active.\n");
- return -EINVAL;
+ ret_val = -EINVAL;
+ goto out;
}
/* MDI setting is only allowed when autoneg enabled because
@@ -279,13 +283,16 @@ static int e1000_set_settings(struct net_device *netdev,
* duplex is forced.
*/
if (ecmd->eth_tp_mdix_ctrl) {
- if (hw->phy.media_type != e1000_media_type_copper)
- return -EOPNOTSUPP;
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ ret_val = -EOPNOTSUPP;
+ goto out;
+ }
if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
(ecmd->autoneg != AUTONEG_ENABLE)) {
e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
- return -EINVAL;
+ ret_val = -EINVAL;
+ goto out;
}
}
@@ -307,8 +314,8 @@ static int e1000_set_settings(struct net_device *netdev,
u32 speed = ethtool_cmd_speed(ecmd);
/* calling this overrides forced MDI setting */
if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
- clear_bit(__E1000_RESETTING, &adapter->state);
- return -EINVAL;
+ ret_val = -EINVAL;
+ goto out;
}
}
@@ -331,8 +338,10 @@ static int e1000_set_settings(struct net_device *netdev,
e1000e_reset(adapter);
}
+out:
+ pm_runtime_put_sync(netdev->dev.parent);
clear_bit(__E1000_RESETTING, &adapter->state);
- return 0;
+ return ret_val;
}
static void e1000_get_pauseparam(struct net_device *netdev,
@@ -366,6 +375,8 @@ static int e1000_set_pauseparam(struct net_device *netdev,
while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
usleep_range(1000, 2000);
+ pm_runtime_get_sync(netdev->dev.parent);
+
if (adapter->fc_autoneg == AUTONEG_ENABLE) {
hw->fc.requested_mode = e1000_fc_default;
if (netif_running(adapter->netdev)) {
@@ -398,6 +409,7 @@ static int e1000_set_pauseparam(struct net_device *netdev,
}
out:
+ pm_runtime_put_sync(netdev->dev.parent);
clear_bit(__E1000_RESETTING, &adapter->state);
return retval;
}
@@ -428,6 +440,8 @@ static void e1000_get_regs(struct net_device *netdev,
u32 *regs_buff = p;
u16 phy_data;
+ pm_runtime_get_sync(netdev->dev.parent);
+
memset(p, 0, E1000_REGS_LEN * sizeof(u32));
regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
@@ -472,6 +486,8 @@ static void e1000_get_regs(struct net_device *netdev,
e1e_rphy(hw, MII_STAT1000, &phy_data);
regs_buff[24] = (u32)phy_data; /* phy local receiver status */
regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
+
+ pm_runtime_put_sync(netdev->dev.parent);
}
static int e1000_get_eeprom_len(struct net_device *netdev)
@@ -504,6 +520,8 @@ static int e1000_get_eeprom(struct net_device *netdev,
if (!eeprom_buff)
return -ENOMEM;
+ pm_runtime_get_sync(netdev->dev.parent);
+
if (hw->nvm.type == e1000_nvm_eeprom_spi) {
ret_val = e1000_read_nvm(hw, first_word,
last_word - first_word + 1,
@@ -517,6 +535,8 @@ static int e1000_get_eeprom(struct net_device *netdev,
}
}
+ pm_runtime_put_sync(netdev->dev.parent);
+
if (ret_val) {
/* a read error occurred, throw away the result */
memset(eeprom_buff, 0xff, sizeof(u16) *
@@ -566,6 +586,8 @@ static int e1000_set_eeprom(struct net_device *netdev,
ptr = (void *)eeprom_buff;
+ pm_runtime_get_sync(netdev->dev.parent);
+
if (eeprom->offset & 1) {
/* need read/modify/write of first changed EEPROM word */
/* only the second byte of the word is being modified */
@@ -606,6 +628,7 @@ static int e1000_set_eeprom(struct net_device *netdev,
ret_val = e1000e_update_nvm_checksum(hw);
out:
+ pm_runtime_put_sync(netdev->dev.parent);
kfree(eeprom_buff);
return ret_val;
}
@@ -701,6 +724,8 @@ static int e1000_set_ringparam(struct net_device *netdev,
}
}
+ pm_runtime_get_sync(netdev->dev.parent);
+
e1000e_down(adapter);
/* We can't just free everything and then setup again, because the
@@ -739,6 +764,7 @@ err_setup_rx:
e1000e_free_tx_resources(temp_tx);
err_setup:
e1000e_up(adapter);
+ pm_runtime_put_sync(netdev->dev.parent);
free_temp:
vfree(temp_tx);
vfree(temp_rx);
@@ -1639,7 +1665,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
ret_val = 13; /* ret_val is the same as mis-compare */
break;
}
- if (jiffies >= (time + 20)) {
+ if (time_after(jiffies, time + 20)) {
ret_val = 14; /* error code for time out error */
break;
}
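
time_after() compares jiffies with wraparound-safe signed arithmetic; a plain >= misbehaves silently when the counter wraps. An illustrative sketch (demo_done() is hypothetical):

unsigned long deadline = jiffies + msecs_to_jiffies(200);

while (!demo_done()) {
	if (time_after(jiffies, deadline)) {
		/* timed out */
		break;
	}
	cpu_relax();
}
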
@@ -1732,6 +1758,8 @@ static void e1000_diag_test(struct net_device *netdev,
u8 autoneg;
bool if_running = netif_running(netdev);
+ pm_runtime_get_sync(netdev->dev.parent);
+
set_bit(__E1000_TESTING, &adapter->state);
if (!if_running) {
@@ -1817,6 +1845,8 @@ static void e1000_diag_test(struct net_device *netdev,
}
msleep_interruptible(4 * 1000);
+
+ pm_runtime_put_sync(netdev->dev.parent);
}
static void e1000_get_wol(struct net_device *netdev,
@@ -1891,6 +1921,8 @@ static int e1000_set_phys_id(struct net_device *netdev,
switch (state) {
case ETHTOOL_ID_ACTIVE:
+ pm_runtime_get_sync(netdev->dev.parent);
+
if (!hw->mac.ops.blink_led)
return 2; /* cycle on/off twice per second */
@@ -1902,6 +1934,7 @@ static int e1000_set_phys_id(struct net_device *netdev,
e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
hw->mac.ops.led_off(hw);
hw->mac.ops.cleanup_led(hw);
+ pm_runtime_put_sync(netdev->dev.parent);
break;
case ETHTOOL_ID_ON:
@@ -1912,6 +1945,7 @@ static int e1000_set_phys_id(struct net_device *netdev,
hw->mac.ops.led_off(hw);
break;
}
+
return 0;
}
@@ -1950,11 +1984,15 @@ static int e1000_set_coalesce(struct net_device *netdev,
adapter->itr_setting = adapter->itr & ~3;
}
+ pm_runtime_get_sync(netdev->dev.parent);
+
if (adapter->itr_setting != 0)
e1000e_write_itr(adapter, adapter->itr);
else
e1000e_write_itr(adapter, 0);
+ pm_runtime_put_sync(netdev->dev.parent);
+
return 0;
}
@@ -1968,7 +2006,9 @@ static int e1000_nway_reset(struct net_device *netdev)
if (!adapter->hw.mac.autoneg)
return -EINVAL;
+ pm_runtime_get_sync(netdev->dev.parent);
e1000e_reinit_locked(adapter);
+ pm_runtime_put_sync(netdev->dev.parent);
return 0;
}
@@ -1982,7 +2022,12 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
int i;
char *p = NULL;
+ pm_runtime_get_sync(netdev->dev.parent);
+
e1000e_get_stats64(netdev, &net_stats);
+
+ pm_runtime_put_sync(netdev->dev.parent);
+
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
switch (e1000_gstrings_stats[i].type) {
case NETDEV_STATS:
@@ -2033,7 +2078,11 @@ static int e1000_get_rxnfc(struct net_device *netdev,
case ETHTOOL_GRXFH: {
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 mrqc = er32(MRQC);
+ u32 mrqc;
+
+ pm_runtime_get_sync(netdev->dev.parent);
+ mrqc = er32(MRQC);
+ pm_runtime_put_sync(netdev->dev.parent);
if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK))
return 0;
@@ -2096,9 +2145,13 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
+ pm_runtime_get_sync(netdev->dev.parent);
+
ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
+ if (ret_val) {
+ pm_runtime_put_sync(netdev->dev.parent);
return -EBUSY;
+ }
/* EEE Capability */
ret_val = e1000_read_emi_reg_locked(hw, cap_addr, &phy_data);
@@ -2117,14 +2170,11 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
/* EEE PCS Status */
ret_val = e1000_read_emi_reg_locked(hw, pcs_stat_addr, &phy_data);
+ if (ret_val)
+ goto release;
if (hw->phy.type == e1000_phy_82579)
phy_data <<= 8;
-release:
- hw->phy.ops.release(hw);
- if (ret_val)
- return -ENODATA;
-
/* Result of the EEE auto negotiation - there is no register that
* has the status of the EEE negotiation so do a best-guess based
* on whether Tx or Rx LPI indications have been received.
@@ -2136,7 +2186,14 @@ release:
edata->tx_lpi_enabled = true;
edata->tx_lpi_timer = er32(LPIC) >> E1000_LPIC_LPIET_SHIFT;
- return 0;
+release:
+ hw->phy.ops.release(hw);
+ if (ret_val)
+ ret_val = -ENODATA;
+
+ pm_runtime_put_sync(netdev->dev.parent);
+
+ return ret_val;
}
static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
@@ -2169,12 +2226,16 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled;
+ pm_runtime_get_sync(netdev->dev.parent);
+
/* reset the link */
if (netif_running(netdev))
e1000e_reinit_locked(adapter);
else
e1000e_reset(adapter);
+ pm_runtime_put_sync(netdev->dev.parent);
+
return 0;
}
@@ -2212,19 +2273,7 @@ static int e1000e_get_ts_info(struct net_device *netdev,
return 0;
}
-static int e1000e_ethtool_begin(struct net_device *netdev)
-{
- return pm_runtime_get_sync(netdev->dev.parent);
-}
-
-static void e1000e_ethtool_complete(struct net_device *netdev)
-{
- pm_runtime_put_sync(netdev->dev.parent);
-}
-
static const struct ethtool_ops e1000_ethtool_ops = {
- .begin = e1000e_ethtool_begin,
- .complete = e1000e_ethtool_complete,
.get_settings = e1000_get_settings,
.set_settings = e1000_set_settings,
.get_drvinfo = e1000_get_drvinfo,
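
With the blanket .begin/.complete hooks gone, each ethtool op that touches hardware now brackets its register access explicitly, so ops that never touch the device no longer wake it. A hypothetical sketch of the pattern:

static int demo_hw_access(struct net_device *netdev)
{
	return 0;	/* placeholder for real register access */
}

static int demo_ethtool_op(struct net_device *netdev)
{
	int ret;

	/* resume the device before touching registers */
	pm_runtime_get_sync(netdev->dev.parent);

	ret = demo_hw_access(netdev);

	/* allow the device to runtime-suspend again */
	pm_runtime_put_sync(netdev->dev.parent);

	return ret;
}
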
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index a6f903a9b773..b7f38435d1fd 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -90,6 +90,10 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_LPT_I217_V 0x153B
#define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A
#define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559
+#define E1000_DEV_ID_PCH_I218_LM2 0x15A0
+#define E1000_DEV_ID_PCH_I218_V2 0x15A1
+#define E1000_DEV_ID_PCH_I218_LM3 0x15A2 /* Wildcat Point PCH */
+#define E1000_DEV_ID_PCH_I218_V3 0x15A3 /* Wildcat Point PCH */
#define E1000_REVISION_4 4
@@ -227,6 +231,10 @@ union e1000_rx_desc_extended {
};
#define MAX_PS_BUFFERS 4
+
+/* Number of packet split data buffers (not including the header buffer) */
+#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
+
/* Receive Descriptor - Packet Split */
union e1000_rx_desc_packet_split {
struct {
@@ -251,7 +259,8 @@ union e1000_rx_desc_packet_split {
} middle;
struct {
__le16 header_status;
- __le16 length[3]; /* length of buffers 1-3 */
+ /* length of buffers 1-3 */
+ __le16 length[PS_PAGE_BUFFERS];
} upper;
__le64 reserved;
} wb; /* writeback */
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 9dde390f7e71..af08188d7e62 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -185,6 +185,7 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
u32 phy_id = 0;
s32 ret_val;
u16 retry_count;
+ u32 mac_reg = 0;
for (retry_count = 0; retry_count < 2; retry_count++) {
ret_val = e1e_rphy_locked(hw, MII_PHYSID1, &phy_reg);
@@ -203,11 +204,11 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
if (hw->phy.id) {
if (hw->phy.id == phy_id)
- return true;
+ goto out;
} else if (phy_id) {
hw->phy.id = phy_id;
hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
- return true;
+ goto out;
}
/* In case the PHY needs to be in mdio slow mode,
@@ -219,7 +220,22 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
ret_val = e1000e_get_phy_id(hw);
hw->phy.ops.acquire(hw);
- return !ret_val;
+ if (ret_val)
+ return false;
+out:
+ if (hw->mac.type == e1000_pch_lpt) {
+ /* Unforce SMBus mode in PHY */
+ e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
+ phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
+ e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
+
+ /* Unforce SMBus mode in MAC */
+ mac_reg = er32(CTRL_EXT);
+ mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_reg);
+ }
+
+ return true;
}
/**
@@ -233,7 +249,6 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{
u32 mac_reg, fwsm = er32(FWSM);
s32 ret_val;
- u16 phy_reg;
/* Gate automatic PHY configuration by hardware on managed and
* non-managed 82579 and newer adapters.
@@ -262,22 +277,16 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
ew32(CTRL_EXT, mac_reg);
+ /* Wait 50 milliseconds for MAC to finish any retries
+ * that it might be trying to perform from previous
+ * attempts to acknowledge any phy read requests.
+ */
+ msleep(50);
+
/* fall-through */
case e1000_pch2lan:
- if (e1000_phy_is_accessible_pchlan(hw)) {
- if (hw->mac.type == e1000_pch_lpt) {
- /* Unforce SMBus mode in PHY */
- e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
- phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
- e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
-
- /* Unforce SMBus mode in MAC */
- mac_reg = er32(CTRL_EXT);
- mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
- ew32(CTRL_EXT, mac_reg);
- }
+ if (e1000_phy_is_accessible_pchlan(hw))
break;
- }
/* fall-through */
case e1000_pchlan:
@@ -287,6 +296,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
if (hw->phy.ops.check_reset_block(hw)) {
e_dbg("Required LANPHYPC toggle blocked by ME\n");
+ ret_val = -E1000_ERR_PHY;
break;
}
@@ -298,15 +308,6 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
ew32(FEXTNVM3, mac_reg);
- if (hw->mac.type == e1000_pch_lpt) {
- /* Toggling LANPHYPC brings the PHY out of SMBus mode
- * So ensure that the MAC is also out of SMBus mode
- */
- mac_reg = er32(CTRL_EXT);
- mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
- ew32(CTRL_EXT, mac_reg);
- }
-
/* Toggle LANPHYPC Value bit */
mac_reg = er32(CTRL);
mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
@@ -325,6 +326,21 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
usleep_range(5000, 10000);
} while (!(er32(CTRL_EXT) &
E1000_CTRL_EXT_LPCD) && count--);
+ usleep_range(30000, 60000);
+ if (e1000_phy_is_accessible_pchlan(hw))
+ break;
+
+ /* Toggling LANPHYPC brings the PHY out of SMBus mode
+ * so ensure that the MAC is also out of SMBus mode
+ */
+ mac_reg = er32(CTRL_EXT);
+ mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_reg);
+
+ if (e1000_phy_is_accessible_pchlan(hw))
+ break;
+
+ ret_val = -E1000_ERR_PHY;
}
break;
default:
@@ -332,13 +348,14 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
}
hw->phy.ops.release(hw);
-
- /* Reset the PHY before any access to it. Doing so, ensures
- * that the PHY is in a known good state before we read/write
- * PHY registers. The generic reset is sufficient here,
- * because we haven't determined the PHY type yet.
- */
- ret_val = e1000e_phy_hw_reset_generic(hw);
+ if (!ret_val) {
+ /* Reset the PHY before any access to it. Doing so ensures
+ * that the PHY is in a known good state before we read/write
+ * PHY registers. The generic reset is sufficient here,
+ * because we haven't determined the PHY type yet.
+ */
+ ret_val = e1000e_phy_hw_reset_generic(hw);
+ }
out:
/* Ungate automatic PHY configuration on non-managed 82579 */
@@ -793,29 +810,31 @@ release:
* When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
* preventing further DMA write requests. Workaround the issue by disabling
* the de-assertion of the clock request when in 1Gbps mode.
+ * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
+ * speeds in order to avoid Tx hangs.
**/
static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
{
u32 fextnvm6 = er32(FEXTNVM6);
+ u32 status = er32(STATUS);
s32 ret_val = 0;
+ u16 reg;
- if (link && (er32(STATUS) & E1000_STATUS_SPEED_1000)) {
- u16 kmrn_reg;
-
+ if (link && (status & E1000_STATUS_SPEED_1000)) {
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
ret_val =
e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
- &kmrn_reg);
+ &reg);
if (ret_val)
goto release;
ret_val =
e1000e_write_kmrn_reg_locked(hw,
E1000_KMRNCTRLSTA_K1_CONFIG,
- kmrn_reg &
+ reg &
~E1000_KMRNCTRLSTA_K1_ENABLE);
if (ret_val)
goto release;
@@ -827,12 +846,45 @@ static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
ret_val =
e1000e_write_kmrn_reg_locked(hw,
E1000_KMRNCTRLSTA_K1_CONFIG,
- kmrn_reg);
+ reg);
release:
hw->phy.ops.release(hw);
} else {
/* clear FEXTNVM6 bit 8 on link down or 10/100 */
- ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
+ fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
+
+ if (!link || ((status & E1000_STATUS_SPEED_100) &&
+ (status & E1000_STATUS_FD)))
+ goto update_fextnvm6;
+
+ ret_val = e1e_rphy(hw, I217_INBAND_CTRL, &reg);
+ if (ret_val)
+ return ret_val;
+
+ /* Clear link status transmit timeout */
+ reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
+
+ if (status & E1000_STATUS_SPEED_100) {
+ /* Set inband Tx timeout to 5x10us for 100Half */
+ reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
+
+ /* Do not extend the K1 entry latency for 100Half */
+ fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
+ } else {
+ /* Set inband Tx timeout to 50x10us for 10Full/Half */
+ reg |= 50 <<
+ I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
+
+ /* Extend the K1 entry latency for 10 Mbps */
+ fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
+ }
+
+ ret_val = e1e_wphy(hw, I217_INBAND_CTRL, reg);
+ if (ret_val)
+ return ret_val;
+
+update_fextnvm6:
+ ew32(FEXTNVM6, fextnvm6);
}
return ret_val;
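The new 10/100 branch above packs a retransmission count into a 6-bit field of I217_INBAND_CTRL. A self-contained demo of that mask-and-shift update, using the mask 0x3F00 and shift 8 from the ich8lan.h hunk further down; counts are in 10us units, so 5 means 50us (100Half) and 50 means 500us (10 Mbps):

	#include <stdint.h>
	#include <stdio.h>

	#define TIMEOUT_MASK  0x3F00	/* 6-bit link-status Tx timeout field */
	#define TIMEOUT_SHIFT 8

	static uint16_t set_timeout(uint16_t reg, uint16_t count)
	{
		reg &= ~TIMEOUT_MASK;		/* clear the old field */
		reg |= count << TIMEOUT_SHIFT;	/* insert the new count */
		return reg;
	}

	int main(void)
	{
		printf("0x%04x\n", set_timeout(0xFFFF, 5));	/* prints 0xc5ff */
		return 0;
	}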
@@ -993,7 +1045,9 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
/* Work-around I218 hang issue */
if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
- (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) {
ret_val = e1000_k1_workaround_lpt_lp(hw, link);
if (ret_val)
return ret_val;
@@ -4168,7 +4222,9 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
u16 phy_reg, device_id = hw->adapter->pdev->device;
if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
- (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
+ (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
+ (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
+ (device_id == E1000_DEV_ID_PCH_I218_V3)) {
u32 fextnvm6 = er32(FEXTNVM6);
ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 80034a2b297c..59865695b282 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -93,6 +93,7 @@
#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100
+#define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200
#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
@@ -197,6 +198,11 @@
#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */
+/* Inband Control */
+#define I217_INBAND_CTRL PHY_REG(770, 18)
+#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK 0x3F00
+#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT 8
+
/* PHY Low Power Idle Control */
#define I82579_LPI_CTRL PHY_REG(772, 20)
#define I82579_LPI_CTRL_100_ENABLE 0x2000
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 77f81cbb601a..e87e9b01f404 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -64,8 +64,6 @@ static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
-static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
-
static const struct e1000_info *e1000_info_tbl[] = {
[board_82571] = &e1000_82571_info,
[board_82572] = &e1000_82572_info,
@@ -2979,17 +2977,10 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
u32 pages = 0;
/* Workaround Si errata on PCHx - configure jumbo frame flow */
- if (hw->mac.type >= e1000_pch2lan) {
- s32 ret_val;
-
- if (adapter->netdev->mtu > ETH_DATA_LEN)
- ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
- else
- ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
-
- if (ret_val)
- e_dbg("failed to enable jumbo frame workaround mode\n");
- }
+ if ((hw->mac.type >= e1000_pch2lan) &&
+ (adapter->netdev->mtu > ETH_DATA_LEN) &&
+ e1000_lv_jumbo_workaround_ich8lan(hw, true))
+ e_dbg("failed to enable jumbo frame workaround mode\n");
/* Program MC offset vector base */
rctl = er32(RCTL);
@@ -3826,6 +3817,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
break;
}
+ pba = 14;
+ ew32(PBA, pba);
fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH;
fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL;
break;
@@ -4034,6 +4027,12 @@ void e1000e_down(struct e1000_adapter *adapter)
adapter->link_speed = 0;
adapter->link_duplex = 0;
+ /* Disable Si errata workaround on PCHx for jumbo frame flow */
+ if ((hw->mac.type >= e1000_pch2lan) &&
+ (adapter->netdev->mtu > ETH_DATA_LEN) &&
+ e1000_lv_jumbo_workaround_ich8lan(hw, false))
+ e_dbg("failed to disable jumbo frame workaround mode\n");
+
if (!pci_channel_offline(adapter->pdev))
e1000e_reset(adapter);
@@ -4683,11 +4682,11 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct e1000_phy_regs *phy = &adapter->phy_regs;
- if ((er32(STATUS) & E1000_STATUS_LU) &&
+ if (!pm_runtime_suspended((&adapter->pdev->dev)->parent) &&
+ (er32(STATUS) & E1000_STATUS_LU) &&
(adapter->hw.phy.media_type == e1000_media_type_copper)) {
int ret_val;
- pm_runtime_get_sync(&adapter->pdev->dev);
ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr);
ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr);
ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise);
@@ -4698,7 +4697,6 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus);
if (ret_val)
e_warn("Error reading PHY register\n");
- pm_runtime_put_sync(&adapter->pdev->dev);
} else {
/* Do not read PHY registers if link is not up
* Set values to typical power-on defaults
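Context for this hunk: instead of forcing a runtime resume with pm_runtime_get_sync()/pm_runtime_put_sync() just to read PHY registers, the function now skips the reads entirely when the parent is runtime-suspended and falls back to power-on defaults. A minimal sketch of the pattern, with hypothetical helpers:

	static bool link_is_up(struct e1000_hw *hw);		/* hypothetical */
	static void read_phy_regs(struct e1000_hw *hw);		/* hypothetical */
	static void use_defaults(struct e1000_phy_regs *phy);	/* hypothetical */

	static void phy_read_status_sketch(struct device *dev, struct e1000_hw *hw,
					   struct e1000_phy_regs *phy)
	{
		if (!pm_runtime_suspended(dev->parent) && link_is_up(hw))
			read_phy_regs(hw);	/* hardware is powered; safe to touch */
		else
			use_defaults(phy);	/* don't wake a suspended device */
	}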
@@ -5995,15 +5993,24 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
*/
e1000e_release_hw_control(adapter);
+ pci_clear_master(pdev);
+
/* The pci-e switch on some quad port adapters will report a
* correctable error when the MAC transitions from D0 to D3. To
* prevent this we need to mask off the correctable errors on the
* downstream port of the pci-e switch.
+ *
+ * There may be no associated upstream bridge when the PCI
+ * device is assigned to a guest; KVM on Power is one such case.
*/
if (adapter->flags & FLAG_IS_QUAD_PORT) {
struct pci_dev *us_dev = pdev->bus->self;
u16 devctl;
+ if (!us_dev)
+ return 0;
+
pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl);
pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
(devctl & ~PCI_EXP_DEVCTL_CERE));
@@ -6017,38 +6024,73 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
return 0;
}
-#ifdef CONFIG_PCIEASPM
-static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+/**
+ * e1000e_disable_aspm - Disable ASPM states
+ * @pdev: pointer to PCI device struct
+ * @state: bit-mask of ASPM states to disable
+ *
+ * Some devices *must* have certain ASPM states disabled per hardware errata.
+ **/
+static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
+ struct pci_dev *parent = pdev->bus->self;
+ u16 aspm_dis_mask = 0;
+ u16 pdev_aspmc, parent_aspmc;
+
+ switch (state) {
+ case PCIE_LINK_STATE_L0S:
+ case PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1:
+ aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L0S;
+ /* fall-through - can't have L1 without L0s */
+ case PCIE_LINK_STATE_L1:
+ aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L1;
+ break;
+ default:
+ return;
+ }
+
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
+ pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
+
+ if (parent) {
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL,
+ &parent_aspmc);
+ parent_aspmc &= PCI_EXP_LNKCTL_ASPMC;
+ }
+
+ /* Nothing to do if the ASPM states to be disabled already are */
+ if (!(pdev_aspmc & aspm_dis_mask) &&
+ (!parent || !(parent_aspmc & aspm_dis_mask)))
+ return;
+
+ dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
+ (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L0S) ?
+ "L0s" : "",
+ (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L1) ?
+ "L1" : "");
+
+#ifdef CONFIG_PCIEASPM
pci_disable_link_state_locked(pdev, state);
-}
-#else
-static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
-{
- u16 aspm_ctl = 0;
- if (state & PCIE_LINK_STATE_L0S)
- aspm_ctl |= PCI_EXP_LNKCTL_ASPM_L0S;
- if (state & PCIE_LINK_STATE_L1)
- aspm_ctl |= PCI_EXP_LNKCTL_ASPM_L1;
+ /* Double-check ASPM control. If not disabled by the above, the
+ * BIOS is preventing that from happening (or CONFIG_PCIEASPM is
+ * not enabled); override by writing PCI config space directly.
+ */
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
+ pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
+
+ if (!(aspm_dis_mask & pdev_aspmc))
+ return;
+#endif
/* Both device and parent should have the same ASPM setting.
* Disable ASPM in downstream component first and then upstream.
*/
- pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_ctl);
+ pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_dis_mask);
- if (pdev->bus->self)
- pcie_capability_clear_word(pdev->bus->self, PCI_EXP_LNKCTL,
- aspm_ctl);
-}
-#endif
-static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
-{
- dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
- (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
- (state & PCIE_LINK_STATE_L1) ? "L1" : "");
-
- __e1000e_disable_aspm(pdev, state);
+ if (parent)
+ pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
+ aspm_dis_mask);
}
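The mask derivation at the top of the rewritten e1000e_disable_aspm() is worth a second look because of the deliberate fall-through: a request to disable L0s also disables L1, since L1 cannot be kept without L0s here. A self-contained demo, with constants mirroring the Linux values (PCIE_LINK_STATE_L0S = 1, PCIE_LINK_STATE_L1 = 2; LNKCTL ASPM control bits 0x1/0x2) as an assumption:

	#include <stdio.h>

	#define PCIE_LINK_STATE_L0S 1
	#define PCIE_LINK_STATE_L1  2
	#define LNKCTL_ASPM_L0S 0x1
	#define LNKCTL_ASPM_L1  0x2

	static unsigned int aspm_mask(unsigned int state)
	{
		unsigned int mask = 0;

		switch (state) {
		case PCIE_LINK_STATE_L0S:
		case PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1:
			mask |= LNKCTL_ASPM_L0S;
			/* fall through: L1 is always disabled along with L0s */
		case PCIE_LINK_STATE_L1:
			mask |= LNKCTL_ASPM_L1;
			break;
		default:
			return 0;
		}
		return mask;
	}

	int main(void)
	{
		printf("L0s -> 0x%x\n", aspm_mask(PCIE_LINK_STATE_L0S));	/* 0x3 */
		printf("L1  -> 0x%x\n", aspm_mask(PCIE_LINK_STATE_L1));	/* 0x2 */
		return 0;
	}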
#ifdef CONFIG_PM
@@ -6723,10 +6765,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->hw.fc.current_mode = e1000_fc_default;
adapter->hw.phy.autoneg_advertised = 0x2f;
- /* ring size defaults */
- adapter->rx_ring->count = E1000_DEFAULT_RXD;
- adapter->tx_ring->count = E1000_DEFAULT_TXD;
-
/* Initial Wake on LAN setting - If APM wake is enabled in
* the EEPROM, enable the ACPI Magic Packet filter
*/
@@ -6976,6 +7014,10 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM2), board_pch_lpt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index f21a91a299a2..79b58353d849 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -176,7 +176,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
/* Verify phy id and set remaining function pointers */
switch (phy->id) {
- case M88E1545_E_PHY_ID:
+ case M88E1543_E_PHY_ID:
case I347AT4_E_PHY_ID:
case M88E1112_E_PHY_ID:
case M88E1111_I_PHY_ID:
@@ -238,6 +238,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
E1000_EECD_SIZE_EX_SHIFT);
+
/* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
*/
@@ -250,86 +251,52 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
size = 15;
nvm->word_size = 1 << size;
- if (hw->mac.type < e1000_i210) {
- nvm->opcode_bits = 8;
- nvm->delay_usec = 1;
-
- switch (nvm->override) {
- case e1000_nvm_override_spi_large:
- nvm->page_size = 32;
- nvm->address_bits = 16;
- break;
- case e1000_nvm_override_spi_small:
- nvm->page_size = 8;
- nvm->address_bits = 8;
- break;
- default:
- nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
- nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
- 16 : 8;
- break;
- }
- if (nvm->word_size == (1 << 15))
- nvm->page_size = 128;
+ nvm->opcode_bits = 8;
+ nvm->delay_usec = 1;
- nvm->type = e1000_nvm_eeprom_spi;
- } else {
- nvm->type = e1000_nvm_flash_hw;
+ switch (nvm->override) {
+ case e1000_nvm_override_spi_large:
+ nvm->page_size = 32;
+ nvm->address_bits = 16;
+ break;
+ case e1000_nvm_override_spi_small:
+ nvm->page_size = 8;
+ nvm->address_bits = 8;
+ break;
+ default:
+ nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+ nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
+ 16 : 8;
+ break;
}
+ if (nvm->word_size == (1 << 15))
+ nvm->page_size = 128;
+
+ nvm->type = e1000_nvm_eeprom_spi;
/* NVM Function Pointers */
+ nvm->ops.acquire = igb_acquire_nvm_82575;
+ nvm->ops.release = igb_release_nvm_82575;
+ nvm->ops.write = igb_write_nvm_spi;
+ nvm->ops.validate = igb_validate_nvm_checksum;
+ nvm->ops.update = igb_update_nvm_checksum;
+ if (nvm->word_size < (1 << 15))
+ nvm->ops.read = igb_read_nvm_eerd;
+ else
+ nvm->ops.read = igb_read_nvm_spi;
+
+ /* override generic family function pointers for specific descendants */
switch (hw->mac.type) {
case e1000_82580:
nvm->ops.validate = igb_validate_nvm_checksum_82580;
nvm->ops.update = igb_update_nvm_checksum_82580;
- nvm->ops.acquire = igb_acquire_nvm_82575;
- nvm->ops.release = igb_release_nvm_82575;
- if (nvm->word_size < (1 << 15))
- nvm->ops.read = igb_read_nvm_eerd;
- else
- nvm->ops.read = igb_read_nvm_spi;
- nvm->ops.write = igb_write_nvm_spi;
break;
case e1000_i354:
case e1000_i350:
nvm->ops.validate = igb_validate_nvm_checksum_i350;
nvm->ops.update = igb_update_nvm_checksum_i350;
- nvm->ops.acquire = igb_acquire_nvm_82575;
- nvm->ops.release = igb_release_nvm_82575;
- if (nvm->word_size < (1 << 15))
- nvm->ops.read = igb_read_nvm_eerd;
- else
- nvm->ops.read = igb_read_nvm_spi;
- nvm->ops.write = igb_write_nvm_spi;
- break;
- case e1000_i210:
- nvm->ops.validate = igb_validate_nvm_checksum_i210;
- nvm->ops.update = igb_update_nvm_checksum_i210;
- nvm->ops.acquire = igb_acquire_nvm_i210;
- nvm->ops.release = igb_release_nvm_i210;
- nvm->ops.read = igb_read_nvm_srrd_i210;
- nvm->ops.write = igb_write_nvm_srwr_i210;
- nvm->ops.valid_led_default = igb_valid_led_default_i210;
- break;
- case e1000_i211:
- nvm->ops.acquire = igb_acquire_nvm_i210;
- nvm->ops.release = igb_release_nvm_i210;
- nvm->ops.read = igb_read_nvm_i211;
- nvm->ops.valid_led_default = igb_valid_led_default_i210;
- nvm->ops.validate = NULL;
- nvm->ops.update = NULL;
- nvm->ops.write = NULL;
break;
default:
- nvm->ops.validate = igb_validate_nvm_checksum;
- nvm->ops.update = igb_update_nvm_checksum;
- nvm->ops.acquire = igb_acquire_nvm_82575;
- nvm->ops.release = igb_release_nvm_82575;
- if (nvm->word_size < (1 << 15))
- nvm->ops.read = igb_read_nvm_eerd;
- else
- nvm->ops.read = igb_read_nvm_spi;
- nvm->ops.write = igb_write_nvm_spi;
break;
}
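This hunk is a pure consolidation: the generic NVM function pointers are assigned once, and the switch afterwards only overrides what a family actually changes (the i210/i211 cases move to igb_init_nvm_params_i210(), added later in this series). An illustrative sketch of the pattern, with made-up names:

	struct nvm_ops_sketch {
		int (*validate)(void);
		int (*update)(void);
	};

	static int generic_validate(void) { return 0; }
	static int generic_update(void)   { return 0; }
	static int validate_82580(void)   { return 0; }

	static void init_nvm_ops_sketch(struct nvm_ops_sketch *ops, int mac_type)
	{
		ops->validate = generic_validate;	/* common defaults first */
		ops->update = generic_update;

		switch (mac_type) {			/* per-family overrides only */
		case 82580:
			ops->validate = validate_82580;
			break;
		default:
			break;
		}
	}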
@@ -516,6 +483,8 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
case E1000_DEV_ID_I210_FIBER:
case E1000_DEV_ID_I210_SERDES:
case E1000_DEV_ID_I210_SGMII:
+ case E1000_DEV_ID_I210_COPPER_FLASHLESS:
+ case E1000_DEV_ID_I210_SERDES_FLASHLESS:
mac->type = e1000_i210;
break;
case E1000_DEV_ID_I211_COPPER:
@@ -601,6 +570,15 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
/* NVM initialization */
ret_val = igb_init_nvm_params_82575(hw);
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i211:
+ ret_val = igb_init_nvm_params_i210(hw);
+ break;
+ default:
+ break;
+ }
+
if (ret_val)
goto out;
@@ -1163,6 +1141,31 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
}
/**
+ * igb_get_link_up_info_82575 - Get link speed/duplex info
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * This is a wrapper function, if using the serial gigabit media independent
+ * interface, use PCS to retrieve the link speed and duplex information.
+ * Otherwise, use the generic function to get the link speed and duplex info.
+ **/
+static s32 igb_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ s32 ret_val;
+
+ if (hw->phy.media_type != e1000_media_type_copper)
+ ret_val = igb_get_pcs_speed_and_duplex_82575(hw, speed,
+ duplex);
+ else
+ ret_val = igb_get_speed_and_duplex_copper(hw, speed,
+ duplex);
+
+ return ret_val;
+}
+
+/**
* igb_check_for_link_82575 - Check for link
* @hw: pointer to the HW structure
*
@@ -1239,7 +1242,7 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
u16 *duplex)
{
struct e1000_mac_info *mac = &hw->mac;
- u32 pcs;
+ u32 pcs, status;
/* Set up defaults for the return values of this function */
mac->serdes_has_link = false;
@@ -1260,20 +1263,31 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
mac->serdes_has_link = true;
/* Detect and store PCS speed */
- if (pcs & E1000_PCS_LSTS_SPEED_1000) {
+ if (pcs & E1000_PCS_LSTS_SPEED_1000)
*speed = SPEED_1000;
- } else if (pcs & E1000_PCS_LSTS_SPEED_100) {
+ else if (pcs & E1000_PCS_LSTS_SPEED_100)
*speed = SPEED_100;
- } else {
+ else
*speed = SPEED_10;
- }
/* Detect and store PCS duplex */
- if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) {
+ if (pcs & E1000_PCS_LSTS_DUPLEX_FULL)
*duplex = FULL_DUPLEX;
- } else {
+ else
*duplex = HALF_DUPLEX;
+
+ /* Check if it is an I354 2.5Gb backplane connection. */
+ if (mac->type == e1000_i354) {
+ status = rd32(E1000_STATUS);
+ if ((status & E1000_STATUS_2P5_SKU) &&
+ !(status & E1000_STATUS_2P5_SKU_OVER)) {
+ *speed = SPEED_2500;
+ *duplex = FULL_DUPLEX;
+ hw_dbg("2500 Mbs, ");
+ hw_dbg("Full Duplex\n");
+ }
+ }
+
}
return 0;
@@ -1320,7 +1334,7 @@ void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
**/
static s32 igb_reset_hw_82575(struct e1000_hw *hw)
{
- u32 ctrl, icr;
+ u32 ctrl;
s32 ret_val;
/* Prevent the PCI-E bus from sticking if there is no TLP connection
@@ -1365,7 +1379,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
/* Clear any pending interrupt events. */
wr32(E1000_IMC, 0xffffffff);
- icr = rd32(E1000_ICR);
+ rd32(E1000_ICR);
/* Install any alternate MAC address into RAR0 */
ret_val = igb_check_alt_mac_addr(hw);
@@ -1443,11 +1457,18 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
wr32(E1000_CTRL, ctrl);
- /* Clear Go Link Disconnect bit */
- if (hw->mac.type >= e1000_82580) {
+ /* Clear Go Link Disconnect bit on supported devices */
+ switch (hw->mac.type) {
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i210:
+ case e1000_i211:
phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT);
phpm_reg &= ~E1000_82580_PM_GO_LINKD;
wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg);
+ break;
+ default:
+ break;
}
ret_val = igb_setup_serdes_link_82575(hw);
@@ -1470,7 +1491,7 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
switch (hw->phy.id) {
case I347AT4_E_PHY_ID:
case M88E1112_E_PHY_ID:
- case M88E1545_E_PHY_ID:
+ case M88E1543_E_PHY_ID:
case I210_I_PHY_ID:
ret_val = igb_copper_link_setup_m88_gen2(hw);
break;
@@ -2103,10 +2124,9 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
s32 ret_val = 0;
/* BH SW mailbox bit in SW_FW_SYNC */
u16 swmbsw_mask = E1000_SW_SYNCH_MB;
- u32 ctrl, icr;
+ u32 ctrl;
bool global_device_reset = hw->dev_spec._82575.global_device_reset;
-
hw->dev_spec._82575.global_device_reset = false;
/* due to hw errata, global device reset doesn't always
@@ -2165,7 +2185,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
/* Clear any pending interrupt events. */
wr32(E1000_IMC, 0xffffffff);
- icr = rd32(E1000_ICR);
+ rd32(E1000_ICR);
ret_val = igb_reset_mdicnfg_82580(hw);
if (ret_val)
@@ -2500,28 +2520,28 @@ s32 igb_set_eee_i354(struct e1000_hw *hw)
u16 phy_data;
if ((hw->phy.media_type != e1000_media_type_copper) ||
- (phy->id != M88E1545_E_PHY_ID))
+ (phy->id != M88E1543_E_PHY_ID))
goto out;
if (!hw->dev_spec._82575.eee_disable) {
/* Switch to PHY page 18. */
- ret_val = phy->ops.write_reg(hw, E1000_M88E1545_PAGE_ADDR, 18);
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18);
if (ret_val)
goto out;
- ret_val = phy->ops.read_reg(hw, E1000_M88E1545_EEE_CTRL_1,
+ ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1,
&phy_data);
if (ret_val)
goto out;
- phy_data |= E1000_M88E1545_EEE_CTRL_1_MS;
- ret_val = phy->ops.write_reg(hw, E1000_M88E1545_EEE_CTRL_1,
+ phy_data |= E1000_M88E1543_EEE_CTRL_1_MS;
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1,
phy_data);
if (ret_val)
goto out;
/* Return the PHY to page 0. */
- ret_val = phy->ops.write_reg(hw, E1000_M88E1545_PAGE_ADDR, 0);
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
if (ret_val)
goto out;
@@ -2572,7 +2592,7 @@ s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status)
/* Check if EEE is supported on this device. */
if ((hw->phy.media_type != e1000_media_type_copper) ||
- (phy->id != M88E1545_E_PHY_ID))
+ (phy->id != M88E1543_E_PHY_ID))
goto out;
ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
@@ -2728,7 +2748,7 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
.check_for_link = igb_check_for_link_82575,
.rar_set = igb_rar_set,
.read_mac_addr = igb_read_mac_addr_82575,
- .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
+ .get_speed_and_duplex = igb_get_link_up_info_82575,
#ifdef CONFIG_IGB_HWMON
.get_thermal_sensor_data = igb_get_thermal_sensor_data_generic,
.init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic,
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index aa201abb8ad2..978eca31ceda 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -620,6 +620,7 @@
#define E1000_EECD_SIZE_EX_SHIFT 11
#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */
#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/
+#define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */
#define E1000_FLUDONE_ATTEMPTS 20000
#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */
#define E1000_I210_FIFO_SEL_RX 0x00
@@ -627,6 +628,11 @@
#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0)
#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06
#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01
+#define E1000_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */
+/* Secure FLASH mode requires removing MSb */
+#define E1000_I210_FW_PTR_MASK 0x7FFF
+/* Firmware code revision field word offset*/
+#define E1000_I210_FW_VER_OFFSET 328
#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */
#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/
#define E1000_FLUDONE_ATTEMPTS 20000
@@ -665,20 +671,26 @@
#define NVM_INIT_CTRL_4 0x0013
#define NVM_LED_1_CFG 0x001C
#define NVM_LED_0_2_CFG 0x001F
-
-/* NVM version defines */
#define NVM_ETRACK_WORD 0x0042
+#define NVM_ETRACK_HIWORD 0x0043
#define NVM_COMB_VER_OFF 0x0083
#define NVM_COMB_VER_PTR 0x003d
-#define NVM_MAJOR_MASK 0xF000
-#define NVM_MINOR_MASK 0x0FF0
-#define NVM_BUILD_MASK 0x000F
-#define NVM_COMB_VER_MASK 0x00FF
-#define NVM_MAJOR_SHIFT 12
-#define NVM_MINOR_SHIFT 4
-#define NVM_COMB_VER_SHFT 8
-#define NVM_VER_INVALID 0xFFFF
-#define NVM_ETRACK_SHIFT 16
+
+/* NVM version defines */
+#define NVM_MAJOR_MASK 0xF000
+#define NVM_MINOR_MASK 0x0FF0
+#define NVM_IMAGE_ID_MASK 0x000F
+#define NVM_COMB_VER_MASK 0x00FF
+#define NVM_MAJOR_SHIFT 12
+#define NVM_MINOR_SHIFT 4
+#define NVM_COMB_VER_SHFT 8
+#define NVM_VER_INVALID 0xFFFF
+#define NVM_ETRACK_SHIFT 16
+#define NVM_ETRACK_VALID 0x8000
+#define NVM_NEW_DEC_MASK 0x0F00
+#define NVM_HEX_CONV 16
+#define NVM_HEX_TENS 10
+
#define NVM_ETS_CFG 0x003E
#define NVM_ETS_LTHRES_DELTA_MASK 0x07C0
#define NVM_ETS_LTHRES_DELTA_SHIFT 6
@@ -775,7 +787,7 @@
#define I350_I_PHY_ID 0x015403B0
#define M88_VENDOR 0x0141
#define I210_I_PHY_ID 0x01410C00
-#define M88E1545_E_PHY_ID 0x01410EA0
+#define M88E1543_E_PHY_ID 0x01410EA0
/* M88E1000 Specific Registers */
#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
@@ -897,9 +909,9 @@
#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */
#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */
#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */
-#define E1000_M88E1545_PAGE_ADDR 0x16 /* Page Offset Register */
-#define E1000_M88E1545_EEE_CTRL_1 0x0
-#define E1000_M88E1545_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */
+#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */
+#define E1000_M88E1543_EEE_CTRL_1 0x0
+#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */
#define E1000_EEE_ADV_DEV_I354 7
#define E1000_EEE_ADV_ADDR_I354 60
#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 94d7866b9c20..37a9c06a6c68 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -67,6 +67,8 @@ struct e1000_hw;
#define E1000_DEV_ID_I210_FIBER 0x1536
#define E1000_DEV_ID_I210_SERDES 0x1537
#define E1000_DEV_ID_I210_SGMII 0x1538
+#define E1000_DEV_ID_I210_COPPER_FLASHLESS 0x157B
+#define E1000_DEV_ID_I210_SERDES_FLASHLESS 0x157C
#define E1000_DEV_ID_I211_COPPER 0x1539
#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40
#define E1000_DEV_ID_I354_SGMII 0x1F41
@@ -110,6 +112,7 @@ enum e1000_nvm_type {
e1000_nvm_none,
e1000_nvm_eeprom_spi,
e1000_nvm_flash_hw,
+ e1000_nvm_invm,
e1000_nvm_flash_sw
};
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index ddb3cf51b9b9..0c0393316a3a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -335,57 +335,101 @@ s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
}
/**
- * igb_read_nvm_i211 - Read NVM wrapper function for I211
+ * igb_read_invm_word_i210 - Reads OTP
+ * @hw: pointer to the HW structure
+ * @address: the word address (aka eeprom offset) to read
+ * @data: pointer to the data read
+ *
+ * Reads 16-bit words from the OTP. Return error when the word is not
+ * stored in OTP.
+ **/
+static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
+{
+ s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+ u32 invm_dword;
+ u16 i;
+ u8 record_type, word_address;
+
+ for (i = 0; i < E1000_INVM_SIZE; i++) {
+ invm_dword = rd32(E1000_INVM_DATA_REG(i));
+ /* Get record type */
+ record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
+ if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
+ break;
+ if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
+ i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
+ if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
+ i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
+ if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
+ word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
+ if (word_address == address) {
+ *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
+ hw_dbg("Read INVM Word 0x%02x = %x",
+ address, *data);
+ status = E1000_SUCCESS;
+ break;
+ }
+ }
+ }
+ if (status != E1000_SUCCESS)
+ hw_dbg("Requested word 0x%02x not found in OTP\n", address);
+ return status;
+}
+
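A self-contained round-trip of the word-autoload decoding used above. The bit layout here (record type in bits 2:0, word address in bits 15:9, data in bits 31:16) is my reading of the INVM_DWORD_TO_* macros in e1000_i210.h and should be treated as an assumption for the demo:

	#include <stdint.h>
	#include <stdio.h>

	/* Assumed layout of one iNVM word-autoload dword (see lead-in). */
	#define RECORD_TYPE(d)  ((uint8_t)((d) & 0x7))
	#define WORD_ADDRESS(d) ((uint8_t)(((d) & 0x0000FE00) >> 9))
	#define WORD_DATA(d)    ((uint16_t)(((d) & 0xFFFF0000) >> 16))

	int main(void)
	{
		/* Build a dword for address 0x1C, data 0xBEEF, record type 3. */
		uint32_t d = (0xBEEFu << 16) | (0x1Cu << 9) | 0x3u;

		printf("type %u, addr 0x%02x, data 0x%04x\n",
		       RECORD_TYPE(d), WORD_ADDRESS(d), WORD_DATA(d));
		return 0;
	}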
+/**
+ * igb_read_invm_i210 - Read invm wrapper function for I210/I211
* @hw: pointer to the HW structure
* @words: number of words to read
* @data: pointer to the data read
*
* Wrapper function to return data formerly found in the NVM.
**/
-s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data)
+static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
+ u16 words __always_unused, u16 *data)
{
s32 ret_val = E1000_SUCCESS;
/* Only the MAC addr is required to be present in the iNVM */
switch (offset) {
case NVM_MAC_ADDR:
- ret_val = igb_read_invm_i211(hw, offset, &data[0]);
- ret_val |= igb_read_invm_i211(hw, offset+1, &data[1]);
- ret_val |= igb_read_invm_i211(hw, offset+2, &data[2]);
+ ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]);
+ ret_val |= igb_read_invm_word_i210(hw, (u8)offset+1,
+ &data[1]);
+ ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2,
+ &data[2]);
if (ret_val != E1000_SUCCESS)
hw_dbg("MAC Addr not found in iNVM\n");
break;
case NVM_INIT_CTRL_2:
- ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = NVM_INIT_CTRL_2_DEFAULT_I211;
ret_val = E1000_SUCCESS;
}
break;
case NVM_INIT_CTRL_4:
- ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = NVM_INIT_CTRL_4_DEFAULT_I211;
ret_val = E1000_SUCCESS;
}
break;
case NVM_LED_1_CFG:
- ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = NVM_LED_1_CFG_DEFAULT_I211;
ret_val = E1000_SUCCESS;
}
break;
case NVM_LED_0_2_CFG:
- igb_read_invm_i211(hw, offset, data);
+ ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = NVM_LED_0_2_CFG_DEFAULT_I211;
ret_val = E1000_SUCCESS;
}
break;
case NVM_ID_LED_SETTINGS:
- ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = ID_LED_RESERVED_FFFF;
ret_val = E1000_SUCCESS;
@@ -411,48 +455,6 @@ s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
}
/**
- * igb_read_invm_i211 - Reads OTP
- * @hw: pointer to the HW structure
- * @address: the word address (aka eeprom offset) to read
- * @data: pointer to the data read
- *
- * Reads 16-bit words from the OTP. Return error when the word is not
- * stored in OTP.
- **/
-s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data)
-{
- s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
- u32 invm_dword;
- u16 i;
- u8 record_type, word_address;
-
- for (i = 0; i < E1000_INVM_SIZE; i++) {
- invm_dword = rd32(E1000_INVM_DATA_REG(i));
- /* Get record type */
- record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
- if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
- break;
- if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
- i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
- if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
- i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
- if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
- word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
- if (word_address == (u8)address) {
- *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
- hw_dbg("Read INVM Word 0x%02x = %x",
- address, *data);
- status = E1000_SUCCESS;
- break;
- }
- }
- }
- if (status != E1000_SUCCESS)
- hw_dbg("Requested word 0x%02x not found in OTP\n", address);
- return status;
-}
-
-/**
* igb_read_invm_version - Reads iNVM version and image type
* @hw: pointer to the HW structure
* @invm_ver: version structure for the version read
@@ -661,6 +663,23 @@ static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
}
/**
+ * igb_get_flash_presence_i210 - Check if flash device is detected.
+ * @hw: pointer to the HW structure
+ *
+ **/
+bool igb_get_flash_presence_i210(struct e1000_hw *hw)
+{
+ u32 eec = 0;
+ bool ret_val = false;
+
+ eec = rd32(E1000_EECD);
+ if (eec & E1000_EECD_FLASH_DETECTED_I210)
+ ret_val = true;
+
+ return ret_val;
+}
+
+/**
* igb_update_flash_i210 - Commit EEPROM to the flash
* @hw: pointer to the HW structure
*
@@ -786,3 +805,33 @@ s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
{
return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false);
}
+
+/**
+ * igb_init_nvm_params_i210 - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ struct e1000_nvm_info *nvm = &hw->nvm;
+
+ nvm->ops.acquire = igb_acquire_nvm_i210;
+ nvm->ops.release = igb_release_nvm_i210;
+ nvm->ops.valid_led_default = igb_valid_led_default_i210;
+
+ /* NVM Function Pointers */
+ if (igb_get_flash_presence_i210(hw)) {
+ hw->nvm.type = e1000_nvm_flash_hw;
+ nvm->ops.read = igb_read_nvm_srrd_i210;
+ nvm->ops.write = igb_write_nvm_srwr_i210;
+ nvm->ops.validate = igb_validate_nvm_checksum_i210;
+ nvm->ops.update = igb_update_nvm_checksum_i210;
+ } else {
+ hw->nvm.type = e1000_nvm_invm;
+ nvm->ops.read = igb_read_invm_i210;
+ nvm->ops.write = NULL;
+ nvm->ops.validate = NULL;
+ nvm->ops.update = NULL;
+ }
+ return ret_val;
+}
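Worth noting: igb_init_nvm_params_i210() is now the single place where flash-backed and flashless (iNVM/OTP) i210 parts diverge; the later checks in this series (probe, eeprom test, firmware version) all branch on the same igb_get_flash_presence_i210() predicate. A trivial caller-side sketch (the wrapper is hypothetical):

	static const char *nvm_backing_sketch(struct e1000_hw *hw)
	{
		return igb_get_flash_presence_i210(hw) ? "flash" : "iNVM (OTP)";
	}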
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 5caa332e7556..dde3c4b7ea99 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -35,20 +35,19 @@ extern s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
extern s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
-extern s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data);
extern s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
extern void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
extern s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
extern void igb_release_nvm_i210(struct e1000_hw *hw);
extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
-extern s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data);
extern s32 igb_read_invm_version(struct e1000_hw *hw,
struct e1000_fw_version *invm_ver);
extern s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
u16 *data);
extern s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
u16 data);
+extern s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
+extern bool igb_get_flash_presence_i210(struct e1000_hw *hw);
#define E1000_STM_OPCODE 0xDB00
#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index bab556a47fcc..f0dfd41dd4bd 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1171,17 +1171,6 @@ s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
hw_dbg("Half Duplex\n");
}
- /* Check if it is an I354 2.5Gb backplane connection. */
- if (hw->mac.type == e1000_i354) {
- if ((status & E1000_STATUS_2P5_SKU) &&
- !(status & E1000_STATUS_2P5_SKU_OVER)) {
- *speed = SPEED_2500;
- *duplex = FULL_DUPLEX;
- hw_dbg("2500 Mbs, ");
- hw_dbg("Full Duplex\n");
- }
- }
-
return 0;
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index 7f9cd7cbd353..a7db7f3db914 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -709,11 +709,16 @@ out:
**/
void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
{
- u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;
- u16 fw_version;
+ u16 eeprom_verh, eeprom_verl, etrack_test, fw_version;
+ u8 q, hval, rem, result;
+ u16 comb_verh, comb_verl, comb_offset;
memset(fw_vers, 0, sizeof(struct e1000_fw_version));
+ /* Basic EEPROM version numbers, and the bits used, vary by part and by
+ * the tool used to create the NVM images. Check which data format we have.
+ */
+ hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
switch (hw->mac.type) {
case e1000_i211:
igb_read_invm_version(hw, fw_vers);
@@ -721,30 +726,30 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
case e1000_82575:
case e1000_82576:
case e1000_82580:
- case e1000_i354:
- case e1000_i350:
- case e1000_i210:
+ /* Use this format unless an EETRACK ID exists;
+ * in that case use the alternate format
+ */
+ if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) {
+ hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+ fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
+ >> NVM_MAJOR_SHIFT;
+ fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK)
+ >> NVM_MINOR_SHIFT;
+ fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK);
+ goto etrack_id;
+ }
break;
- default:
- return;
- }
- /* basic eeprom version numbers */
- hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
- fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
- fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK);
-
- /* etrack id */
- hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
- hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
- fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) | eeprom_verl;
-
- switch (hw->mac.type) {
case e1000_i210:
- case e1000_i354:
+ if (!(igb_get_flash_presence_i210(hw))) {
+ igb_read_invm_version(hw, fw_vers);
+ return;
+ }
+ /* fall through */
case e1000_i350:
/* find combo image version */
hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
- if ((comb_offset != 0x0) && (comb_offset != NVM_VER_INVALID)) {
+ if ((comb_offset != 0x0) &&
+ (comb_offset != NVM_VER_INVALID)) {
hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
+ 1), 1, &comb_verh);
@@ -760,15 +765,42 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
fw_vers->or_major =
comb_verl >> NVM_COMB_VER_SHFT;
fw_vers->or_build =
- ((comb_verl << NVM_COMB_VER_SHFT)
- | (comb_verh >> NVM_COMB_VER_SHFT));
+ (comb_verl << NVM_COMB_VER_SHFT)
+ | (comb_verh >> NVM_COMB_VER_SHFT);
fw_vers->or_patch =
comb_verh & NVM_COMB_VER_MASK;
}
}
break;
default:
- break;
+ return;
+ }
+ hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+ fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
+ >> NVM_MAJOR_SHIFT;
+
+ /* check for old style version format in newer images */
+ if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) {
+ eeprom_verl = (fw_version & NVM_COMB_VER_MASK);
+ } else {
+ eeprom_verl = (fw_version & NVM_MINOR_MASK)
+ >> NVM_MINOR_SHIFT;
+ }
+ /* Convert the hex-coded minor value to the decimal number spelled by
+ * its digits before assigning it to the output struct. Per tool
+ * output, the value to be converted will not be higher than 99.
+ */
+ q = eeprom_verl / NVM_HEX_CONV;
+ hval = q * NVM_HEX_TENS;
+ rem = eeprom_verl % NVM_HEX_CONV;
+ result = hval + rem;
+ fw_vers->eep_minor = result;
+
+etrack_id:
+ if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) {
+ hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
+ hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
+ fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT)
+ | eeprom_verl;
}
return;
}
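The q/hval/rem arithmetic above is easiest to see with a worked value: the tools store the minor version as hex-coded decimal, so 0x23 should display as 23, not 35. NVM_HEX_CONV = 16 and NVM_HEX_TENS = 10 per the defines hunk earlier. Self-contained check:

	#include <stdint.h>
	#include <stdio.h>

	static uint8_t eep_minor_display(uint16_t eeprom_verl)
	{
		uint8_t q = eeprom_verl / 16;	/* high hex digit */
		uint8_t rem = eeprom_verl % 16;	/* low hex digit */
		return q * 10 + rem;		/* re-read the digits as decimal */
	}

	int main(void)
	{
		printf("%u\n", eep_minor_display(0x23));	/* prints 23 */
		return 0;
	}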
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 6bfc0c43aace..433b7419cb98 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -44,6 +44,7 @@ struct e1000_fw_version {
u32 etrack_id;
u16 eep_major;
u16 eep_minor;
+ u16 eep_build;
u8 invm_major;
u8 invm_minor;
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 60461946f98c..e7266759a10b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -731,15 +731,13 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- if (phy->reset_disable) {
- ret_val = 0;
- goto out;
- }
+ if (phy->reset_disable)
+ return 0;
/* Enable CRS on Tx. This must be set for half-duplex operation. */
ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
if (ret_val)
- goto out;
+ return ret_val;
/* Options:
* MDI/MDI-X = 0 (default)
@@ -780,23 +778,36 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
/* Enable downshift and setting it to X6 */
+ if (phy->id == M88E1543_E_PHY_ID) {
+ phy_data &= ~I347AT4_PSCR_DOWNSHIFT_ENABLE;
+ ret_val =
+ phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = igb_phy_sw_reset(hw);
+ if (ret_val) {
+ hw_dbg("Error committing the PHY changes\n");
+ return ret_val;
+ }
+ }
+
phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK;
phy_data |= I347AT4_PSCR_DOWNSHIFT_6X;
phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE;
ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
if (ret_val)
- goto out;
+ return ret_val;
/* Commit the changes. */
ret_val = igb_phy_sw_reset(hw);
if (ret_val) {
hw_dbg("Error committing the PHY changes\n");
- goto out;
+ return ret_val;
}
-out:
- return ret_val;
+ return 0;
}
/**
@@ -1806,7 +1817,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
phy->cable_length = phy_data / (is_cm ? 100 : 1);
break;
- case M88E1545_E_PHY_ID:
+ case M88E1543_E_PHY_ID:
case I347AT4_E_PHY_ID:
/* Remember the original page select and set it to 7 */
ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 15ea8dc9dad3..6807b098edae 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -343,6 +343,8 @@ struct hwmon_buff {
};
#endif
+#define IGB_RETA_SIZE 128
+
/* board specific private data structure */
struct igb_adapter {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -444,6 +446,10 @@ struct igb_adapter {
struct i2c_algo_bit_data i2c_algo;
struct i2c_adapter i2c_adap;
struct i2c_client *i2c_client;
+ u32 rss_indir_tbl_init;
+ u8 rss_indir_tbl[IGB_RETA_SIZE];
+
+ unsigned long link_check_timeout;
};
#define IGB_FLAG_HAS_MSI (1 << 0)
@@ -455,6 +461,7 @@ struct igb_adapter {
#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 6)
#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7)
#define IGB_FLAG_WOL_SUPPORTED (1 << 8)
+#define IGB_FLAG_NEED_LINK_UPDATE (1 << 9)
/* DMA Coalescing defines */
#define IGB_MIN_TXPBSIZE 20408
@@ -480,6 +487,7 @@ extern int igb_up(struct igb_adapter *);
extern void igb_down(struct igb_adapter *);
extern void igb_reinit_locked(struct igb_adapter *);
extern void igb_reset(struct igb_adapter *);
+extern void igb_write_rss_indir_tbl(struct igb_adapter *);
extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
extern int igb_setup_tx_resources(struct igb_ring *);
extern int igb_setup_rx_resources(struct igb_ring *);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 85fe7b52f435..48cbc833b051 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -172,10 +172,7 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
SUPPORTED_Autoneg |
SUPPORTED_Pause);
ecmd->advertising = ADVERTISED_FIBRE;
- if (hw->mac.type == e1000_i354) {
- ecmd->supported |= SUPPORTED_2500baseX_Full;
- ecmd->advertising |= ADVERTISED_2500baseX_Full;
- }
+
if ((eth_flags->e1000_base_lx) || (eth_flags->e1000_base_sx)) {
ecmd->supported |= SUPPORTED_1000baseT_Full;
ecmd->advertising |= ADVERTISED_1000baseT_Full;
@@ -209,16 +206,23 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
status = rd32(E1000_STATUS);
if (status & E1000_STATUS_LU) {
- if ((hw->mac.type == e1000_i354) &&
- (status & E1000_STATUS_2P5_SKU) &&
- !(status & E1000_STATUS_2P5_SKU_OVER))
- ecmd->speed = SPEED_2500;
- else if (status & E1000_STATUS_SPEED_1000)
+ if (hw->mac.type == e1000_i354) {
+ if ((status & E1000_STATUS_2P5_SKU) &&
+ !(status & E1000_STATUS_2P5_SKU_OVER)) {
+ ecmd->supported = SUPPORTED_2500baseX_Full;
+ ecmd->advertising = ADVERTISED_2500baseX_Full;
+ ecmd->speed = SPEED_2500;
+ } else {
+ ecmd->supported = SUPPORTED_1000baseT_Full;
+ ecmd->advertising = ADVERTISED_1000baseT_Full;
+ }
+ } else if (status & E1000_STATUS_SPEED_1000) {
ecmd->speed = SPEED_1000;
- else if (status & E1000_STATUS_SPEED_100)
+ } else if (status & E1000_STATUS_SPEED_100) {
ecmd->speed = SPEED_100;
- else
+ } else {
ecmd->speed = SPEED_10;
+ }
if ((status & E1000_STATUS_FD) ||
hw->phy.media_type != e1000_media_type_copper)
ecmd->duplex = DUPLEX_FULL;
@@ -1335,12 +1339,23 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
{
+ struct e1000_hw *hw = &adapter->hw;
+
*data = 0;
- /* Validate eeprom on all parts but i211 */
- if (adapter->hw.mac.type != e1000_i211) {
+ /* Validate eeprom on all parts but flashless */
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i211:
+ if (igb_get_flash_presence_i210(hw)) {
+ if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0)
+ *data = 2;
+ }
+ break;
+ default:
if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0)
*data = 2;
+ break;
}
return *data;
@@ -2672,7 +2687,9 @@ static int igb_set_eee(struct net_device *netdev,
igb_set_eee_i350(hw);
/* reset link */
- if (!netif_running(netdev))
+ if (netif_running(netdev))
+ igb_reinit_locked(adapter);
+ else
igb_reset(adapter);
}
@@ -2771,6 +2788,90 @@ static void igb_ethtool_complete(struct net_device *netdev)
pm_runtime_put(&adapter->pdev->dev);
}
+static u32 igb_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return IGB_RETA_SIZE;
+}
+
+static int igb_get_rxfh_indir(struct net_device *netdev, u32 *indir)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ for (i = 0; i < IGB_RETA_SIZE; i++)
+ indir[i] = adapter->rss_indir_tbl[i];
+
+ return 0;
+}
+
+void igb_write_rss_indir_tbl(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 reg = E1000_RETA(0);
+ u32 shift = 0;
+ int i = 0;
+
+ switch (hw->mac.type) {
+ case e1000_82575:
+ shift = 6;
+ break;
+ case e1000_82576:
+ /* 82576 supports 2 RSS queues for SR-IOV */
+ if (adapter->vfs_allocated_count)
+ shift = 3;
+ break;
+ default:
+ break;
+ }
+
+ while (i < IGB_RETA_SIZE) {
+ u32 val = 0;
+ int j;
+
+ for (j = 3; j >= 0; j--) {
+ val <<= 8;
+ val |= adapter->rss_indir_tbl[i + j];
+ }
+
+ wr32(reg, val << shift);
+ reg += 4;
+ i += 4;
+ }
+}
+
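A self-contained check of the byte packing in igb_write_rss_indir_tbl(): the j = 3..0 loop shifts earlier bytes up, so entry i lands in the least significant byte of each 32-bit RETA register:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t tbl[4] = { 0x00, 0x01, 0x02, 0x03 };
		uint32_t val = 0;
		int j;

		for (j = 3; j >= 0; j--) {	/* same loop as the driver */
			val <<= 8;
			val |= tbl[j];
		}
		printf("0x%08x\n", val);	/* prints 0x03020100 */
		return 0;
	}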
+static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int i;
+ u32 num_queues;
+
+ num_queues = adapter->rss_queues;
+
+ switch (hw->mac.type) {
+ case e1000_82576:
+ /* 82576 supports 2 RSS queues for SR-IOV */
+ if (adapter->vfs_allocated_count)
+ num_queues = 2;
+ break;
+ default:
+ break;
+ }
+
+ /* Verify user input. */
+ for (i = 0; i < IGB_RETA_SIZE; i++)
+ if (indir[i] >= num_queues)
+ return -EINVAL;
+
+ for (i = 0; i < IGB_RETA_SIZE; i++)
+ adapter->rss_indir_tbl[i] = indir[i];
+
+ igb_write_rss_indir_tbl(adapter);
+
+ return 0;
+}
+
static const struct ethtool_ops igb_ethtool_ops = {
.get_settings = igb_get_settings,
.set_settings = igb_set_settings,
@@ -2804,6 +2905,9 @@ static const struct ethtool_ops igb_ethtool_ops = {
.set_eee = igb_set_eee,
.get_module_info = igb_get_module_info,
.get_module_eeprom = igb_get_module_eeprom,
+ .get_rxfh_indir_size = igb_get_rxfh_indir_size,
+ .get_rxfh_indir = igb_get_rxfh_indir,
+ .set_rxfh_indir = igb_set_rxfh_indir,
.begin = igb_ethtool_begin,
.complete = igb_ethtool_complete,
};
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index c1d72c03cb59..8cf44f2a8ccd 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -62,7 +62,7 @@
#define MAJ 5
#define MIN 0
-#define BUILD 3
+#define BUILD 5
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
char igb_driver_name[] = "igb";
@@ -85,6 +85,8 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
@@ -1013,7 +1015,7 @@ static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
adapter->q_vector[v_idx] = NULL;
netif_napi_del(&q_vector->napi);
- /* ixgbe_get_stats64() might access the rings on this vector,
+ /* igb_get_stats64() might access the rings on this vector,
* we must wait a grace period before freeing it.
*/
kfree_rcu(q_vector, rcu);
@@ -1669,6 +1671,8 @@ void igb_down(struct igb_adapter *adapter)
igb_irq_disable(adapter);
+ adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
+
for (i = 0; i < adapter->num_q_vectors; i++) {
napi_synchronize(&(adapter->q_vector[i]->napi));
napi_disable(&(adapter->q_vector[i]->napi));
@@ -1929,12 +1933,17 @@ void igb_set_fw_version(struct igb_adapter *adapter)
igb_get_fw_version(hw, &fw);
switch (hw->mac.type) {
+ case e1000_i210:
case e1000_i211:
- snprintf(adapter->fw_version, sizeof(adapter->fw_version),
- "%2d.%2d-%d",
- fw.invm_major, fw.invm_minor, fw.invm_img_type);
- break;
-
+ if (!(igb_get_flash_presence_i210(hw))) {
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%2d.%2d-%d",
+ fw.invm_major, fw.invm_minor,
+ fw.invm_img_type);
+ break;
+ }
+ /* fall through */
default:
/* if option is rom valid, display its version too */
if (fw.or_valid) {
@@ -1944,11 +1953,16 @@ void igb_set_fw_version(struct igb_adapter *adapter)
fw.eep_major, fw.eep_minor, fw.etrack_id,
fw.or_major, fw.or_build, fw.or_patch);
/* no option rom */
- } else {
+ } else if (fw.etrack_id != 0x0000) {
snprintf(adapter->fw_version,
- sizeof(adapter->fw_version),
- "%d.%d, 0x%08x",
- fw.eep_major, fw.eep_minor, fw.etrack_id);
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x",
+ fw.eep_major, fw.eep_minor, fw.etrack_id);
+ } else {
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.eep_build);
}
break;
}
@@ -2166,15 +2180,28 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
hw->mac.ops.reset_hw(hw);
- /* make sure the NVM is good , i211 parts have special NVM that
- * doesn't contain a checksum
+ /* make sure the NVM is good; i210/i211 parts can have special NVM
+ * that doesn't contain a checksum
*/
- if (hw->mac.type != e1000_i211) {
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i211:
+ if (igb_get_flash_presence_i210(hw)) {
+ if (hw->nvm.ops.validate(hw) < 0) {
+ dev_err(&pdev->dev,
+ "The NVM Checksum Is Not Valid\n");
+ err = -EIO;
+ goto err_eeprom;
+ }
+ }
+ break;
+ default:
if (hw->nvm.ops.validate(hw) < 0) {
dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
err = -EIO;
goto err_eeprom;
}
+ break;
}
/* copy the MAC address out of the NVM */
@@ -2342,7 +2369,14 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
"Width x1" : "unknown"), netdev->dev_addr);
}
- ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
+ if ((hw->mac.type >= e1000_i210 ||
+ igb_get_flash_presence_i210(hw))) {
+ ret_val = igb_read_part_string(hw, part_str,
+ E1000_PBANUM_LENGTH);
+ } else {
+ ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+ }
+
if (ret_val)
strcpy(part_str, "Unknown");
dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
@@ -2436,6 +2470,11 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
int err = 0;
int i;
+ if (!adapter->msix_entries) {
+ err = -EPERM;
+ goto out;
+ }
+
if (!num_vfs)
goto out;
else if (old_vfs && old_vfs == num_vfs)
@@ -3096,7 +3135,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 mrqc, rxcsum;
- u32 j, num_rx_queues, shift = 0;
+ u32 j, num_rx_queues;
static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741,
0xB08FA343, 0xCB2BCAD0, 0xB4307BAE,
0xA32DCB77, 0x0CF23080, 0x3BB7426A,
@@ -3109,35 +3148,21 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
num_rx_queues = adapter->rss_queues;
switch (hw->mac.type) {
- case e1000_82575:
- shift = 6;
- break;
case e1000_82576:
/* 82576 supports 2 RSS queues for SR-IOV */
- if (adapter->vfs_allocated_count) {
- shift = 3;
+ if (adapter->vfs_allocated_count)
num_rx_queues = 2;
- }
break;
default:
break;
}
- /* Populate the indirection table 4 entries at a time. To do this
- * we are generating the results for n and n+2 and then interleaving
- * those with the results with n+1 and n+3.
- */
- for (j = 0; j < 32; j++) {
- /* first pass generates n and n+2 */
- u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues;
- u32 reta = (base & 0x07800780) >> (7 - shift);
-
- /* second pass generates n+1 and n+3 */
- base += 0x00010001 * num_rx_queues;
- reta |= (base & 0x07800780) << (1 + shift);
-
- wr32(E1000_RETA(j), reta);
+ if (adapter->rss_indir_tbl_init != num_rx_queues) {
+ for (j = 0; j < IGB_RETA_SIZE; j++)
+ adapter->rss_indir_tbl[j] = (j * num_rx_queues) / IGB_RETA_SIZE;
+ adapter->rss_indir_tbl_init = num_rx_queues;
}
+ igb_write_rss_indir_tbl(adapter);
/* Disable raw packet checksumming so that RSS hash is placed in
* descriptor on writeback. No need to enable TCP/UDP/IP checksum
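The replacement default fill, (j * num_rx_queues) / IGB_RETA_SIZE, spreads the 128 indirection entries evenly across the active queues and is much simpler than the old interleaving math, while matching what the new ethtool get/set path expects. Quick self-contained check with 4 queues:

	#include <stdio.h>

	int main(void)
	{
		unsigned int num_rx_queues = 4, j;

		for (j = 0; j < 128; j += 32)	/* entries 0..31 -> q0, 32..63 -> q1, ... */
			printf("entry %3u -> queue %u\n", j,
			       (j * num_rx_queues) / 128);
		return 0;
	}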
@@ -3844,7 +3869,6 @@ bool igb_has_link(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
bool link_active = false;
- s32 ret_val = 0;
/* get_link_status is set on LSC (link status) interrupt or
* rx sequence error interrupt. get_link_status will stay
@@ -3853,22 +3877,28 @@ bool igb_has_link(struct igb_adapter *adapter)
*/
switch (hw->phy.media_type) {
case e1000_media_type_copper:
- if (hw->mac.get_link_status) {
- ret_val = hw->mac.ops.check_for_link(hw);
- link_active = !hw->mac.get_link_status;
- } else {
- link_active = true;
- }
- break;
+ if (!hw->mac.get_link_status)
+ return true;
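+ /* fall through: copper shares the serdes link check below */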
case e1000_media_type_internal_serdes:
- ret_val = hw->mac.ops.check_for_link(hw);
- link_active = hw->mac.serdes_has_link;
+ hw->mac.ops.check_for_link(hw);
+ link_active = !hw->mac.get_link_status;
break;
default:
case e1000_media_type_unknown:
break;
}
+ if (((hw->mac.type == e1000_i210) ||
+ (hw->mac.type == e1000_i211)) &&
+ (hw->phy.id == I210_I_PHY_ID)) {
+ if (!netif_carrier_ok(adapter->netdev)) {
+ adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
+ } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
+ adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
+ adapter->link_check_timeout = jiffies;
+ }
+ }
+
return link_active;
}
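Context for the block above: on i210/i211 internal PHYs the driver now debounces link-up reports for one second. link_check_timeout is stamped when the flag is first raised, and the watchdog (next hunk) holds link down until time_after(jiffies, timeout + HZ) becomes true. A self-contained demo of the wrap-safe comparison, assuming HZ = 100 for illustration; the time_after() definition mirrors the kernel's:

	#include <stdio.h>

	#define HZ 100
	#define time_after(a, b) ((long)((b) - (a)) < 0)

	int main(void)
	{
		unsigned long jiffies = 0;
		unsigned long link_check_timeout = jiffies;	/* stamped at flag raise */

		jiffies += HZ / 2;	/* 0.5 s later: still within the debounce */
		printf("%d\n", time_after(jiffies, link_check_timeout + HZ));	/* 0 */

		jiffies += HZ;		/* 1.5 s total: debounce has expired */
		printf("%d\n", time_after(jiffies, link_check_timeout + HZ));	/* 1 */
		return 0;
	}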
@@ -3913,6 +3943,14 @@ static void igb_watchdog_task(struct work_struct *work)
int i;
link = igb_has_link(adapter);
+
+ if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
+ if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
+ adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
+ else
+ link = false;
+ }
+
if (link) {
/* Cancel scheduled suspend requests. */
pm_runtime_resume(netdev->dev.parent);
@@ -4037,9 +4075,14 @@ static void igb_watchdog_task(struct work_struct *work)
igb_ptp_rx_hang(adapter);
/* Reset the timer */
- if (!test_bit(__IGB_DOWN, &adapter->state))
- mod_timer(&adapter->watchdog_timer,
- round_jiffies(jiffies + 2 * HZ));
+ if (!test_bit(__IGB_DOWN, &adapter->state)) {
+ if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + HZ));
+ else
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + 2 * HZ));
+ }
}
enum latency_range {
@@ -4814,6 +4857,10 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL;
}
+ /* adjust max frame to be at least the size of a standard frame */
+ if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
+ max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
+
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
msleep(1);
@@ -4865,6 +4912,8 @@ void igb_update_stats(struct igb_adapter *adapter,
bytes = 0;
packets = 0;
+
+ rcu_read_lock();
for (i = 0; i < adapter->num_rx_queues; i++) {
u32 rqdpc = rd32(E1000_RQDPC(i));
struct igb_ring *ring = adapter->rx_ring[i];
@@ -4900,6 +4949,7 @@ void igb_update_stats(struct igb_adapter *adapter,
}
net_stats->tx_bytes = bytes;
net_stats->tx_packets = packets;
+ rcu_read_unlock();
/* read stats registers */
adapter->stats.crcerrs += rd32(E1000_CRCERRS);
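[Editor's sketch] The new rcu_read_lock()/rcu_read_unlock() pair brackets the window in which the per-queue ring pointers are dereferenced, so a concurrent queue reconfiguration cannot free a ring mid-walk. The reader-side shape of the pattern (illustration, not additional driver code):

	rcu_read_lock();
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];
		/* ... sample ring counters ... */
	}
	rcu_read_unlock();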
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 7e8c477b0ab9..5a54e3dc535d 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -97,14 +97,14 @@ static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
{
struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
struct e1000_hw *hw = &igb->hw;
+ u32 lo, hi;
u64 val;
- u32 lo, hi, jk;
/* The timestamp latches on lowest register read. For the 82580
* the lowest register is SYSTIMR instead of SYSTIML. However we only
* need to provide nanosecond resolution, so we just ignore it.
*/
- jk = rd32(E1000_SYSTIMR);
+ rd32(E1000_SYSTIMR);
lo = rd32(E1000_SYSTIML);
hi = rd32(E1000_SYSTIMH);
@@ -118,13 +118,13 @@ static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts)
{
struct e1000_hw *hw = &adapter->hw;
- u32 sec, nsec, jk;
+ u32 sec, nsec;
/* The timestamp latches on lowest register read. For I210/I211, the
* lowest register is SYSTIMR. Since we only need to provide nanosecond
* resolution, we can ignore it.
*/
- jk = rd32(E1000_SYSTIMR);
+ rd32(E1000_SYSTIMR);
nsec = rd32(E1000_SYSTIML);
sec = rd32(E1000_SYSTIMH);
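[Editor's sketch] In both readers the throwaway access to SYSTIMR is what latches SYSTIML/SYSTIMH for a coherent sample, which is why the old jk variable could simply be dropped. The 82580-style composition of the latched pair (assumes the driver's rd32() with hw in scope; the i210/i211 reader instead treats SYSTIML/SYSTIMH as nanoseconds/seconds):

	u32 lo, hi;
	u64 val;

	rd32(E1000_SYSTIMR);          /* latch; value itself discarded */
	lo = rd32(E1000_SYSTIML);
	hi = rd32(E1000_SYSTIMH);
	val = ((u64)hi << 32) | lo;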
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index fce3e92f9d11..9f6b236828e6 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -718,8 +718,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
txdr->size = ALIGN(txdr->size, 4096);
- txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
- GFP_KERNEL | __GFP_ZERO);
+ txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+ GFP_KERNEL);
if (!txdr->desc) {
vfree(txdr->buffer_info);
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index a6494e5daffe..0ac6b11c6e4e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -618,9 +618,8 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7)
#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8)
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
-#define IXGBE_FLAG2_PTP_ENABLED (u32)(1 << 10)
-#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 11)
-#define IXGBE_FLAG2_BRIDGE_MODE_VEB (u32)(1 << 12)
+#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10)
+#define IXGBE_FLAG2_BRIDGE_MODE_VEB (u32)(1 << 11)
/* Tx fast path data */
int num_tx_queues;
@@ -754,7 +753,7 @@ enum ixgbe_state_t {
__IXGBE_DOWN,
__IXGBE_SERVICE_SCHED,
__IXGBE_IN_SFP_INIT,
- __IXGBE_READ_I2C,
+ __IXGBE_PTP_RUNNING,
};
struct ixgbe_cb {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 4a5bfb6b3af0..a26f3fee4f35 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1018,8 +1018,17 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
u16 sfp_addr = 0;
u16 sfp_data = 0;
u16 sfp_stat = 0;
+ u16 gssr;
u32 i;
+ if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+ gssr = IXGBE_GSSR_PHY1_SM;
+ else
+ gssr = IXGBE_GSSR_PHY0_SM;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
+ return IXGBE_ERR_SWFW_SYNC;
+
if (hw->phy.type == ixgbe_phy_nl) {
/*
* phy SDA/SCL registers are at addresses 0xC30A to
@@ -1028,17 +1037,17 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
*/
sfp_addr = (dev_addr << 8) + byte_offset;
sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
- hw->phy.ops.write_reg(hw,
- IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
- MDIO_MMD_PMAPMD,
- sfp_addr);
+ hw->phy.ops.write_reg_mdi(hw,
+ IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
+ MDIO_MMD_PMAPMD,
+ sfp_addr);
/* Poll status */
for (i = 0; i < 100; i++) {
- hw->phy.ops.read_reg(hw,
- IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
- MDIO_MMD_PMAPMD,
- &sfp_stat);
+ hw->phy.ops.read_reg_mdi(hw,
+ IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
+ MDIO_MMD_PMAPMD,
+ &sfp_stat);
sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
break;
@@ -1052,8 +1061,8 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
}
/* Read data */
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
- MDIO_MMD_PMAPMD, &sfp_data);
+ hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
+ MDIO_MMD_PMAPMD, &sfp_data);
*eeprom_data = (u8)(sfp_data >> 8);
} else {
@@ -1061,6 +1070,7 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
}
out:
+ hw->mac.ops.release_swfw_sync(hw, gssr);
return status;
}
@@ -1321,11 +1331,13 @@ static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
static struct ixgbe_phy_operations phy_ops_82598 = {
.identify = &ixgbe_identify_phy_generic,
- .identify_sfp = &ixgbe_identify_sfp_module_generic,
+ .identify_sfp = &ixgbe_identify_module_generic,
.init = &ixgbe_init_phy_ops_82598,
.reset = &ixgbe_reset_phy_generic,
.read_reg = &ixgbe_read_phy_reg_generic,
.write_reg = &ixgbe_write_phy_reg_generic,
+ .read_reg_mdi = &ixgbe_read_phy_reg_mdi,
+ .write_reg_mdi = &ixgbe_write_phy_reg_mdi,
.setup_link = &ixgbe_setup_phy_link_generic,
.setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 0b82d38bc97d..007a0083a636 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -49,6 +49,7 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
+static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete);
static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
@@ -58,6 +59,10 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
+static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data);
static bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
{
@@ -137,11 +142,13 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
goto setup_sfp_out;
}
- hw->eeprom.ops.read(hw, ++data_offset, &data_value);
+ if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
+ goto setup_sfp_err;
while (data_value != 0xffff) {
IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
IXGBE_WRITE_FLUSH(hw);
- hw->eeprom.ops.read(hw, ++data_offset, &data_value);
+ if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
+ goto setup_sfp_err;
}
/* Release the semaphore */
@@ -187,6 +194,17 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
setup_sfp_out:
return ret_val;
+
+setup_sfp_err:
+ /* Release the semaphore */
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+ /* Delay obtaining semaphore again to allow FW access,
+ * semaphore_delay is in ms usleep_range needs us.
+ */
+ usleep_range(hw->eeprom.semaphore_delay * 1000,
+ hw->eeprom.semaphore_delay * 2000);
+ hw_err(hw, "eeprom read at offset %d failed\n", data_offset);
+ return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
}
static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
@@ -219,6 +237,25 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
s32 ret_val = 0;
+ u32 esdp;
+
+ if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
+ /* Store flag indicating I2C bus access control unit. */
+ hw->phy.qsfp_shared_i2c_bus = true;
+
+ /* Initialize access to QSFP+ I2C bus */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp |= IXGBE_ESDP_SDP0_DIR;
+ esdp &= ~IXGBE_ESDP_SDP1_DIR;
+ esdp &= ~IXGBE_ESDP_SDP0;
+ esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
+ esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+
+ phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_82599;
+ phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_82599;
+ }
/* Identify the PHY or SFP module */
ret_val = phy->ops.identify(hw);
@@ -342,8 +379,13 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
if (hw->phy.multispeed_fiber) {
*speed |= IXGBE_LINK_SPEED_10GB_FULL |
- IXGBE_LINK_SPEED_1GB_FULL;
- *autoneg = true;
+ IXGBE_LINK_SPEED_1GB_FULL;
+
+ /* QSFP must not enable auto-negotiation */
+ if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
+ *autoneg = false;
+ else
+ *autoneg = true;
}
out:
@@ -397,6 +439,9 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_LS:
media_type = ixgbe_media_type_fiber_lco;
break;
+ case IXGBE_DEV_ID_82599_QSFP_SF_QP:
+ media_type = ixgbe_media_type_fiber_qsfp;
+ break;
default:
media_type = ixgbe_media_type_unknown;
break;
@@ -406,6 +451,24 @@ out:
}
/**
+ * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
+ * @hw: pointer to hardware structure
+ *
+ * Disables link, should be called during D3 power down sequence.
+ *
+ */
+static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
+{
+ u32 autoc2_reg;
+
+ if (!hw->mng_fw_enabled && !hw->wol_enabled) {
+ autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
+ }
+}
+
+/**
* ixgbe_start_mac_link_82599 - Setup MAC link settings
* @hw: pointer to hardware structure
* @autoneg_wait_to_complete: true when waiting for completion is needed
@@ -527,6 +590,75 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber
+ * @hw: pointer to hardware structure
+ * @speed: link speed to set
+ *
+ * We set the module speed differently for fixed fiber. Since the
+ * surrounding multi-speed path has no error value to propagate, if
+ * we detect an error here we just log it and exit.
+ */
+static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed)
+{
+ s32 status;
+ u8 rs, eeprom_data;
+
+ switch (speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ /* one bit mask same as setting on */
+ rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
+ break;
+ default:
+ hw_dbg(hw, "Invalid fixed module speed\n");
+ return;
+ }
+
+ /* Set RS0 */
+ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ &eeprom_data);
+ if (status) {
+ hw_dbg(hw, "Failed to read Rx Rate Select RS0\n");
+ goto out;
+ }
+
+ eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
+
+ status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ eeprom_data);
+ if (status) {
+ hw_dbg(hw, "Failed to write Rx Rate Select RS0\n");
+ goto out;
+ }
+
+ /* Set RS1 */
+ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ &eeprom_data);
+ if (status) {
+ hw_dbg(hw, "Failed to read Rx Rate Select RS1\n");
+ goto out;
+ }
+
+ eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
+
+ status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ eeprom_data);
+ if (status) {
+ hw_dbg(hw, "Failed to write Rx Rate Select RS1\n");
+ goto out;
+ }
+out:
+ return;
+}
+
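[Editor's sketch] Both the RS0 and RS1 updates above are the same read-modify-write against the SFF-8472 diagnostic page at IXGBE_I2C_EEPROM_DEV_ADDR2: clear the rate-select bits, then set the requested rate. The update rule in isolation (illustration only):

	/* new = (old with select bits cleared) OR requested rate bits */
	static u8 rs_rmw(u8 old, u8 select_mask, u8 rs)
	{
		return (old & ~select_mask) | rs;
	}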
+/**
* ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
* @hw: pointer to hardware structure
* @speed: new link speed
@@ -573,9 +705,19 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
goto out;
/* Set the module link speed */
- esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- IXGBE_WRITE_FLUSH(hw);
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber:
+ esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ break;
+ case ixgbe_media_type_fiber_qsfp:
+ /* QSFP module automatically detects MAC link speed */
+ break;
+ default:
+ hw_dbg(hw, "Unexpected media type.\n");
+ break;
+ }
/* Allow module to change analog characteristics (1G->10G) */
msleep(40);
@@ -625,10 +767,24 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
goto out;
/* Set the module link speed */
- esdp_reg &= ~IXGBE_ESDP_SDP5;
- esdp_reg |= IXGBE_ESDP_SDP5_DIR;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- IXGBE_WRITE_FLUSH(hw);
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber_fixed:
+ ixgbe_set_fiber_fixed_speed(hw,
+ IXGBE_LINK_SPEED_1GB_FULL);
+ break;
+ case ixgbe_media_type_fiber:
+ esdp_reg &= ~IXGBE_ESDP_SDP5;
+ esdp_reg |= IXGBE_ESDP_SDP5_DIR;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ break;
+ case ixgbe_media_type_fiber_qsfp:
+ /* QSFP module automatically detects MAC link speed */
+ break;
+ default:
+ hw_dbg(hw, "Unexpected media type.\n");
+ break;
+ }
/* Allow module to change analog characteristics (10G->1G) */
msleep(40);
@@ -1872,7 +2028,7 @@ static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
goto out;
else
- status = ixgbe_identify_sfp_module_generic(hw);
+ status = ixgbe_identify_module_generic(hw);
}
/* Set PHY type none if no PHY detected */
@@ -1978,10 +2134,12 @@ sfp_check:
switch (hw->phy.type) {
case ixgbe_phy_sfp_passive_tyco:
case ixgbe_phy_sfp_passive_unknown:
+ case ixgbe_phy_qsfp_passive_unknown:
physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
break;
case ixgbe_phy_sfp_ftl_active:
case ixgbe_phy_sfp_active_unknown:
+ case ixgbe_phy_qsfp_active_unknown:
physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
break;
case ixgbe_phy_sfp_avago:
@@ -1999,6 +2157,15 @@ sfp_check:
else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
break;
+ case ixgbe_phy_qsfp_intel:
+ case ixgbe_phy_qsfp_unknown:
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g);
+ if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+ else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ break;
default:
break;
}
@@ -2045,6 +2212,7 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
{
s32 status = IXGBE_ERR_EEPROM_VERSION;
u16 fw_offset, fw_ptp_cfg_offset;
+ u16 offset;
u16 fw_version = 0;
/* firmware check is only necessary for SFI devices */
@@ -2054,29 +2222,35 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
}
/* get the offset to the Firmware Module block */
- hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
+ offset = IXGBE_FW_PTR;
+ if (hw->eeprom.ops.read(hw, offset, &fw_offset))
+ goto fw_version_err;
if ((fw_offset == 0) || (fw_offset == 0xFFFF))
goto fw_version_out;
/* get the offset to the Pass Through Patch Configuration block */
- hw->eeprom.ops.read(hw, (fw_offset +
- IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
- &fw_ptp_cfg_offset);
+ offset = fw_offset + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR;
+ if (hw->eeprom.ops.read(hw, offset, &fw_ptp_cfg_offset))
+ goto fw_version_err;
if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
goto fw_version_out;
/* get the firmware version */
- hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
- IXGBE_FW_PATCH_VERSION_4),
- &fw_version);
+ offset = fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4;
+ if (hw->eeprom.ops.read(hw, offset, &fw_version))
+ goto fw_version_err;
if (fw_version > 0x5)
status = 0;
fw_version_out:
return status;
+
+fw_version_err:
+ hw_err(hw, "eeprom read at offset %d failed\n", offset);
+ return IXGBE_ERR_EEPROM_VERSION;
}
/**
@@ -2236,6 +2410,112 @@ reset_pipeline_out:
return ret_val;
}
+/**
+ * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ u32 esdp;
+ s32 status;
+ s32 timeout = 200;
+
+ if (hw->phy.qsfp_shared_i2c_bus) {
+ /* Acquire I2C bus ownership. */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp |= IXGBE_ESDP_SDP0;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+
+ while (timeout) {
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ if (esdp & IXGBE_ESDP_SDP1)
+ break;
+
+ usleep_range(5000, 10000);
+ timeout--;
+ }
+
+ if (!timeout) {
+ hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n");
+ status = IXGBE_ERR_I2C;
+ goto release_i2c_access;
+ }
+ }
+
+ status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
+
+release_i2c_access:
+ if (hw->phy.qsfp_shared_i2c_bus) {
+ /* Release I2C bus ownership. */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp &= ~IXGBE_ESDP_SDP0;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ u32 esdp;
+ s32 status;
+ s32 timeout = 200;
+
+ if (hw->phy.qsfp_shared_i2c_bus) {
+ /* Acquire I2C bus ownership. */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp |= IXGBE_ESDP_SDP0;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+
+ while (timeout) {
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ if (esdp & IXGBE_ESDP_SDP1)
+ break;
+
+ usleep_range(5000, 10000);
+ timeout--;
+ }
+
+ if (!timeout) {
+ hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n");
+ status = IXGBE_ERR_I2C;
+ goto release_i2c_access;
+ }
+ }
+
+ status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
+
+release_i2c_access:
+ if (hw->phy.qsfp_shared_i2c_bus) {
+ /* Release I2C bus ownership. */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp &= ~IXGBE_ESDP_SDP0;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
+ return status;
+}
+
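[Editor's sketch] Both accessors above wrap the generic I2C helpers in the same request/grant handshake for the shared QSFP bus: raise SDP0 to request ownership, poll SDP1 for the grant (up to 200 tries of 5-10 ms), do the transfer, then drop SDP0 whether or not the transfer happened. Distilled under those assumptions (do_xfer stands in for the generic byte helper; illustration only):

	static s32 qsfp_shared_bus_op(struct ixgbe_hw *hw,
				      s32 (*do_xfer)(struct ixgbe_hw *hw))
	{
		u32 esdp, i;
		s32 status = IXGBE_ERR_I2C;

		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp | IXGBE_ESDP_SDP0);
		IXGBE_WRITE_FLUSH(hw);		/* request ownership */

		for (i = 0; i < 200; i++) {
			if (IXGBE_READ_REG(hw, IXGBE_ESDP) & IXGBE_ESDP_SDP1)
				break;		/* grant received */
			usleep_range(5000, 10000);
		}
		if (i < 200)
			status = do_xfer(hw);

		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp & ~IXGBE_ESDP_SDP0);
		IXGBE_WRITE_FLUSH(hw);		/* release ownership */
		return status;
	}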
static struct ixgbe_mac_operations mac_ops_82599 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_82599,
@@ -2255,6 +2535,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
.read_analog_reg8 = &ixgbe_read_analog_reg8_82599,
.write_analog_reg8 = &ixgbe_write_analog_reg8_82599,
+ .stop_link_on_d3 = &ixgbe_stop_mac_link_on_d3_82599,
.setup_link = &ixgbe_setup_mac_link_82599,
.set_rxpba = &ixgbe_set_rxpba_generic,
.check_link = &ixgbe_check_mac_link_generic,
@@ -2300,7 +2581,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
static struct ixgbe_phy_operations phy_ops_82599 = {
.identify = &ixgbe_identify_phy_82599,
- .identify_sfp = &ixgbe_identify_sfp_module_generic,
+ .identify_sfp = &ixgbe_identify_module_generic,
.init = &ixgbe_init_phy_ops_82599,
.reset = &ixgbe_reset_phy_generic,
.read_reg = &ixgbe_read_phy_reg_generic,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 9bcdeb89af5a..b5c434b617b1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -65,17 +65,42 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
* function check the device id to see if the associated phy supports
* autoneg flow control.
**/
-s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
+bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
{
+ bool supported = false;
+ ixgbe_link_speed speed;
+ bool link_up;
- switch (hw->device_id) {
- case IXGBE_DEV_ID_X540T:
- case IXGBE_DEV_ID_X540T1:
- case IXGBE_DEV_ID_82599_T3_LOM:
- return 0;
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber_fixed:
+ case ixgbe_media_type_fiber:
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+ /* if link is down, assume supported */
+ if (link_up)
+ supported = speed == IXGBE_LINK_SPEED_1GB_FULL;
+ else
+ supported = true;
+ break;
+ case ixgbe_media_type_backplane:
+ supported = true;
+ break;
+ case ixgbe_media_type_copper:
+ /* only some copper devices support flow control autoneg */
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82599_T3_LOM:
+ case IXGBE_DEV_ID_X540T:
+ case IXGBE_DEV_ID_X540T1:
+ supported = true;
+ break;
+ default:
+ break;
+ }
default:
- return IXGBE_ERR_FC_NOT_SUPPORTED;
+ break;
}
+
+ return supported;
}
/**
@@ -114,6 +139,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
* we link at 10G, the 1G advertisement is harmless and vice versa.
*/
switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber:
case ixgbe_media_type_backplane:
reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
@@ -234,7 +260,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
IXGBE_GSSR_MAC_CSR_SM);
} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
- (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
+ ixgbe_device_supports_autoneg_fc(hw)) {
hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
MDIO_MMD_AN, reg_cu);
}
@@ -2380,6 +2406,7 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
switch (hw->phy.media_type) {
/* Autoneg flow control on fiber adapters */
+ case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber:
if (speed == IXGBE_LINK_SPEED_1GB_FULL)
ret_val = ixgbe_fc_autoneg_fiber(hw);
@@ -2392,7 +2419,7 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
/* Autoneg flow control on copper adapters */
case ixgbe_media_type_copper:
- if (ixgbe_device_supports_autoneg_fc(hw) == 0)
+ if (ixgbe_device_supports_autoneg_fc(hw))
ret_val = ixgbe_fc_autoneg_copper(hw);
break;
@@ -2479,42 +2506,39 @@ out:
**/
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
{
- u32 gssr;
+ u32 gssr = 0;
u32 swmask = mask;
u32 fwmask = mask << 5;
- s32 timeout = 200;
+ u32 timeout = 200;
+ u32 i;
- while (timeout) {
+ for (i = 0; i < timeout; i++) {
/*
- * SW EEPROM semaphore bit is used for access to all
- * SW_FW_SYNC/GSSR bits (not just EEPROM)
+ * SW NVM semaphore bit is used for access to all
+ * SW_FW_SYNC bits (not just NVM)
*/
if (ixgbe_get_eeprom_semaphore(hw))
return IXGBE_ERR_SWFW_SYNC;
gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
- if (!(gssr & (fwmask | swmask)))
- break;
-
- /*
- * Firmware currently using resource (fwmask) or other software
- * thread currently using resource (swmask)
- */
- ixgbe_release_eeprom_semaphore(hw);
- usleep_range(5000, 10000);
- timeout--;
- }
-
- if (!timeout) {
- hw_dbg(hw, "Driver can't access resource, SW_FW_SYNC timeout.\n");
- return IXGBE_ERR_SWFW_SYNC;
+ if (!(gssr & (fwmask | swmask))) {
+ gssr |= swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
+ ixgbe_release_eeprom_semaphore(hw);
+ return 0;
+ } else {
+ /* Resource is currently in use by FW or SW */
+ ixgbe_release_eeprom_semaphore(hw);
+ usleep_range(5000, 10000);
+ }
}
- gssr |= swmask;
- IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
+ /* If time expired clear the bits holding the lock and retry */
+ if (gssr & (fwmask | swmask))
+ ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
- ixgbe_release_eeprom_semaphore(hw);
- return 0;
+ usleep_range(5000, 10000);
+ return IXGBE_ERR_SWFW_SYNC;
}
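[Editor's sketch] Callers pair the rewritten acquire with a matching release around the protected access; the pattern, as used by the 82598 I2C path earlier in this series:

	u16 gssr = (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) ?
		   IXGBE_GSSR_PHY1_SM : IXGBE_GSSR_PHY0_SM;

	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
		return IXGBE_ERR_SWFW_SYNC;
	/* ... touch the PHY/EEPROM resource ... */
	hw->mac.ops.release_swfw_sync(hw, gssr);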
/**
@@ -2716,13 +2740,19 @@ out:
static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
u16 *san_mac_offset)
{
+ s32 ret_val;
+
/*
* First read the EEPROM pointer to see if the MAC addresses are
* available.
*/
- hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
+ ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
+ san_mac_offset);
+ if (ret_val)
+ hw_err(hw, "eeprom read at offset %d failed\n",
+ IXGBE_SAN_MAC_ADDR_PTR);
- return 0;
+ return ret_val;
}
/**
@@ -2739,23 +2769,16 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
{
u16 san_mac_data, san_mac_offset;
u8 i;
+ s32 ret_val;
/*
* First read the EEPROM pointer to see if the MAC addresses are
* available. If they're not, no point in calling set_lan_id() here.
*/
- ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+ ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+ if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
- if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
- /*
- * No addresses available in this EEPROM. It's not an
- * error though, so just wipe the local address and return.
- */
- for (i = 0; i < 6; i++)
- san_mac_addr[i] = 0xFF;
-
- goto san_mac_addr_out;
- }
+ goto san_mac_addr_clr;
/* make sure we know which port we need to program */
hw->mac.ops.set_lan_id(hw);
@@ -2763,14 +2786,26 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
(san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
for (i = 0; i < 3; i++) {
- hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
+ ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
+ &san_mac_data);
+ if (ret_val) {
+ hw_err(hw, "eeprom read at offset %d failed\n",
+ san_mac_offset);
+ goto san_mac_addr_clr;
+ }
san_mac_addr[i * 2] = (u8)(san_mac_data);
san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
san_mac_offset++;
}
-
-san_mac_addr_out:
return 0;
+
+san_mac_addr_clr:
+ /* No addresses available in this EEPROM. It's not necessarily an
+ * error though, so just wipe the local address and return.
+ */
+ for (i = 0; i < 6; i++)
+ san_mac_addr[i] = 0xFF;
+ return ret_val;
}
/**
@@ -3219,8 +3254,9 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
*wwpn_prefix = 0xFFFF;
/* check if alternative SAN MAC is supported */
- hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
- &alt_san_mac_blk_offset);
+ offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
+ if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
+ goto wwn_prefix_err;
if ((alt_san_mac_blk_offset == 0) ||
(alt_san_mac_blk_offset == 0xFFFF))
@@ -3228,19 +3264,26 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
/* check capability in alternative san mac address block */
offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
- hw->eeprom.ops.read(hw, offset, &caps);
+ if (hw->eeprom.ops.read(hw, offset, &caps))
+ goto wwn_prefix_err;
if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
goto wwn_prefix_out;
/* get the corresponding prefix for WWNN/WWPN */
offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
- hw->eeprom.ops.read(hw, offset, wwnn_prefix);
+ if (hw->eeprom.ops.read(hw, offset, wwnn_prefix))
+ hw_err(hw, "eeprom read at offset %d failed\n", offset);
offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
- hw->eeprom.ops.read(hw, offset, wwpn_prefix);
+ if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
+ goto wwn_prefix_err;
wwn_prefix_out:
return 0;
+
+wwn_prefix_err:
+ hw_err(hw, "eeprom read at offset %d failed\n", offset);
+ return 0;
}
/**
@@ -3754,7 +3797,11 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
u8 sensor_index;
u8 sensor_location;
- hw->eeprom.ops.read(hw, (ets_offset + 1 + i), &ets_sensor);
+ if (hw->eeprom.ops.read(hw, ets_offset + 1 + i, &ets_sensor)) {
+ hw_err(hw, "eeprom read at offset %d failed\n",
+ ets_offset + 1 + i);
+ continue;
+ }
sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
IXGBE_ETS_DATA_INDEX_SHIFT);
sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 22eee38868f1..d259dc76604e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -80,7 +80,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
-s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
+bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
@@ -143,8 +143,12 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
+#define ixgbe_hw_to_netdev(hw) (((struct ixgbe_adapter *)(hw)->back)->netdev)
+
#define hw_dbg(hw, format, arg...) \
- netdev_dbg(((struct ixgbe_adapter *)(hw->back))->netdev, format, ##arg)
+ netdev_dbg(ixgbe_hw_to_netdev(hw), format, ## arg)
+#define hw_err(hw, format, arg...) \
+ netdev_err(ixgbe_hw_to_netdev(hw), format, ## arg)
#define e_dev_info(format, arg...) \
dev_info(&adapter->pdev->dev, format, ## arg)
#define e_dev_warn(format, arg...) \
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 24e2e7aafda2..0e1b973659b0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -311,9 +311,6 @@ static int ixgbe_set_settings(struct net_device *netdev,
* this function does not support duplex forcing, but can
* limit the advertising of the adapter to the specified speed
*/
- if (ecmd->autoneg == AUTONEG_DISABLE)
- return -EINVAL;
-
if (ecmd->advertising & ~ecmd->supported)
return -EINVAL;
@@ -355,10 +352,11 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- if (hw->fc.disable_fc_autoneg)
- pause->autoneg = 0;
- else
+ if (ixgbe_device_supports_autoneg_fc(hw) &&
+ !hw->fc.disable_fc_autoneg)
pause->autoneg = 1;
+ else
+ pause->autoneg = 0;
if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
pause->rx_pause = 1;
@@ -384,7 +382,7 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
/* some devices do not support autoneg of link flow control */
if ((pause->autoneg == AUTONEG_ENABLE) &&
- (ixgbe_device_supports_autoneg_fc(hw) != 0))
+ !ixgbe_device_supports_autoneg_fc(hw))
return -EINVAL;
fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
@@ -1048,7 +1046,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
+ for (j = 0; j < netdev->num_tx_queues; j++) {
ring = adapter->tx_ring[j];
if (!ring) {
data[i] = 0;
@@ -1140,11 +1138,11 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
sprintf(p, "tx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
#ifdef LL_EXTENDED_STATS
- sprintf(p, "tx_q_%u_napi_yield", i);
+ sprintf(p, "tx_queue_%u_ll_napi_yield", i);
p += ETH_GSTRING_LEN;
- sprintf(p, "tx_q_%u_misses", i);
+ sprintf(p, "tx_queue_%u_ll_misses", i);
p += ETH_GSTRING_LEN;
- sprintf(p, "tx_q_%u_cleaned", i);
+ sprintf(p, "tx_queue_%u_ll_cleaned", i);
p += ETH_GSTRING_LEN;
#endif /* LL_EXTENDED_STATS */
}
@@ -1154,11 +1152,11 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
sprintf(p, "rx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
#ifdef LL_EXTENDED_STATS
- sprintf(p, "rx_q_%u_ll_poll_yield", i);
+ sprintf(p, "rx_queue_%u_ll_poll_yield", i);
p += ETH_GSTRING_LEN;
- sprintf(p, "rx_q_%u_misses", i);
+ sprintf(p, "rx_queue_%u_ll_misses", i);
p += ETH_GSTRING_LEN;
- sprintf(p, "rx_q_%u_cleaned", i);
+ sprintf(p, "rx_queue_%u_ll_cleaned", i);
p += ETH_GSTRING_LEN;
#endif /* LL_EXTENDED_STATS */
}
@@ -1884,11 +1882,12 @@ static void ixgbe_diag_test(struct net_device *netdev,
struct ethtool_test *eth_test, u64 *data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
bool if_running = netif_running(netdev);
set_bit(__IXGBE_TESTING, &adapter->state);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ struct ixgbe_hw *hw = &adapter->hw;
+
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
int i;
for (i = 0; i < adapter->num_vfs; i++) {
@@ -1912,21 +1911,18 @@ static void ixgbe_diag_test(struct net_device *netdev,
/* Offline tests */
e_info(hw, "offline testing starting\n");
- if (if_running)
- /* indicate we're in test mode */
- dev_close(netdev);
-
- /* bringing adapter down disables SFP+ optics */
- if (hw->mac.ops.enable_tx_laser)
- hw->mac.ops.enable_tx_laser(hw);
-
/* Link test performed before hardware reset so autoneg doesn't
* interfere with test result
*/
if (ixgbe_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- ixgbe_reset(adapter);
+ if (if_running)
+ /* indicate we're in test mode */
+ dev_close(netdev);
+ else
+ ixgbe_reset(adapter);
+
e_info(hw, "register testing starting\n");
if (ixgbe_reg_test(adapter, &data[0]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1963,13 +1959,11 @@ skip_loopback:
clear_bit(__IXGBE_TESTING, &adapter->state);
if (if_running)
dev_open(netdev);
+ else if (hw->mac.ops.disable_tx_laser)
+ hw->mac.ops.disable_tx_laser(hw);
} else {
e_info(hw, "online testing starting\n");
- /* if adapter is down, SFP+ optics will be disabled */
- if (!if_running && hw->mac.ops.enable_tx_laser)
- hw->mac.ops.enable_tx_laser(hw);
-
/* Online tests */
if (ixgbe_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1983,9 +1977,6 @@ skip_loopback:
clear_bit(__IXGBE_TESTING, &adapter->state);
}
- /* if adapter was down, ensure SFP+ optics are disabled again */
- if (!if_running && hw->mac.ops.disable_tx_laser)
- hw->mac.ops.disable_tx_laser(hw);
skip_ol_tests:
msleep_interruptible(4 * 1000);
}
@@ -2909,33 +2900,21 @@ static int ixgbe_get_module_info(struct net_device *dev,
struct ixgbe_hw *hw = &adapter->hw;
u32 status;
u8 sff8472_rev, addr_mode;
- int ret_val = 0;
bool page_swap = false;
- /* avoid concurent i2c reads */
- while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
- msleep(100);
-
- /* used by the service task */
- set_bit(__IXGBE_READ_I2C, &adapter->state);
-
/* Check whether we support SFF-8472 or not */
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_SFF_8472_COMP,
&sff8472_rev);
- if (status != 0) {
- ret_val = -EIO;
- goto err_out;
- }
+ if (status != 0)
+ return -EIO;
/* addressing mode is not supported */
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_SFF_8472_SWAP,
&addr_mode);
- if (status != 0) {
- ret_val = -EIO;
- goto err_out;
- }
+ if (status != 0)
+ return -EIO;
if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
@@ -2952,9 +2931,7 @@ static int ixgbe_get_module_info(struct net_device *dev,
modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
}
-err_out:
- clear_bit(__IXGBE_READ_I2C, &adapter->state);
- return ret_val;
+ return 0;
}
static int ixgbe_get_module_eeprom(struct net_device *dev,
@@ -2966,51 +2943,27 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
u8 databyte = 0xFF;
int i = 0;
- int ret_val = 0;
- /* ixgbe_get_module_info is called before this function in all
- * cases, so we do not need any checks we already do above,
- * and can trust ee->len to be a known value.
- */
+ if (ee->len == 0)
+ return -EINVAL;
- while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
- msleep(100);
- set_bit(__IXGBE_READ_I2C, &adapter->state);
-
- /* Read the first block, SFF-8079 */
- for (i = 0; i < ETH_MODULE_SFF_8079_LEN; i++) {
- status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
- if (status != 0) {
- /* Error occured while reading module */
- ret_val = -EIO;
- goto err_out;
- }
- data[i] = databyte;
- }
+ for (i = ee->offset; i < ee->offset + ee->len; i++) {
+ /* I2C reads can take a long time */
+ if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+ return -EBUSY;
- /* If the second block is requested, check if SFF-8472 is supported. */
- if (ee->len == ETH_MODULE_SFF_8472_LEN) {
- if (data[IXGBE_SFF_SFF_8472_COMP] == IXGBE_SFF_SFF_8472_UNSUP)
- return -EOPNOTSUPP;
-
- /* Read the second block, SFF-8472 */
- for (i = ETH_MODULE_SFF_8079_LEN;
- i < ETH_MODULE_SFF_8472_LEN; i++) {
- status = hw->phy.ops.read_i2c_sff8472(hw,
- i - ETH_MODULE_SFF_8079_LEN, &databyte);
- if (status != 0) {
- /* Error occured while reading module */
- ret_val = -EIO;
- goto err_out;
- }
- data[i] = databyte;
- }
- }
+ if (i < ETH_MODULE_SFF_8079_LEN)
+ status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
+ else
+ status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
-err_out:
- clear_bit(__IXGBE_READ_I2C, &adapter->state);
+ if (status != 0)
+ return -EIO;
- return ret_val;
+ data[i - ee->offset] = databyte;
+ }
+
+ return 0;
}
static const struct ethtool_ops ixgbe_ethtool_ops = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index be4b1fb3d0d2..7aba452833e5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -63,7 +63,7 @@ char ixgbe_default_device_descr[] =
static char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
#endif
-#define DRV_VERSION "3.13.10-k"
+#define DRV_VERSION "3.15.1-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
"Copyright (c) 1999-2013 Intel Corporation.";
@@ -109,6 +109,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
@@ -195,6 +196,86 @@ static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
return 0;
}
+/**
+ * ixgbe_check_from_parent - Determine whether PCIe info should come from parent
+ * @hw: hw specific details
+ *
+ * This function is used by probe to determine whether a device's PCI-Express
+ * bandwidth details should be gathered from the parent bus instead of from the
+ * device. Centralizing it here ensures that the various call sites
+ * all apply the same device ID checks.
+ */
+static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
+{
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82599_SFP_SF_QP:
+ case IXGBE_DEV_ID_82599_QSFP_SF_QP:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
+ int expected_gts)
+{
+ int max_gts = 0;
+ enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
+ enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
+ struct pci_dev *pdev;
+
+ /* determine whether to use the parent device */
+ if (ixgbe_pcie_from_parent(&adapter->hw))
+ pdev = adapter->pdev->bus->parent->self;
+ else
+ pdev = adapter->pdev;
+
+ if (pcie_get_minimum_link(pdev, &speed, &width) ||
+ speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
+ e_dev_warn("Unable to determine PCI Express bandwidth.\n");
+ return;
+ }
+
+ switch (speed) {
+ case PCIE_SPEED_2_5GT:
+ /* 8b/10b encoding reduces max throughput by 20% */
+ max_gts = 2 * width;
+ break;
+ case PCIE_SPEED_5_0GT:
+ /* 8b/10b encoding reduces max throughput by 20% */
+ max_gts = 4 * width;
+ break;
+ case PCIE_SPEED_8_0GT:
+ /* 128b/130b encoding only reduces throughput by 1% */
+ max_gts = 8 * width;
+ break;
+ default:
+ e_dev_warn("Unable to determine PCI Express bandwidth.\n");
+ return;
+ }
+
+ e_dev_info("PCI Express bandwidth of %dGT/s available\n",
+ max_gts);
+ e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n",
+ (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
+ speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
+ speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
+ "Unknown"),
+ width,
+ (speed == PCIE_SPEED_2_5GT ? "20%" :
+ speed == PCIE_SPEED_5_0GT ? "20%" :
+ speed == PCIE_SPEED_8_0GT ? "N/a" :
+ "Unknown"));
+
+ if (max_gts < expected_gts) {
+ e_dev_warn("This is not sufficient for optimal performance of this card.\n");
+ e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n",
+ expected_gts);
+ e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n");
+ }
+}
+
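[Editor's note] Worked example of the arithmetic above: an x8 slot at 5.0GT/s gives max_gts = 4 * 8 = 32 GT/s after the 20% 8b/10b penalty; a dual-port 10G device needs about 2 * 10 = 20 GT/s, so no warning is printed. The same device behind an x4 2.5GT/s link gets only 2 * 4 = 8 GT/s and triggers the warnings.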
static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
@@ -3724,8 +3805,15 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
hw->addr_ctrl.user_set_promisc = true;
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
- /* don't hardware filter vlans in promisc mode */
- ixgbe_vlan_filter_disable(adapter);
+ /* Only disable hardware filter vlans in promiscuous mode
+ * if SR-IOV and VMDQ are disabled - otherwise ensure
+ * that hardware VLAN filters remain enabled.
+ */
+ if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
+ IXGBE_FLAG_SRIOV_ENABLED)))
+ ixgbe_vlan_filter_disable(adapter);
+ else
+ ixgbe_vlan_filter_enable(adapter);
} else {
if (netdev->flags & IFF_ALLMULTI) {
fctrl |= IXGBE_FCTRL_MPE;
@@ -4087,6 +4175,10 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
case ixgbe_phy_sfp_passive_unknown:
case ixgbe_phy_sfp_active_unknown:
case ixgbe_phy_sfp_ftl_active:
+ case ixgbe_phy_qsfp_passive_unknown:
+ case ixgbe_phy_qsfp_active_unknown:
+ case ixgbe_phy_qsfp_intel:
+ case ixgbe_phy_qsfp_unknown:
return true;
case ixgbe_phy_nl:
if (hw->mac.type == ixgbe_mac_82598EB)
@@ -4352,7 +4444,7 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
if (hw->mac.san_mac_rar_index)
hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
- if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+ if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
ixgbe_ptp_reset(adapter);
}
@@ -4714,8 +4806,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
ixgbe_pbthresh_setup(adapter);
hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
hw->fc.send_xon = true;
- hw->fc.disable_fc_autoneg =
- (ixgbe_device_supports_autoneg_fc(hw) == 0) ? false : true;
+ hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
#ifdef CONFIG_PCI_IOV
/* assign number of SR-IOV VFs */
@@ -5205,6 +5296,9 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
return retval;
#endif
+ if (hw->mac.ops.stop_link_on_d3)
+ hw->mac.ops.stop_link_on_d3(hw);
+
if (wufc) {
ixgbe_set_rx_mode(netdev);
@@ -5681,7 +5775,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
adapter->last_rx_ptp_check = jiffies;
- if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+ if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
ixgbe_ptp_start_cyclecounter(adapter);
e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
@@ -5727,7 +5821,7 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
- if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+ if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
ixgbe_ptp_start_cyclecounter(adapter);
e_info(drv, "NIC Link is Down\n");
@@ -5826,10 +5920,6 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
return;
- /* concurent i2c reads are not supported */
- if (test_bit(__IXGBE_READ_I2C, &adapter->state))
- return;
-
/* someone else is in init, wait until next service event */
if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
return;
@@ -6038,7 +6128,7 @@ static void ixgbe_service_task(struct work_struct *work)
ixgbe_fdir_reinit_subtask(adapter);
ixgbe_check_hang_subtask(adapter);
- if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) {
+ if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
ixgbe_ptp_overflow_check(adapter);
ixgbe_ptp_rx_hang(adapter);
}
@@ -7247,6 +7337,42 @@ static const struct net_device_ops ixgbe_netdev_ops = {
};
/**
+ * ixgbe_enumerate_functions - Get the number of ports this device has
+ * @adapter: adapter structure
+ *
+ * This function enumerates the physical functions co-located on a single slot,
+ * in order to determine how many ports a device has. This is most useful in
+ * determining the PCIe bandwidth (in GT/s) required for optimal
+ * performance.
+ **/
+static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct list_head *entry;
+ int physfns = 0;
+
+ /* Some cards cannot use the generic method of counting PCIe
+ * functions, and so must be hardcoded to the correct value.
+ */
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82599_SFP_SF_QP:
+ case IXGBE_DEV_ID_82599_QSFP_SF_QP:
+ physfns = 4;
+ break;
+ default:
+ list_for_each(entry, &adapter->pdev->bus_list) {
+ struct pci_dev *pdev =
+ list_entry(entry, struct pci_dev, bus_list);
+ /* don't count virtual functions */
+ if (!pdev->is_virtfn)
+ physfns++;
+ }
+ }
+
+ return physfns;
+}
+
+/**
* ixgbe_wol_supported - Check whether device supports WoL
* @hw: hw specific details
* @device_id: the device ID
@@ -7328,7 +7454,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ixgbe_hw *hw;
const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
static int cards_found;
- int i, err, pci_using_dac;
+ int i, err, pci_using_dac, expected_gts;
unsigned int indices = MAX_TX_QUEUES;
u8 part_str[IXGBE_PBANUM_LENGTH];
#ifdef IXGBE_FCOE
@@ -7483,10 +7609,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->mac.type == ixgbe_mac_82598EB) {
err = 0;
} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- e_dev_err("failed to load because an unsupported SFP+ "
- "module type was detected.\n");
- e_dev_err("Reload the driver after installing a supported "
- "module.\n");
+ e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
+ e_dev_err("Reload the driver after installing a supported module.\n");
goto err_sw_init;
} else if (err) {
e_dev_err("HW Init failed: %d\n", err);
@@ -7617,7 +7741,7 @@ skip_sriov:
/* pick up the PCI bus settings for reporting later */
hw->mac.ops.get_bus_info(hw);
- if (hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP)
+ if (ixgbe_pcie_from_parent(hw))
ixgbe_get_parent_bus_info(adapter);
/* print bus type/speed/width info */
@@ -7643,12 +7767,20 @@ skip_sriov:
e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
hw->mac.type, hw->phy.type, part_str);
- if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
- e_dev_warn("PCI-Express bandwidth available for this card is "
- "not sufficient for optimal performance.\n");
- e_dev_warn("For optimal performance a x8 PCI-Express slot "
- "is required.\n");
+ /* calculate the expected PCIe bandwidth required for optimal
+ * performance. Note that some older parts will never have enough
+ * bandwidth due to being older generation PCIe parts. We clamp these
+ * parts to ensure no warning is displayed if it can't be fixed.
+ */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
+ break;
+ default:
+ expected_gts = ixgbe_enumerate_functions(adapter) * 10;
+ break;
}
+ ixgbe_check_minimum_link(adapter, expected_gts);
/* reset the hardware with the new settings */
err = hw->mac.ops.start_hw(hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index e5691ccbce9d..e4c676006be9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -204,7 +204,83 @@ out:
}
/**
+ * ixgbe_read_phy_reg_mdi - Reads a value from a specified PHY register without
+ * the SWFW lock
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
+ * @phy_data: Pointer to read data from PHY register
+ **/
+s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 *phy_data)
+{
+ u32 i, data, command;
+
+ /* Setup and write the address cycle command */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /* Check every 10 usec to see if the address cycle completed.
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ udelay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ hw_dbg(hw, "PHY address command did not complete.\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ /* Address cycle complete, setup and write the read
+ * command
+ */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /* Check every 10 usec to see if the address cycle
+ * completed. The MDI Command bit will clear when the
+ * operation is complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ udelay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ hw_dbg(hw, "PHY read command didn't complete\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ /* Read operation is complete. Get the data
+ * from MSRWD
+ */
+ data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
+ data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
+ *phy_data = (u16)(data);
+
+ return 0;
+}
+
+/**
* ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
+ * using the SWFW lock - this function is needed in most cases
* @hw: pointer to hardware structure
* @reg_addr: 32 bit address of PHY register to read
* @phy_data: Pointer to read data from PHY register
@@ -212,10 +288,7 @@ out:
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data)
{
- u32 command;
- u32 i;
- u32 data;
- s32 status = 0;
+ s32 status;
u16 gssr;
if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
@@ -223,86 +296,93 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
else
gssr = IXGBE_GSSR_PHY0_SM;
- if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
+ status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
+ phy_data);
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ } else {
status = IXGBE_ERR_SWFW_SYNC;
+ }
- if (status == 0) {
- /* Setup and write the address cycle command */
- command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
- (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
- (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+ return status;
+}
- IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+/**
+ * ixgbe_write_phy_reg_mdi - Writes a value to specified PHY register
+ * without SWFW lock
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 5 bit device type
+ * @phy_data: Data to write to the PHY register
+ **/
+s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data)
+{
+ u32 i, command;
- /*
- * Check every 10 usec to see if the address cycle completed.
- * The MDI Command bit will clear when the operation is
- * complete
- */
- for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
- udelay(10);
+ /* Put the data in the MDI single read and write data register */
+ IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
- command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ /* Setup and write the address cycle command */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
- if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
- break;
- }
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
- if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
- hw_dbg(hw, "PHY address command did not complete.\n");
- status = IXGBE_ERR_PHY;
- }
+ /*
+ * Check every 10 usec to see if the address cycle completed.
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ udelay(10);
- if (status == 0) {
- /*
- * Address cycle complete, setup and write the read
- * command
- */
- command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
- (hw->phy.mdio.prtad <<
- IXGBE_MSCA_PHY_ADDR_SHIFT) |
- (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
-
- IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
-
- /*
- * Check every 10 usec to see if the address cycle
- * completed. The MDI Command bit will clear when the
- * operation is complete
- */
- for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
- udelay(10);
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
- command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ hw_dbg(hw, "PHY address cmd didn't complete\n");
+ return IXGBE_ERR_PHY;
+ }
- if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
- break;
- }
+ /*
+ * Address cycle complete, setup and write the write
+ * command
+ */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
- if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
- hw_dbg(hw, "PHY read command didn't complete\n");
- status = IXGBE_ERR_PHY;
- } else {
- /*
- * Read operation is complete. Get the data
- * from MSRWD
- */
- data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
- data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
- *phy_data = (u16)(data);
- }
- }
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
- hw->mac.ops.release_swfw_sync(hw, gssr);
+ /* Check every 10 usec to see if the address cycle
+ * completed. The MDI Command bit will clear when the
+ * operation is complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ udelay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
}
- return status;
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ hw_dbg(hw, "PHY write cmd didn't complete\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ return 0;
}
/**
* ixgbe_write_phy_reg_generic - Writes a value to specified PHY register
+ * using SWFW lock- this function is needed in most cases
* @hw: pointer to hardware structure
* @reg_addr: 32 bit PHY register to write
* @device_type: 5 bit device type
@@ -311,9 +391,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data)
{
- u32 command;
- u32 i;
- s32 status = 0;
+ s32 status;
u16 gssr;
if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
@@ -321,74 +399,12 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
else
gssr = IXGBE_GSSR_PHY0_SM;
- if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
- status = IXGBE_ERR_SWFW_SYNC;
-
- if (status == 0) {
- /* Put the data in the MDI single read and write data register*/
- IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
-
- /* Setup and write the address cycle command */
- command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
- (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
- (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
-
- IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
-
- /*
- * Check every 10 usec to see if the address cycle completed.
- * The MDI Command bit will clear when the operation is
- * complete
- */
- for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
- udelay(10);
-
- command = IXGBE_READ_REG(hw, IXGBE_MSCA);
-
- if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
- break;
- }
-
- if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
- hw_dbg(hw, "PHY address cmd didn't complete\n");
- status = IXGBE_ERR_PHY;
- }
-
- if (status == 0) {
- /*
- * Address cycle complete, setup and write the write
- * command
- */
- command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
- (hw->phy.mdio.prtad <<
- IXGBE_MSCA_PHY_ADDR_SHIFT) |
- (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
-
- IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
-
- /*
- * Check every 10 usec to see if the address cycle
- * completed. The MDI Command bit will clear when the
- * operation is complete
- */
- for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
- udelay(10);
-
- command = IXGBE_READ_REG(hw, IXGBE_MSCA);
-
- if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
- break;
- }
-
- if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
- hw_dbg(hw, "PHY address cmd didn't complete\n");
- status = IXGBE_ERR_PHY;
- }
- }
-
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
+ status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
+ phy_data);
hw->mac.ops.release_swfw_sync(hw, gssr);
+ } else {
+ status = IXGBE_ERR_SWFW_SYNC;
}
return status;
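
The wrapper now only arbitrates the SWFW semaphore and delegates the bus protocol to ixgbe_write_phy_reg_mdi(). The read side implied by the new ixgbe_read_phy_reg_mdi() prototype presumably mirrors it; a sketch of that shape (assuming the same GSSR selection, with IXGBE_GSSR_PHY1_SM as the LAN1 semaphore per the surrounding context):

/* Sketch only: acquire the semaphore, delegate, release. */
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
			       u32 device_type, u16 *phy_data)
{
	s32 status;
	u16 gssr;

	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
		gssr = IXGBE_GSSR_PHY1_SM;
	else
		gssr = IXGBE_GSSR_PHY0_SM;

	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
		return IXGBE_ERR_SWFW_SYNC;

	status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type, phy_data);
	hw->mac.ops.release_swfw_sync(hw, gssr);

	return status;
}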
@@ -775,6 +791,8 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
* Read control word from PHY init contents offset
*/
ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
+ if (ret_val)
+ goto err_eeprom;
control = (eword & IXGBE_CONTROL_MASK_NL) >>
IXGBE_CONTROL_SHIFT_NL;
edata = eword & IXGBE_DATA_MASK_NL;
@@ -787,10 +805,15 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
case IXGBE_DATA_NL:
hw_dbg(hw, "DATA:\n");
data_offset++;
- hw->eeprom.ops.read(hw, data_offset++,
- &phy_offset);
+ ret_val = hw->eeprom.ops.read(hw, data_offset++,
+ &phy_offset);
+ if (ret_val)
+ goto err_eeprom;
for (i = 0; i < edata; i++) {
- hw->eeprom.ops.read(hw, data_offset, &eword);
+ ret_val = hw->eeprom.ops.read(hw, data_offset,
+ &eword);
+ if (ret_val)
+ goto err_eeprom;
hw->phy.ops.write_reg(hw, phy_offset,
MDIO_MMD_PMAPMD, eword);
hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword,
@@ -822,12 +845,42 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
out:
return ret_val;
+
+err_eeprom:
+ hw_err(hw, "eeprom read at offset %d failed\n", data_offset);
+ return IXGBE_ERR_PHY;
}
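
The err_eeprom label gives every eeprom.ops.read() in the function a single checked exit that logs the failing offset exactly once. Distilled to its smallest form (a sketch, not part of the patch):

/* Sketch of the error-label convention this hunk adopts. */
static s32 example_read_word(struct ixgbe_hw *hw, u16 offset, u16 *word)
{
	if (hw->eeprom.ops.read(hw, offset, word))
		goto err_eeprom;
	return 0;

err_eeprom:
	hw_err(hw, "eeprom read at offset %d failed\n", offset);
	return IXGBE_ERR_PHY;
}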
/**
- * ixgbe_identify_sfp_module_generic - Identifies SFP modules
+ * ixgbe_identify_module_generic - Identifies module type
* @hw: pointer to hardware structure
*
+ * Determines HW type and calls appropriate function.
+ **/
+s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_SFP_NOT_PRESENT;
+
+ switch (hw->mac.ops.get_media_type(hw)) {
+ case ixgbe_media_type_fiber:
+ status = ixgbe_identify_sfp_module_generic(hw);
+ break;
+ case ixgbe_media_type_fiber_qsfp:
+ status = ixgbe_identify_qsfp_module_generic(hw);
+ break;
+ default:
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ status = IXGBE_ERR_SFP_NOT_PRESENT;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_identify_sfp_module_generic - Identifies SFP modules
+ * @hw: pointer to hardware structure
+ *
* Searches for and identifies the SFP module and assigns appropriate PHY type.
**/
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
@@ -1106,6 +1159,197 @@ err_read_i2c_eeprom:
}
/**
+ * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules
+ * @hw: pointer to hardware structure
+ *
+ * Searches for and identifies the QSFP module and assigns appropriate PHY type
+ **/
+s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+ s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+ u32 vendor_oui = 0;
+ enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
+ u8 identifier = 0;
+ u8 comp_codes_1g = 0;
+ u8 comp_codes_10g = 0;
+ u8 oui_bytes[3] = {0, 0, 0};
+ u16 enforce_sfp = 0;
+ u8 connector = 0;
+ u8 cable_length = 0;
+ u8 device_tech = 0;
+ bool active_cable = false;
+
+ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) {
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ status = IXGBE_ERR_SFP_NOT_PRESENT;
+ goto out;
+ }
+
+ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
+ &identifier);
+
+ if (status != 0)
+ goto err_read_i2c_eeprom;
+
+ if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) {
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ goto out;
+ }
+
+ hw->phy.id = identifier;
+
+ /* LAN ID is needed for sfp_type determination */
+ hw->mac.ops.set_lan_id(hw);
+
+ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP,
+ &comp_codes_10g);
+
+ if (status != 0)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_1GBE_COMP,
+ &comp_codes_1g);
+
+ if (status != 0)
+ goto err_read_i2c_eeprom;
+
+ if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) {
+ hw->phy.type = ixgbe_phy_qsfp_passive_unknown;
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core0;
+ else
+ hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1;
+ } else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
+ IXGBE_SFF_10GBASELR_CAPABLE)) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0;
+ else
+ hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1;
+ } else {
+ if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE)
+ active_cable = true;
+
+ if (!active_cable) {
+ /* check for active DA cables that pre-date
+ * SFF-8436 v3.6
+ */
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_CONNECTOR,
+ &connector);
+
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_CABLE_LENGTH,
+ &cable_length);
+
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_DEVICE_TECH,
+ &device_tech);
+
+ if ((connector ==
+ IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE) &&
+ (cable_length > 0) &&
+ ((device_tech >> 4) ==
+ IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL))
+ active_cable = true;
+ }
+
+ if (active_cable) {
+ hw->phy.type = ixgbe_phy_qsfp_active_unknown;
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_act_lmt_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_act_lmt_core1;
+ } else {
+ /* unsupported module type */
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ goto out;
+ }
+ }
+
+ if (hw->phy.sfp_type != stored_sfp_type)
+ hw->phy.sfp_setup_needed = true;
+
+ /* Determine if the QSFP+ PHY is dual speed or not. */
+ hw->phy.multispeed_fiber = false;
+ if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
+ (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
+ ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
+ (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
+ hw->phy.multispeed_fiber = true;
+
+ /* Determine PHY vendor for optical modules */
+ if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
+ IXGBE_SFF_10GBASELR_CAPABLE)) {
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0,
+ &oui_bytes[0]);
+
+ if (status != 0)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1,
+ &oui_bytes[1]);
+
+ if (status != 0)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2,
+ &oui_bytes[2]);
+
+ if (status != 0)
+ goto err_read_i2c_eeprom;
+
+ vendor_oui =
+ ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
+ (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
+ (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
+
+ if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL)
+ hw->phy.type = ixgbe_phy_qsfp_intel;
+ else
+ hw->phy.type = ixgbe_phy_qsfp_unknown;
+
+ hw->mac.ops.get_device_caps(hw, &enforce_sfp);
+ if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
+ /* Make sure we're a supported PHY type */
+ if (hw->phy.type == ixgbe_phy_qsfp_intel) {
+ status = 0;
+ } else {
+ if (hw->allow_unsupported_sfp) {
+ e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
+ status = 0;
+ } else {
+ hw_dbg(hw,
+ "QSFP module not supported\n");
+ hw->phy.type =
+ ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ }
+ }
+ } else {
+ status = 0;
+ }
+ }
+
+out:
+ return status;
+
+err_read_i2c_eeprom:
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ hw->phy.id = 0;
+ hw->phy.type = ixgbe_phy_unknown;
+
+ return IXGBE_ERR_SFP_NOT_PRESENT;
+}
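
The vendor OUI is just three EEPROM bytes packed into one word. A standalone, runnable sketch of the arithmetic (the 24/16/8 shift widths are an assumption matching the IXGBE_SFF_VENDOR_OUI_BYTE*_SHIFT usage; 00:1B:21 is used purely as an example value):

#include <stdint.h>
#include <stdio.h>

/* Pack three OUI bytes the way ixgbe_identify_qsfp_module_generic does. */
int main(void)
{
	uint8_t oui_bytes[3] = { 0x00, 0x1b, 0x21 };	/* example OUI */
	uint32_t vendor_oui = ((uint32_t)oui_bytes[0] << 24) |
			      ((uint32_t)oui_bytes[1] << 16) |
			      ((uint32_t)oui_bytes[2] << 8);

	printf("vendor_oui = 0x%08x\n", vendor_oui);	/* 0x001b2100 */
	return 0;
}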
+
+/**
* ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
* @hw: pointer to hardware structure
* @list_offset: offset to the SFP ID list
@@ -1147,7 +1391,11 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
sfp_type = ixgbe_sfp_type_srlr_core1;
/* Read offset to PHY init contents */
- hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset);
+ if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) {
+ hw_err(hw, "eeprom read at %d failed\n",
+ IXGBE_PHY_INIT_OFFSET_NL);
+ return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
+ }
if ((!*list_offset) || (*list_offset == 0xFFFF))
return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
@@ -1159,12 +1407,14 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
* Find the matching SFP ID in the EEPROM
* and program the init sequence
*/
- hw->eeprom.ops.read(hw, *list_offset, &sfp_id);
+ if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
+ goto err_phy;
while (sfp_id != IXGBE_PHY_INIT_END_NL) {
if (sfp_id == sfp_type) {
(*list_offset)++;
- hw->eeprom.ops.read(hw, *list_offset, data_offset);
+ if (hw->eeprom.ops.read(hw, *list_offset, data_offset))
+ goto err_phy;
if ((!*data_offset) || (*data_offset == 0xFFFF)) {
hw_dbg(hw, "SFP+ module not supported\n");
return IXGBE_ERR_SFP_NOT_SUPPORTED;
@@ -1174,7 +1424,7 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
} else {
(*list_offset) += 2;
if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
- return IXGBE_ERR_PHY;
+ goto err_phy;
}
}
@@ -1184,6 +1434,10 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
}
return 0;
+
+err_phy:
+ hw_err(hw, "eeprom read at offset %d failed\n", *list_offset);
+ return IXGBE_ERR_PHY;
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 886a3431cf5b..24af12e3719e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -33,17 +33,28 @@
#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2
/* EEPROM byte offsets */
-#define IXGBE_SFF_IDENTIFIER 0x0
-#define IXGBE_SFF_IDENTIFIER_SFP 0x3
-#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25
-#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26
-#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27
-#define IXGBE_SFF_1GBE_COMP_CODES 0x6
-#define IXGBE_SFF_10GBE_COMP_CODES 0x3
-#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
-#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
-#define IXGBE_SFF_SFF_8472_SWAP 0x5C
-#define IXGBE_SFF_SFF_8472_COMP 0x5E
+#define IXGBE_SFF_IDENTIFIER 0x0
+#define IXGBE_SFF_IDENTIFIER_SFP 0x3
+#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25
+#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26
+#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27
+#define IXGBE_SFF_1GBE_COMP_CODES 0x6
+#define IXGBE_SFF_10GBE_COMP_CODES 0x3
+#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
+#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
+#define IXGBE_SFF_SFF_8472_SWAP 0x5C
+#define IXGBE_SFF_SFF_8472_COMP 0x5E
+#define IXGBE_SFF_SFF_8472_OSCB 0x6E
+#define IXGBE_SFF_SFF_8472_ESCB 0x76
+#define IXGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD
+#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0 0xA5
+#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1 0xA6
+#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2 0xA7
+#define IXGBE_SFF_QSFP_CONNECTOR 0x82
+#define IXGBE_SFF_QSFP_10GBE_COMP 0x83
+#define IXGBE_SFF_QSFP_1GBE_COMP 0x86
+#define IXGBE_SFF_QSFP_CABLE_LENGTH 0x92
+#define IXGBE_SFF_QSFP_DEVICE_TECH 0x93
/* Bitmasks */
#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
@@ -54,7 +65,14 @@
#define IXGBE_SFF_1GBASET_CAPABLE 0x8
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
+#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
#define IXGBE_SFF_ADDRESSING_MODE 0x4
+#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
+#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
+#define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23
+#define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0
#define IXGBE_I2C_EEPROM_READ_MASK 0x100
#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
@@ -102,6 +120,10 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data);
s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data);
+s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data);
+s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data);
s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
@@ -121,7 +143,9 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
u16 *firmware_version);
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
+s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
+s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
u16 *data_offset);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 331987d6815c..5184e2a1a7d8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -885,8 +885,8 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
ixgbe_ptp_reset(adapter);
- /* set the flag that PTP has been enabled */
- adapter->flags2 |= IXGBE_FLAG2_PTP_ENABLED;
+ /* enter the IXGBE_PTP_RUNNING state */
+ set_bit(__IXGBE_PTP_RUNNING, &adapter->state);
return;
}
@@ -899,10 +899,12 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
*/
void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
{
- /* stop the overflow check task */
- adapter->flags2 &= ~(IXGBE_FLAG2_PTP_ENABLED |
- IXGBE_FLAG2_PTP_PPS_ENABLED);
+ /* Leave the IXGBE_PTP_RUNNING state. */
+ if (!test_and_clear_bit(__IXGBE_PTP_RUNNING, &adapter->state))
+ return;
+ /* stop the PPS signal */
+ adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED;
ixgbe_ptp_setup_sdp(adapter);
cancel_work_sync(&adapter->ptp_tx_work);
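
The flags2 bit becomes an adapter->state bit so that stopping PTP is atomic and idempotent: test_and_clear_bit() checks and clears in one step, so a second caller returns early instead of tearing down twice. The pattern in isolation (hypothetical names, a sketch only):

#include <linux/bitops.h>

static unsigned long example_state;
#define __EXAMPLE_PTP_RUNNING 0	/* hypothetical bit index */

static void example_ptp_start(void)
{
	set_bit(__EXAMPLE_PTP_RUNNING, &example_state);
}

static void example_ptp_stop(void)
{
	/* Atomically test-and-clear: only the first stop proceeds. */
	if (!test_and_clear_bit(__EXAMPLE_PTP_RUNNING, &example_state))
		return;
	/* ... disable PPS, cancel work, etc. ... */
}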
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 1e7d587c4e57..276d7b135332 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -173,39 +173,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
ixgbe_disable_sriov(adapter);
}
-static bool ixgbe_vfs_are_assigned(struct ixgbe_adapter *adapter)
-{
- struct pci_dev *pdev = adapter->pdev;
- struct pci_dev *vfdev;
- int dev_id;
-
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82599EB:
- dev_id = IXGBE_DEV_ID_82599_VF;
- break;
- case ixgbe_mac_X540:
- dev_id = IXGBE_DEV_ID_X540_VF;
- break;
- default:
- return false;
- }
-
- /* loop through all the VFs to see if we own any that are assigned */
- vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL);
- while (vfdev) {
- /* if we don't own it we don't care */
- if (vfdev->is_virtfn && vfdev->physfn == pdev) {
- /* if it is assigned we cannot release it */
- if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
- return true;
- }
-
- vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev);
- }
-
- return false;
-}
-
#endif /* #ifdef CONFIG_PCI_IOV */
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
@@ -235,7 +202,7 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
* without causing issues, so just leave the hardware
* available but disabled
*/
- if (ixgbe_vfs_are_assigned(adapter)) {
+ if (pci_vfs_assigned(adapter->pdev)) {
e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n");
return -EPERM;
}
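
The open-coded VF walk removed above duplicated what the PCI core's pci_vfs_assigned() already does: check each VF whose physfn is the given PF for the assigned flag. With the helper, the driver-side check reduces to a predicate (sketch):

#include <linux/pci.h>

/* Sketch: true if any VF of this PF is assigned to a guest. */
static bool example_vfs_busy(struct pci_dev *pf)
{
	return pci_vfs_assigned(pf) != 0;
}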
@@ -672,8 +639,8 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
- u32 reg, msgbuf[4];
- u32 reg_offset, vf_shift;
+ u32 reg, reg_offset, vf_shift;
+ u32 msgbuf[4] = {0, 0, 0, 0};
u8 *addr = (u8 *)(&msgbuf[1]);
e_info(probe, "VF Reset msg received from vf %d\n", vf);
@@ -768,6 +735,29 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0;
}
+static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan)
+{
+ u32 vlvf;
+ s32 regindex;
+
+ /* short cut the special case */
+ if (vlan == 0)
+ return 0;
+
+ /* Search for the vlan id in the VLVF entries */
+ for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
+ vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
+ if ((vlvf & VLAN_VID_MASK) == vlan)
+ break;
+ }
+
+ /* Return a negative value if not found */
+ if (regindex >= IXGBE_VLVF_ENTRIES)
+ regindex = -1;
+
+ return regindex;
+}
+
static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
u32 *msgbuf, u32 vf)
{
@@ -775,6 +765,9 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
int err;
+ s32 reg_ndx;
+ u32 vlvf;
+ u32 bits;
u8 tcs = netdev_get_num_tc(adapter->netdev);
if (adapter->vfinfo[vf].pf_vlan || tcs) {
@@ -790,10 +783,50 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
else if (adapter->vfinfo[vf].vlan_count)
adapter->vfinfo[vf].vlan_count--;
+ /* in case of promiscuous mode any VLAN filter set for a VF must
+ * also have the PF pool added to it.
+ */
+ if (add && adapter->netdev->flags & IFF_PROMISC)
+ err = ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0));
+
err = ixgbe_set_vf_vlan(adapter, add, vid, vf);
if (!err && adapter->vfinfo[vf].spoofchk_enabled)
hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
+ /* Go through all the checks to see if the VLAN filter should
+ * be wiped completely.
+ */
+ if (!add && adapter->netdev->flags & IFF_PROMISC) {
+ reg_ndx = ixgbe_find_vlvf_entry(hw, vid);
+ if (reg_ndx < 0)
+ goto out;
+ vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_ndx));
+ /* See if any other pools are set for this VLAN filter
+ * entry other than the PF.
+ */
+ if (VMDQ_P(0) < 32) {
+ bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2));
+ bits &= ~(1 << VMDQ_P(0));
+ bits |= IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(reg_ndx * 2) + 1);
+ } else {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(reg_ndx * 2) + 1);
+ bits &= ~(1 << (VMDQ_P(0) - 32));
+ bits |= IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2));
+ }
+
+ /* If the filter was removed then ensure PF pool bit
+ * is cleared if the PF only added itself to the pool
+ * because the PF is in promiscuous mode.
+ */
+ if ((vlvf & VLAN_VID_MASK) == vid &&
+ !test_bit(vid, adapter->active_vlans) && !bits)
+ ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0));
+ }
+
+out:
+
return err;
}
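
Each VLVF entry carries two 32-bit pool bitmaps (VLVFB, pools 0-31 and 32-63). The hunk clears the PF's own bit in its half and ORs in the other half; a nonzero result means another pool still uses the VLAN, so the filter must stay. The test in isolation, as a runnable sketch:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the VLVFB test in ixgbe_set_vf_vlan_msg. */
static int other_pools_set(uint32_t vlvfb_lo, uint32_t vlvfb_hi,
			   unsigned int pf_pool)
{
	uint32_t bits;

	if (pf_pool < 32) {
		bits = vlvfb_lo & ~(1u << pf_pool);	/* drop PF's bit */
		bits |= vlvfb_hi;
	} else {
		bits = vlvfb_hi & ~(1u << (pf_pool - 32));
		bits |= vlvfb_lo;
	}
	return bits != 0;
}

int main(void)
{
	/* PF in pool 0 is the only user: filter can be wiped. */
	printf("%d\n", other_pools_set(0x1, 0x0, 0));	/* prints 0 */
	/* A VF in pool 5 still uses the VLAN: keep the filter. */
	printf("%d\n", other_pools_set(0x21, 0x0, 0));	/* prints 1 */
	return 0;
}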
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 70c6aa3d3f95..6442cf8f9dce 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -69,6 +69,7 @@
#define IXGBE_DEV_ID_82599_LS 0x154F
#define IXGBE_DEV_ID_X540T 0x1528
#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A
+#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558
#define IXGBE_DEV_ID_X540T1 0x1560
/* VF Device IDs */
@@ -1520,9 +1521,11 @@ enum {
#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */
#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */
#define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */
+#define IXGBE_ESDP_SDP1_DIR 0x00000200 /* SDP1 IO direction */
#define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */
#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */
#define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 Native Function */
+#define IXGBE_ESDP_SDP1_NATIVE 0x00020000 /* SDP1 IO mode */
/* LEDCTL Bit Masks */
#define IXGBE_LED_IVRT_BASE 0x00000040
@@ -1593,6 +1596,7 @@ enum {
#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK 0x50000000
#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000
#define IXGBE_MACC_FLU 0x00000001
@@ -2582,6 +2586,10 @@ enum ixgbe_phy_type {
ixgbe_phy_sfp_ftl_active,
ixgbe_phy_sfp_unknown,
ixgbe_phy_sfp_intel,
+ ixgbe_phy_qsfp_passive_unknown,
+ ixgbe_phy_qsfp_active_unknown,
+ ixgbe_phy_qsfp_intel,
+ ixgbe_phy_qsfp_unknown,
ixgbe_phy_sfp_unsupported,
ixgbe_phy_generic
};
@@ -2622,6 +2630,8 @@ enum ixgbe_sfp_type {
enum ixgbe_media_type {
ixgbe_media_type_unknown = 0,
ixgbe_media_type_fiber,
+ ixgbe_media_type_fiber_fixed,
+ ixgbe_media_type_fiber_qsfp,
ixgbe_media_type_fiber_lco,
ixgbe_media_type_copper,
ixgbe_media_type_backplane,
@@ -2838,6 +2848,7 @@ struct ixgbe_mac_operations {
void (*disable_tx_laser)(struct ixgbe_hw *);
void (*enable_tx_laser)(struct ixgbe_hw *);
void (*flap_tx_laser)(struct ixgbe_hw *);
+ void (*stop_link_on_d3)(struct ixgbe_hw *);
s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
@@ -2885,6 +2896,8 @@ struct ixgbe_phy_operations {
s32 (*reset)(struct ixgbe_hw *);
s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
+ s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *);
+ s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16);
s32 (*setup_link)(struct ixgbe_hw *);
s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
@@ -2953,6 +2966,7 @@ struct ixgbe_phy_info {
bool smart_speed_active;
bool multispeed_fiber;
bool reset_if_overtemp;
+ bool qsfp_shared_i2c_bus;
};
#include "ixgbe_mbx.h"
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 1f5166ad6bb5..59a62bbfb371 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -488,8 +488,8 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
* source pruning.
*/
if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
- !(compare_ether_addr(adapter->netdev->dev_addr,
- eth_hdr(skb)->h_source))) {
+ ether_addr_equal(adapter->netdev->dev_addr,
+ eth_hdr(skb)->h_source)) {
dev_kfree_skb_irq(skb);
goto next_desc;
}
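
compare_ether_addr() returned 0 on match, memcmp-style, which forced the double negation visible in the removed lines; bool-returning ether_addr_equal() states the intent directly. A minimal sketch:

#include <linux/etherdevice.h>

/* True when the packet's source MAC is the device's own address. */
static bool example_is_own_source(const u8 *dev_addr, const u8 *src)
{
	return ether_addr_equal(dev_addr, src);
}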
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index c35db735958f..7fb5677451f9 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2641,7 +2641,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
ret = mv643xx_eth_shared_of_probe(pdev);
if (ret)
return ret;
- pd = pdev->dev.platform_data;
+ pd = dev_get_platdata(&pdev->dev);
msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
pd->tx_csum_limit : 9 * 1024;
@@ -2833,7 +2833,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
struct resource *res;
int err;
- pd = pdev->dev.platform_data;
+ pd = dev_get_platdata(&pdev->dev);
if (pd == NULL) {
dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
return -ENODEV;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index b017818bccae..e35bac7cfdf1 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -79,10 +79,10 @@
#define MVNETA_MAC_ADDR_HIGH 0x2418
#define MVNETA_SDMA_CONFIG 0x241c
#define MVNETA_SDMA_BRST_SIZE_16 4
-#define MVNETA_NO_DESC_SWAP 0x0
#define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
#define MVNETA_RX_NO_DATA_SWAP BIT(4)
#define MVNETA_TX_NO_DATA_SWAP BIT(5)
+#define MVNETA_DESC_SWAP BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
#define MVNETA_PORT_STATUS 0x2444
#define MVNETA_TX_IN_PRGRS BIT(1)
@@ -138,7 +138,9 @@
#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
+#define MVNETA_GMAC_AN_SPEED_EN BIT(7)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
+#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
#define MVNETA_MIB_COUNTERS_BASE 0x3080
#define MVNETA_MIB_LATE_COLLISION 0x7c
#define MVNETA_DA_FILT_SPEC_MCAST 0x3400
@@ -264,8 +266,7 @@ struct mvneta_port {
* layout of the transmit and reception DMA descriptors, and their
* layout is therefore defined by the hardware design
*/
-struct mvneta_tx_desc {
- u32 command; /* Options used by HW for packet transmitting.*/
+
#define MVNETA_TX_L3_OFF_SHIFT 0
#define MVNETA_TX_IP_HLEN_SHIFT 8
#define MVNETA_TX_L4_UDP BIT(16)
@@ -280,15 +281,6 @@ struct mvneta_tx_desc {
#define MVNETA_TX_L4_CSUM_FULL BIT(30)
#define MVNETA_TX_L4_CSUM_NOT BIT(31)
- u16 reserverd1; /* csum_l4 (for future use) */
- u16 data_size; /* Data size of transmitted packet in bytes */
- u32 buf_phys_addr; /* Physical addr of transmitted buffer */
- u32 reserved2; /* hw_cmd - (for future use, PMT) */
- u32 reserved3[4]; /* Reserved - (for future use) */
-};
-
-struct mvneta_rx_desc {
- u32 status; /* Info about received packet */
#define MVNETA_RXD_ERR_CRC 0x0
#define MVNETA_RXD_ERR_SUMMARY BIT(16)
#define MVNETA_RXD_ERR_OVERRUN BIT(17)
@@ -299,16 +291,57 @@ struct mvneta_rx_desc {
#define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK BIT(30)
+#if defined(__LITTLE_ENDIAN)
+struct mvneta_tx_desc {
+ u32 command; /* Options used by HW for packet transmitting.*/
+ u16 reserverd1; /* csum_l4 (for future use) */
+ u16 data_size; /* Data size of transmitted packet in bytes */
+ u32 buf_phys_addr; /* Physical addr of transmitted buffer */
+ u32 reserved2; /* hw_cmd - (for future use, PMT) */
+ u32 reserved3[4]; /* Reserved - (for future use) */
+};
+
+struct mvneta_rx_desc {
+ u32 status; /* Info about received packet */
u16 reserved1; /* pnc_info - (for future use, PnC) */
u16 data_size; /* Size of received packet in bytes */
+
u32 buf_phys_addr; /* Physical address of the buffer */
u32 reserved2; /* pnc_flow_id (for future use, PnC) */
+
u32 buf_cookie; /* cookie for access to RX buffer in rx path */
u16 reserved3; /* prefetch_cmd, for future use */
u16 reserved4; /* csum_l4 - (for future use, PnC) */
+
+ u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
+ u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
+};
+#else
+struct mvneta_tx_desc {
+ u16 data_size; /* Data size of transmitted packet in bytes */
+ u16 reserverd1; /* csum_l4 (for future use) */
+ u32 command; /* Options used by HW for packet transmitting.*/
+ u32 reserved2; /* hw_cmd - (for future use, PMT) */
+ u32 buf_phys_addr; /* Physical addr of transmitted buffer */
+ u32 reserved3[4]; /* Reserved - (for future use) */
+};
+
+struct mvneta_rx_desc {
+ u16 data_size; /* Size of received packet in bytes */
+ u16 reserved1; /* pnc_info - (for future use, PnC) */
+ u32 status; /* Info about received packet */
+
+ u32 reserved2; /* pnc_flow_id (for future use, PnC) */
+ u32 buf_phys_addr; /* Physical address of the buffer */
+
+ u16 reserved4; /* csum_l4 - (for future use, PnC) */
+ u16 reserved3; /* prefetch_cmd, for future use */
+ u32 buf_cookie; /* cookie for access to RX buffer in rx path */
+
u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
};
+#endif
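
On big-endian builds the descriptor fields are mirrored within each 64-bit group so that, together with the new MVNETA_DESC_SWAP mode, the CPU's native-endian view of the ring lines up with what the controller reads and writes. Whatever the endianness, both layouts must keep the same 32-byte stride, since the hardware ring geometry does not change; a compile-time check makes that explicit (sketch, not part of the patch):

#include <linux/bug.h>

/* Both endian variants of the descriptors must stay 32 bytes. */
static inline void mvneta_desc_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct mvneta_tx_desc) != 32);
	BUILD_BUG_ON(sizeof(struct mvneta_rx_desc) != 32);
}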
struct mvneta_tx_queue {
/* Number of this TX queue, in the range 0-7 */
@@ -908,13 +941,22 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
/* Default burst size */
val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
+ val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;
- val |= (MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP |
- MVNETA_NO_DESC_SWAP);
+#if defined(__BIG_ENDIAN)
+ val |= MVNETA_DESC_SWAP;
+#endif
/* Assign port SDMA configuration */
mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
+ /* Disable PHY polling in hardware, since we're using the
+ * kernel phylib to do this.
+ */
+ val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
+ val &= ~MVNETA_PHY_POLLING_ENABLE;
+ mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
+
mvneta_set_ucast_table(pp, -1);
mvneta_set_special_mcast_table(pp, -1);
mvneta_set_other_mcast_table(pp, -1);
@@ -2307,7 +2349,9 @@ static void mvneta_adjust_link(struct net_device *ndev)
val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
MVNETA_GMAC_CONFIG_GMII_SPEED |
- MVNETA_GMAC_CONFIG_FULL_DUPLEX);
+ MVNETA_GMAC_CONFIG_FULL_DUPLEX |
+ MVNETA_GMAC_AN_SPEED_EN |
+ MVNETA_GMAC_AN_DUPLEX_EN);
if (phydev->duplex)
val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
@@ -2440,6 +2484,21 @@ static int mvneta_stop(struct net_device *dev)
return 0;
}
+static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ int ret;
+
+ if (!pp->phy_dev)
+ return -ENOTSUPP;
+
+ ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd);
+ if (!ret)
+ mvneta_adjust_link(dev);
+
+ return ret;
+}
+
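
With ->ndo_do_ioctl wired to phy_mii_ioctl(), the standard MII ioctls become usable on the device. From userspace, reading a PHY register now looks roughly like this (runnable sketch; the interface name is an example):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <unistd.h>

int main(void)
{
	struct ifreq ifr;
	/* MII data lives inline in the ifreq union; standard idiom. */
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* example name */

	if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {	/* fills phy_id */
		mii->reg_num = 1;			/* BMSR */
		if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
			printf("BMSR = 0x%04x\n", mii->val_out);
	}
	close(fd);
	return 0;
}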
/* Ethtool methods */
/* Get settings (phy address, speed) for ethtools */
@@ -2558,6 +2617,7 @@ static const struct net_device_ops mvneta_netdev_ops = {
.ndo_change_mtu = mvneta_change_mtu,
.ndo_tx_timeout = mvneta_tx_timeout,
.ndo_get_stats64 = mvneta_get_stats64,
+ .ndo_do_ioctl = mvneta_ioctl,
};
const struct ethtool_ops mvneta_eth_tool_ops = {
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index db481477bcc5..4ae0c7426010 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -583,10 +583,9 @@ static int init_hash_table(struct pxa168_eth_private *pep)
* table is full.
*/
if (pep->htpr == NULL) {
- pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
- HASH_ADDR_TABLE_SIZE,
- &pep->htpr_dma,
- GFP_KERNEL | __GFP_ZERO);
+ pep->htpr = dma_zalloc_coherent(pep->dev->dev.parent,
+ HASH_ADDR_TABLE_SIZE,
+ &pep->htpr_dma, GFP_KERNEL);
if (pep->htpr == NULL)
return -ENOMEM;
} else {
@@ -1024,9 +1023,9 @@ static int rxq_init(struct net_device *dev)
pep->rx_desc_count = 0;
size = pep->rx_ring_size * sizeof(struct rx_desc);
pep->rx_desc_area_size = size;
- pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
- &pep->rx_desc_dma,
- GFP_KERNEL | __GFP_ZERO);
+ pep->p_rx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size,
+ &pep->rx_desc_dma,
+ GFP_KERNEL);
if (!pep->p_rx_desc_area)
goto out;
@@ -1085,9 +1084,9 @@ static int txq_init(struct net_device *dev)
pep->tx_desc_count = 0;
size = pep->tx_ring_size * sizeof(struct tx_desc);
pep->tx_desc_area_size = size;
- pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
- &pep->tx_desc_dma,
- GFP_KERNEL | __GFP_ZERO);
+ pep->p_tx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size,
+ &pep->tx_desc_dma,
+ GFP_KERNEL);
if (!pep->p_tx_desc_area)
goto out;
/* Initialize the next_desc_ptr links in the Tx descriptors ring */
@@ -1517,7 +1516,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
printk(KERN_INFO "%s:Using random mac address\n", DRIVER_NAME);
eth_hw_addr_random(dev);
- pep->pd = pdev->dev.platform_data;
+ pep->pd = dev_get_platdata(&pdev->dev);
pep->rx_ring_size = NUM_RX_DESCS;
if (pep->pd->rx_queue_size)
pep->rx_ring_size = pep->pd->rx_queue_size;
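
All three allocation sites in this file follow the same conversion: dma_zalloc_coherent() is the zeroing shorthand for dma_alloc_coherent() with __GFP_ZERO. The shape of each converted call (sketch):

#include <linux/dma-mapping.h>

/* Allocate a zeroed, coherent descriptor ring. */
static void *example_alloc_ring(struct device *dev, size_t size,
				dma_addr_t *dma)
{
	return dma_zalloc_coherent(dev, size, dma, GFP_KERNEL);
}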
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 299d0184f983..ea20182c6969 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -800,7 +800,16 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
}
-int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+static int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ return -EPERM;
+}
+
+static int MLX4_CMD_GET_OP_REQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
@@ -1252,6 +1261,15 @@ static struct mlx4_cmd_info cmd_info[] = {
.wrapper = MLX4_CMD_UPDATE_QP_wrapper
},
{
+ .opcode = MLX4_CMD_GET_OP_REQ,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = MLX4_CMD_GET_OP_REQ_wrapper,
+ },
+ {
.opcode = MLX4_CMD_CONF_SPECIAL_QP,
.has_inbox = false,
.has_outbox = false,
@@ -1526,7 +1544,7 @@ static int calculate_transition(u16 oper_vlan, u16 admin_vlan)
return (2 * (oper_vlan == MLX4_VGT) + (admin_vlan == MLX4_VGT));
}
-int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
+static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
int slave, int port)
{
struct mlx4_vport_oper_state *vp_oper;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index 9d4a1ea030d8..b4881b686159 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -160,6 +160,7 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_port_profile *prof = priv->prof;
struct mlx4_en_dev *mdev = priv->mdev;
int err;
@@ -169,15 +170,17 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
pfc->mbc,
pfc->delay);
- priv->prof->rx_pause = priv->prof->tx_pause = !!pfc->pfc_en;
- priv->prof->rx_ppp = priv->prof->tx_ppp = pfc->pfc_en;
+ prof->rx_pause = !pfc->pfc_en;
+ prof->tx_pause = !pfc->pfc_en;
+ prof->rx_ppp = pfc->pfc_en;
+ prof->tx_ppp = pfc->pfc_en;
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
priv->rx_skb_size + ETH_FCS_LEN,
- priv->prof->tx_pause,
- priv->prof->tx_ppp,
- priv->prof->rx_pause,
- priv->prof->rx_ppp);
+ prof->tx_pause,
+ prof->tx_ppp,
+ prof->rx_pause,
+ prof->rx_ppp);
if (err)
en_err(priv, "Failed setting pause params\n");
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 7c492382da09..0698c82d6ff1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -191,6 +191,39 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
}
+static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring, int index,
+ u8 owner)
+{
+ __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
+ struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
+ struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
+ void *end = ring->buf + ring->buf_size;
+ __be32 *ptr = (__be32 *)tx_desc;
+ int i;
+
+ /* Optimize the common case when there are no wraparounds */
+ if (likely((void *)tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
+ /* Stamp the freed descriptor */
+ for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE;
+ i += STAMP_STRIDE) {
+ *ptr = stamp;
+ ptr += STAMP_DWORDS;
+ }
+ } else {
+ /* Stamp the freed descriptor */
+ for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE;
+ i += STAMP_STRIDE) {
+ *ptr = stamp;
+ ptr += STAMP_DWORDS;
+ if ((void *)ptr >= end) {
+ ptr = ring->buf;
+ stamp ^= cpu_to_be32(0x80000000);
+ }
+ }
+ }
+}
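
Stamping is now split out of the free path so it can run at its own cadence. The one subtlety is the wrap: when the stamp pointer runs past the ring end it restarts at the base, and the owner bit inside the stamp value flips, because descriptors on the next lap of the ring belong to the opposite ownership phase. A toy, runnable model of that rule (values and ring size are made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define RING_DWORDS 16	/* toy ring: 16 stamp slots */

int main(void)
{
	uint32_t ring[RING_DWORDS] = { 0 };
	uint32_t stamp = 0x7fffffff;	/* toy stamp; owner bit = top bit */
	unsigned int idx = 12, n = 8, i;	/* 8 stamps, starting near the end */

	for (i = 0; i < n; i++) {
		ring[idx] = stamp;
		if (++idx == RING_DWORDS) {	/* wrapped: flip owner phase */
			idx = 0;
			stamp ^= 0x80000000u;
		}
	}
	for (i = 0; i < RING_DWORDS; i++)
		printf("%2u: %08x\n", i, ring[i]);
	return 0;
}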
+
static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
@@ -205,8 +238,6 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
void *end = ring->buf + ring->buf_size;
int frags = skb_shinfo(skb)->nr_frags;
int i;
- __be32 *ptr = (__be32 *)tx_desc;
- __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
struct skb_shared_hwtstamps hwts;
if (timestamp) {
@@ -232,12 +263,6 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
skb_frag_size(frag), PCI_DMA_TODEVICE);
}
}
- /* Stamp the freed descriptor */
- for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
- *ptr = stamp;
- ptr += STAMP_DWORDS;
- }
-
} else {
if (!tx_info->inl) {
if ((void *) data >= end) {
@@ -263,16 +288,6 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
++data;
}
}
- /* Stamp the freed descriptor */
- for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
- *ptr = stamp;
- ptr += STAMP_DWORDS;
- if ((void *) ptr >= end) {
- ptr = ring->buf;
- stamp ^= cpu_to_be32(0x80000000);
- }
- }
-
}
dev_kfree_skb_any(skb);
return tx_info->nr_txbb;
@@ -318,8 +333,9 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
struct mlx4_cqe *cqe;
u16 index;
- u16 new_index, ring_index;
+ u16 new_index, ring_index, stamp_index;
u32 txbbs_skipped = 0;
+ u32 txbbs_stamp = 0;
u32 cons_index = mcq->cons_index;
int size = cq->size;
u32 size_mask = ring->size_mask;
@@ -335,6 +351,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
index = cons_index & size_mask;
cqe = &buf[(index << factor) + factor];
ring_index = ring->cons & size_mask;
+ stamp_index = ring_index;
/* Process all completed CQEs */
while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
@@ -345,6 +362,15 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
*/
rmb();
+ if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
+ MLX4_CQE_OPCODE_ERROR)) {
+ struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe;
+
+ en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n",
+ cqe_err->vendor_err_syndrome,
+ cqe_err->syndrome);
+ }
+
/* Skip over last polled CQE */
new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
@@ -359,6 +385,12 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
priv, ring, ring_index,
!!((ring->cons + txbbs_skipped) &
ring->size), timestamp);
+
+ mlx4_en_stamp_wqe(priv, ring, stamp_index,
+ !!((ring->cons + txbbs_stamp) &
+ ring->size));
+ stamp_index = ring_index;
+ txbbs_stamp = txbbs_skipped;
packets++;
bytes += ring->tx_info[ring_index].nr_bytes;
} while (ring_index != new_index);
@@ -556,17 +588,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ struct device *ddev = priv->ddev;
struct mlx4_en_tx_ring *ring;
struct mlx4_en_tx_desc *tx_desc;
struct mlx4_wqe_data_seg *data;
- struct skb_frag_struct *frag;
struct mlx4_en_tx_info *tx_info;
- struct ethhdr *ethh;
int tx_ind = 0;
int nr_txbb;
int desc_size;
int real_size;
- dma_addr_t dma;
u32 index, bf_index;
__be32 op_own;
u16 vlan_tag = 0;
@@ -642,6 +672,61 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
tx_info->skb = skb;
tx_info->nr_txbb = nr_txbb;
+ if (lso_header_size)
+ data = ((void *)&tx_desc->lso + ALIGN(lso_header_size + 4,
+ DS_SIZE));
+ else
+ data = &tx_desc->data;
+
+ /* valid only for none inline segments */
+ tx_info->data_offset = (void *)data - (void *)tx_desc;
+
+ tx_info->linear = (lso_header_size < skb_headlen(skb) &&
+ !is_inline(skb, NULL)) ? 1 : 0;
+
+ data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
+
+ if (is_inline(skb, &fragptr)) {
+ tx_info->inl = 1;
+ } else {
+ /* Map fragments */
+ for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
+ struct skb_frag_struct *frag;
+ dma_addr_t dma;
+
+ frag = &skb_shinfo(skb)->frags[i];
+ dma = skb_frag_dma_map(ddev, frag,
+ 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ddev, dma))
+ goto tx_drop_unmap;
+
+ data->addr = cpu_to_be64(dma);
+ data->lkey = cpu_to_be32(mdev->mr.key);
+ wmb();
+ data->byte_count = cpu_to_be32(skb_frag_size(frag));
+ --data;
+ }
+
+ /* Map linear part */
+ if (tx_info->linear) {
+ u32 byte_count = skb_headlen(skb) - lso_header_size;
+ dma_addr_t dma;
+
+ dma = dma_map_single(ddev, skb->data +
+ lso_header_size, byte_count,
+ PCI_DMA_TODEVICE);
+ if (dma_mapping_error(ddev, dma))
+ goto tx_drop_unmap;
+
+ data->addr = cpu_to_be64(dma);
+ data->lkey = cpu_to_be32(mdev->mr.key);
+ wmb();
+ data->byte_count = cpu_to_be32(byte_count);
+ }
+ tx_info->inl = 0;
+ }
+
/*
* For timestamping add flag to skb_shinfo and
* set flag for further reference
@@ -666,6 +751,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
}
if (priv->flags & MLX4_EN_FLAG_ENABLE_HW_LOOPBACK) {
+ struct ethhdr *ethh;
+
/* Copy dst mac address to wqe. This allows loopback in eSwitch,
* so that VFs and PF can communicate with each other
*/
@@ -688,8 +775,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
/* Copy headers;
* note that we already verified that it is linear */
memcpy(tx_desc->lso.header, skb->data, lso_header_size);
- data = ((void *) &tx_desc->lso +
- ALIGN(lso_header_size + 4, DS_SIZE));
priv->port_stats.tso_packets++;
i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
@@ -701,7 +786,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
((ring->prod & ring->size) ?
cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
- data = &tx_desc->data;
tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
ring->packets++;
@@ -710,38 +794,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
-
- /* valid only for none inline segments */
- tx_info->data_offset = (void *) data - (void *) tx_desc;
-
- tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
- data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
-
- if (!is_inline(skb, &fragptr)) {
- /* Map fragments */
- for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
- frag = &skb_shinfo(skb)->frags[i];
- dma = skb_frag_dma_map(priv->ddev, frag,
- 0, skb_frag_size(frag),
- DMA_TO_DEVICE);
- data->addr = cpu_to_be64(dma);
- data->lkey = cpu_to_be32(mdev->mr.key);
- wmb();
- data->byte_count = cpu_to_be32(skb_frag_size(frag));
- --data;
- }
-
- /* Map linear part */
- if (tx_info->linear) {
- dma = dma_map_single(priv->ddev, skb->data + lso_header_size,
- skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE);
- data->addr = cpu_to_be64(dma);
- data->lkey = cpu_to_be32(mdev->mr.key);
- wmb();
- data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
- }
- tx_info->inl = 0;
- } else {
+ if (tx_info->inl) {
build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
tx_info->inl = 1;
}
@@ -781,6 +834,16 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
+tx_drop_unmap:
+ en_err(priv, "DMA mapping error\n");
+
+ for (i++; i < skb_shinfo(skb)->nr_frags; i++) {
+ data++;
+ dma_unmap_page(ddev, (dma_addr_t) be64_to_cpu(data->addr),
+ be32_to_cpu(data->byte_count),
+ PCI_DMA_TODEVICE);
+ }
+
tx_drop:
dev_kfree_skb_any(skb);
priv->stats.tx_dropped++;
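
The new unwind works because fragments are mapped from the last one down to the first, decrementing the descriptor pointer as it goes: on a failure at index i, everything above i is already mapped, so tx_drop_unmap walks back up and unmaps exactly that set. The same pattern in a self-contained sketch (hypothetical helper, kernel context assumed):

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

/* Map nr fragments last-to-first; on failure, unmap the ones done so far. */
static int map_frags_backward(struct device *dev,
			      struct skb_frag_struct *frags,
			      int nr, dma_addr_t *slots)
{
	int i;

	for (i = nr - 1; i >= 0; i--) {
		slots[i] = skb_frag_dma_map(dev, &frags[i], 0,
					    skb_frag_size(&frags[i]),
					    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, slots[i]))
			goto unwind;
	}
	return 0;

unwind:
	for (i++; i < nr; i++)	/* frags above i were already mapped */
		dma_unmap_page(dev, slots[i], skb_frag_size(&frags[i]),
			       DMA_TO_DEVICE);
	return -ENOMEM;
}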
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 7e042869ef0c..0416c5b3b35c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -79,6 +79,7 @@ enum {
(1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
(1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \
(1ull << MLX4_EVENT_TYPE_CMD) | \
+ (1ull << MLX4_EVENT_TYPE_OP_REQUIRED) | \
(1ull << MLX4_EVENT_TYPE_COMM_CHANNEL) | \
(1ull << MLX4_EVENT_TYPE_FLR_EVENT) | \
(1ull << MLX4_EVENT_TYPE_FATAL_WARNING))
@@ -629,6 +630,14 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
break;
+ case MLX4_EVENT_TYPE_OP_REQUIRED:
+ atomic_inc(&priv->opreq_count);
+ /* FW commands can't be executed from interrupt context
+ * working in deferred task
+ */
+ queue_work(mlx4_wq, &priv->opreq_task);
+ break;
+
case MLX4_EVENT_TYPE_COMM_CHANNEL:
if (!mlx4_is_master(dev)) {
mlx4_warn(dev, "Received comm channel event "
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 6fc6dabc78d5..0d63daa2f422 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -1696,3 +1696,107 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_write);
+
+enum {
+ ADD_TO_MCG = 0x26,
+};
+
+void mlx4_opreq_action(struct work_struct *work)
+{
+ struct mlx4_priv *priv = container_of(work, struct mlx4_priv,
+ opreq_task);
+ struct mlx4_dev *dev = &priv->dev;
+ int num_tasks = atomic_read(&priv->opreq_count);
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_mgm *mgm;
+ u32 *outbox;
+ u32 modifier;
+ u16 token;
+ u16 type_m;
+ u16 type;
+ int err;
+ u32 num_qps;
+ struct mlx4_qp qp;
+ int i;
+ u8 rem_mcg;
+ u8 prot;
+
+#define GET_OP_REQ_MODIFIER_OFFSET 0x08
+#define GET_OP_REQ_TOKEN_OFFSET 0x14
+#define GET_OP_REQ_TYPE_OFFSET 0x1a
+#define GET_OP_REQ_DATA_OFFSET 0x20
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n");
+ return;
+ }
+ outbox = mailbox->buf;
+
+ while (num_tasks) {
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
+ MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err) {
+ mlx4_err(dev, "Failed to retreive required operation: %d\n",
+ err);
+ return;
+ }
+ MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
+ MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
+ MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
+ type_m = type >> 12;
+ type &= 0xfff;
+
+ switch (type) {
+ case ADD_TO_MCG:
+ if (dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n");
+ err = EPERM;
+ break;
+ }
+ mgm = (struct mlx4_mgm *)((u8 *)(outbox) +
+ GET_OP_REQ_DATA_OFFSET);
+ num_qps = be32_to_cpu(mgm->members_count) &
+ MGM_QPN_MASK;
+ rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1;
+ prot = ((u8 *)(&mgm->members_count))[0] >> 6;
+
+ for (i = 0; i < num_qps; i++) {
+ qp.qpn = be32_to_cpu(mgm->qp[i]);
+ if (rem_mcg)
+ err = mlx4_multicast_detach(dev, &qp,
+ mgm->gid,
+ prot, 0);
+ else
+ err = mlx4_multicast_attach(dev, &qp,
+ mgm->gid,
+ mgm->gid[5]
+ , 0, prot,
+ NULL);
+ if (err)
+ break;
+ }
+ break;
+ default:
+ mlx4_warn(dev, "Bad type for required operation\n");
+ err = EINVAL;
+ break;
+ }
+ err = mlx4_cmd(dev, 0, ((u32) err | cpu_to_be32(token) << 16),
+ 1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err) {
+ mlx4_err(dev, "Failed to acknowledge required request: %d\n",
+ err);
+ goto out;
+ }
+ memset(outbox, 0, 0xffc);
+ num_tasks = atomic_dec_return(&priv->opreq_count);
+ }
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+}
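
mlx4_cmd() sleeps on the command mailbox, so the GET_OP_REQ event cannot be serviced from the EQ interrupt; the handler only bumps opreq_count and queues opreq_task, and the task drains requests until the count reaches zero. The deferral pattern in isolation (hypothetical names, a sketch only):

#include <linux/workqueue.h>
#include <linux/atomic.h>

static atomic_t example_count = ATOMIC_INIT(0);
static struct work_struct example_task;

static void example_action(struct work_struct *work)
{
	int remaining = atomic_read(&example_count);

	while (remaining) {
		/* one blocking firmware command per request goes here */
		remaining = atomic_dec_return(&example_count);
	}
}

static void example_init(void)
{
	INIT_WORK(&example_task, example_action);
}

static void example_irq_event(struct workqueue_struct *wq)
{
	atomic_inc(&example_count);
	queue_work(wq, &example_task);	/* non-sleeping: safe in hardirq */
}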
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index fdf41665a059..a0a368b7c939 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -220,5 +220,6 @@ int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
int mlx4_NOP(struct mlx4_dev *dev);
int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg);
+void mlx4_opreq_action(struct work_struct *work);
#endif /* MLX4_FW_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 36be3208786a..60c9f4f103fc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1692,11 +1692,19 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
goto err_xrcd_table_free;
}
+ if (!mlx4_is_slave(dev)) {
+ err = mlx4_init_mcg_table(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to initialize multicast group table, aborting.\n");
+ goto err_mr_table_free;
+ }
+ }
+
err = mlx4_init_eq_table(dev);
if (err) {
mlx4_err(dev, "Failed to initialize "
"event queue table, aborting.\n");
- goto err_mr_table_free;
+ goto err_mcg_table_free;
}
err = mlx4_cmd_use_events(dev);
@@ -1746,19 +1754,10 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
goto err_srq_table_free;
}
- if (!mlx4_is_slave(dev)) {
- err = mlx4_init_mcg_table(dev);
- if (err) {
- mlx4_err(dev, "Failed to initialize "
- "multicast group table, aborting.\n");
- goto err_qp_table_free;
- }
- }
-
err = mlx4_init_counters_table(dev);
if (err && err != -ENOENT) {
mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
- goto err_mcg_table_free;
+ goto err_qp_table_free;
}
if (!mlx4_is_slave(dev)) {
@@ -1803,9 +1802,6 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
err_counters_table_free:
mlx4_cleanup_counters_table(dev);
-err_mcg_table_free:
- mlx4_cleanup_mcg_table(dev);
-
err_qp_table_free:
mlx4_cleanup_qp_table(dev);
@@ -1821,6 +1817,10 @@ err_cmd_poll:
err_eq_table_free:
mlx4_cleanup_eq_table(dev);
+err_mcg_table_free:
+ if (!mlx4_is_slave(dev))
+ mlx4_cleanup_mcg_table(dev);
+
err_mr_table_free:
mlx4_cleanup_mr_table(dev);
@@ -2197,6 +2197,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
}
}
+ atomic_set(&priv->opreq_count, 0);
+ INIT_WORK(&priv->opreq_task, mlx4_opreq_action);
+
/*
* Now reset the HCA before we touch the PCI capabilities or
* attempt a firmware command, since a boot ROM may have left
@@ -2315,12 +2318,12 @@ err_port:
mlx4_cleanup_port_info(&priv->port[port]);
mlx4_cleanup_counters_table(dev);
- mlx4_cleanup_mcg_table(dev);
mlx4_cleanup_qp_table(dev);
mlx4_cleanup_srq_table(dev);
mlx4_cleanup_cq_table(dev);
mlx4_cmd_use_polling(dev);
mlx4_cleanup_eq_table(dev);
+ mlx4_cleanup_mcg_table(dev);
mlx4_cleanup_mr_table(dev);
mlx4_cleanup_xrcd_table(dev);
mlx4_cleanup_pd_table(dev);
@@ -2403,12 +2406,12 @@ static void mlx4_remove_one(struct pci_dev *pdev)
RES_TR_FREE_SLAVES_ONLY);
mlx4_cleanup_counters_table(dev);
- mlx4_cleanup_mcg_table(dev);
mlx4_cleanup_qp_table(dev);
mlx4_cleanup_srq_table(dev);
mlx4_cleanup_cq_table(dev);
mlx4_cmd_use_polling(dev);
mlx4_cleanup_eq_table(dev);
+ mlx4_cleanup_mcg_table(dev);
mlx4_cleanup_mr_table(dev);
mlx4_cleanup_xrcd_table(dev);
mlx4_cleanup_pd_table(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index f3e804f2a35f..55f6245efb6c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -39,19 +39,8 @@
#include "mlx4.h"
-#define MGM_QPN_MASK 0x00FFFFFF
-#define MGM_BLCK_LB_BIT 30
-
static const u8 zero_gid[16]; /* automatically initialized to 0 */
-struct mlx4_mgm {
- __be32 next_gid_index;
- __be32 members_count;
- u32 reserved[2];
- u8 gid[16];
- __be32 qp[MLX4_MAX_QP_PER_MGM];
-};
-
int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
{
return 1 << dev->oper_log_mgm_entry_size;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 17d9277e33ef..348bb8c7d9a7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -554,6 +554,17 @@ struct mlx4_mfunc {
struct mlx4_mfunc_master_ctx master;
};
+#define MGM_QPN_MASK 0x00FFFFFF
+#define MGM_BLCK_LB_BIT 30
+
+struct mlx4_mgm {
+ __be32 next_gid_index;
+ __be32 members_count;
+ u32 reserved[2];
+ u8 gid[16];
+ __be32 qp[MLX4_MAX_QP_PER_MGM];
+};
+
struct mlx4_cmd {
struct pci_pool *pool;
void __iomem *hcr;
@@ -802,6 +813,8 @@ struct mlx4_priv {
u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
__be64 slave_node_guids[MLX4_MFUNC_MAX];
+ atomic_t opreq_count;
+ struct work_struct opreq_task;
};
static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index e393d998be89..0951f7aca1ef 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -705,7 +705,8 @@ static void ks8842_rx_frame(struct net_device *netdev,
ks8842_enable_bits(adapter, 0, 1 << 12, REG_QRFCR);
}
-void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter)
+static void ks8842_handle_rx(struct net_device *netdev,
+ struct ks8842_adapter *adapter)
{
u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
netdev_dbg(netdev, "%s Entry - rx_data: %d\n", __func__, rx_data);
@@ -715,7 +716,8 @@ void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter)
}
}
-void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter)
+static void ks8842_handle_tx(struct net_device *netdev,
+ struct ks8842_adapter *adapter)
{
u16 sr = ks8842_read16(adapter, 16, REG_TXSR);
netdev_dbg(netdev, "%s - entry, sr: %x\n", __func__, sr);
@@ -724,7 +726,7 @@ void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter)
netif_wake_queue(netdev);
}
-void ks8842_handle_rx_overrun(struct net_device *netdev,
+static void ks8842_handle_rx_overrun(struct net_device *netdev,
struct ks8842_adapter *adapter)
{
netdev_dbg(netdev, "%s: entry\n", __func__);
@@ -732,7 +734,7 @@ void ks8842_handle_rx_overrun(struct net_device *netdev,
netdev->stats.rx_fifo_errors++;
}
-void ks8842_tasklet(unsigned long arg)
+static void ks8842_tasklet(unsigned long arg)
{
struct net_device *netdev = (struct net_device *)arg;
struct ks8842_adapter *adapter = netdev_priv(netdev);
@@ -1146,7 +1148,7 @@ static int ks8842_probe(struct platform_device *pdev)
struct resource *iomem;
struct net_device *netdev;
struct ks8842_adapter *adapter;
- struct ks8842_platform_data *pdata = pdev->dev.platform_data;
+ struct ks8842_platform_data *pdata = dev_get_platdata(&pdev->dev);
u16 id;
unsigned i;
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index ac20098b542a..0fba1532d326 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -688,7 +688,7 @@ static void ks_soft_reset(struct ks_net *ks, unsigned op)
}
-void ks_enable_qmu(struct ks_net *ks)
+static void ks_enable_qmu(struct ks_net *ks)
{
u16 w;
@@ -1636,7 +1636,7 @@ static int ks8851_probe(struct platform_device *pdev)
} else {
struct ks8851_mll_platform_data *pdata;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
netdev_err(netdev, "No platform data\n");
err = -ENODEV;
diff --git a/drivers/net/ethernet/moxa/Kconfig b/drivers/net/ethernet/moxa/Kconfig
new file mode 100644
index 000000000000..1731e050fa27
--- /dev/null
+++ b/drivers/net/ethernet/moxa/Kconfig
@@ -0,0 +1,30 @@
+#
+# MOXART device configuration
+#
+
+config NET_VENDOR_MOXART
+ bool "MOXA ART devices"
+ default y
+ depends on (ARM && ARCH_MOXART)
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about MOXA ART devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_MOXART
+
+config ARM_MOXART_ETHER
+ tristate "MOXART Ethernet support"
+ depends on ARM && ARCH_MOXART
+ select NET_CORE
+ ---help---
+ If you wish to compile a kernel for hardware with a MOXA ART SoC and
+ want to use its internal Ethernet controller, answer Y here.
+
+
+endif # NET_VENDOR_MOXART
diff --git a/drivers/net/ethernet/moxa/Makefile b/drivers/net/ethernet/moxa/Makefile
new file mode 100644
index 000000000000..aa3c73e9e952
--- /dev/null
+++ b/drivers/net/ethernet/moxa/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the MOXART network device drivers.
+#
+
+obj-$(CONFIG_ARM_MOXART_ETHER) += moxart_ether.o
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
new file mode 100644
index 000000000000..83c2091c9c23
--- /dev/null
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -0,0 +1,559 @@
+/* MOXA ART Ethernet (RTL8201CP) driver.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * Based on code from
+ * Moxa Technology Co., Ltd. <www.moxa.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/ethtool.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/crc32.h>
+#include <linux/crc32c.h>
+
+#include "moxart_ether.h"
+
+static inline void moxart_emac_write(struct net_device *ndev,
+ unsigned int reg, unsigned long value)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+ writel(value, priv->base + reg);
+}
+
+static void moxart_update_mac_address(struct net_device *ndev)
+{
+ moxart_emac_write(ndev, REG_MAC_MS_ADDRESS,
+ ((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])));
+ moxart_emac_write(ndev, REG_MAC_MS_ADDRESS + 4,
+ ((ndev->dev_addr[2] << 24) |
+ (ndev->dev_addr[3] << 16) |
+ (ndev->dev_addr[4] << 8) |
+ (ndev->dev_addr[5])));
+}
+
+static int moxart_set_mac_address(struct net_device *ndev, void *addr)
+{
+ struct sockaddr *address = addr;
+
+ if (!is_valid_ether_addr(address->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len);
+ moxart_update_mac_address(ndev);
+
+ return 0;
+}
+
+static void moxart_mac_free_memory(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+ int i;
+
+ for (i = 0; i < RX_DESC_NUM; i++)
+ dma_unmap_single(&ndev->dev, priv->rx_mapping[i],
+ priv->rx_buf_size, DMA_FROM_DEVICE);
+
+ if (priv->tx_desc_base)
+ dma_free_coherent(NULL, TX_REG_DESC_SIZE * TX_DESC_NUM,
+ priv->tx_desc_base, priv->tx_base);
+
+ if (priv->rx_desc_base)
+ dma_free_coherent(NULL, RX_REG_DESC_SIZE * RX_DESC_NUM,
+ priv->rx_desc_base, priv->rx_base);
+
+ kfree(priv->tx_buf_base);
+ kfree(priv->rx_buf_base);
+}
+
+static void moxart_mac_reset(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+ writel(SW_RST, priv->base + REG_MAC_CTRL);
+ while (readl(priv->base + REG_MAC_CTRL) & SW_RST)
+ mdelay(10);
+
+ writel(0, priv->base + REG_INTERRUPT_MASK);
+
+ priv->reg_maccr = RX_BROADPKT | FULLDUP | CRC_APD | RX_FTL;
+}
+
+static void moxart_mac_enable(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+ writel(0x00001010, priv->base + REG_INT_TIMER_CTRL);
+ writel(0x00000001, priv->base + REG_APOLL_TIMER_CTRL);
+ writel(0x00000390, priv->base + REG_DMA_BLEN_CTRL);
+
+ priv->reg_imr |= (RPKT_FINISH_M | XPKT_FINISH_M);
+ writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK);
+
+ priv->reg_maccr |= (RCV_EN | XMT_EN | RDMA_EN | XDMA_EN);
+ writel(priv->reg_maccr, priv->base + REG_MAC_CTRL);
+}
+
+static void moxart_mac_setup_desc_ring(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+ void __iomem *desc;
+ int i;
+
+ for (i = 0; i < TX_DESC_NUM; i++) {
+ desc = priv->tx_desc_base + i * TX_REG_DESC_SIZE;
+ memset(desc, 0, TX_REG_DESC_SIZE);
+
+ priv->tx_buf[i] = priv->tx_buf_base + priv->tx_buf_size * i;
+ }
+ writel(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1);
+
+ priv->tx_head = 0;
+ priv->tx_tail = 0;
+
+ for (i = 0; i < RX_DESC_NUM; i++) {
+ desc = priv->rx_desc_base + i * RX_REG_DESC_SIZE;
+ memset(desc, 0, RX_REG_DESC_SIZE);
+ writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
+ writel(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK,
+ desc + RX_REG_OFFSET_DESC1);
+
+ priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
+ priv->rx_mapping[i] = dma_map_single(&ndev->dev,
+ priv->rx_buf[i],
+ priv->rx_buf_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
+ netdev_err(ndev, "DMA mapping error\n");
+
+ writel(priv->rx_mapping[i],
+ desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_PHYS);
+ writel(priv->rx_buf[i],
+ desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_VIRT);
+ }
+ writel(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1);
+
+ priv->rx_head = 0;
+
+	/* reset the MAC controller TX/RX descriptor base address */
+ writel(priv->tx_base, priv->base + REG_TXR_BASE_ADDRESS);
+ writel(priv->rx_base, priv->base + REG_RXR_BASE_ADDRESS);
+}
+
+static int moxart_mac_open(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ return -EADDRNOTAVAIL;
+
+ napi_enable(&priv->napi);
+
+ moxart_mac_reset(ndev);
+ moxart_update_mac_address(ndev);
+ moxart_mac_setup_desc_ring(ndev);
+ moxart_mac_enable(ndev);
+ netif_start_queue(ndev);
+
+ netdev_dbg(ndev, "%s: IMR=0x%x, MACCR=0x%x\n",
+ __func__, readl(priv->base + REG_INTERRUPT_MASK),
+ readl(priv->base + REG_MAC_CTRL));
+
+ return 0;
+}
+
+static int moxart_mac_stop(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+ napi_disable(&priv->napi);
+
+ netif_stop_queue(ndev);
+
+ /* disable all interrupts */
+ writel(0, priv->base + REG_INTERRUPT_MASK);
+
+ /* disable all functions */
+ writel(0, priv->base + REG_MAC_CTRL);
+
+ return 0;
+}
+
+static int moxart_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct moxart_mac_priv_t *priv = container_of(napi,
+ struct moxart_mac_priv_t,
+ napi);
+ struct net_device *ndev = priv->ndev;
+ struct sk_buff *skb;
+ void __iomem *desc;
+ unsigned int desc0, len;
+ int rx_head = priv->rx_head;
+ int rx = 0;
+
+ while (1) {
+ desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head);
+ desc0 = readl(desc + RX_REG_OFFSET_DESC0);
+
+ if (desc0 & RX_DESC0_DMA_OWN)
+ break;
+
+ if (desc0 & (RX_DESC0_ERR | RX_DESC0_CRC_ERR | RX_DESC0_FTL |
+ RX_DESC0_RUNT | RX_DESC0_ODD_NB)) {
+ net_dbg_ratelimited("packet error\n");
+ priv->stats.rx_dropped++;
+ priv->stats.rx_errors++;
+			goto rx_next;
+ }
+
+ len = desc0 & RX_DESC0_FRAME_LEN_MASK;
+
+ if (len > RX_BUF_SIZE)
+ len = RX_BUF_SIZE;
+
+ skb = build_skb(priv->rx_buf[rx_head], priv->rx_buf_size);
+ if (unlikely(!skb)) {
+ net_dbg_ratelimited("build_skb failed\n");
+ priv->stats.rx_dropped++;
+ priv->stats.rx_errors++;
+			goto rx_next;
+		}
+
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ napi_gro_receive(&priv->napi, skb);
+ rx++;
+
+ ndev->last_rx = jiffies;
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes += len;
+ if (desc0 & RX_DESC0_MULTICAST)
+ priv->stats.multicast++;
+
+rx_next:
+		/* hand the descriptor back and advance, even on error paths */
+		writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
+
+ rx_head = RX_NEXT(rx_head);
+ priv->rx_head = rx_head;
+
+ if (rx >= budget)
+ break;
+ }
+
+ if (rx < budget) {
+ napi_gro_flush(napi, false);
+ __napi_complete(napi);
+ }
+
+ priv->reg_imr |= RPKT_FINISH_M;
+ writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK);
+
+ return rx;
+}
+
+static void moxart_tx_finished(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+ unsigned tx_head = priv->tx_head;
+ unsigned tx_tail = priv->tx_tail;
+
+ while (tx_tail != tx_head) {
+ dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail],
+ priv->tx_len[tx_tail], DMA_TO_DEVICE);
+
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += priv->tx_skb[tx_tail]->len;
+
+ dev_kfree_skb_irq(priv->tx_skb[tx_tail]);
+ priv->tx_skb[tx_tail] = NULL;
+
+ tx_tail = TX_NEXT(tx_tail);
+ }
+ priv->tx_tail = tx_tail;
+}
+
+static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *) dev_id;
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+ unsigned int ists = readl(priv->base + REG_INTERRUPT_STATUS);
+
+ if (ists & XPKT_OK_INT_STS)
+ moxart_tx_finished(ndev);
+
+ if (ists & RPKT_FINISH) {
+ if (napi_schedule_prep(&priv->napi)) {
+ priv->reg_imr &= ~RPKT_FINISH_M;
+ writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK);
+ __napi_schedule(&priv->napi);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+ void __iomem *desc;
+ unsigned int len;
+ unsigned int tx_head = priv->tx_head;
+ u32 txdes1;
+ int ret = NETDEV_TX_BUSY;
+
+ desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head);
+
+ spin_lock_irq(&priv->txlock);
+ if (readl(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
+ net_dbg_ratelimited("no TX space for packet\n");
+ priv->stats.tx_dropped++;
+ goto out_unlock;
+ }
+
+ len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;
+
+ priv->tx_mapping[tx_head] = dma_map_single(&ndev->dev, skb->data,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&ndev->dev, priv->tx_mapping[tx_head])) {
+ netdev_err(ndev, "DMA mapping error\n");
+ goto out_unlock;
+ }
+
+ priv->tx_len[tx_head] = len;
+ priv->tx_skb[tx_head] = skb;
+
+ writel(priv->tx_mapping[tx_head],
+ desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_PHYS);
+ writel(skb->data,
+ desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_VIRT);
+
+ if (skb->len < ETH_ZLEN) {
+ memset(&skb->data[skb->len],
+ 0, ETH_ZLEN - skb->len);
+ len = ETH_ZLEN;
+ }
+
+ txdes1 = readl(desc + TX_REG_OFFSET_DESC1);
+ txdes1 |= TX_DESC1_LTS | TX_DESC1_FTS;
+ txdes1 &= ~(TX_DESC1_FIFO_COMPLETE | TX_DESC1_INTR_COMPLETE);
+ txdes1 |= (len & TX_DESC1_BUF_SIZE_MASK);
+ writel(txdes1, desc + TX_REG_OFFSET_DESC1);
+ writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0);
+
+ /* start to send packet */
+ writel(0xffffffff, priv->base + REG_TX_POLL_DEMAND);
+
+ priv->tx_head = TX_NEXT(tx_head);
+
+ ndev->trans_start = jiffies;
+ ret = NETDEV_TX_OK;
+out_unlock:
+ spin_unlock_irq(&priv->txlock);
+
+ return ret;
+}
+
+static struct net_device_stats *moxart_mac_get_stats(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+ return &priv->stats;
+}
+
+static void moxart_mac_setmulticast(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+ struct netdev_hw_addr *ha;
+ int crc_val;
+
+ netdev_for_each_mc_addr(ha, ndev) {
+ crc_val = crc32_le(~0, ha->addr, ETH_ALEN);
+ crc_val = (crc_val >> 26) & 0x3f;
+ if (crc_val >= 32) {
+ writel(readl(priv->base + REG_MCAST_HASH_TABLE1) |
+ (1UL << (crc_val - 32)),
+ priv->base + REG_MCAST_HASH_TABLE1);
+ } else {
+ writel(readl(priv->base + REG_MCAST_HASH_TABLE0) |
+ (1UL << crc_val),
+ priv->base + REG_MCAST_HASH_TABLE0);
+ }
+ }
+}
+
+static void moxart_mac_set_rx_mode(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+ spin_lock_irq(&priv->txlock);
+
+	if (ndev->flags & IFF_PROMISC)
+		priv->reg_maccr |= RCV_ALL;
+	else
+		priv->reg_maccr &= ~RCV_ALL;
+
+	if (ndev->flags & IFF_ALLMULTI)
+		priv->reg_maccr |= RX_MULTIPKT;
+	else
+		priv->reg_maccr &= ~RX_MULTIPKT;
+
+ if ((ndev->flags & IFF_MULTICAST) && netdev_mc_count(ndev)) {
+ priv->reg_maccr |= HT_MULTI_EN;
+ moxart_mac_setmulticast(ndev);
+ } else {
+ priv->reg_maccr &= ~HT_MULTI_EN;
+ }
+
+ writel(priv->reg_maccr, priv->base + REG_MAC_CTRL);
+
+ spin_unlock_irq(&priv->txlock);
+}
+
+static const struct net_device_ops moxart_netdev_ops = {
+ .ndo_open = moxart_mac_open,
+ .ndo_stop = moxart_mac_stop,
+ .ndo_start_xmit = moxart_mac_start_xmit,
+ .ndo_get_stats = moxart_mac_get_stats,
+ .ndo_set_rx_mode = moxart_mac_set_rx_mode,
+ .ndo_set_mac_address = moxart_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+};
+
+static int moxart_mac_probe(struct platform_device *pdev)
+{
+ struct device *p_dev = &pdev->dev;
+ struct device_node *node = p_dev->of_node;
+ struct net_device *ndev;
+ struct moxart_mac_priv_t *priv;
+ struct resource *res;
+ unsigned int irq;
+ int ret;
+
+ ndev = alloc_etherdev(sizeof(struct moxart_mac_priv_t));
+ if (!ndev)
+ return -ENOMEM;
+
+	irq = irq_of_parse_and_map(node, 0);
+	if (!irq) {
+		netdev_err(ndev, "irq_of_parse_and_map failed\n");
+		free_netdev(ndev);
+		return -EINVAL;
+	}
+
+ priv = netdev_priv(ndev);
+ priv->ndev = ndev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->base = devm_ioremap_resource(p_dev, res);
+	if (IS_ERR(priv->base)) {
+		dev_err(p_dev, "devm_ioremap_resource failed\n");
+		ret = PTR_ERR(priv->base);
+		goto init_fail;
+	}
+	ndev->base_addr = res->start;
+
+ spin_lock_init(&priv->txlock);
+
+ priv->tx_buf_size = TX_BUF_SIZE;
+ priv->rx_buf_size = RX_BUF_SIZE +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
+						TX_DESC_NUM, &priv->tx_base,
+						GFP_DMA | GFP_KERNEL);
+	if (!priv->tx_desc_base) {
+		ret = -ENOMEM;
+		goto init_fail;
+	}
+
+	priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE *
+						RX_DESC_NUM, &priv->rx_base,
+						GFP_DMA | GFP_KERNEL);
+	if (!priv->rx_desc_base) {
+		ret = -ENOMEM;
+		goto init_fail;
+	}
+
+	priv->tx_buf_base = kmalloc(priv->tx_buf_size * TX_DESC_NUM,
+				    GFP_ATOMIC);
+	if (!priv->tx_buf_base) {
+		ret = -ENOMEM;
+		goto init_fail;
+	}
+
+	priv->rx_buf_base = kmalloc(priv->rx_buf_size * RX_DESC_NUM,
+				    GFP_ATOMIC);
+	if (!priv->rx_buf_base) {
+		ret = -ENOMEM;
+		goto init_fail;
+	}
+
+ platform_set_drvdata(pdev, ndev);
+
+ ret = devm_request_irq(p_dev, irq, moxart_mac_interrupt, 0,
+ pdev->name, ndev);
+ if (ret) {
+ netdev_err(ndev, "devm_request_irq failed\n");
+ goto init_fail;
+ }
+
+ ether_setup(ndev);
+ ndev->netdev_ops = &moxart_netdev_ops;
+ netif_napi_add(ndev, &priv->napi, moxart_rx_poll, RX_DESC_NUM);
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+ ndev->irq = irq;
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+	ret = register_netdev(ndev);
+	if (ret)
+		goto init_fail;
+
+ netdev_dbg(ndev, "%s: IRQ=%d address=%pM\n",
+ __func__, ndev->irq, ndev->dev_addr);
+
+ return 0;
+
+init_fail:
+	netdev_err(ndev, "init failed\n");
+	moxart_mac_free_memory(ndev);
+	free_netdev(ndev);
+
+	return ret;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+
+ unregister_netdev(ndev);
+ free_irq(ndev->irq, ndev);
+ moxart_mac_free_memory(ndev);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static const struct of_device_id moxart_mac_match[] = {
+ { .compatible = "moxa,moxart-mac" },
+ { }
+};
+
+static struct platform_driver moxart_mac_driver = {
+ .probe = moxart_mac_probe,
+ .remove = moxart_remove,
+ .driver = {
+ .name = "moxart-ethernet",
+ .owner = THIS_MODULE,
+ .of_match_table = moxart_mac_match,
+ },
+};
+module_platform_driver(moxart_mac_driver);
+
+MODULE_DESCRIPTION("MOXART RTL8201CP Ethernet driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h
new file mode 100644
index 000000000000..2be9280d608c
--- /dev/null
+++ b/drivers/net/ethernet/moxa/moxart_ether.h
@@ -0,0 +1,330 @@
+/* MOXA ART Ethernet (RTL8201CP) driver.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * Based on code from
+ * Moxa Technology Co., Ltd. <www.moxa.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _MOXART_ETHERNET_H
+#define _MOXART_ETHERNET_H
+
+#define TX_REG_OFFSET_DESC0 0
+#define TX_REG_OFFSET_DESC1 4
+#define TX_REG_OFFSET_DESC2 8
+#define TX_REG_DESC_SIZE 16
+
+#define RX_REG_OFFSET_DESC0 0
+#define RX_REG_OFFSET_DESC1 4
+#define RX_REG_OFFSET_DESC2 8
+#define RX_REG_DESC_SIZE 16
+
+#define TX_DESC0_PKT_LATE_COL 0x1 /* abort, late collision */
+#define TX_DESC0_RX_PKT_EXS_COL 0x2 /* abort, >16 collisions */
+#define TX_DESC0_DMA_OWN 0x80000000 /* owned by controller */
+#define TX_DESC1_BUF_SIZE_MASK 0x7ff
+#define TX_DESC1_LTS 0x8000000 /* last TX packet */
+#define TX_DESC1_FTS 0x10000000 /* first TX packet */
+#define TX_DESC1_FIFO_COMPLETE 0x20000000
+#define TX_DESC1_INTR_COMPLETE 0x40000000
+#define TX_DESC1_END 0x80000000
+#define TX_DESC2_ADDRESS_PHYS 0
+#define TX_DESC2_ADDRESS_VIRT 4
+
+#define RX_DESC0_FRAME_LEN 0
+#define RX_DESC0_FRAME_LEN_MASK 0x7FF
+#define RX_DESC0_MULTICAST 0x10000
+#define RX_DESC0_BROADCAST 0x20000
+#define RX_DESC0_ERR 0x40000
+#define RX_DESC0_CRC_ERR 0x80000
+#define RX_DESC0_FTL 0x100000
+#define RX_DESC0_RUNT 0x200000 /* packet less than 64 bytes */
+#define RX_DESC0_ODD_NB 0x400000 /* receive odd nibbles */
+#define RX_DESC0_LRS 0x10000000 /* last receive segment */
+#define RX_DESC0_FRS 0x20000000 /* first receive segment */
+#define RX_DESC0_DMA_OWN 0x80000000
+#define RX_DESC1_BUF_SIZE_MASK 0x7FF
+#define RX_DESC1_END 0x80000000
+#define RX_DESC2_ADDRESS_PHYS 0
+#define RX_DESC2_ADDRESS_VIRT 4
+
+#define TX_DESC_NUM 64
+#define TX_DESC_NUM_MASK (TX_DESC_NUM-1)
+#define TX_NEXT(N) (((N) + 1) & (TX_DESC_NUM_MASK))
+#define TX_BUF_SIZE 1600
+#define TX_BUF_SIZE_MAX (TX_DESC1_BUF_SIZE_MASK+1)
+
+#define RX_DESC_NUM 64
+#define RX_DESC_NUM_MASK (RX_DESC_NUM-1)
+#define RX_NEXT(N) (((N) + 1) & (RX_DESC_NUM_MASK))
+#define RX_BUF_SIZE 1600
+#define RX_BUF_SIZE_MAX (RX_DESC1_BUF_SIZE_MASK+1)
+
+#define REG_INTERRUPT_STATUS 0
+#define REG_INTERRUPT_MASK 4
+#define REG_MAC_MS_ADDRESS 8
+#define REG_MAC_LS_ADDRESS 12
+#define REG_MCAST_HASH_TABLE0 16
+#define REG_MCAST_HASH_TABLE1 20
+#define REG_TX_POLL_DEMAND 24
+#define REG_RX_POLL_DEMAND 28
+#define REG_TXR_BASE_ADDRESS 32
+#define REG_RXR_BASE_ADDRESS 36
+#define REG_INT_TIMER_CTRL 40
+#define REG_APOLL_TIMER_CTRL 44
+#define REG_DMA_BLEN_CTRL 48
+#define REG_RESERVED1 52
+#define REG_MAC_CTRL 136
+#define REG_MAC_STATUS 140
+#define REG_PHY_CTRL 144
+#define REG_PHY_WRITE_DATA 148
+#define REG_FLOW_CTRL 152
+#define REG_BACK_PRESSURE 156
+#define REG_RESERVED2 160
+#define REG_TEST_SEED 196
+#define REG_DMA_FIFO_STATE 200
+#define REG_TEST_MODE 204
+#define REG_RESERVED3 208
+#define REG_TX_COL_COUNTER 212
+#define REG_RPF_AEP_COUNTER 216
+#define REG_XM_PG_COUNTER 220
+#define REG_RUNT_TLC_COUNTER 224
+#define REG_CRC_FTL_COUNTER 228
+#define REG_RLC_RCC_COUNTER 232
+#define REG_BROC_COUNTER 236
+#define REG_MULCA_COUNTER 240
+#define REG_RP_COUNTER 244
+#define REG_XP_COUNTER 248
+
+#define REG_PHY_CTRL_OFFSET 0x0
+#define REG_PHY_STATUS 0x1
+#define REG_PHY_ID1 0x2
+#define REG_PHY_ID2 0x3
+#define REG_PHY_ANA 0x4
+#define REG_PHY_ANLPAR 0x5
+#define REG_PHY_ANE 0x6
+#define REG_PHY_ECTRL1 0x10
+#define REG_PHY_QPDS 0x11
+#define REG_PHY_10BOP 0x12
+#define REG_PHY_ECTRL2 0x13
+#define REG_PHY_FTMAC100_WRITE 0x8000000
+#define REG_PHY_FTMAC100_READ 0x4000000
+
+/* REG_INTERRUPT_STATUS */
+#define RPKT_FINISH BIT(0) /* DMA data received */
+#define NORXBUF BIT(1) /* receive buffer unavailable */
+#define XPKT_FINISH BIT(2) /* DMA moved data to TX FIFO */
+#define NOTXBUF BIT(3) /* transmit buffer unavailable */
+#define XPKT_OK_INT_STS BIT(4) /* transmit to ethernet success */
+#define XPKT_LOST_INT_STS BIT(5) /* transmit ethernet lost (collision) */
+#define RPKT_SAV BIT(6) /* FIFO receive success */
+#define RPKT_LOST_INT_STS BIT(7) /* FIFO full, receive failed */
+#define AHB_ERR BIT(8) /* AHB error */
+#define PHYSTS_CHG BIT(9) /* PHY link status change */
+
+/* REG_INTERRUPT_MASK */
+#define RPKT_FINISH_M BIT(0)
+#define NORXBUF_M BIT(1)
+#define XPKT_FINISH_M BIT(2)
+#define NOTXBUF_M BIT(3)
+#define XPKT_OK_M BIT(4)
+#define XPKT_LOST_M BIT(5)
+#define RPKT_SAV_M BIT(6)
+#define RPKT_LOST_M BIT(7)
+#define AHB_ERR_M BIT(8)
+#define PHYSTS_CHG_M BIT(9)
+
+/* REG_MAC_MS_ADDRESS */
+#define MAC_MADR_MASK 0xffff /* 2 MSB MAC address */
+
+/* REG_INT_TIMER_CTRL */
+#define TXINT_TIME_SEL BIT(15) /* TX cycle time period */
+#define TXINT_THR_MASK 0x7000
+#define TXINT_CNT_MASK 0xf00
+#define RXINT_TIME_SEL BIT(7) /* RX cycle time period */
+#define RXINT_THR_MASK 0x70
+#define RXINT_CNT_MASK 0xF
+
+/* REG_APOLL_TIMER_CTRL */
+#define TXPOLL_TIME_SEL BIT(12) /* TX poll time period */
+#define TXPOLL_CNT_MASK 0xf00
+#define TXPOLL_CNT_SHIFT_BIT 8
+#define RXPOLL_TIME_SEL BIT(4) /* RX poll time period */
+#define RXPOLL_CNT_MASK 0xF
+#define RXPOLL_CNT_SHIFT_BIT 0
+
+/* REG_DMA_BLEN_CTRL */
+#define RX_THR_EN BIT(9) /* RX FIFO threshold arbitration */
+#define RXFIFO_HTHR_MASK 0x1c0
+#define RXFIFO_LTHR_MASK 0x38
+#define INCR16_EN BIT(2) /* AHB bus INCR16 burst command */
+#define INCR8_EN BIT(1) /* AHB bus INCR8 burst command */
+#define INCR4_EN BIT(0) /* AHB bus INCR4 burst command */
+
+/* REG_MAC_CTRL */
+#define RX_BROADPKT BIT(17) /* receive broadcast packets */
+#define RX_MULTIPKT BIT(16) /* receive all multicast packets */
+#define FULLDUP BIT(15) /* full duplex */
+#define CRC_APD BIT(14) /* append CRC to transmitted packet */
+#define RCV_ALL BIT(12) /* ignore incoming packet destination */
+#define RX_FTL BIT(11) /* accept packets larger than 1518 B */
+#define RX_RUNT BIT(10) /* accept packets smaller than 64 B */
+#define HT_MULTI_EN BIT(9) /* accept on hash and mcast pass */
+#define RCV_EN BIT(8) /* receiver enable */
+#define ENRX_IN_HALFTX BIT(6) /* enable receive in half duplex mode */
+#define XMT_EN BIT(5) /* transmit enable */
+#define CRC_DIS BIT(4) /* disable CRC check when receiving */
+#define LOOP_EN BIT(3) /* internal loop-back */
+#define SW_RST BIT(2) /* software reset, last 64 AHB clocks */
+#define RDMA_EN BIT(1) /* enable receive DMA chan */
+#define XDMA_EN BIT(0) /* enable transmit DMA chan */
+
+/* REG_MAC_STATUS */
+#define COL_EXCEED BIT(11) /* more than 16 collisions */
+#define LATE_COL BIT(10) /* transmit late collision detected */
+#define XPKT_LOST BIT(9) /* transmit to ethernet lost */
+#define XPKT_OK BIT(8) /* transmit to ethernet success */
+#define RUNT_MAC_STS BIT(7) /* receive runt detected */
+#define FTL_MAC_STS BIT(6) /* receive frame too long detected */
+#define CRC_ERR_MAC_STS BIT(5)
+#define RPKT_LOST BIT(4) /* RX FIFO full, receive failed */
+#define RPKT_SAVE BIT(3) /* RX FIFO receive success */
+#define COL BIT(2) /* collision, incoming packet dropped */
+#define MCPU_BROADCAST BIT(1)
+#define MCPU_MULTICAST BIT(0)
+
+/* REG_PHY_CTRL */
+#define MIIWR			BIT(27) /* init write sequence (auto cleared) */
+#define MIIRD BIT(26)
+#define REGAD_MASK 0x3e00000
+#define PHYAD_MASK 0x1f0000
+#define MIIRDATA_MASK 0xffff
+
+/* REG_PHY_WRITE_DATA */
+#define MIIWDATA_MASK 0xffff
+
+/* REG_FLOW_CTRL */
+#define PAUSE_TIME_MASK 0xffff0000
+#define FC_HIGH_MASK 0xf000
+#define FC_LOW_MASK 0xf00
+#define RX_PAUSE BIT(4) /* receive pause frame */
+#define TX_PAUSED BIT(3) /* transmit pause due to receive */
+#define FCTHR_EN BIT(2) /* enable threshold mode. */
+#define TX_PAUSE BIT(1) /* transmit pause frame */
+#define FC_EN BIT(0) /* flow control mode enable */
+
+/* REG_BACK_PRESSURE */
+#define BACKP_LOW_MASK 0xf00
+#define BACKP_JAM_LEN_MASK 0xf0
+#define BACKP_MODE BIT(1) /* address mode */
+#define BACKP_ENABLE BIT(0)
+
+/* REG_TEST_SEED */
+#define TEST_SEED_MASK 0x3fff
+
+/* REG_DMA_FIFO_STATE */
+#define TX_DMA_REQUEST BIT(31)
+#define RX_DMA_REQUEST BIT(30)
+#define TX_DMA_GRANT BIT(29)
+#define RX_DMA_GRANT BIT(28)
+#define TX_FIFO_EMPTY BIT(27)
+#define RX_FIFO_EMPTY BIT(26)
+#define TX_DMA2_SM_MASK 0x7000
+#define TX_DMA1_SM_MASK 0xf00
+#define RX_DMA2_SM_MASK 0x70
+#define RX_DMA1_SM_MASK 0xF
+
+/* REG_TEST_MODE */
+#define SINGLE_PKT BIT(26) /* single packet mode */
+#define PTIMER_TEST BIT(25) /* automatic polling timer test mode */
+#define ITIMER_TEST BIT(24) /* interrupt timer test mode */
+#define TEST_SEED_SELECT BIT(22)
+#define SEED_SELECT BIT(21)
+#define TEST_MODE BIT(20)
+#define TEST_TIME_MASK 0xffc00
+#define TEST_EXCEL_MASK 0x3e0
+
+/* REG_TX_COL_COUNTER */
+#define TX_MCOL_MASK 0xffff0000
+#define TX_MCOL_SHIFT_BIT 16
+#define TX_SCOL_MASK 0xffff
+#define TX_SCOL_SHIFT_BIT 0
+
+/* REG_RPF_AEP_COUNTER */
+#define RPF_MASK 0xffff0000
+#define RPF_SHIFT_BIT 16
+#define AEP_MASK 0xffff
+#define AEP_SHIFT_BIT 0
+
+/* REG_XM_PG_COUNTER */
+#define XM_MASK 0xffff0000
+#define XM_SHIFT_BIT 16
+#define PG_MASK 0xffff
+#define PG_SHIFT_BIT 0
+
+/* REG_RUNT_TLC_COUNTER */
+#define RUNT_CNT_MASK 0xffff0000
+#define RUNT_CNT_SHIFT_BIT 16
+#define TLCC_MASK 0xffff
+#define TLCC_SHIFT_BIT 0
+
+/* REG_CRC_FTL_COUNTER */
+#define CRCER_CNT_MASK 0xffff0000
+#define CRCER_CNT_SHIFT_BIT 16
+#define FTL_CNT_MASK 0xffff
+#define FTL_CNT_SHIFT_BIT 0
+
+/* REG_RLC_RCC_COUNTER */
+#define RLC_MASK 0xffff0000
+#define RLC_SHIFT_BIT 16
+#define RCC_MASK 0xffff
+#define RCC_SHIFT_BIT 0
+
+/* REG_PHY_STATUS */
+#define AN_COMPLETE 0x20
+#define LINK_STATUS 0x4
+
+struct moxart_mac_priv_t {
+ void __iomem *base;
+ struct net_device_stats stats;
+ unsigned int reg_maccr;
+ unsigned int reg_imr;
+ struct napi_struct napi;
+ struct net_device *ndev;
+
+ dma_addr_t rx_base;
+ dma_addr_t rx_mapping[RX_DESC_NUM];
+ void __iomem *rx_desc_base;
+ unsigned char *rx_buf_base;
+ unsigned char *rx_buf[RX_DESC_NUM];
+ unsigned int rx_head;
+ unsigned int rx_buf_size;
+
+ dma_addr_t tx_base;
+ dma_addr_t tx_mapping[TX_DESC_NUM];
+ void __iomem *tx_desc_base;
+ unsigned char *tx_buf_base;
+	unsigned char *tx_buf[TX_DESC_NUM];
+ unsigned int tx_head;
+ unsigned int tx_buf_size;
+
+ spinlock_t txlock;
+ unsigned int tx_len[TX_DESC_NUM];
+ struct sk_buff *tx_skb[TX_DESC_NUM];
+ unsigned int tx_tail;
+};
+
+#if TX_BUF_SIZE >= TX_BUF_SIZE_MAX
+#error MOXA ART Ethernet device driver TX buffer is too large!
+#endif
+#if RX_BUF_SIZE >= RX_BUF_SIZE_MAX
+#error MOXA ART Ethernet device driver RX buffer is too large!
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 967bae8b85c5..149355b52ad0 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -74,6 +74,7 @@
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
+#include <net/busy_poll.h>
#include "myri10ge_mcp.h"
#include "myri10ge_mcp_gen_header.h"
@@ -194,6 +195,21 @@ struct myri10ge_slice_state {
int cpu;
__be32 __iomem *dca_tag;
#endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ unsigned int state;
+#define SLICE_STATE_IDLE 0
+#define SLICE_STATE_NAPI 1 /* NAPI owns this slice */
+#define SLICE_STATE_POLL 2 /* poll owns this slice */
+#define SLICE_LOCKED (SLICE_STATE_NAPI | SLICE_STATE_POLL)
+#define SLICE_STATE_NAPI_YIELD 4 /* NAPI yielded this slice */
+#define SLICE_STATE_POLL_YIELD 8 /* poll yielded this slice */
+#define SLICE_USER_PEND (SLICE_STATE_POLL | SLICE_STATE_POLL_YIELD)
+ spinlock_t lock;
+ unsigned long lock_napi_yield;
+ unsigned long lock_poll_yield;
+ unsigned long busy_poll_miss;
+ unsigned long busy_poll_cnt;
+#endif /* CONFIG_NET_RX_BUSY_POLL */
char irq_desc[32];
};
@@ -244,7 +260,7 @@ struct myri10ge_priv {
int fw_ver_minor;
int fw_ver_tiny;
int adopted_rx_filter_bug;
- u8 mac_addr[6]; /* eeprom mac address */
+ u8 mac_addr[ETH_ALEN]; /* eeprom mac address */
unsigned long serial_number;
int vendor_specific_offset;
int fw_multicast_support;
@@ -909,6 +925,92 @@ abort:
return status;
}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss)
+{
+ spin_lock_init(&ss->lock);
+ ss->state = SLICE_STATE_IDLE;
+}
+
+static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss)
+{
+	bool rc = true;
+ spin_lock(&ss->lock);
+ if ((ss->state & SLICE_LOCKED)) {
+ WARN_ON((ss->state & SLICE_STATE_NAPI));
+ ss->state |= SLICE_STATE_NAPI_YIELD;
+ rc = false;
+ ss->lock_napi_yield++;
+ } else
+ ss->state = SLICE_STATE_NAPI;
+ spin_unlock(&ss->lock);
+ return rc;
+}
+
+static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss)
+{
+ spin_lock(&ss->lock);
+ WARN_ON((ss->state & (SLICE_STATE_POLL | SLICE_STATE_NAPI_YIELD)));
+ ss->state = SLICE_STATE_IDLE;
+ spin_unlock(&ss->lock);
+}
+
+static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss)
+{
+	bool rc = true;
+ spin_lock_bh(&ss->lock);
+ if ((ss->state & SLICE_LOCKED)) {
+ ss->state |= SLICE_STATE_POLL_YIELD;
+ rc = false;
+ ss->lock_poll_yield++;
+ } else
+ ss->state |= SLICE_STATE_POLL;
+ spin_unlock_bh(&ss->lock);
+ return rc;
+}
+
+static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss)
+{
+ spin_lock_bh(&ss->lock);
+ WARN_ON((ss->state & SLICE_STATE_NAPI));
+ ss->state = SLICE_STATE_IDLE;
+ spin_unlock_bh(&ss->lock);
+}
+
+static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss)
+{
+ WARN_ON(!(ss->state & SLICE_LOCKED));
+ return (ss->state & SLICE_USER_PEND);
+}
+#else /* CONFIG_NET_RX_BUSY_POLL */
+static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss)
+{
+}
+
+static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss)
+{
+	/* NAPI always gets the slice when busy polling is compiled out */
+	return true;
+}
+
+static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss)
+{
+}
+
+static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss)
+{
+ return false;
+}
+
+static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss)
+{
+}
+
+static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss)
+{
+ return false;
+}
+#endif
+
static int myri10ge_reset(struct myri10ge_priv *mgp)
{
struct myri10ge_cmd cmd;
@@ -1300,6 +1402,8 @@ myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
}
}
+#define MYRI10GE_HLEN 64 /* Bytes to copy from page to skb linear memory */
+
static inline int
myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
{
@@ -1311,6 +1415,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
struct pci_dev *pdev = mgp->pdev;
struct net_device *dev = mgp->dev;
u8 *va;
+ bool polling;
if (len <= mgp->small_bytes) {
rx = &ss->rx_small;
@@ -1325,7 +1430,15 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
prefetch(va);
- skb = napi_get_frags(&ss->napi);
+ /* When busy polling in user context, allocate skb and copy headers to
+ * skb's linear memory ourselves. When not busy polling, use the napi
+ * gro api.
+ */
+ polling = myri10ge_ss_busy_polling(ss);
+ if (polling)
+ skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16);
+ else
+ skb = napi_get_frags(&ss->napi);
if (unlikely(skb == NULL)) {
ss->stats.rx_dropped++;
for (i = 0, remainder = len; remainder > 0; i++) {
@@ -1364,8 +1477,29 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
}
myri10ge_vlan_rx(mgp->dev, va, skb);
skb_record_rx_queue(skb, ss - &mgp->ss[0]);
+ skb_mark_napi_id(skb, &ss->napi);
+
+ if (polling) {
+ int hlen;
+
+ /* myri10ge_vlan_rx might have moved the header, so compute
+ * length and address again.
+ */
+ hlen = MYRI10GE_HLEN > skb->len ? skb->len : MYRI10GE_HLEN;
+ va = page_address(skb_frag_page(&rx_frags[0])) +
+ rx_frags[0].page_offset;
+ /* Copy header into the skb linear memory */
+ skb_copy_to_linear_data(skb, va, hlen);
+ rx_frags[0].page_offset += hlen;
+ rx_frags[0].size -= hlen;
+ skb->data_len -= hlen;
+ skb->tail += hlen;
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_receive_skb(skb);
+	} else {
+		napi_gro_frags(&ss->napi);
+	}
- napi_gro_frags(&ss->napi);
return 1;
}
@@ -1524,10 +1658,14 @@ static int myri10ge_poll(struct napi_struct *napi, int budget)
if (ss->mgp->dca_enabled)
myri10ge_update_dca(ss);
#endif
+ /* Try later if the busy_poll handler is running. */
+ if (!myri10ge_ss_lock_napi(ss))
+ return budget;
/* process as many rx events as NAPI will allow */
work_done = myri10ge_clean_rx_done(ss, budget);
+ myri10ge_ss_unlock_napi(ss);
if (work_done < budget) {
napi_complete(napi);
put_be32(htonl(3), ss->irq_claim);
@@ -1535,6 +1673,34 @@ static int myri10ge_poll(struct napi_struct *napi, int budget)
return work_done;
}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static int myri10ge_busy_poll(struct napi_struct *napi)
+{
+ struct myri10ge_slice_state *ss =
+ container_of(napi, struct myri10ge_slice_state, napi);
+ struct myri10ge_priv *mgp = ss->mgp;
+ int work_done;
+
+ /* Poll only when the link is up */
+ if (mgp->link_state != MXGEFW_LINK_UP)
+ return LL_FLUSH_FAILED;
+
+ if (!myri10ge_ss_lock_poll(ss))
+ return LL_FLUSH_BUSY;
+
+ /* Process a small number of packets */
+ work_done = myri10ge_clean_rx_done(ss, 4);
+ if (work_done)
+ ss->busy_poll_cnt += work_done;
+ else
+ ss->busy_poll_miss++;
+
+ myri10ge_ss_unlock_poll(ss);
+
+ return work_done;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
static irqreturn_t myri10ge_intr(int irq, void *arg)
{
struct myri10ge_slice_state *ss = arg;
@@ -1742,6 +1908,10 @@ static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = {
"tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done",
"rx_small_cnt", "rx_big_cnt",
"wake_queue", "stop_queue", "tx_linearized",
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ "rx_lock_napi_yield", "rx_lock_poll_yield", "rx_busy_poll_miss",
+ "rx_busy_poll_cnt",
+#endif
};
#define MYRI10GE_NET_STATS_LEN 21
@@ -1842,6 +2012,12 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
data[i++] = (unsigned int)ss->tx.wake_queue;
data[i++] = (unsigned int)ss->tx.stop_queue;
data[i++] = (unsigned int)ss->tx.linearized;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ data[i++] = ss->lock_napi_yield;
+ data[i++] = ss->lock_poll_yield;
+ data[i++] = ss->busy_poll_miss;
+ data[i++] = ss->busy_poll_cnt;
+#endif
}
}
@@ -2405,6 +2581,9 @@ static int myri10ge_open(struct net_device *dev)
goto abort_with_rings;
}
+ /* Initialize the slice spinlock and state used for polling */
+ myri10ge_ss_init_lock(ss);
+
/* must happen prior to any irq */
napi_enable(&(ss)->napi);
}
@@ -2481,9 +2660,19 @@ static int myri10ge_close(struct net_device *dev)
del_timer_sync(&mgp->watchdog_timer);
mgp->running = MYRI10GE_ETH_STOPPING;
+ local_bh_disable(); /* myri10ge_ss_lock_napi needs bh disabled */
for (i = 0; i < mgp->num_slices; i++) {
napi_disable(&mgp->ss[i].napi);
+ /* Lock the slice to prevent the busy_poll handler from
+ * accessing it. Later when we bring the NIC up, myri10ge_open
+ * resets the slice including this lock.
+ */
+ while (!myri10ge_ss_lock_napi(&mgp->ss[i])) {
+ pr_info("Slice %d locked\n", i);
+ mdelay(1);
+ }
}
+ local_bh_enable();
netif_carrier_off(dev);
netif_tx_stop_all_queues(dev);
@@ -3569,8 +3758,11 @@ static void myri10ge_free_slices(struct myri10ge_priv *mgp)
ss->fw_stats, ss->fw_stats_bus);
ss->fw_stats = NULL;
}
+ napi_hash_del(&ss->napi);
netif_napi_del(&ss->napi);
}
+ /* Wait till napi structs are no longer used, and then free ss. */
+ synchronize_rcu();
kfree(mgp->ss);
mgp->ss = NULL;
}
@@ -3591,9 +3783,9 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
for (i = 0; i < mgp->num_slices; i++) {
ss = &mgp->ss[i];
bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
- ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
- &ss->rx_done.bus,
- GFP_KERNEL | __GFP_ZERO);
+ ss->rx_done.entry = dma_zalloc_coherent(&pdev->dev, bytes,
+ &ss->rx_done.bus,
+ GFP_KERNEL);
if (ss->rx_done.entry == NULL)
goto abort;
bytes = sizeof(*ss->fw_stats);
@@ -3606,6 +3798,7 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
ss->dev = mgp->dev;
netif_napi_add(ss->dev, &ss->napi, myri10ge_poll,
myri10ge_napi_weight);
+ napi_hash_add(&ss->napi);
}
return 0;
abort:
@@ -3625,13 +3818,12 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
struct pci_dev *pdev = mgp->pdev;
char *old_fw;
bool old_allocated;
- int i, status, ncpus, msix_cap;
+ int i, status, ncpus;
mgp->num_slices = 1;
- msix_cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
ncpus = netif_get_num_default_rss_queues();
- if (myri10ge_max_slices == 1 || msix_cap == 0 ||
+ if (myri10ge_max_slices == 1 || !pdev->msix_cap ||
(myri10ge_max_slices == -1 && ncpus < 2))
return;
@@ -3749,6 +3941,9 @@ static const struct net_device_ops myri10ge_netdev_ops = {
.ndo_change_mtu = myri10ge_change_mtu,
.ndo_set_rx_mode = myri10ge_set_multicast_list,
.ndo_set_mac_address = myri10ge_set_mac_address,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ .ndo_busy_poll = myri10ge_busy_poll,
+#endif
};
static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c
index dc2c6f561e9a..e6f0a4366f90 100644
--- a/drivers/net/ethernet/netx-eth.c
+++ b/drivers/net/ethernet/netx-eth.c
@@ -390,7 +390,7 @@ static int netx_eth_drv_probe(struct platform_device *pdev)
priv = netdev_priv(ndev);
- pdata = (struct netxeth_platform_data *)pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
priv->xc = request_xc(pdata->xcno, &pdev->dev);
if (!priv->xc) {
dev_err(&pdev->dev, "unable to request xc engine\n");
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index e88bdb1aa669..79645f74b3a8 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -922,7 +922,7 @@ static void __init get_mac_address(struct net_device *dev)
{
struct w90p910_ether *ether = netdev_priv(dev);
struct platform_device *pdev;
- char addr[6];
+ char addr[ETH_ALEN];
pdev = ether->pdev;
@@ -934,7 +934,7 @@ static void __init get_mac_address(struct net_device *dev)
addr[5] = 0xa8;
if (is_valid_ether_addr(addr))
- memcpy(dev->dev_addr, &addr, 0x06);
+ memcpy(dev->dev_addr, &addr, ETH_ALEN);
else
dev_err(&pdev->dev, "invalid mac address\n");
}
@@ -1014,7 +1014,7 @@ static int w90p910_ether_probe(struct platform_device *pdev)
if (ether->rxirq < 0) {
dev_err(&pdev->dev, "failed to get ether rx irq\n");
error = -ENXIO;
- goto failed_free_txirq;
+ goto failed_free_io;
}
platform_set_drvdata(pdev, dev);
@@ -1023,7 +1023,7 @@ static int w90p910_ether_probe(struct platform_device *pdev)
if (IS_ERR(ether->clk)) {
dev_err(&pdev->dev, "failed to get ether clock\n");
error = PTR_ERR(ether->clk);
- goto failed_free_rxirq;
+ goto failed_free_io;
}
ether->rmiiclk = clk_get(&pdev->dev, "RMII");
@@ -1049,10 +1049,6 @@ failed_put_rmiiclk:
clk_put(ether->rmiiclk);
failed_put_clk:
clk_put(ether->clk);
-failed_free_rxirq:
- free_irq(ether->rxirq, pdev);
-failed_free_txirq:
- free_irq(ether->txirq, pdev);
failed_free_io:
iounmap(ether->reg);
failed_free_mem:
@@ -1075,9 +1071,6 @@ static int w90p910_ether_remove(struct platform_device *pdev)
iounmap(ether->reg);
release_mem_region(ether->res->start, resource_size(ether->res));
- free_irq(ether->txirq, dev);
- free_irq(ether->rxirq, dev);
-
del_timer_sync(&ether->check_timer);
free_netdev(dev);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 7779036690cc..6797b1075874 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -582,6 +582,19 @@ struct pch_gbe_hw_stats {
};
/**
+ * struct pch_gbe_privdata - PCI Device ID driver data
+ * @phy_tx_clk_delay: Bool, configure the PHY TX delay in software
+ * @phy_disable_hibernate: Bool, disable PHY hibernation
+ * @platform_init: Platform initialization callback, called from
+ * probe, prior to PHY initialization.
+ */
+struct pch_gbe_privdata {
+ bool phy_tx_clk_delay;
+ bool phy_disable_hibernate;
+ int (*platform_init)(struct pci_dev *pdev);
+};
+
+/**
* struct pch_gbe_adapter - board specific private data structure
* @stats_lock: Spinlock structure for status
* @ethtool_lock: Spinlock structure for ethtool
@@ -604,6 +617,7 @@ struct pch_gbe_hw_stats {
* @rx_buffer_len: Receive buffer length
* @tx_queue_len: Transmit queue length
* @have_msi: PCI MSI mode flag
+ * @pch_gbe_privdata: PCI Device ID driver_data
*/
struct pch_gbe_adapter {
@@ -631,6 +645,7 @@ struct pch_gbe_adapter {
int hwts_tx_en;
int hwts_rx_en;
struct pci_dev *ptp_pdev;
+ struct pch_gbe_privdata *pdata;
};
#define pch_gbe_hw_to_adapter(hw) container_of(hw, struct pch_gbe_adapter, hw)
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 1129db0cdf82..f0ceb89af931 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -118,6 +118,7 @@ static int pch_gbe_set_settings(struct net_device *netdev,
* filled by get_settings() on a down link, speed is -1: */
if (speed == UINT_MAX) {
speed = SPEED_1000;
+ ethtool_cmd_speed_set(ecmd, speed);
ecmd->duplex = DUPLEX_FULL;
}
ret = mii_ethtool_sset(&adapter->mii, ecmd);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index ab1039a95bf9..5a0f04c2c813 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -23,6 +23,7 @@
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_classify.h>
+#include <linux/gpio.h>
#define DRV_VERSION "1.01"
const char pch_driver_version[] = DRV_VERSION;
@@ -111,6 +112,8 @@ const char pch_driver_version[] = DRV_VERSION;
#define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81"
#define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00"
+#define MINNOW_PHY_RESET_GPIO 13
+
static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
@@ -682,7 +685,7 @@ static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
}
adapter->hw.phy.addr = adapter->mii.phy_id;
netdev_dbg(netdev, "phy_addr = %d\n", adapter->mii.phy_id);
- if (addr == 32)
+ if (addr == PCH_GBE_PHY_REGS_LEN)
return -EAGAIN;
/* Selected the phy and isolate the rest */
for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
@@ -1488,9 +1491,9 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
bufsz = adapter->rx_buffer_len;
size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
- rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
- &rx_ring->rx_buff_pool_logic,
- GFP_KERNEL | __GFP_ZERO);
+ rx_ring->rx_buff_pool =
+ dma_zalloc_coherent(&pdev->dev, size,
+ &rx_ring->rx_buff_pool_logic, GFP_KERNEL);
if (!rx_ring->rx_buff_pool)
return -ENOMEM;
@@ -1804,9 +1807,8 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
- tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
- &tx_ring->dma,
- GFP_KERNEL | __GFP_ZERO);
+ tx_ring->desc = dma_zalloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc) {
vfree(tx_ring->buffer_info);
return -ENOMEM;
@@ -1849,9 +1851,8 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
return -ENOMEM;
rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
- rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
- &rx_ring->dma,
- GFP_KERNEL | __GFP_ZERO);
+ rx_ring->desc = dma_zalloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc) {
vfree(rx_ring->buffer_info);
return -ENOMEM;
@@ -2635,6 +2636,9 @@ static int pch_gbe_probe(struct pci_dev *pdev,
adapter->pdev = pdev;
adapter->hw.back = adapter;
adapter->hw.reg = pcim_iomap_table(pdev)[PCH_GBE_PCI_BAR];
+ adapter->pdata = (struct pch_gbe_privdata *)pci_id->driver_data;
+ if (adapter->pdata && adapter->pdata->platform_init)
+ adapter->pdata->platform_init(pdev);
adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
PCI_DEVFN(12, 4));
@@ -2710,6 +2714,10 @@ static int pch_gbe_probe(struct pci_dev *pdev,
dev_dbg(&pdev->dev, "PCH Network Connection\n");
+ /* Disable hibernation on certain platforms */
+ if (adapter->pdata && adapter->pdata->phy_disable_hibernate)
+ pch_gbe_phy_disable_hibernate(&adapter->hw);
+
device_set_wakeup_enable(&pdev->dev, 1);
return 0;
@@ -2720,9 +2728,48 @@ err_free_netdev:
return ret;
}
+/* The AR803X PHY on the MinnowBoard requires a physical pin to be toggled to
+ * ensure it is awake for probe and init. Request the line and reset the PHY.
+ */
+static int pch_gbe_minnow_platform_init(struct pci_dev *pdev)
+{
+ unsigned long flags = GPIOF_DIR_OUT | GPIOF_INIT_HIGH | GPIOF_EXPORT;
+ unsigned gpio = MINNOW_PHY_RESET_GPIO;
+ int ret;
+
+ ret = devm_gpio_request_one(&pdev->dev, gpio, flags,
+ "minnow_phy_reset");
+ if (ret) {
+ dev_err(&pdev->dev,
+ "ERR: Can't request PHY reset GPIO line '%d'\n", gpio);
+ return ret;
+ }
+
+ gpio_set_value(gpio, 0);
+ usleep_range(1250, 1500);
+ gpio_set_value(gpio, 1);
+ usleep_range(1250, 1500);
+
+ return ret;
+}
+
+static struct pch_gbe_privdata pch_gbe_minnow_privdata = {
+ .phy_tx_clk_delay = true,
+ .phy_disable_hibernate = true,
+ .platform_init = pch_gbe_minnow_platform_init,
+};
+
static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
{.vendor = PCI_VENDOR_ID_INTEL,
.device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
+ .subvendor = PCI_VENDOR_ID_CIRCUITCO,
+ .subdevice = PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD,
+ .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
+ .class_mask = (0xFFFF00),
+ .driver_data = (kernel_ulong_t)&pch_gbe_minnow_privdata
+ },
+ {.vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.class = (PCI_CLASS_NETWORK_ETHERNET << 8),
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
index da079073a6c6..8b7ff75fc8e0 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
@@ -74,6 +74,15 @@
#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
+/* AR8031 PHY Debug Registers */
+#define PHY_AR803X_ID 0x00001374
+#define PHY_AR8031_DBG_OFF 0x1D
+#define PHY_AR8031_DBG_DAT 0x1E
+#define PHY_AR8031_SERDES 0x05
+#define PHY_AR8031_HIBERNATE 0x0B
+#define PHY_AR8031_SERDES_TX_CLK_DLY 0x0100 /* TX clock delay of 2.0ns */
+#define PHY_AR8031_PS_HIB_EN 0x8000 /* Hibernate enable */
+
/* Phy Id Register (word 2) */
#define PHY_REVISION_MASK 0x000F
@@ -249,6 +258,51 @@ void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw)
}
/**
+ * pch_gbe_phy_tx_clk_delay - Setup TX clock delay via the PHY
+ * @hw: Pointer to the HW structure
+ * Returns
+ * 0: Successful.
+ * -EINVAL: Invalid argument.
+ */
+static int pch_gbe_phy_tx_clk_delay(struct pch_gbe_hw *hw)
+{
+ /* The RGMII interface requires a ~2ns TX clock delay. This is typically
+ * done in layout with a longer trace or via PHY strapping, but can also
+ * be done via PHY configuration registers.
+ */
+ struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
+ u16 mii_reg;
+ int ret = 0;
+
+ switch (hw->phy.id) {
+ case PHY_AR803X_ID:
+ netdev_dbg(adapter->netdev,
+ "Configuring AR803X PHY for 2ns TX clock delay\n");
+ pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_OFF, &mii_reg);
+ ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_OFF,
+ PHY_AR8031_SERDES);
+ if (ret)
+ break;
+
+ pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_DAT, &mii_reg);
+ mii_reg |= PHY_AR8031_SERDES_TX_CLK_DLY;
+ ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_DAT,
+ mii_reg);
+ break;
+ default:
+ netdev_err(adapter->netdev,
+ "Unknown PHY (%x), could not set TX clock delay\n",
+ hw->phy.id);
+ return -EINVAL;
+ }
+
+ if (ret)
+ netdev_err(adapter->netdev,
+ "Could not configure tx clock delay for PHY\n");
+ return ret;
+}
+
+/**
* pch_gbe_phy_init_setting - PHY initial setting
* @hw: Pointer to the HW structure
*/
@@ -277,4 +331,48 @@ void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw)
pch_gbe_phy_read_reg_miic(hw, PHY_PHYSP_CONTROL, &mii_reg);
mii_reg |= PHYSP_CTRL_ASSERT_CRS_TX;
pch_gbe_phy_write_reg_miic(hw, PHY_PHYSP_CONTROL, mii_reg);
+
+ /* Setup a TX clock delay on certain platforms */
+ if (adapter->pdata && adapter->pdata->phy_tx_clk_delay)
+ pch_gbe_phy_tx_clk_delay(hw);
+}
+
+/**
+ * pch_gbe_phy_disable_hibernate - Disable the PHY low power state
+ * @hw: Pointer to the HW structure
+ * Returns
+ * 0: Successful.
+ * -EINVAL: Invalid argument.
+ */
+int pch_gbe_phy_disable_hibernate(struct pch_gbe_hw *hw)
+{
+ struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
+ u16 mii_reg;
+ int ret = 0;
+
+ switch (hw->phy.id) {
+ case PHY_AR803X_ID:
+ netdev_dbg(adapter->netdev,
+ "Disabling hibernation for AR803X PHY\n");
+ ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_OFF,
+ PHY_AR8031_HIBERNATE);
+ if (ret)
+ break;
+
+ pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_DAT, &mii_reg);
+ mii_reg &= ~PHY_AR8031_PS_HIB_EN;
+ ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_DAT,
+ mii_reg);
+ break;
+ default:
+ netdev_err(adapter->netdev,
+ "Unknown PHY (%x), could not disable hibernation\n",
+ hw->phy.id);
+ return -EINVAL;
+ }
+
+ if (ret)
+ netdev_err(adapter->netdev,
+ "Could not disable PHY hibernation\n");
+ return ret;
}
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
index 03264dc7b5ec..0cbe69206e04 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
@@ -33,5 +33,6 @@ void pch_gbe_phy_power_up(struct pch_gbe_hw *hw);
void pch_gbe_phy_power_down(struct pch_gbe_hw *hw);
void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw);
void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw);
+int pch_gbe_phy_disable_hibernate(struct pch_gbe_hw *hw);
#endif /* _PCH_GBE_PHY_H_ */
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index a5f0b5da6149..c498181a9aa8 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -191,7 +191,7 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac)
struct device_node *dn = pci_device_to_OF_node(pdev);
int len;
const u8 *maddr;
- u8 addr[6];
+ u8 addr[ETH_ALEN];
if (!dn) {
dev_dbg(&pdev->dev,
@@ -201,8 +201,8 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac)
maddr = of_get_property(dn, "local-mac-address", &len);
- if (maddr && len == 6) {
- memcpy(mac->mac_addr, maddr, 6);
+ if (maddr && len == ETH_ALEN) {
+ memcpy(mac->mac_addr, maddr, ETH_ALEN);
return 0;
}
@@ -219,14 +219,15 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac)
return -ENOENT;
}
- if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0],
- &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {
+ if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
+ &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5])
+ != ETH_ALEN) {
dev_warn(&pdev->dev,
"can't parse mac address, not configuring\n");
return -EINVAL;
}
- memcpy(mac->mac_addr, addr, 6);
+ memcpy(mac->mac_addr, addr, ETH_ALEN);
return 0;
}
@@ -439,10 +440,9 @@ static int pasemi_mac_setup_rx_resources(const struct net_device *dev)
if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE))
goto out_ring_desc;
- ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
- RX_RING_SIZE * sizeof(u64),
- &ring->buf_dma,
- GFP_KERNEL | __GFP_ZERO);
+ ring->buffers = dma_zalloc_coherent(&mac->dma_pdev->dev,
+ RX_RING_SIZE * sizeof(u64),
+ &ring->buf_dma, GFP_KERNEL);
if (!ring->buffers)
goto out_ring_desc;
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.h b/drivers/net/ethernet/pasemi/pasemi_mac.h
index e2f4efa8ad46..f2749d46c125 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.h
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.h
@@ -83,7 +83,7 @@ struct pasemi_mac {
#define MAC_TYPE_GMAC 1
#define MAC_TYPE_XAUI 2
- u8 mac_addr[6];
+ u8 mac_addr[ETH_ALEN];
struct net_lro_mgr lro_mgr;
struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 0e1797295a48..f59e6be4a66e 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -45,6 +45,17 @@ config QLCNIC_SRIOV
This allows for virtual function acceleration in virtualized
environments.
+config QLCNIC_DCB
+ bool "QLOGIC QLCNIC 82XX and 83XX family DCB Support"
+ depends on QLCNIC && DCB
+ default y
+ ---help---
+	  This configuration parameter enables DCB support in QLE83XX
+	  and QLE82XX Converged Ethernet devices. It allows DCB get
+	  operations through the rtnetlink interface. Only the CEE
+	  mode of DCB is supported. PG and PFC values apply to Tx
+	  only.
+
config QLGE
tristate "QLogic QLGE 10Gb Ethernet Driver Support"
depends on PCI
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 9fbb1cdbfa47..8375cbde9969 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -536,10 +536,10 @@ static void netxen_p2_nic_set_multi(struct net_device *netdev)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
struct netdev_hw_addr *ha;
- u8 null_addr[6];
+ u8 null_addr[ETH_ALEN];
int i;
- memset(null_addr, 0, 6);
+ memset(null_addr, 0, ETH_ALEN);
if (netdev->flags & IFF_PROMISC) {
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index ec4cf7fd4123..cbd75f97ffb3 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -459,16 +459,14 @@ static void netxen_pcie_strap_init(struct netxen_adapter *adapter)
static void netxen_set_msix_bit(struct pci_dev *pdev, int enable)
{
u32 control;
- int pos;
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
- if (pos) {
- pci_read_config_dword(pdev, pos, &control);
+ if (pdev->msix_cap) {
+ pci_read_config_dword(pdev, pdev->msix_cap, &control);
if (enable)
control |= PCI_MSIX_FLAGS_ENABLE;
else
control = 0;
- pci_write_config_dword(pdev, pos, control);
+ pci_write_config_dword(pdev, pdev->msix_cap, control);
}
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/Makefile b/drivers/net/ethernet/qlogic/qlcnic/Makefile
index 4b1fb3faa3b7..a848d2979722 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/Makefile
+++ b/drivers/net/ethernet/qlogic/qlcnic/Makefile
@@ -11,3 +11,5 @@ qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
qlcnic_minidump.o qlcnic_sriov_common.o
qlcnic-$(CONFIG_QLCNIC_SRIOV) += qlcnic_sriov_pf.o
+
+qlcnic-$(CONFIG_QLCNIC_DCB) += qlcnic_dcb.o
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 221645e9f182..88349b8fa39a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -20,7 +20,6 @@
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/firmware.h>
-
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/timer.h>
@@ -35,11 +34,12 @@
#include "qlcnic_hdr.h"
#include "qlcnic_hw.h"
#include "qlcnic_83xx_hw.h"
+#include "qlcnic_dcb.h"
#define _QLCNIC_LINUX_MAJOR 5
-#define _QLCNIC_LINUX_MINOR 2
-#define _QLCNIC_LINUX_SUBVERSION 44
-#define QLCNIC_LINUX_VERSIONID "5.2.44"
+#define _QLCNIC_LINUX_MINOR 3
+#define _QLCNIC_LINUX_SUBVERSION 50
+#define QLCNIC_LINUX_VERSIONID "5.3.50"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -98,6 +98,9 @@
#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
+ MGMT_CMD_DESC_RESV)
#define QLCNIC_MAX_TX_TIMEOUTS 2
+#define QLCNIC_MAX_TX_RINGS 8
+#define QLCNIC_MAX_SDS_RINGS 8
+
/*
* Following are the states of the Phantom. Phantom will set them and
* Host will read to check if the fields are correct.
@@ -389,7 +392,7 @@ struct qlcnic_dump_template_hdr {
struct qlcnic_fw_dump {
u8 clr; /* flag to indicate if dump is cleared */
- u8 enable; /* enable/disable dump */
+ bool enable; /* enable/disable dump */
u32 size; /* total size of the dump */
void *data; /* dump data area */
struct qlcnic_dump_template_hdr *tmpl_hdr;
@@ -460,14 +463,16 @@ struct qlcnic_hardware_context {
struct qlcnic_fdt fdt;
struct qlc_83xx_reset reset;
struct qlc_83xx_idc idc;
- struct qlc_83xx_fw_info fw_info;
+ struct qlc_83xx_fw_info *fw_info;
struct qlcnic_intrpt_config *intr_tbl;
struct qlcnic_sriov *sriov;
u32 *reg_tbl;
u32 *ext_reg_tbl;
u32 mbox_aen[QLC_83XX_MBX_AEN_CNT];
u32 mbox_reg[4];
- spinlock_t mbx_lock;
+ struct qlcnic_mailbox *mailbox;
+ u8 extend_lb_time;
+ u8 phys_port_id[ETH_ALEN];
};
struct qlcnic_adapter_stats {
@@ -515,6 +520,7 @@ struct qlcnic_host_sds_ring {
u32 num_desc;
void __iomem *crb_sts_consumer;
+ struct qlcnic_host_tx_ring *tx_ring;
struct status_desc *desc_head;
struct qlcnic_adapter *adapter;
struct napi_struct napi;
@@ -532,9 +538,17 @@ struct qlcnic_host_tx_ring {
void __iomem *crb_intr_mask;
char name[IFNAMSIZ + 12];
u16 ctx_id;
+
+ u32 state;
u32 producer;
u32 sw_consumer;
u32 num_desc;
+
+ u64 xmit_on;
+ u64 xmit_off;
+ u64 xmit_called;
+ u64 xmit_finished;
+
void __iomem *crb_cmd_producer;
struct cmd_desc_type0 *desc_head;
struct qlcnic_adapter *adapter;
@@ -559,7 +573,6 @@ struct qlcnic_recv_context {
u32 state;
u16 context_id;
u16 virt_port;
-
};
/* HW context creation */
@@ -604,6 +617,7 @@ struct qlcnic_recv_context {
#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8)
#define QLCNIC_CAP0_VALIDOFF (1 << 11)
#define QLCNIC_CAP0_LRO_MSS (1 << 21)
+#define QLCNIC_CAP0_TX_MULTI (1 << 22)
/*
* Context state
@@ -631,7 +645,7 @@ struct qlcnic_hostrq_rds_ring {
struct qlcnic_hostrq_rx_ctx {
__le64 host_rsp_dma_addr; /* Response dma'd here */
- __le32 capabilities[4]; /* Flag bit vector */
+ __le32 capabilities[4]; /* Flag bit vector */
__le32 host_int_crb_mode; /* Interrupt crb usage */
__le32 host_rds_crb_mode; /* RDS crb usage */
/* These ring offsets are relative to data[0] below */
@@ -802,6 +816,7 @@ struct qlcnic_mac_list_s {
#define QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK 0x8f
#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 0x8D
+#define QLCNIC_C2H_OPCODE_GET_DCB_AEN 0x90
#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
@@ -814,6 +829,7 @@ struct qlcnic_mac_list_s {
#define QLCNIC_FW_CAPABILITY_BDG BIT_8
#define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9
#define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10
+#define QLCNIC_FW_CAPABILITY_2_MULTI_TX BIT_4
#define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK BIT_27
#define QLCNIC_FW_CAPABILITY_MORE_CAPS BIT_31
@@ -821,6 +837,7 @@ struct qlcnic_mac_list_s {
#define QLCNIC_FW_CAP2_HW_LRO_IPV6 BIT_3
#define QLCNIC_FW_CAPABILITY_SET_DRV_VER BIT_5
#define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7
+#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG BIT_8
/* module types */
#define LINKEVENT_MODULE_NOT_PRESENT 1
@@ -913,6 +930,8 @@ struct qlcnic_ipaddr {
#define QLCNIC_FW_LRO_MSS_CAP 0x8000
#define QLCNIC_TX_INTR_SHARED 0x10000
#define QLCNIC_APP_CHANGED_FLAGS 0x20000
+#define QLCNIC_HAS_PHYS_PORT_ID 0x40000
+
#define QLCNIC_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
#define QLCNIC_IS_TSO_CAPABLE(adapter) \
@@ -922,11 +941,11 @@ struct qlcnic_ipaddr {
#define QLCNIC_BEACON_DISABLE 0xD
#define QLCNIC_DEF_NUM_STS_DESC_RINGS 4
+#define QLCNIC_DEF_NUM_TX_RINGS 4
#define QLCNIC_MSIX_TBL_SPACE 8192
#define QLCNIC_PCI_REG_MSIX_TBL 0x44
#define QLCNIC_MSIX_TBL_PGSIZE 4096
-#define QLCNIC_NETDEV_WEIGHT 128
#define QLCNIC_ADAPTER_UP_MAGIC 777
#define __QLCNIC_FW_ATTACHED 0
@@ -937,10 +956,13 @@ struct qlcnic_ipaddr {
#define __QLCNIC_DIAG_RES_ALLOC 6
#define __QLCNIC_LED_ENABLE 7
#define __QLCNIC_ELB_INPROGRESS 8
+#define __QLCNIC_MULTI_TX_UNIQUE 9
#define __QLCNIC_SRIOV_ENABLE 10
#define __QLCNIC_SRIOV_CAPABLE 11
#define __QLCNIC_MBX_POLL_ENABLE 12
#define __QLCNIC_DIAG_MODE 13
+#define __QLCNIC_DCB_STATE 14
+#define __QLCNIC_DCB_IN_AEN 15
#define QLCNIC_INTERRUPT_TEST 1
#define QLCNIC_LOOPBACK_TEST 2
@@ -950,12 +972,6 @@ struct qlcnic_ipaddr {
#define QLCNIC_READD_AGE 20
#define QLCNIC_LB_MAX_FILTERS 64
#define QLCNIC_LB_BUCKET_SIZE 32
-
-/* QLCNIC Driver Error Code */
-#define QLCNIC_FW_NOT_RESPOND 51
-#define QLCNIC_TEST_IN_PROGRESS 52
-#define QLCNIC_UNDEFINED_ERROR 53
-#define QLCNIC_LB_CABLE_NOT_CONN 54
#define QLCNIC_ILB_MAX_RCV_LOOP 10
struct qlcnic_filter {
@@ -972,6 +988,21 @@ struct qlcnic_filter_hash {
u16 fbucket_size;
};
+/* Mailbox specific data structures */
+struct qlcnic_mailbox {
+ struct workqueue_struct *work_q;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_mbx_ops *ops;
+ struct work_struct work;
+ struct completion completion;
+ struct list_head cmd_q;
+ unsigned long status;
+ spinlock_t queue_lock; /* Mailbox queue lock */
+ spinlock_t aen_lock; /* Mailbox response/AEN lock */
+ atomic_t rsp_status;
+ u32 num_cmds;
+};
+
struct qlcnic_adapter {
struct qlcnic_hardware_context *ahw;
struct qlcnic_recv_context *recv_ctx;
@@ -1035,6 +1066,7 @@ struct qlcnic_adapter {
struct delayed_work fw_work;
struct delayed_work idc_aen_work;
struct delayed_work mbx_poll_work;
+ struct qlcnic_dcb *dcb;
struct qlcnic_filter_hash fhash;
struct qlcnic_filter_hash rx_fhash;
@@ -1152,6 +1184,7 @@ struct qlcnic_pci_info {
};
struct qlcnic_npar_info {
+ bool eswitch_status;
u16 pvid;
u16 min_bw;
u16 max_bw;
@@ -1371,7 +1404,6 @@ struct qlcnic_esw_statistics {
struct __qlcnic_esw_statistics tx;
};
-#define QLCNIC_DUMP_MASK_DEF 0x1f
#define QLCNIC_FORCE_FW_DUMP_KEY 0xdeadfeed
#define QLCNIC_ENABLE_FW_DUMP 0xaddfeed
#define QLCNIC_DISABLE_FW_DUMP 0xbadfeed
@@ -1385,9 +1417,20 @@ struct _cdrp_cmd {
};
struct qlcnic_cmd_args {
- struct _cdrp_cmd req;
- struct _cdrp_cmd rsp;
- int op_type;
+ struct completion completion;
+ struct list_head list;
+ struct _cdrp_cmd req;
+ struct _cdrp_cmd rsp;
+ atomic_t rsp_status;
+ int pay_size;
+ u32 rsp_opcode;
+ u32 total_cmds;
+ u32 op_type;
+ u32 type;
+ u32 cmd_op;
+ u32 *hdr; /* Back channel message header */
+ u32 *pay; /* Back channel message payload */
+ u8 func_num;
};
int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter);
@@ -1435,6 +1478,12 @@ int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter);
void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter);
int qlcnic_dump_fw(struct qlcnic_adapter *);
+int qlcnic_enable_fw_dump_state(struct qlcnic_adapter *);
+bool qlcnic_check_fw_dump_state(struct qlcnic_adapter *);
+pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *,
+ pci_channel_state_t);
+pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *);
+void qlcnic_82xx_io_resume(struct pci_dev *);
/* Functions from qlcnic_init.c */
void qlcnic_schedule_work(struct qlcnic_adapter *, work_func_t, int);
@@ -1462,7 +1511,8 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter);
void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter);
void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
-void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
void qlcnic_watchdog_task(struct work_struct *work);
@@ -1474,6 +1524,7 @@ void __qlcnic_set_multi(struct net_device *, u16);
int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *, u16);
int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *);
void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter);
+int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *);
int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *, u32);
@@ -1495,8 +1546,9 @@ int qlcnic_reset_context(struct qlcnic_adapter *);
void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
-int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, size_t);
+int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, int);
int qlcnic_validate_max_rss(struct qlcnic_adapter *, __u32);
+int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *, u32 txq);
void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
int qlcnic_enable_msix(struct qlcnic_adapter *, u32);
@@ -1523,6 +1575,7 @@ void qlcnic_free_sds_rings(struct qlcnic_recv_context *);
void qlcnic_advert_link_change(struct qlcnic_adapter *, int);
void qlcnic_free_tx_rings(struct qlcnic_adapter *);
int qlcnic_alloc_tx_rings(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_dump_mbx(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
@@ -1585,6 +1638,26 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
tx_ring->producer;
}
+static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
+{
+ int err, tx_q;
+
+ tx_q = adapter->max_drv_tx_rings;
+
+ netdev->num_tx_queues = tx_q;
+ netdev->real_num_tx_queues = tx_q;
+
+ err = netif_set_real_num_tx_queues(netdev, tx_q);
+ if (err)
+ dev_err(&adapter->pdev->dev, "failed to set %d Tx queues\n",
+ tx_q);
+ else
+ dev_info(&adapter->pdev->dev, "set %d Tx queues\n", tx_q);
+
+ return err;
+}
+
struct qlcnic_nic_template {
int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
int (*config_led) (struct qlcnic_adapter *, u32, u32);
@@ -1600,6 +1673,20 @@ struct qlcnic_nic_template {
int (*resume)(struct qlcnic_adapter *);
};
+struct qlcnic_mbx_ops {
+ int (*enqueue_cmd) (struct qlcnic_adapter *,
+ struct qlcnic_cmd_args *, unsigned long *);
+ void (*dequeue_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+ void (*decode_resp) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+ void (*encode_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+ void (*nofity_fw) (struct qlcnic_adapter *, u8);
+};
+
+int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *);
+void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *);
+void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx);
+void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx);
+
/* Adapter hardware abstraction */
struct qlcnic_hardware_ops {
void (*read_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
@@ -1607,8 +1694,8 @@ struct qlcnic_hardware_ops {
int (*read_reg) (struct qlcnic_adapter *, ulong, int *);
int (*write_reg) (struct qlcnic_adapter *, ulong, u32);
void (*get_ocm_win) (struct qlcnic_hardware_context *);
- int (*get_mac_address) (struct qlcnic_adapter *, u8 *);
- int (*setup_intr) (struct qlcnic_adapter *, u8);
+ int (*get_mac_address) (struct qlcnic_adapter *, u8 *, u8);
+ int (*setup_intr) (struct qlcnic_adapter *, u8, int);
int (*alloc_mbx_args)(struct qlcnic_cmd_args *,
struct qlcnic_adapter *, u32);
int (*mbx_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
@@ -1641,6 +1728,11 @@ struct qlcnic_hardware_ops {
int (*get_board_info) (struct qlcnic_adapter *);
void (*set_mac_filter_count) (struct qlcnic_adapter *);
void (*free_mac_list) (struct qlcnic_adapter *);
+ int (*read_phys_port_id) (struct qlcnic_adapter *);
+ pci_ers_result_t (*io_error_detected) (struct pci_dev *,
+ pci_channel_state_t);
+ pci_ers_result_t (*io_slot_reset) (struct pci_dev *);
+ void (*io_resume) (struct pci_dev *);
};
extern struct qlcnic_nic_template qlcnic_vf_ops;
@@ -1669,14 +1761,15 @@ static inline int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter,
}
static inline int qlcnic_get_mac_address(struct qlcnic_adapter *adapter,
- u8 *mac)
+ u8 *mac, u8 function)
{
- return adapter->ahw->hw_ops->get_mac_address(adapter, mac);
+ return adapter->ahw->hw_ops->get_mac_address(adapter, mac, function);
}
-static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
+static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter,
+ u8 num_intr, int txq)
{
- return adapter->ahw->hw_ops->setup_intr(adapter, num_intr);
+ return adapter->ahw->hw_ops->setup_intr(adapter, num_intr, txq);
}
static inline int qlcnic_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
@@ -1867,6 +1960,12 @@ static inline void qlcnic_set_mac_filter_count(struct qlcnic_adapter *adapter)
adapter->ahw->hw_ops->set_mac_filter_count(adapter);
}
+static inline void qlcnic_read_phys_port_id(struct qlcnic_adapter *adapter)
+{
+ if (adapter->ahw->hw_ops->read_phys_port_id)
+ adapter->ahw->hw_ops->read_phys_port_id(adapter);
+}
+
static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter,
u32 key)
{
@@ -1898,16 +1997,45 @@ static inline void qlcnic_config_ipaddr(struct qlcnic_adapter *adapter,
adapter->nic_ops->config_ipaddr(adapter, ip, cmd);
}
+static inline bool qlcnic_check_multi_tx(struct qlcnic_adapter *adapter)
+{
+ return test_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+}
+
+static inline void qlcnic_disable_multi_tx(struct qlcnic_adapter *adapter)
+{
+ test_and_clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+ adapter->max_drv_tx_rings = 1;
+}
+
+/* When operating in multi Tx mode, the driver needs to write 0x1,
+ * instead of 0x0, to the src register to disable interrupts.
+ */
static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
{
- writel(0, sds_ring->crb_intr_mask);
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED))
+ writel(0x1, sds_ring->crb_intr_mask);
+ else
+ writel(0, sds_ring->crb_intr_mask);
}
+/* When operating in multi Tx mode, the driver needs to write 0x0,
+ * instead of 0x1, to the src register to enable interrupts.
+ */
static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
{
struct qlcnic_adapter *adapter = sds_ring->adapter;
- writel(0x1, sds_ring->crb_intr_mask);
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED))
+ writel(0, sds_ring->crb_intr_mask);
+ else
+ writel(0x1, sds_ring->crb_intr_mask);
if (!QLCNIC_IS_MSI_FAMILY(adapter))
writel(0xfbff, adapter->tgt_mask_reg);
@@ -1939,9 +2067,11 @@ extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
__func__, ##_args); \
} while (0)
-#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030
+#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
+#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030
#define PCI_DEVICE_ID_QLOGIC_VF_QLE834X 0x8430
-#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
+#define PCI_DEVICE_ID_QLOGIC_QLE844X 0x8040
+#define PCI_DEVICE_ID_QLOGIC_VF_QLE844X 0x8440
static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
{
@@ -1949,12 +2079,22 @@ static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
return (device == PCI_DEVICE_ID_QLOGIC_QLE824X) ? true : false;
}
+static inline bool qlcnic_84xx_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+
+ return ((device == PCI_DEVICE_ID_QLOGIC_QLE844X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X)) ? true : false;
+}
+
static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter)
{
unsigned short device = adapter->pdev->device;
bool status;
status = ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_QLE844X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) ||
(device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? true : false;
return status;
@@ -1968,7 +2108,105 @@ static inline bool qlcnic_sriov_pf_check(struct qlcnic_adapter *adapter)
static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
{
unsigned short device = adapter->pdev->device;
+ bool status;
+
+ status = ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X)) ? true : false;
+
+ return status;
+}
+
+static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (dcb && dcb->ops->get_hw_capability)
+ return dcb->ops->get_hw_capability(adapter);
+
+ return 0;
+}
+
+static inline void qlcnic_dcb_free(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (dcb && dcb->ops->free)
+ dcb->ops->free(adapter);
+}
+
+static inline int qlcnic_dcb_attach(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (dcb && dcb->ops->attach)
+ return dcb->ops->attach(adapter);
+
+ return 0;
+}
+
+static inline int
+qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter, char *buf)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (dcb && dcb->ops->query_hw_capability)
+ return dcb->ops->query_hw_capability(adapter, buf);
+
+ return 0;
+}
+
+static inline void qlcnic_dcb_get_info(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (dcb && dcb->ops->get_info)
+ dcb->ops->get_info(adapter);
+}
+
+static inline int
+qlcnic_dcb_query_cee_param(struct qlcnic_adapter *adapter, char *buf, u8 type)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (dcb && dcb->ops->query_cee_param)
+ return dcb->ops->query_cee_param(adapter, buf, type);
+
+ return 0;
+}
+
+static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (dcb && dcb->ops->get_cee_cfg)
+ return dcb->ops->get_cee_cfg(adapter);
+
+ return 0;
+}
+
+static inline void
+qlcnic_dcb_register_aen(struct qlcnic_adapter *adapter, u8 flag)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (dcb && dcb->ops->register_aen)
+ dcb->ops->register_aen(adapter, flag);
+}
+
+static inline void qlcnic_dcb_handle_aen(struct qlcnic_adapter *adapter,
+ void *msg)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (dcb && dcb->ops->handle_aen)
+ dcb->ops->handle_aen(adapter, msg);
+}
+
+static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
- return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
+ if (dcb && dcb->ops->init_dcbnl_ops)
+ dcb->ops->init_dcbnl_ops(adapter);
}
#endif /* __QLCNIC_H_ */
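
The qlcnic_dcb_* inline wrappers added above all share one null-safe dispatch shape: check the dcb pointer and the specific op before calling through, and fall back to a benign default so call sites stay unconditional whether or not CONFIG_QLCNIC_DCB built qlcnic_dcb.o into the driver. Below is a minimal userspace sketch of that shape; sample_dcb, sample_dcb_ops, and fake_cap are illustrative names, not driver symbols.

/* Minimal sketch of the null-safe ops dispatch used by the qlcnic_dcb_*
 * wrappers; assumes (as the driver does) that ops is set whenever the
 * dcb pointer itself is non-NULL. Illustrative names only.
 */
#include <stdio.h>

struct sample_dcb_ops {
	int (*get_hw_capability)(void *adapter);
};

struct sample_dcb {
	struct sample_dcb_ops *ops;
};

static inline int sample_dcb_get_hw_capability(struct sample_dcb *dcb,
					       void *adapter)
{
	if (dcb && dcb->ops->get_hw_capability)
		return dcb->ops->get_hw_capability(adapter);
	return 0;	/* benign default keeps call sites unconditional */
}

static int fake_cap(void *adapter)
{
	(void)adapter;
	return 1;
}

int main(void)
{
	struct sample_dcb_ops ops = { .get_hw_capability = fake_cap };
	struct sample_dcb dcb = { .ops = &ops };

	printf("with ops: %d\n", sample_dcb_get_hw_capability(&dcb, NULL));
	printf("without dcb: %d\n", sample_dcb_get_hw_capability(NULL, NULL));
	return 0;
}

The payoff is that every caller reads as a plain function call, with the feature test buried in the wrapper.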
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 9d4bb7f83904..a1818dae47b6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -11,6 +11,7 @@
#include <linux/ipv6.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
+#include <linux/aer.h>
#define QLCNIC_MAX_TX_QUEUES 1
#define RSS_HASHTYPE_IP_TCP 0x3
@@ -67,6 +68,8 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_ADD_RCV_RINGS, 130, 26},
{QLCNIC_CMD_CONFIG_VPORT, 4, 4},
{QLCNIC_CMD_BC_EVENT_SETUP, 2, 1},
+ {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
+ {QLCNIC_CMD_DCB_QUERY_PARAM, 2, 50},
};
const u32 qlcnic_83xx_ext_reg_tbl[] = {
@@ -149,7 +152,7 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
.get_mac_address = qlcnic_83xx_get_mac_address,
.setup_intr = qlcnic_83xx_setup_intr,
.alloc_mbx_args = qlcnic_83xx_alloc_mbx_args,
- .mbx_cmd = qlcnic_83xx_mbx_op,
+ .mbx_cmd = qlcnic_83xx_issue_cmd,
.get_func_no = qlcnic_83xx_get_func_no,
.api_lock = qlcnic_83xx_cam_lock,
.api_unlock = qlcnic_83xx_cam_unlock,
@@ -175,6 +178,10 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
.get_board_info = qlcnic_83xx_get_port_info,
.set_mac_filter_count = qlcnic_83xx_set_mac_filter_count,
.free_mac_list = qlcnic_82xx_free_mac_list,
+ .io_error_detected = qlcnic_83xx_io_error_detected,
+ .io_slot_reset = qlcnic_83xx_io_slot_reset,
+ .io_resume = qlcnic_83xx_io_resume,
+
};
static struct qlcnic_nic_template qlcnic_83xx_ops = {
@@ -261,7 +268,7 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
}
}
-int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr, int txq)
{
int err, i, num_msix;
struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -362,6 +369,10 @@ static inline void qlcnic_83xx_get_mbx_data(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *cmd)
{
int i;
+
+ if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP)
+ return;
+
for (i = 0; i < cmd->rsp.num; i++)
cmd->rsp.arg[i] = readl(QLCNIC_MBX_FW(adapter->ahw, i));
}
@@ -398,24 +409,33 @@ irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
return IRQ_HANDLED;
}
+static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx)
+{
+ atomic_set(&mbx->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
+ complete(&mbx->completion);
+}
+
static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
{
- u32 resp, event;
+ u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
unsigned long flags;
- spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
-
+ spin_lock_irqsave(&mbx->aen_lock, flags);
resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
if (!(resp & QLCNIC_SET_OWNER))
goto out;
event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
- if (event & QLCNIC_MBX_ASYNC_EVENT)
+ if (event & QLCNIC_MBX_ASYNC_EVENT) {
__qlcnic_83xx_process_aen(adapter);
-
+ } else {
+ if (atomic_read(&mbx->rsp_status) != rsp_status)
+ qlcnic_83xx_notify_mbx_response(mbx);
+ }
out:
qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
- spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
+ spin_unlock_irqrestore(&mbx->aen_lock, flags);
}
irqreturn_t qlcnic_83xx_intr(int irq, void *data)
@@ -515,7 +535,7 @@ int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter)
}
/* Enable mailbox interrupt */
- qlcnic_83xx_enable_mbx_intrpt(adapter);
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
return err;
}
@@ -628,7 +648,7 @@ void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
ahw->max_uc_count = count;
}
-void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *adapter)
+void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *adapter)
{
u32 val;
@@ -682,11 +702,14 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
u32 data[]);
-static void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
- struct qlcnic_cmd_args *cmd)
+void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
{
int i;
+ if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP)
+ return;
+
dev_info(&adapter->pdev->dev,
"Host MBX regs(%d)\n", cmd->req.num);
for (i = 0; i < cmd->req.num; i++) {
@@ -705,120 +728,73 @@ static void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
pr_info("\n");
}
-/* Mailbox response for mac rcode */
-u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
+static void qlcnic_83xx_poll_for_mbx_completion(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
{
- u32 fw_data;
- u8 mac_cmd_rcode;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int opcode = LSW(cmd->req.arg[0]);
+ unsigned long max_loops;
- fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2));
- mac_cmd_rcode = (u8)fw_data;
- if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE ||
- mac_cmd_rcode == QLC_83XX_MAC_PRESENT ||
- mac_cmd_rcode == QLC_83XX_MAC_ABSENT)
- return QLCNIC_RCODE_SUCCESS;
- return 1;
-}
+ max_loops = cmd->total_cmds * QLC_83XX_MBX_CMD_LOOP;
-u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter, u32 *wait_time)
-{
- u32 data;
- struct qlcnic_hardware_context *ahw = adapter->ahw;
- /* wait for mailbox completion */
- do {
- data = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
- if (++(*wait_time) > QLCNIC_MBX_TIMEOUT) {
- data = QLCNIC_RCODE_TIMEOUT;
- break;
- }
- mdelay(1);
- } while (!data);
- return data;
+ for (; max_loops; max_loops--) {
+ if (atomic_read(&cmd->rsp_status) ==
+ QLC_83XX_MBX_RESPONSE_ARRIVED)
+ return;
+
+ udelay(1);
+ }
+
+ dev_err(&adapter->pdev->dev,
+ "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, opcode, cmd->type, ahw->pci_func, ahw->op_mode);
+ flush_workqueue(ahw->mailbox->work_q);
+ return;
}
-int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter,
- struct qlcnic_cmd_args *cmd)
+int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
{
- int i;
- u16 opcode;
- u8 mbx_err_code;
- unsigned long flags;
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
struct qlcnic_hardware_context *ahw = adapter->ahw;
- u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, wait_time = 0;
+ int cmd_type, err, opcode;
+ unsigned long timeout;
opcode = LSW(cmd->req.arg[0]);
- if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
- dev_info(&adapter->pdev->dev,
- "Mailbox cmd attempted, 0x%x\n", opcode);
- dev_info(&adapter->pdev->dev, "Mailbox detached\n");
- return 0;
+ cmd_type = cmd->type;
+ err = mbx->ops->enqueue_cmd(adapter, cmd, &timeout);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Mailbox not available, cmd_op=0x%x, cmd_context=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, opcode, cmd->type, ahw->pci_func,
+ ahw->op_mode);
+ return err;
}
- spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
- mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
-
- if (mbx_val) {
- QLCDB(adapter, DRV,
- "Mailbox cmd attempted, 0x%x\n", opcode);
- QLCDB(adapter, DRV,
- "Mailbox not available, 0x%x, collect FW dump\n",
- mbx_val);
- cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
- spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
- return cmd->rsp.arg[0];
- }
-
- /* Fill in mailbox registers */
- mbx_cmd = cmd->req.arg[0];
- writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
- for (i = 1; i < cmd->req.num; i++)
- writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i));
-
- /* Signal FW about the impending command */
- QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
-poll:
- rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time);
- if (rsp != QLCNIC_RCODE_TIMEOUT) {
- /* Get the FW response data */
- fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
- if (fw_data & QLCNIC_MBX_ASYNC_EVENT) {
- __qlcnic_83xx_process_aen(adapter);
- goto poll;
- }
- mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
- rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
- opcode = QLCNIC_MBX_RSP(fw_data);
- qlcnic_83xx_get_mbx_data(adapter, cmd);
-
- switch (mbx_err_code) {
- case QLCNIC_MBX_RSP_OK:
- case QLCNIC_MBX_PORT_RSP_OK:
- rsp = QLCNIC_RCODE_SUCCESS;
- break;
- default:
- if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) {
- rsp = qlcnic_83xx_mac_rcode(adapter);
- if (!rsp)
- goto out;
- }
+ switch (cmd_type) {
+ case QLC_83XX_MBX_CMD_WAIT:
+ if (!wait_for_completion_timeout(&cmd->completion, timeout)) {
dev_err(&adapter->pdev->dev,
- "MBX command 0x%x failed with err:0x%x\n",
- opcode, mbx_err_code);
- rsp = mbx_err_code;
- qlcnic_dump_mbx(adapter, cmd);
- break;
+ "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, opcode, cmd_type, ahw->pci_func,
+ ahw->op_mode);
+ flush_workqueue(mbx->work_q);
}
- goto out;
+ break;
+ case QLC_83XX_MBX_CMD_NO_WAIT:
+ return 0;
+ case QLC_83XX_MBX_CMD_BUSY_WAIT:
+ qlcnic_83xx_poll_for_mbx_completion(adapter, cmd);
+ break;
+ default:
+ dev_err(&adapter->pdev->dev,
+ "%s: Invalid mailbox command, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, opcode, cmd_type, ahw->pci_func,
+ ahw->op_mode);
+ qlcnic_83xx_detach_mailbox_work(adapter);
}
- dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n",
- QLCNIC_MBX_RSP(mbx_cmd));
- rsp = QLCNIC_RCODE_TIMEOUT;
-out:
- /* clear fw mbx control register */
- QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
- spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
- return rsp;
+ return cmd->rsp_opcode;
}
int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
@@ -828,6 +804,7 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
u32 temp;
const struct qlcnic_mailbox_metadata *mbx_tbl;
+ memset(mbx, 0, sizeof(struct qlcnic_cmd_args));
mbx_tbl = qlcnic_83xx_mbx_tbl;
size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl);
for (i = 0; i < size; i++) {
@@ -850,6 +827,7 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
temp = adapter->ahw->fw_hal_version << 29;
mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp);
+ mbx->cmd_op = type;
return 0;
}
}
@@ -888,9 +866,9 @@ static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
u32 event[QLC_83XX_MBX_AEN_CNT];
int i;
- struct qlcnic_hardware_context *ahw = adapter->ahw;
for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++)
event[i] = readl(QLCNIC_MBX_FW(ahw, i));
@@ -910,6 +888,7 @@ void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
&adapter->idc_aen_work, 0);
break;
case QLCNIC_MBX_TIME_EXTEND_EVENT:
+ ahw->extend_lb_time = event[1] >> 8 & 0xf;
break;
case QLCNIC_MBX_BC_EVENT:
qlcnic_sriov_handle_bc_event(adapter, event[1]);
@@ -922,6 +901,9 @@ void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
dev_info(&adapter->pdev->dev, "SFP Removed AEN:0x%x.\n",
QLCNIC_MBX_RSP(event[0]));
break;
+ case QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT:
+ qlcnic_dcb_handle_aen(adapter, (void *)&event[1]);
+ break;
default:
dev_dbg(&adapter->pdev->dev, "Unsupported AEN:0x%x.\n",
QLCNIC_MBX_RSP(event[0]));
@@ -933,20 +915,23 @@ void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
{
+ u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
struct qlcnic_hardware_context *ahw = adapter->ahw;
- u32 resp, event;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
unsigned long flags;
- spin_lock_irqsave(&ahw->mbx_lock, flags);
-
+ spin_lock_irqsave(&mbx->aen_lock, flags);
resp = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
if (resp & QLCNIC_SET_OWNER) {
event = readl(QLCNIC_MBX_FW(ahw, 0));
- if (event & QLCNIC_MBX_ASYNC_EVENT)
+ if (event & QLCNIC_MBX_ASYNC_EVENT) {
__qlcnic_83xx_process_aen(adapter);
+ } else {
+ if (atomic_read(&mbx->rsp_status) != rsp_status)
+ qlcnic_83xx_notify_mbx_response(mbx);
+ }
}
-
- spin_unlock_irqrestore(&ahw->mbx_lock, flags);
+ spin_unlock_irqrestore(&mbx->aen_lock, flags);
}
static void qlcnic_83xx_mbx_poll_work(struct work_struct *work)
@@ -969,6 +954,7 @@ void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *adapter)
return;
INIT_DELAYED_WORK(&adapter->mbx_poll_work, qlcnic_83xx_mbx_poll_work);
+ queue_delayed_work(adapter->qlcnic_wq, &adapter->mbx_poll_work, 0);
}
void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *adapter)
@@ -1355,8 +1341,10 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
/* disable and free mailbox interrupt */
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ qlcnic_83xx_enable_mbx_poll(adapter);
qlcnic_83xx_free_mbx_intr(adapter);
+ }
adapter->ahw->loopback_state = 0;
adapter->ahw->hw_ops->setup_link_event(adapter, 1);
}
@@ -1377,6 +1365,8 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
qlcnic_83xx_disable_intr(adapter, sds_ring);
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+ qlcnic_83xx_enable_mbx_poll(adapter);
}
}
@@ -1386,6 +1376,7 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
err = qlcnic_83xx_setup_mbx_intr(adapter);
+ qlcnic_83xx_disable_mbx_poll(adapter);
if (err) {
dev_err(&adapter->pdev->dev,
"%s: failed to setup mbx interrupt\n",
@@ -1402,6 +1393,10 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
if (netif_running(netdev))
__qlcnic_up(adapter, netdev);
+
+ if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST &&
+ !(adapter->flags & QLCNIC_MSIX_ENABLED))
+ qlcnic_83xx_disable_mbx_poll(adapter);
out:
netif_device_attach(netdev);
}
@@ -1619,26 +1614,33 @@ static void qlcnic_83xx_set_interface_id_promisc(struct qlcnic_adapter *adapter,
int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
{
- int err;
+ struct qlcnic_cmd_args *cmd = NULL;
u32 temp = 0;
- struct qlcnic_cmd_args cmd;
+ int err;
if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
return -EIO;
- err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!cmd)
+ return -ENOMEM;
+
+ err = qlcnic_alloc_mbx_args(cmd, adapter,
QLCNIC_CMD_CONFIGURE_MAC_RX_MODE);
if (err)
- return err;
+ goto out;
+ cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
qlcnic_83xx_set_interface_id_promisc(adapter, &temp);
- cmd.req.arg[1] = (mode ? 1 : 0) | temp;
- err = qlcnic_issue_cmd(adapter, &cmd);
- if (err)
- dev_info(&adapter->pdev->dev,
- "Promiscous mode config failed\n");
+ cmd->req.arg[1] = (mode ? 1 : 0) | temp;
+ err = qlcnic_issue_cmd(adapter, cmd);
+ if (!err)
+ return err;
- qlcnic_free_mbx_args(&cmd);
+ qlcnic_free_mbx_args(cmd);
+
+out:
+ kfree(cmd);
return err;
}
@@ -1651,7 +1653,7 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
netdev_warn(netdev,
"Loopback test not supported in non privileged mode\n");
- return ret;
+ return -ENOTSUPP;
}
if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
@@ -1679,19 +1681,17 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
/* Poll for link up event before running traffic */
do {
msleep(QLC_83XX_LB_MSLEEP_COUNT);
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
- qlcnic_83xx_process_aen(adapter);
if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
netdev_info(netdev,
"Device is resetting, free LB test resources\n");
- ret = -EIO;
+ ret = -EBUSY;
goto free_diag_res;
}
if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
netdev_info(netdev,
"Firmware didn't sent link up event to loopback request\n");
- ret = -QLCNIC_FW_NOT_RESPOND;
+ ret = -ETIMEDOUT;
qlcnic_83xx_clear_lb_mode(adapter, mode);
goto free_diag_res;
}
@@ -1700,7 +1700,7 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
/* Make sure carrier is off and queue is stopped during loopback */
if (netif_running(netdev)) {
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
+ netif_tx_stop_all_queues(netdev);
}
ret = qlcnic_do_lb_test(adapter, mode);
@@ -1716,18 +1716,42 @@ fail_diag_alloc:
return ret;
}
+static void qlcnic_extend_lb_idc_cmpltn_wait(struct qlcnic_adapter *adapter,
+ u32 *max_wait_count)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int temp;
+
+ netdev_info(adapter->netdev, "Received loopback IDC time extend event for 0x%x seconds\n",
+ ahw->extend_lb_time);
+ temp = ahw->extend_lb_time * 1000;
+ *max_wait_count += temp / QLC_83XX_LB_MSLEEP_COUNT;
+ ahw->extend_lb_time = 0;
+}
+
int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct net_device *netdev = adapter->netdev;
+ u32 config, max_wait_count;
int status = 0, loop = 0;
- u32 config;
+ ahw->extend_lb_time = 0;
+ max_wait_count = QLC_83XX_LB_WAIT_COUNT;
status = qlcnic_83xx_get_port_config(adapter);
if (status)
return status;
config = ahw->port_config;
+
+ /* Check if port is already in loopback mode */
+ if ((config & QLC_83XX_CFG_LOOPBACK_HSS) ||
+ (config & QLC_83XX_CFG_LOOPBACK_EXT)) {
+ netdev_err(netdev,
+ "Port already in Loopback mode.\n");
+ return -EINPROGRESS;
+ }
+
set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
if (mode == QLCNIC_ILB_MODE)
@@ -1748,21 +1772,24 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
/* Wait for Link and IDC Completion AEN */
do {
msleep(QLC_83XX_LB_MSLEEP_COUNT);
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
- qlcnic_83xx_process_aen(adapter);
if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
netdev_info(netdev,
"Device is resetting, free LB test resources\n");
clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
- return -EIO;
+ return -EBUSY;
}
- if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
- netdev_err(netdev,
- "Did not receive IDC completion AEN\n");
+
+ if (ahw->extend_lb_time)
+ qlcnic_extend_lb_idc_cmpltn_wait(adapter,
+ &max_wait_count);
+
+ if (loop++ > max_wait_count) {
+ netdev_err(netdev, "%s: Did not receive loopback IDC completion AEN\n",
+ __func__);
clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
qlcnic_83xx_clear_lb_mode(adapter, mode);
- return -EIO;
+ return -ETIMEDOUT;
}
} while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
@@ -1774,10 +1801,12 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 config = ahw->port_config, max_wait_count;
struct net_device *netdev = adapter->netdev;
int status = 0, loop = 0;
- u32 config = ahw->port_config;
+ ahw->extend_lb_time = 0;
+ max_wait_count = QLC_83XX_LB_WAIT_COUNT;
set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
if (mode == QLCNIC_ILB_MODE)
ahw->port_config &= ~QLC_83XX_CFG_LOOPBACK_HSS;
@@ -1797,21 +1826,23 @@ int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
/* Wait for Link and IDC Completion AEN */
do {
msleep(QLC_83XX_LB_MSLEEP_COUNT);
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
- qlcnic_83xx_process_aen(adapter);
if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
netdev_info(netdev,
"Device is resetting, free LB test resources\n");
clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
- return -EIO;
+ return -EBUSY;
}
- if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
- netdev_err(netdev,
- "Did not receive IDC completion AEN\n");
+ if (ahw->extend_lb_time)
+ qlcnic_extend_lb_idc_cmpltn_wait(adapter,
+ &max_wait_count);
+
+ if (loop++ > max_wait_count) {
+ netdev_err(netdev, "%s: Did not receive loopback IDC completion AEN\n",
+ __func__);
clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
- return -EIO;
+ return -ETIMEDOUT;
}
} while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
@@ -1950,25 +1981,31 @@ static void qlcnic_83xx_set_interface_id_macaddr(struct qlcnic_adapter *adapter,
int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
u16 vlan_id, u8 op)
{
- int err;
- u32 *buf, temp = 0;
- struct qlcnic_cmd_args cmd;
+ struct qlcnic_cmd_args *cmd = NULL;
struct qlcnic_macvlan_mbx mv;
+ u32 *buf, temp = 0;
+ int err;
if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
return -EIO;
- err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!cmd)
+ return -ENOMEM;
+
+ err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
if (err)
- return err;
+ goto out;
+
+ cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
if (vlan_id)
op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL;
- cmd.req.arg[1] = op | (1 << 8);
+ cmd->req.arg[1] = op | (1 << 8);
qlcnic_83xx_set_interface_id_macaddr(adapter, &temp);
- cmd.req.arg[1] |= temp;
+ cmd->req.arg[1] |= temp;
mv.vlan = vlan_id;
mv.mac_addr0 = addr[0];
mv.mac_addr1 = addr[1];
@@ -1976,14 +2013,15 @@ int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
mv.mac_addr3 = addr[3];
mv.mac_addr4 = addr[4];
mv.mac_addr5 = addr[5];
- buf = &cmd.req.arg[2];
+ buf = &cmd->req.arg[2];
memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
- err = qlcnic_issue_cmd(adapter, &cmd);
- if (err)
- dev_err(&adapter->pdev->dev,
- "MAC-VLAN %s to CAM failed, err=%d.\n",
- ((op == 1) ? "add " : "delete "), err);
- qlcnic_free_mbx_args(&cmd);
+ err = qlcnic_issue_cmd(adapter, cmd);
+ if (!err)
+ return err;
+
+ qlcnic_free_mbx_args(cmd);
+out:
+ kfree(cmd);
return err;
}
@@ -2008,12 +2046,14 @@ void qlcnic_83xx_configure_mac(struct qlcnic_adapter *adapter, u8 *mac,
cmd->req.arg[1] = type;
}
-int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
+int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
+ u8 function)
{
int err, i;
struct qlcnic_cmd_args cmd;
u32 mac_low, mac_high;
+ function = 0;
err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
if (err)
return err;
@@ -2099,10 +2139,12 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
{
struct qlcnic_adapter *adapter = data;
- unsigned long flags;
+ struct qlcnic_mailbox *mbx;
u32 mask, resp, event;
+ unsigned long flags;
- spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
+ mbx = adapter->ahw->mailbox;
+ spin_lock_irqsave(&mbx->aen_lock, flags);
resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
if (!(resp & QLCNIC_SET_OWNER))
goto out;
@@ -2110,11 +2152,13 @@ irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
if (event & QLCNIC_MBX_ASYNC_EVENT)
__qlcnic_83xx_process_aen(adapter);
+ else
+ qlcnic_83xx_notify_mbx_response(mbx);
+
out:
mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
writel(0, adapter->ahw->pci_base0 + mask);
- spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
-
+ spin_unlock_irqrestore(&mbx->aen_lock, flags);
return IRQ_HANDLED;
}
@@ -2287,7 +2331,7 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
pci_info->tx_max_bw, pci_info->mac);
}
if (ahw->op_mode == QLCNIC_MGMT_FUNC)
- dev_info(dev, "Max vNIC functions = %d, active vNIC functions = %d\n",
+ dev_info(dev, "Max functions = %d, active functions = %d\n",
ahw->max_pci_func, ahw->act_pci_func);
} else {
@@ -3477,3 +3521,360 @@ int qlcnic_83xx_resume(struct qlcnic_adapter *adapter)
idc->delay);
return err;
}
+
+void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx)
+{
+ INIT_COMPLETION(mbx->completion);
+ set_bit(QLC_83XX_MBX_READY, &mbx->status);
+}
+
+void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx)
+{
+ destroy_workqueue(mbx->work_q);
+ kfree(mbx);
+}
+
+static inline void
+qlcnic_83xx_notify_cmd_completion(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
+
+ if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
+ qlcnic_free_mbx_args(cmd);
+ kfree(cmd);
+ return;
+ }
+ complete(&cmd->completion);
+}
+
+static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+ struct list_head *head = &mbx->cmd_q;
+ struct qlcnic_cmd_args *cmd = NULL;
+
+ spin_lock(&mbx->queue_lock);
+
+ while (!list_empty(head)) {
+ cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
+ dev_info(&adapter->pdev->dev, "%s: Mailbox command 0x%x\n",
+ __func__, cmd->cmd_op);
+ list_del(&cmd->list);
+ mbx->num_cmds--;
+ qlcnic_83xx_notify_cmd_completion(adapter, cmd);
+ }
+
+ spin_unlock(&mbx->queue_lock);
+}
+
+static int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
+ u32 host_mbx_ctrl;
+
+ if (!test_bit(QLC_83XX_MBX_READY, &mbx->status))
+ return -EBUSY;
+
+ host_mbx_ctrl = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
+ if (host_mbx_ctrl) {
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ ahw->idc.collect_dump = 1;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static inline void qlcnic_83xx_signal_mbx_cmd(struct qlcnic_adapter *adapter,
+ u8 issue_cmd)
+{
+ if (issue_cmd)
+ QLCWRX(adapter->ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
+ else
+ QLCWRX(adapter->ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
+}
+
+static void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+
+ spin_lock(&mbx->queue_lock);
+
+ list_del(&cmd->list);
+ mbx->num_cmds--;
+
+ spin_unlock(&mbx->queue_lock);
+
+ qlcnic_83xx_notify_cmd_completion(adapter, cmd);
+}
+
+static void qlcnic_83xx_encode_mbx_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ u32 mbx_cmd, fw_hal_version, hdr_size, total_size, tmp;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int i, j;
+
+ if (cmd->op_type != QLC_83XX_MBX_POST_BC_OP) {
+ mbx_cmd = cmd->req.arg[0];
+ writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
+ for (i = 1; i < cmd->req.num; i++)
+ writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i));
+ } else {
+ fw_hal_version = ahw->fw_hal_version;
+ hdr_size = sizeof(struct qlcnic_bc_hdr) / sizeof(u32);
+ total_size = cmd->pay_size + hdr_size;
+ tmp = QLCNIC_CMD_BC_EVENT_SETUP | total_size << 16;
+ mbx_cmd = tmp | fw_hal_version << 29;
+ writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
+
+ /* Back channel specific operations bits */
+ mbx_cmd = 0x1 | 1 << 4;
+
+ if (qlcnic_sriov_pf_check(adapter))
+ mbx_cmd |= cmd->func_num << 5;
+
+ writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1));
+
+ for (i = 2, j = 0; j < hdr_size; i++, j++)
+ writel(*(cmd->hdr++), QLCNIC_MBX_HOST(ahw, i));
+ for (j = 0; j < cmd->pay_size; j++, i++)
+ writel(*(cmd->pay++), QLCNIC_MBX_HOST(ahw, i));
+ }
+}
+
+void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ complete(&mbx->completion);
+ cancel_work_sync(&mbx->work);
+ flush_workqueue(mbx->work_q);
+ qlcnic_83xx_flush_mbx_queue(adapter);
+}
+
+static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd,
+ unsigned long *timeout)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+
+ if (test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
+ atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
+ init_completion(&cmd->completion);
+ cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN;
+
+ spin_lock(&mbx->queue_lock);
+
+ list_add_tail(&cmd->list, &mbx->cmd_q);
+ mbx->num_cmds++;
+ cmd->total_cmds = mbx->num_cmds;
+ *timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT;
+ queue_work(mbx->work_q, &mbx->work);
+
+ spin_unlock(&mbx->queue_lock);
+
+ return 0;
+ }
+
+ return -EBUSY;
+}
+
+static int qlcnic_83xx_check_mac_rcode(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ u8 mac_cmd_rcode;
+ u32 fw_data;
+
+ if (cmd->cmd_op == QLCNIC_CMD_CONFIG_MAC_VLAN) {
+ fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2));
+ mac_cmd_rcode = (u8)fw_data;
+ if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE ||
+ mac_cmd_rcode == QLC_83XX_MAC_PRESENT ||
+ mac_cmd_rcode == QLC_83XX_MAC_ABSENT) {
+ cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS;
+ return QLCNIC_RCODE_SUCCESS;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static void qlcnic_83xx_decode_mbx_rsp(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct device *dev = &adapter->pdev->dev;
+ u8 mbx_err_code;
+ u32 fw_data;
+
+ fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
+ mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
+ qlcnic_83xx_get_mbx_data(adapter, cmd);
+
+ switch (mbx_err_code) {
+ case QLCNIC_MBX_RSP_OK:
+ case QLCNIC_MBX_PORT_RSP_OK:
+ cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS;
+ break;
+ default:
+ if (!qlcnic_83xx_check_mac_rcode(adapter, cmd))
+ break;
+
+ dev_err(dev, "%s: Mailbox command failed, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x, error=0x%x\n",
+ __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
+ ahw->op_mode, mbx_err_code);
+ cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_FAILED;
+ qlcnic_dump_mbx(adapter, cmd);
+ }
+
+ return;
+}
+
+static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
+{
+ struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox,
+ work);
+ struct qlcnic_adapter *adapter = mbx->adapter;
+ struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
+ struct device *dev = &adapter->pdev->dev;
+ atomic_t *rsp_status = &mbx->rsp_status;
+ struct list_head *head = &mbx->cmd_q;
+ struct qlcnic_hardware_context *ahw;
+ struct qlcnic_cmd_args *cmd = NULL;
+
+ ahw = adapter->ahw;
+
+ while (true) {
+ if (qlcnic_83xx_check_mbx_status(adapter)) {
+ qlcnic_83xx_flush_mbx_queue(adapter);
+ return;
+ }
+
+ atomic_set(rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
+
+ spin_lock(&mbx->queue_lock);
+
+ if (list_empty(head)) {
+ spin_unlock(&mbx->queue_lock);
+ return;
+ }
+ cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
+
+ spin_unlock(&mbx->queue_lock);
+
+ mbx_ops->encode_cmd(adapter, cmd);
+ mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST);
+
+ if (wait_for_completion_timeout(&mbx->completion,
+ QLC_83XX_MBX_TIMEOUT)) {
+ mbx_ops->decode_resp(adapter, cmd);
+ mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_COMPLETION);
+ } else {
+ dev_err(dev, "%s: Mailbox command timeout, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x\n",
+ __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
+ ahw->op_mode);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ qlcnic_dump_mbx(adapter, cmd);
+ qlcnic_83xx_idc_request_reset(adapter,
+ QLCNIC_FORCE_FW_DUMP_KEY);
+ cmd->rsp_opcode = QLCNIC_RCODE_TIMEOUT;
+ }
+ mbx_ops->dequeue_cmd(adapter, cmd);
+ }
+}
+
+static struct qlcnic_mbx_ops qlcnic_83xx_mbx_ops = {
+ .enqueue_cmd = qlcnic_83xx_enqueue_mbx_cmd,
+ .dequeue_cmd = qlcnic_83xx_dequeue_mbx_cmd,
+ .decode_resp = qlcnic_83xx_decode_mbx_rsp,
+ .encode_cmd = qlcnic_83xx_encode_mbx_cmd,
+ .nofity_fw = qlcnic_83xx_signal_mbx_cmd,
+};
+
+int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx;
+
+ ahw->mailbox = kzalloc(sizeof(*mbx), GFP_KERNEL);
+ if (!ahw->mailbox)
+ return -ENOMEM;
+
+ mbx = ahw->mailbox;
+ mbx->ops = &qlcnic_83xx_mbx_ops;
+ mbx->adapter = adapter;
+
+ spin_lock_init(&mbx->queue_lock);
+ spin_lock_init(&mbx->aen_lock);
+ INIT_LIST_HEAD(&mbx->cmd_q);
+ init_completion(&mbx->completion);
+
+ mbx->work_q = create_singlethread_workqueue("qlcnic_mailbox");
+ if (mbx->work_q == NULL) {
+ kfree(mbx);
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&mbx->work, qlcnic_83xx_mailbox_worker);
+ set_bit(QLC_83XX_MBX_READY, &mbx->status);
+ return 0;
+}
+
+pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (state == pci_channel_io_normal)
+ return PCI_ERS_RESULT_RECOVERED;
+
+ set_bit(__QLCNIC_AER, &adapter->state);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ qlcnic_83xx_aer_stop_poll_work(adapter);
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ int err = 0;
+
+ pdev->error_state = pci_channel_io_normal;
+ err = pci_enable_device(pdev);
+ if (err)
+ goto disconnect;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
+ err = qlcnic_83xx_aer_reset(adapter);
+ if (err == 0)
+ return PCI_ERS_RESULT_RECOVERED;
+disconnect:
+ clear_bit(__QLCNIC_AER, &adapter->state);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+void qlcnic_83xx_io_resume(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+ if (test_and_clear_bit(__QLCNIC_AER, &adapter->state))
+ qlcnic_83xx_aer_start_poll_work(adapter);
+}
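
The rewrite above retires the old spin-under-mbx_lock qlcnic_83xx_mbx_op() in favor of a serialized queue: qlcnic_83xx_enqueue_mbx_cmd() appends under mbx->queue_lock and sizes each caller's wait budget by the queue depth at enqueue time (timeout = total_cmds * QLC_83XX_MBX_TIMEOUT), while qlcnic_83xx_mailbox_worker() drains the list strictly in FIFO order. A minimal single-threaded sketch of that shape follows; the fixed-depth ring and every sketch_* name are illustrative assumptions, and the full-ring check merely stands in for the driver's -EBUSY not-ready path.

#include <stdio.h>

#define SKETCH_MBX_TIMEOUT 5	/* stand-in for QLC_83XX_MBX_TIMEOUT (5 * HZ) */
#define SKETCH_QUEUE_DEPTH 8

struct sketch_cmd {
	unsigned int opcode;
	unsigned long timeout;	/* wait budget fixed at enqueue time */
};

static struct sketch_cmd queue[SKETCH_QUEUE_DEPTH];
static unsigned int head, tail, num_cmds;

/* Enqueue side: in the driver this section runs under mbx->queue_lock. */
static int sketch_enqueue(unsigned int opcode)
{
	struct sketch_cmd *cmd;

	if (num_cmds == SKETCH_QUEUE_DEPTH)
		return -1;	/* stands in for the driver's -EBUSY path */

	cmd = &queue[tail];
	tail = (tail + 1) % SKETCH_QUEUE_DEPTH;
	num_cmds++;

	cmd->opcode = opcode;
	/* Deeper queue at enqueue time => longer wait before timing out. */
	cmd->timeout = (unsigned long)num_cmds * SKETCH_MBX_TIMEOUT;
	return 0;
}

/* Worker side: drains strictly in FIFO order, one command at a time. */
static void sketch_worker(void)
{
	while (num_cmds) {
		struct sketch_cmd *cmd = &queue[head];

		head = (head + 1) % SKETCH_QUEUE_DEPTH;
		num_cmds--;
		printf("issue opcode 0x%x, wait budget %lu\n",
		       cmd->opcode, cmd->timeout);
	}
}

int main(void)
{
	sketch_enqueue(0x23);
	sketch_enqueue(0x45);
	sketch_enqueue(0x90);
	sketch_worker();
	return 0;
}

Funneling everything through one worker is what lets atomic-context callers simply enqueue and leave, while sleeping callers block on a per-command completion.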
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 272f56a2e14b..533e150503af 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -84,11 +84,20 @@
/* Firmware image definitions */
#define QLC_83XX_BOOTLOADER_FLASH_ADDR 0x10000
#define QLC_83XX_FW_FILE_NAME "83xx_fw.bin"
+#define QLC_84XX_FW_FILE_NAME "84xx_fw.bin"
#define QLC_83XX_BOOT_FROM_FLASH 0
#define QLC_83XX_BOOT_FROM_FILE 0x12345678
+#define QLC_FW_FILE_NAME_LEN 20
#define QLC_83XX_MAX_RESET_SEQ_ENTRIES 16
+#define QLC_83XX_MBX_POST_BC_OP 0x1
+#define QLC_83XX_MBX_COMPLETION 0x0
+#define QLC_83XX_MBX_REQUEST 0x1
+
+#define QLC_83XX_MBX_TIMEOUT (5 * HZ)
+#define QLC_83XX_MBX_CMD_LOOP 5000000
+
/* status descriptor mailbox data
* @phy_addr_{low|high}: physical address of buffer
* @sds_ring_size: buffer size
@@ -265,11 +274,7 @@ struct qlcnic_macvlan_mbx {
struct qlc_83xx_fw_info {
const struct firmware *fw;
- u16 major_fw_version;
- u8 minor_fw_version;
- u8 sub_fw_version;
- u8 fw_build_num;
- u8 load_from_file;
+ char fw_file_name[QLC_FW_FILE_NAME_LEN];
};
struct qlc_83xx_reset {
@@ -288,6 +293,7 @@ struct qlc_83xx_reset {
#define QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY 0x1
#define QLC_83XX_IDC_GRACEFULL_RESET 0x2
+#define QLC_83XX_IDC_DISABLE_FW_DUMP 0x4
#define QLC_83XX_IDC_TIMESTAMP 0
#define QLC_83XX_IDC_DURATION 1
#define QLC_83XX_IDC_INIT_TIMEOUT_SECS 30
@@ -397,6 +403,7 @@ enum qlcnic_83xx_states {
#define QLC_83XX_MAX_MC_COUNT 38
#define QLC_83XX_MAX_UC_COUNT 4096
+#define QLC_83XX_PVID_STRIP_CAPABILITY BIT_22
#define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val) (val & 0x80000000)
#define QLC_83XX_GET_LRO_CAPABILITY(val) (val & 0x20)
#define QLC_83XX_GET_LSO_CAPABILITY(val) (val & 0x40)
@@ -404,6 +411,7 @@ enum qlcnic_83xx_states {
#define QLC_83XX_GET_HW_LRO_CAPABILITY(val) (val & 0x400)
#define QLC_83XX_GET_VLAN_ALIGN_CAPABILITY(val) (val & 0x4000)
#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000)
+#define QLC_83XX_ESWITCH_CAPABILITY BIT_23
#define QLC_83XX_VIRTUAL_NIC_MODE 0xFF
#define QLC_83XX_DEFAULT_MODE 0x0
#define QLC_83XX_SRIOV_MODE 0x1
@@ -449,6 +457,20 @@ enum qlcnic_83xx_states {
#define QLC_83xx_FLASH_MAX_WAIT_USEC 100
#define QLC_83XX_FLASH_LOCK_TIMEOUT 10000
+enum qlc_83xx_mbx_cmd_type {
+ QLC_83XX_MBX_CMD_WAIT = 0,
+ QLC_83XX_MBX_CMD_NO_WAIT,
+ QLC_83XX_MBX_CMD_BUSY_WAIT,
+};
+
+enum qlc_83xx_mbx_response_states {
+ QLC_83XX_MBX_RESPONSE_WAIT = 0,
+ QLC_83XX_MBX_RESPONSE_ARRIVED,
+};
+
+#define QLC_83XX_MBX_RESPONSE_FAILED 0x2
+#define QLC_83XX_MBX_RESPONSE_UNKNOWN 0x3
+
/* Additional registers in 83xx */
enum qlc_83xx_ext_regs {
QLCNIC_GLOBAL_RESET = 0,
@@ -498,8 +520,8 @@ enum qlc_83xx_ext_regs {
/* 83xx functions */
int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *);
-int qlcnic_83xx_mbx_op(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
-int qlcnic_83xx_setup_intr(struct qlcnic_adapter *, u8);
+int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *, u8, int);
void qlcnic_83xx_get_func_no(struct qlcnic_adapter *);
int qlcnic_83xx_cam_lock(struct qlcnic_adapter *);
void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *);
@@ -540,7 +562,7 @@ int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *, int);
void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *);
int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *, bool);
int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
-int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *);
+int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *, u8);
void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
struct qlcnic_cmd_args *);
int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *,
@@ -551,7 +573,7 @@ void qlcnic_set_npar_data(struct qlcnic_adapter *, const struct qlcnic_info *,
void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *);
irqreturn_t qlcnic_83xx_handle_aen(int, void *);
int qlcnic_83xx_get_port_info(struct qlcnic_adapter *);
-void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *);
+void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *);
void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *);
irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *);
irqreturn_t qlcnic_83xx_intr(int, void *);
@@ -604,6 +626,7 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *,
struct qlcnic_info *, u8);
int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *);
+int qlcnic_83xx_enable_port_eswitch(struct qlcnic_adapter *, int);
void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
@@ -623,8 +646,6 @@ int qlcnic_83xx_set_led(struct net_device *, enum ethtool_phys_id_state);
int qlcnic_83xx_flash_test(struct qlcnic_adapter *);
int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *);
int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *);
-u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *);
-u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *, u32 *);
void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *);
void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *);
void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *);
@@ -634,4 +655,11 @@ int qlcnic_83xx_idc_init(struct qlcnic_adapter *);
int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *);
int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *);
int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *);
+void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *);
+int qlcnic_83xx_aer_reset(struct qlcnic_adapter *);
+void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *);
+pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *,
+ pci_channel_state_t);
+pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *);
+void qlcnic_83xx_io_resume(struct pci_dev *);
#endif
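
The qlc_83xx_mbx_cmd_type enum added above also encodes an ownership rule: QLC_83XX_MBX_CMD_WAIT callers sleep on the command's completion and clean up themselves, while QLC_83XX_MBX_CMD_NO_WAIT callers (the GFP_ATOMIC paths such as qlcnic_83xx_nic_set_promisc() and qlcnic_83xx_sre_macaddr_change()) hand the heap-allocated command to the mailbox layer, which frees it from qlcnic_83xx_notify_cmd_completion(). A minimal sketch of that hand-off, with illustrative sketch_* names:

#include <stdio.h>
#include <stdlib.h>

enum sketch_cmd_type { SKETCH_CMD_WAIT, SKETCH_CMD_NO_WAIT };

struct sketch_cmd {
	enum sketch_cmd_type type;
	unsigned int opcode;
};

/* Completion path: mirrors the shape of qlcnic_83xx_notify_cmd_completion(). */
static void sketch_complete(struct sketch_cmd *cmd)
{
	if (cmd->type == SKETCH_CMD_NO_WAIT) {
		free(cmd);	/* ownership was handed to the mailbox layer */
		return;
	}
	printf("wake the sleeping caller of 0x%x\n", cmd->opcode);
}

int main(void)
{
	struct sketch_cmd *cmd = malloc(sizeof(*cmd));

	if (!cmd)
		return 1;
	cmd->type = SKETCH_CMD_NO_WAIT;
	cmd->opcode = 0x45;
	/* The caller returns immediately; completion later frees the command. */
	sketch_complete(cmd);
	return 0;
}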
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 345d987aede4..f09e787af0b2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -399,6 +399,7 @@ static void qlcnic_83xx_idc_detach_driver(struct qlcnic_adapter *adapter)
struct net_device *netdev = adapter->netdev;
netif_device_detach(netdev);
+ qlcnic_83xx_detach_mailbox_work(adapter);
/* Disable mailbox interrupt */
qlcnic_83xx_disable_mbx_intr(adapter);
@@ -610,6 +611,9 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
{
int err;
+ qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
+
/* register for NIC IDC AEN Events */
qlcnic_83xx_register_nic_idc_func(adapter, 1);
@@ -617,7 +621,7 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
if (err)
return err;
- qlcnic_83xx_enable_mbx_intrpt(adapter);
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
if (qlcnic_83xx_configure_opmode(adapter)) {
qlcnic_83xx_idc_enter_failed_state(adapter, 1);
@@ -631,6 +635,8 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
if (adapter->portnum == 0)
qlcnic_set_drv_version(adapter);
+
+ qlcnic_dcb_get_info(adapter);
qlcnic_83xx_idc_attach_driver(adapter);
return 0;
@@ -641,7 +647,6 @@ static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter)
struct qlcnic_hardware_context *ahw = adapter->ahw;
qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1);
- set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
@@ -792,7 +797,6 @@ static int qlcnic_83xx_idc_init_state(struct qlcnic_adapter *adapter)
ret = qlcnic_83xx_idc_restart_hw(adapter, 1);
} else {
ret = qlcnic_83xx_idc_check_timeout(adapter, timeout);
- return ret;
}
return ret;
@@ -811,9 +815,10 @@ static int qlcnic_83xx_idc_init_state(struct qlcnic_adapter *adapter)
**/
static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
{
- u32 val;
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
int ret = 0;
+ u32 val;
/* Perform NIC configuration based ready state entry actions */
if (ahw->idc.state_entry(adapter))
@@ -825,7 +830,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
dev_err(&adapter->pdev->dev,
"Error: device temperature %d above limits\n",
adapter->ahw->temp);
- clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
set_bit(__QLCNIC_RESETTING, &adapter->state);
qlcnic_83xx_idc_detach_driver(adapter);
qlcnic_83xx_idc_enter_failed_state(adapter, 1);
@@ -838,7 +843,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
if (ret) {
adapter->flags |= QLCNIC_FW_HANG;
if (!(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
- clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
set_bit(__QLCNIC_RESETTING, &adapter->state);
qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
}
@@ -846,6 +851,8 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
}
if ((val & QLC_83XX_IDC_GRACEFULL_RESET) || ahw->idc.collect_dump) {
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+
/* Move to need reset state and prepare for reset */
qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
return ret;
@@ -883,12 +890,13 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
**/
static int qlcnic_83xx_idc_need_reset_state(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
int ret = 0;
if (adapter->ahw->idc.prev_state != QLC_83XX_IDC_DEV_NEED_RESET) {
qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
set_bit(__QLCNIC_RESETTING, &adapter->state);
- clear_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
if (adapter->ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE)
qlcnic_83xx_disable_vnic_mode(adapter, 1);
@@ -1080,7 +1088,6 @@ static void qlcnic_83xx_setup_idc_parameters(struct qlcnic_adapter *adapter)
adapter->ahw->idc.name = (char **)qlc_83xx_idc_states;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
- set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
/* Check if reset recovery is disabled */
@@ -1191,6 +1198,9 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key)
{
u32 val;
+ if (qlcnic_sriov_vf_check(adapter))
+ return;
+
if (qlcnic_83xx_lock_driver(adapter)) {
dev_err(&adapter->pdev->dev,
"%s:failed, please retry\n", __func__);
@@ -1257,31 +1267,33 @@ static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter)
static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
{
+ struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
+ const struct firmware *fw = fw_info->fw;
u32 dest, *p_cache;
- u64 addr;
+ int i, ret = -EIO;
u8 data[16];
size_t size;
- int i, ret = -EIO;
+ u64 addr;
dest = QLCRDX(adapter->ahw, QLCNIC_FW_IMAGE_ADDR);
- size = (adapter->ahw->fw_info.fw->size & ~0xF);
- p_cache = (u32 *)adapter->ahw->fw_info.fw->data;
+ size = (fw->size & ~0xF);
+ p_cache = (u32 *)fw->data;
addr = (u64)dest;
ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
(u32 *)p_cache, size / 16);
if (ret) {
dev_err(&adapter->pdev->dev, "MS memory write failed\n");
- release_firmware(adapter->ahw->fw_info.fw);
- adapter->ahw->fw_info.fw = NULL;
+ release_firmware(fw);
+ fw_info->fw = NULL;
return -EIO;
}
/* alignment check */
- if (adapter->ahw->fw_info.fw->size & 0xF) {
+ if (fw->size & 0xF) {
addr = dest + size;
- for (i = 0; i < (adapter->ahw->fw_info.fw->size & 0xF); i++)
- data[i] = adapter->ahw->fw_info.fw->data[size + i];
+ for (i = 0; i < (fw->size & 0xF); i++)
+ data[i] = fw->data[size + i];
for (; i < 16; i++)
data[i] = 0;
ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
@@ -1289,13 +1301,13 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
if (ret) {
dev_err(&adapter->pdev->dev,
"MS memory write failed\n");
- release_firmware(adapter->ahw->fw_info.fw);
- adapter->ahw->fw_info.fw = NULL;
+ release_firmware(fw);
+ fw_info->fw = NULL;
return -EIO;
}
}
- release_firmware(adapter->ahw->fw_info.fw);
- adapter->ahw->fw_info.fw = NULL;
+ release_firmware(fw);
+ fw_info->fw = NULL;
return 0;
}
@@ -1941,10 +1953,11 @@ static void qlcnic_83xx_init_hw(struct qlcnic_adapter *p_dev)
static int qlcnic_83xx_load_fw_image_from_host(struct qlcnic_adapter *adapter)
{
+ struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
int err = -EIO;
- if (request_firmware(&adapter->ahw->fw_info.fw,
- QLC_83XX_FW_FILE_NAME, &(adapter->pdev->dev))) {
+ if (request_firmware(&fw_info->fw, fw_info->fw_file_name,
+ &(adapter->pdev->dev))) {
dev_err(&adapter->pdev->dev,
"No file FW image, loading flash FW image.\n");
QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
@@ -1990,36 +2003,6 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
return 0;
}
-/**
-* qlcnic_83xx_config_default_opmode
-*
-* @adapter: adapter structure
-*
-* Configure default driver operating mode
-*
-* Returns: Error code or Success(0)
-* */
-int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *adapter)
-{
- u32 op_mode;
- struct qlcnic_hardware_context *ahw = adapter->ahw;
-
- qlcnic_get_func_no(adapter);
- op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE);
-
- if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state))
- op_mode = QLC_83XX_DEFAULT_OPMODE;
-
- if (op_mode == QLC_83XX_DEFAULT_OPMODE) {
- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
- ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
- } else {
- return -EIO;
- }
-
- return 0;
-}
-
int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
{
int err;
@@ -2039,26 +2022,26 @@ int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
ahw->max_mac_filters = nic_info.max_mac_filters;
ahw->max_mtu = nic_info.max_mtu;
- /* VNIC mode is detected by BIT_23 in capabilities. This bit is also
- * set in case device is SRIOV capable. VNIC and SRIOV are mutually
- * exclusive. So in case of sriov capable device load driver in
- * default mode
+ /* eSwitch capability indicates vNIC mode.
+ * vNIC and SR-IOV are mutually exclusive operational modes,
+ * and SR-IOV takes precedence over vNIC.
+ * If SR-IOV capability is detected, the SR-IOV physical function
+ * is initialized in default mode; SR-IOV virtual function
+ * initialization follows a different code path and operating mode.
*/
- if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state)) {
- ahw->nic_mode = QLC_83XX_DEFAULT_MODE;
- return ahw->nic_mode;
- }
+ if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state))
+ return QLC_83XX_DEFAULT_OPMODE;
- if (ahw->capabilities & BIT_23)
- ahw->nic_mode = QLC_83XX_VIRTUAL_NIC_MODE;
- else
- ahw->nic_mode = QLC_83XX_DEFAULT_MODE;
+ if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
+ return QLC_83XX_VIRTUAL_NIC_MODE;
- return ahw->nic_mode;
+ return QLC_83XX_DEFAULT_OPMODE;
}
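
The precedence encoded above is easy to misread in diff form. As a minimal sketch (helper name hypothetical; constants as used in this patch), the decision reduces to:

    /* Sketch only: SR-IOV capability wins over the eSwitch (vNIC) bit. */
    static int qlc_83xx_pick_opmode(struct qlcnic_adapter *adapter)
    {
            if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state))
                    return QLC_83XX_DEFAULT_OPMODE; /* PF inits in default mode */
            if (adapter->ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
                    return QLC_83XX_VIRTUAL_NIC_MODE;
            return QLC_83XX_DEFAULT_OPMODE;
    }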
int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
int ret;
ret = qlcnic_83xx_get_nic_configuration(adapter);
@@ -2066,11 +2049,16 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
return -EIO;
if (ret == QLC_83XX_VIRTUAL_NIC_MODE) {
+ ahw->nic_mode = QLC_83XX_VIRTUAL_NIC_MODE;
if (qlcnic_83xx_config_vnic_opmode(adapter))
return -EIO;
- } else if (ret == QLC_83XX_DEFAULT_MODE) {
- if (qlcnic_83xx_config_default_opmode(adapter))
- return -EIO;
+
+ } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
+ ahw->nic_mode = QLC_83XX_DEFAULT_MODE;
+ adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
+ ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+ } else {
+ return -EIO;
}
return 0;
@@ -2139,20 +2127,82 @@ static void qlcnic_83xx_clear_function_resources(struct qlcnic_adapter *adapter)
}
}
+static int qlcnic_83xx_get_fw_info(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct pci_dev *pdev = adapter->pdev;
+ struct qlc_83xx_fw_info *fw_info;
+ int err = 0;
+
+ ahw->fw_info = kzalloc(sizeof(*fw_info), GFP_KERNEL);
+ if (!ahw->fw_info) {
+ err = -ENOMEM;
+ } else {
+ fw_info = ahw->fw_info;
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_QLOGIC_QLE834X:
+ strncpy(fw_info->fw_file_name, QLC_83XX_FW_FILE_NAME,
+ QLC_FW_FILE_NAME_LEN);
+ break;
+ case PCI_DEVICE_ID_QLOGIC_QLE844X:
+ strncpy(fw_info->fw_file_name, QLC_84XX_FW_FILE_NAME,
+ QLC_FW_FILE_NAME_LEN);
+ break;
+ default:
+ dev_err(&pdev->dev, "%s: Invalid device id\n",
+ __func__);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ return err;
+}
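
One caveat with the strncpy() calls above: strncpy() does not NUL-terminate the destination when the source is exactly QLC_FW_FILE_NAME_LEN bytes long. Assuming fw_file_name is a fixed char array of that length, a defensive alternative (sketch only) would be the usual kernel idiom:

    /* Sketch: strlcpy() always NUL-terminates, truncating if needed. */
    strlcpy(fw_info->fw_file_name, QLC_83XX_FW_FILE_NAME,
            sizeof(fw_info->fw_file_name));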
+
int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err = 0;
- if (qlcnic_sriov_vf_check(adapter))
- return qlcnic_sriov_vf_init(adapter, pci_using_dac);
+ ahw->msix_supported = !!qlcnic_use_msi_x;
+ err = qlcnic_83xx_init_mailbox_work(adapter);
+ if (err)
+ goto exit;
- if (qlcnic_83xx_check_hw_status(adapter))
- return -EIO;
+ if (qlcnic_sriov_vf_check(adapter)) {
+ err = qlcnic_sriov_vf_init(adapter, pci_using_dac);
+ if (err)
+ goto detach_mbx;
+ else
+ return err;
+ }
+
+ err = qlcnic_83xx_check_hw_status(adapter);
+ if (err)
+ goto detach_mbx;
+
+ if (!qlcnic_83xx_read_flash_descriptor_table(adapter))
+ qlcnic_83xx_read_flash_mfg_id(adapter);
+
+ err = qlcnic_83xx_get_fw_info(adapter);
+ if (err)
+ goto detach_mbx;
- /* Initilaize 83xx mailbox spinlock */
- spin_lock_init(&ahw->mbx_lock);
+ err = qlcnic_83xx_idc_init(adapter);
+ if (err)
+ goto clear_fw_info;
+
+ err = qlcnic_setup_intr(adapter, 0, 0);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
+ goto disable_intr;
+ }
+
+ err = qlcnic_83xx_setup_mbx_intr(adapter);
+ if (err)
+ goto disable_mbx_intr;
- set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
qlcnic_83xx_clear_function_resources(adapter);
INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
@@ -2160,22 +2210,90 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
/* register for NIC IDC AEN Events */
qlcnic_83xx_register_nic_idc_func(adapter, 1);
- if (!qlcnic_83xx_read_flash_descriptor_table(adapter))
- qlcnic_83xx_read_flash_mfg_id(adapter);
-
- if (qlcnic_83xx_idc_init(adapter))
- return -EIO;
-
/* Configure default, SR-IOV or Virtual NIC mode of operation */
- if (qlcnic_83xx_configure_opmode(adapter))
- return -EIO;
+ err = qlcnic_83xx_configure_opmode(adapter);
+ if (err)
+ goto disable_mbx_intr;
/* Perform operating mode specific initialization */
- if (adapter->nic_ops->init_driver(adapter))
- return -EIO;
+ err = adapter->nic_ops->init_driver(adapter);
+ if (err)
+ goto disable_mbx_intr;
+
+ if (adapter->dcb && qlcnic_dcb_attach(adapter))
+ qlcnic_clear_dcb_ops(adapter);
/* Periodically monitor device status */
qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
+ return 0;
+
+disable_mbx_intr:
+ qlcnic_83xx_free_mbx_intr(adapter);
+
+disable_intr:
+ qlcnic_teardown_intr(adapter);
+
+clear_fw_info:
+ kfree(ahw->fw_info);
+
+detach_mbx:
+ qlcnic_83xx_detach_mailbox_work(adapter);
+ qlcnic_83xx_free_mailbox(ahw->mailbox);
+exit:
+ return err;
+}
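
qlcnic_83xx_init() now follows the conventional label-per-resource unwind: each acquisition gets a cleanup label, and a failure jumps to the label that releases everything acquired so far, in reverse order. A minimal standalone sketch of the shape (names hypothetical):

    int setup_example(void)
    {
            int err;

            err = acquire_a();
            if (err)
                    goto out;       /* nothing to undo yet */

            err = acquire_b();
            if (err)
                    goto undo_a;    /* undo in reverse order */

            return 0;

    undo_a:
            release_a();
    out:
            return err;
    }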
+
+void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+
+ clear_bit(QLC_83XX_MBX_READY, &idc->status);
+ cancel_delayed_work_sync(&adapter->fw_work);
+
+ if (ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE)
+ qlcnic_83xx_disable_vnic_mode(adapter, 1);
+
+ qlcnic_83xx_idc_detach_driver(adapter);
+ qlcnic_83xx_register_nic_idc_func(adapter, 0);
+
+ cancel_delayed_work_sync(&adapter->idc_aen_work);
+}
+
+int qlcnic_83xx_aer_reset(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+ int ret = 0;
+ u32 owner;
+
+ /* Mark the previous IDC state as NEED_RESET so
+ * that state_entry() will perform the reattachment
+ * and bring up the device.
+ */
+ idc->prev_state = QLC_83XX_IDC_DEV_NEED_RESET;
+ owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+ if (ahw->pci_func == owner) {
+ ret = qlcnic_83xx_restart_hw(adapter);
+ if (ret < 0)
+ return ret;
+ qlcnic_83xx_idc_clear_registers(adapter, 0);
+ }
+
+ ret = idc->state_entry(adapter);
+ return ret;
+}
+
+void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+ u32 owner;
+
+ idc->prev_state = QLC_83XX_IDC_DEV_READY;
+ owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+ if (ahw->pci_func == owner)
+ qlcnic_83xx_idc_enter_ready_state(adapter, 0);
- return adapter->ahw->idc.err_code;
+ qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, 0);
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
index 599d1fda52f2..0248a4c2f5dd 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -208,7 +208,7 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
return -EIO;
}
- if (ahw->capabilities & BIT_23)
+ if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
adapter->flags |= QLCNIC_ESWITCH_ENABLED;
else
adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
@@ -239,3 +239,41 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter)
return 0;
}
+
+static int qlcnic_83xx_get_eswitch_port_info(struct qlcnic_adapter *adapter,
+ int func, int *port_id)
+{
+ struct qlcnic_info nic_info;
+ int err = 0;
+
+ memset(&nic_info, 0, sizeof(struct qlcnic_info));
+
+ err = qlcnic_get_nic_info(adapter, &nic_info, func);
+ if (err)
+ return err;
+
+ if (nic_info.capabilities & QLC_83XX_ESWITCH_CAPABILITY)
+ *port_id = nic_info.phys_port;
+ else
+ err = -EIO;
+
+ return err;
+}
+
+int qlcnic_83xx_enable_port_eswitch(struct qlcnic_adapter *adapter, int func)
+{
+ int id, err = 0;
+
+ err = qlcnic_83xx_get_eswitch_port_info(adapter, func, &id);
+ if (err)
+ return err;
+
+ if (!(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) {
+ if (!qlcnic_enable_eswitch(adapter, id, 1))
+ adapter->eswitch[id].flags |= QLCNIC_SWITCH_ENABLE;
+ else
+ err = -EIO;
+ }
+
+ return err;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index d09389b33474..86850dd633a1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -38,6 +38,9 @@ static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = {
{QLCNIC_CMD_GET_TEMP_HDR, 4, 1},
{QLCNIC_CMD_82XX_SET_DRV_VER, 4, 1},
{QLCNIC_CMD_GET_LED_STATUS, 4, 2},
+ {QLCNIC_CMD_MQ_TX_CONFIG_INTR, 2, 3},
+ {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
+ {QLCNIC_CMD_DCB_QUERY_PARAM, 4, 1},
};
static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw)
@@ -171,6 +174,7 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
break;
}
dev_err(&pdev->dev, fmt, cmd->rsp.arg[0]);
+ qlcnic_dump_mbx(adapter, cmd);
} else if (rsp == QLCNIC_CDRP_RSP_OK)
cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS;
@@ -243,40 +247,38 @@ qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
{
- void *addr;
- struct qlcnic_hostrq_rx_ctx *prq;
- struct qlcnic_cardrsp_rx_ctx *prsp;
- struct qlcnic_hostrq_rds_ring *prq_rds;
- struct qlcnic_hostrq_sds_ring *prq_sds;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
+ struct net_device *netdev = adapter->netdev;
+ u32 temp_intr_crb_mode, temp_rds_crb_mode;
struct qlcnic_cardrsp_rds_ring *prsp_rds;
struct qlcnic_cardrsp_sds_ring *prsp_sds;
+ struct qlcnic_hostrq_rds_ring *prq_rds;
+ struct qlcnic_hostrq_sds_ring *prq_sds;
struct qlcnic_host_rds_ring *rds_ring;
struct qlcnic_host_sds_ring *sds_ring;
- struct qlcnic_cmd_args cmd;
-
- dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
- u64 phys_addr;
-
+ struct qlcnic_cardrsp_rx_ctx *prsp;
+ struct qlcnic_hostrq_rx_ctx *prq;
u8 i, nrds_rings, nsds_rings;
- u16 temp_u16;
+ struct qlcnic_cmd_args cmd;
size_t rq_size, rsp_size;
u32 cap, reg, val, reg2;
+ u64 phys_addr;
+ u16 temp_u16;
+ void *addr;
int err;
- struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
-
nrds_rings = adapter->max_rds_rings;
nsds_rings = adapter->max_sds_rings;
- rq_size =
- SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
- nsds_rings);
- rsp_size =
- SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
- nsds_rings);
+ rq_size = SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
+ nsds_rings);
+ rsp_size = SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
+ nsds_rings);
addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
- &hostrq_phys_addr, GFP_KERNEL);
+ &hostrq_phys_addr, GFP_KERNEL);
if (addr == NULL)
return -ENOMEM;
prq = addr;
@@ -295,15 +297,20 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
| QLCNIC_CAP0_VALIDOFF);
cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
- temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
- prq->valid_field_offset = cpu_to_le16(temp_u16);
- prq->txrx_sds_binding = nsds_rings - 1;
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test) {
+ cap |= QLCNIC_CAP0_TX_MULTI;
+ } else {
+ temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
+ prq->valid_field_offset = cpu_to_le16(temp_u16);
+ prq->txrx_sds_binding = nsds_rings - 1;
+ temp_intr_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED;
+ prq->host_int_crb_mode = cpu_to_le32(temp_intr_crb_mode);
+ temp_rds_crb_mode = QLCNIC_HOST_RDS_CRB_MODE_UNIQUE;
+ prq->host_rds_crb_mode = cpu_to_le32(temp_rds_crb_mode);
+ }
prq->capabilities[0] = cpu_to_le32(cap);
- prq->host_int_crb_mode =
- cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
- prq->host_rds_crb_mode =
- cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE);
prq->num_rds_rings = cpu_to_le16(nrds_rings);
prq->num_sds_rings = cpu_to_le16(nsds_rings);
@@ -317,10 +324,8 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
le32_to_cpu(prq->rds_ring_offset));
for (i = 0; i < nrds_rings; i++) {
-
rds_ring = &recv_ctx->rds_rings[i];
rds_ring->producer = 0;
-
prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
prq_rds[i].ring_kind = cpu_to_le32(i);
@@ -331,14 +336,16 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
le32_to_cpu(prq->sds_ring_offset));
for (i = 0; i < nsds_rings; i++) {
-
sds_ring = &recv_ctx->sds_rings[i];
sds_ring->consumer = 0;
memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));
-
prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
- prq_sds[i].msi_index = cpu_to_le16(i);
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ prq_sds[i].msi_index = cpu_to_le16(ahw->intr_tbl[i].id);
+ else
+ prq_sds[i].msi_index = cpu_to_le16(i);
}
phys_addr = hostrq_phys_addr;
@@ -361,9 +368,8 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
rds_ring = &recv_ctx->rds_rings[i];
-
reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
- rds_ring->crb_rcv_producer = adapter->ahw->pci_base0 + reg;
+ rds_ring->crb_rcv_producer = ahw->pci_base0 + reg;
}
prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
@@ -371,24 +377,30 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
sds_ring = &recv_ctx->sds_rings[i];
-
reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
- reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
+ if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
+ reg2 = ahw->intr_tbl[i].src;
+ else
+ reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
- sds_ring->crb_sts_consumer = adapter->ahw->pci_base0 + reg;
- sds_ring->crb_intr_mask = adapter->ahw->pci_base0 + reg2;
+ sds_ring->crb_intr_mask = ahw->pci_base0 + reg2;
+ sds_ring->crb_sts_consumer = ahw->pci_base0 + reg;
}
recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
recv_ctx->context_id = le16_to_cpu(prsp->context_id);
recv_ctx->virt_port = prsp->virt_port;
+ netdev_info(netdev, "Rx Context[%d] Created, state 0x%x\n",
+ recv_ctx->context_id, recv_ctx->state);
qlcnic_free_mbx_args(&cmd);
+
out_free_rsp:
dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
cardrsp_phys_addr);
out_free_rq:
dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
+
return err;
}
@@ -416,16 +428,19 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
struct qlcnic_host_tx_ring *tx_ring,
int ring)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct net_device *netdev = adapter->netdev;
struct qlcnic_hostrq_tx_ctx *prq;
struct qlcnic_hostrq_cds_ring *prq_cds;
struct qlcnic_cardrsp_tx_ctx *prsp;
- void *rq_addr, *rsp_addr;
- size_t rq_size, rsp_size;
- u32 temp;
struct qlcnic_cmd_args cmd;
- int err;
- u64 phys_addr;
- dma_addr_t rq_phys_addr, rsp_phys_addr;
+ u32 temp, intr_mask, temp_int_crb_mode;
+ dma_addr_t rq_phys_addr, rsp_phys_addr;
+ int temp_nsds_rings, index, err;
+ void *rq_addr, *rsp_addr;
+ size_t rq_size, rsp_size;
+ u64 phys_addr;
+ u16 msix_id;
/* reset host resources */
tx_ring->producer = 0;
@@ -433,32 +448,42 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
*(tx_ring->hw_consumer) = 0;
rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
- rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
- &rq_phys_addr, GFP_KERNEL | __GFP_ZERO);
+ rq_addr = dma_zalloc_coherent(&adapter->pdev->dev, rq_size,
+ &rq_phys_addr, GFP_KERNEL);
if (!rq_addr)
return -ENOMEM;
rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
- rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
- &rsp_phys_addr, GFP_KERNEL | __GFP_ZERO);
+ rsp_addr = dma_zalloc_coherent(&adapter->pdev->dev, rsp_size,
+ &rsp_phys_addr, GFP_KERNEL);
if (!rsp_addr) {
err = -ENOMEM;
goto out_free_rq;
}
prq = rq_addr;
-
prsp = rsp_addr;
prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
- QLCNIC_CAP0_LSO);
+ QLCNIC_CAP0_LSO);
+ if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
+ temp |= QLCNIC_CAP0_TX_MULTI;
+
prq->capabilities[0] = cpu_to_le32(temp);
- prq->host_int_crb_mode =
- cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
- prq->msi_index = 0;
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test) {
+ temp_nsds_rings = adapter->max_sds_rings;
+ index = temp_nsds_rings + ring;
+ msix_id = ahw->intr_tbl[index].id;
+ prq->msi_index = cpu_to_le16(msix_id);
+ } else {
+ temp_int_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED;
+ prq->host_int_crb_mode = cpu_to_le32(temp_int_crb_mode);
+ prq->msi_index = 0;
+ }
prq->interrupt_ctl = 0;
prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);
@@ -480,15 +505,25 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
err = qlcnic_issue_cmd(adapter, &cmd);
if (err == QLCNIC_RCODE_SUCCESS) {
+ tx_ring->state = le32_to_cpu(prsp->host_ctx_state);
temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
tx_ring->ctx_id = le16_to_cpu(prsp->context_id);
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ index = adapter->max_sds_rings + ring;
+ intr_mask = ahw->intr_tbl[index].src;
+ tx_ring->crb_intr_mask = ahw->pci_base0 + intr_mask;
+ }
+
+ netdev_info(netdev, "Tx Context[0x%x] Created, state 0x%x\n",
+ tx_ring->ctx_id, tx_ring->state);
} else {
- dev_err(&adapter->pdev->dev,
- "Failed to create tx ctx in firmware%d\n", err);
+ netdev_err(netdev, "Failed to create tx ctx in firmware%d\n",
+ err);
err = -EIO;
}
-
qlcnic_free_mbx_args(&cmd);
out_free_rsp:
@@ -618,6 +653,13 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
}
}
+ if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) &&
+ qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test) {
+ err = qlcnic_82xx_mq_intrpt(dev, 1);
+ if (err)
+ return err;
+ }
+
err = qlcnic_fw_cmd_create_rx_ctx(dev);
if (err)
goto err_out;
@@ -639,13 +681,19 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
}
set_bit(__QLCNIC_FW_ATTACHED, &dev->state);
+
return 0;
err_out:
+ if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) &&
+ qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test)
+ qlcnic_82xx_config_intrpt(dev, 0);
+
if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) {
if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
qlcnic_83xx_config_intrpt(dev, 0);
}
+
return err;
}
@@ -659,6 +707,12 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
qlcnic_fw_cmd_del_tx_ctx(adapter,
&adapter->tx_ring[ring]);
+ if (qlcnic_82xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ qlcnic_82xx_config_intrpt(adapter, 0);
+
if (qlcnic_83xx_check(adapter) &&
(adapter->flags & QLCNIC_MSIX_ENABLED)) {
if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
@@ -723,8 +777,54 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
}
}
+int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *adapter, u8 op_type)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_cmd_args cmd;
+ u32 type, val;
+ int i, err = 0;
+
+ for (i = 0; i < ahw->num_msix; i++) {
+ qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_MQ_TX_CONFIG_INTR);
+ type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL;
+ val = type | (ahw->intr_tbl[i].type << 4);
+ if (ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX)
+ val |= (ahw->intr_tbl[i].id << 16);
+ cmd.req.arg[1] = val;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ netdev_err(netdev, "Failed to %s interrupts %d\n",
+ op_type == QLCNIC_INTRPT_ADD ? "Add" :
+ "Delete", err);
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+ }
+ val = cmd.rsp.arg[1];
+ if (LSB(val)) {
+ netdev_info(netdev,
+ "failed to configure interrupt for %d\n",
+ ahw->intr_tbl[i].id);
+ continue;
+ }
+ if (op_type) {
+ ahw->intr_tbl[i].id = MSW(val);
+ ahw->intr_tbl[i].enabled = 1;
+ ahw->intr_tbl[i].src = cmd.rsp.arg[2];
+ } else {
+ ahw->intr_tbl[i].id = i;
+ ahw->intr_tbl[i].enabled = 0;
+ ahw->intr_tbl[i].src = 0;
+ }
+ qlcnic_free_mbx_args(&cmd);
+ }
+
+ return err;
+}
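
The request word assembled in the loop above packs three fields into cmd.req.arg[1]; the layout, inferred from the shifts in the code rather than from firmware documentation:

    /* arg[1] layout as built above (field widths inferred from the shifts):
     *   bits  0..3   operation: QLCNIC_INTRPT_ADD or QLCNIC_INTRPT_DEL
     *   bits  4..15  interrupt type (e.g. QLCNIC_INTRPT_MSIX)
     *   bits 16..31  MSI-X vector id, only set for MSI-X entries
     */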
-int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
+int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
+ u8 function)
{
int err, i;
struct qlcnic_cmd_args cmd;
@@ -734,7 +834,7 @@ int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
if (err)
return err;
- cmd.req.arg[1] = adapter->ahw->pci_func | BIT_8;
+ cmd.req.arg[1] = function | BIT_8;
err = qlcnic_issue_cmd(adapter, &cmd);
if (err == QLCNIC_RCODE_SUCCESS) {
@@ -765,8 +865,8 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args cmd;
size_t nic_size = sizeof(struct qlcnic_info_le);
- nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
- &nic_dma_t, GFP_KERNEL | __GFP_ZERO);
+ nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
+ &nic_dma_t, GFP_KERNEL);
if (!nic_info_addr)
return -ENOMEM;
@@ -819,8 +919,8 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
return err;
- nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
- &nic_dma_t, GFP_KERNEL | __GFP_ZERO);
+ nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
+ &nic_dma_t, GFP_KERNEL);
if (!nic_info_addr)
return -ENOMEM;
@@ -872,9 +972,8 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
size_t npar_size = sizeof(struct qlcnic_pci_info_le);
size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
- pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
- &pci_info_dma_t,
- GFP_KERNEL | __GFP_ZERO);
+ pci_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, pci_size,
+ &pci_info_dma_t, GFP_KERNEL);
if (!pci_info_addr)
return -ENOMEM;
@@ -974,8 +1073,8 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
return -EIO;
}
- stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
- &stats_dma_t, GFP_KERNEL | __GFP_ZERO);
+ stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
+ &stats_dma_t, GFP_KERNEL);
if (!stats_addr)
return -ENOMEM;
@@ -1030,8 +1129,8 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
if (mac_stats == NULL)
return -ENOMEM;
- stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
- &stats_dma_t, GFP_KERNEL | __GFP_ZERO);
+ stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
+ &stats_dma_t, GFP_KERNEL);
if (!stats_addr)
return -ENOMEM;
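
The dma_zalloc_coherent() conversions throughout this file are behavior-preserving: the helper is, roughly, the open-coded pattern it replaces:

    static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
                                            dma_addr_t *dma_handle, gfp_t flag)
    {
            return dma_alloc_coherent(dev, size, dma_handle,
                                      flag | __GFP_ZERO);
    }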
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
new file mode 100644
index 000000000000..d62d5ce432ec
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -0,0 +1,1179 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include <linux/types.h>
+#include "qlcnic.h"
+
+#define QLC_DCB_NUM_PARAM 3
+#define QLC_DCB_LOCAL_IDX 0
+#define QLC_DCB_OPER_IDX 1
+#define QLC_DCB_PEER_IDX 2
+
+#define QLC_DCB_GET_MAP(V) (1 << V)
+
+#define QLC_DCB_AEN_BIT 0x2
+#define QLC_DCB_FW_VER 0x2
+#define QLC_DCB_MAX_TC 0x8
+#define QLC_DCB_MAX_APP 0x8
+#define QLC_DCB_MAX_PRIO QLC_DCB_MAX_TC
+#define QLC_DCB_MAX_PG QLC_DCB_MAX_TC
+
+#define QLC_DCB_TSA_SUPPORT(V) (V & 0x1)
+#define QLC_DCB_ETS_SUPPORT(V) ((V >> 1) & 0x1)
+#define QLC_DCB_VERSION_SUPPORT(V) ((V >> 2) & 0xf)
+#define QLC_DCB_MAX_NUM_TC(V) ((V >> 20) & 0xf)
+#define QLC_DCB_MAX_NUM_ETS_TC(V) ((V >> 24) & 0xf)
+#define QLC_DCB_MAX_NUM_PFC_TC(V) ((V >> 28) & 0xf)
+#define QLC_DCB_GET_TC_PRIO(X, P) ((X >> (P * 3)) & 0x7)
+#define QLC_DCB_GET_PGID_PRIO(X, P) ((X >> (P * 8)) & 0xff)
+#define QLC_DCB_GET_BWPER_PG(X, P) ((X >> (P * 8)) & 0xff)
+#define QLC_DCB_GET_TSA_PG(X, P) ((X >> (P * 8)) & 0xff)
+#define QLC_DCB_GET_PFC_PRIO(X, P) (((X >> 24) >> P) & 0x1)
+#define QLC_DCB_GET_PROTO_ID_APP(X) ((X >> 8) & 0xffff)
+#define QLC_DCB_GET_SELECTOR_APP(X) (X & 0xff)
+
+#define QLC_DCB_LOCAL_PARAM_FWID 0x3
+#define QLC_DCB_OPER_PARAM_FWID 0x1
+#define QLC_DCB_PEER_PARAM_FWID 0x2
+
+#define QLC_83XX_DCB_GET_NUMAPP(X) ((X >> 2) & 0xf)
+#define QLC_83XX_DCB_TSA_VALID(X) (X & 0x1)
+#define QLC_83XX_DCB_PFC_VALID(X) ((X >> 1) & 0x1)
+#define QLC_83XX_DCB_GET_PRIOMAP_APP(X) (X >> 24)
+
+#define QLC_82XX_DCB_GET_NUMAPP(X) ((X >> 12) & 0xf)
+#define QLC_82XX_DCB_TSA_VALID(X) ((X >> 4) & 0x1)
+#define QLC_82XX_DCB_PFC_VALID(X) ((X >> 5) & 0x1)
+#define QLC_82XX_DCB_GET_PRIOVAL_APP(X) ((X >> 24) & 0x7)
+#define QLC_82XX_DCB_GET_PRIOMAP_APP(X) (1 << X)
+#define QLC_82XX_DCB_PRIO_TC_MAP (0x76543210)
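
These accessors decode the DCBX capability word returned in a mailbox response. With a hypothetical value, for illustration:

    /* Hypothetical word: TSA + ETS supported, version 2, 8 TCs across
     * TC/ETS/PFC alike.
     */
    u32 cap = (8 << 28) | (8 << 24) | (8 << 20) | (2 << 2) | 0x3;

    QLC_DCB_TSA_SUPPORT(cap);        /* -> 1 */
    QLC_DCB_ETS_SUPPORT(cap);        /* -> 1 */
    QLC_DCB_VERSION_SUPPORT(cap);    /* -> 2 */
    QLC_DCB_MAX_NUM_TC(cap);         /* -> 8 */
    QLC_DCB_MAX_NUM_ETS_TC(cap);     /* -> 8 */
    QLC_DCB_MAX_NUM_PFC_TC(cap);     /* -> 8 */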
+
+static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops;
+
+static void qlcnic_dcb_aen_work(struct work_struct *);
+static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *);
+
+static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_adapter *);
+static void __qlcnic_dcb_free(struct qlcnic_adapter *);
+static int __qlcnic_dcb_attach(struct qlcnic_adapter *);
+static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *, char *);
+static void __qlcnic_dcb_get_info(struct qlcnic_adapter *);
+
+static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *);
+static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *, char *, u8);
+static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *);
+static void qlcnic_82xx_dcb_handle_aen(struct qlcnic_adapter *, void *);
+
+static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *);
+static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_adapter *, char *, u8);
+static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *);
+static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *, bool);
+static void qlcnic_83xx_dcb_handle_aen(struct qlcnic_adapter *, void *);
+
+struct qlcnic_dcb_capability {
+ bool tsa_capability;
+ bool ets_capability;
+ u8 max_num_tc;
+ u8 max_ets_tc;
+ u8 max_pfc_tc;
+ u8 dcb_capability;
+};
+
+struct qlcnic_dcb_param {
+ u32 hdr_prio_pfc_map[2];
+ u32 prio_pg_map[2];
+ u32 pg_bw_map[2];
+ u32 pg_tsa_map[2];
+ u32 app[QLC_DCB_MAX_APP];
+};
+
+struct qlcnic_dcb_mbx_params {
+ /* 1st local, 2nd operational, 3rd remote */
+ struct qlcnic_dcb_param type[3];
+ u32 prio_tc_map;
+};
+
+struct qlcnic_82xx_dcb_param_mbx_le {
+ __le32 hdr_prio_pfc_map[2];
+ __le32 prio_pg_map[2];
+ __le32 pg_bw_map[2];
+ __le32 pg_tsa_map[2];
+ __le32 app[QLC_DCB_MAX_APP];
+};
+
+enum qlcnic_dcb_selector {
+ QLC_SELECTOR_DEF = 0x0,
+ QLC_SELECTOR_ETHER,
+ QLC_SELECTOR_TCP,
+ QLC_SELECTOR_UDP,
+};
+
+enum qlcnic_dcb_prio_type {
+ QLC_PRIO_NONE = 0,
+ QLC_PRIO_GROUP,
+ QLC_PRIO_LINK,
+};
+
+enum qlcnic_dcb_pfc_type {
+ QLC_PFC_DISABLED = 0,
+ QLC_PFC_FULL,
+ QLC_PFC_TX,
+ QLC_PFC_RX
+};
+
+struct qlcnic_dcb_prio_cfg {
+ bool valid;
+ enum qlcnic_dcb_pfc_type pfc_type;
+};
+
+struct qlcnic_dcb_pg_cfg {
+ bool valid;
+ u8 total_bw_percent; /* of link/port BW */
+ u8 prio_count;
+ u8 tsa_type;
+};
+
+struct qlcnic_dcb_tc_cfg {
+ bool valid;
+ struct qlcnic_dcb_prio_cfg prio_cfg[QLC_DCB_MAX_PRIO];
+ enum qlcnic_dcb_prio_type prio_type; /* always prio_link */
+ u8 link_percent; /* % of link bandwidth */
+ u8 bwg_percent; /* % of BWG's bandwidth */
+ u8 up_tc_map;
+ u8 pgid;
+};
+
+struct qlcnic_dcb_app {
+ bool valid;
+ enum qlcnic_dcb_selector selector;
+ u16 protocol;
+ u8 priority;
+};
+
+struct qlcnic_dcb_cee {
+ struct qlcnic_dcb_tc_cfg tc_cfg[QLC_DCB_MAX_TC];
+ struct qlcnic_dcb_pg_cfg pg_cfg[QLC_DCB_MAX_PG];
+ struct qlcnic_dcb_app app[QLC_DCB_MAX_APP];
+ bool tc_param_valid;
+ bool pfc_mode_enable;
+};
+
+struct qlcnic_dcb_cfg {
+ /* 0 - local, 1 - operational, 2 - remote */
+ struct qlcnic_dcb_cee type[QLC_DCB_NUM_PARAM];
+ struct qlcnic_dcb_capability capability;
+ u32 version;
+};
+
+static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = {
+ .init_dcbnl_ops = __qlcnic_init_dcbnl_ops,
+ .free = __qlcnic_dcb_free,
+ .attach = __qlcnic_dcb_attach,
+ .query_hw_capability = __qlcnic_dcb_query_hw_capability,
+ .get_info = __qlcnic_dcb_get_info,
+
+ .get_hw_capability = qlcnic_83xx_dcb_get_hw_capability,
+ .query_cee_param = qlcnic_83xx_dcb_query_cee_param,
+ .get_cee_cfg = qlcnic_83xx_dcb_get_cee_cfg,
+ .register_aen = qlcnic_83xx_dcb_register_aen,
+ .handle_aen = qlcnic_83xx_dcb_handle_aen,
+};
+
+static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = {
+ .init_dcbnl_ops = __qlcnic_init_dcbnl_ops,
+ .free = __qlcnic_dcb_free,
+ .attach = __qlcnic_dcb_attach,
+ .query_hw_capability = __qlcnic_dcb_query_hw_capability,
+ .get_info = __qlcnic_dcb_get_info,
+
+ .get_hw_capability = qlcnic_82xx_dcb_get_hw_capability,
+ .query_cee_param = qlcnic_82xx_dcb_query_cee_param,
+ .get_cee_cfg = qlcnic_82xx_dcb_get_cee_cfg,
+ .handle_aen = qlcnic_82xx_dcb_handle_aen,
+};
+
+static u8 qlcnic_dcb_get_num_app(struct qlcnic_adapter *adapter, u32 val)
+{
+ if (qlcnic_82xx_check(adapter))
+ return QLC_82XX_DCB_GET_NUMAPP(val);
+ else
+ return QLC_83XX_DCB_GET_NUMAPP(val);
+}
+
+static inline u8 qlcnic_dcb_pfc_hdr_valid(struct qlcnic_adapter *adapter,
+ u32 val)
+{
+ if (qlcnic_82xx_check(adapter))
+ return QLC_82XX_DCB_PFC_VALID(val);
+ else
+ return QLC_83XX_DCB_PFC_VALID(val);
+}
+
+static inline u8 qlcnic_dcb_tsa_hdr_valid(struct qlcnic_adapter *adapter,
+ u32 val)
+{
+ if (qlcnic_82xx_check(adapter))
+ return QLC_82XX_DCB_TSA_VALID(val);
+ else
+ return QLC_83XX_DCB_TSA_VALID(val);
+}
+
+static inline u8 qlcnic_dcb_get_prio_map_app(struct qlcnic_adapter *adapter,
+ u32 val)
+{
+ if (qlcnic_82xx_check(adapter))
+ return QLC_82XX_DCB_GET_PRIOMAP_APP(val);
+ else
+ return QLC_83XX_DCB_GET_PRIOMAP_APP(val);
+}
+
+static int qlcnic_dcb_prio_count(u8 up_tc_map)
+{
+ int j;
+
+ for (j = 0; j < QLC_DCB_MAX_TC; j++)
+ if (up_tc_map & QLC_DCB_GET_MAP(j))
+ break;
+
+ return j;
+}
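
qlcnic_dcb_prio_count() returns the index of the lowest set bit in the user-priority map, or QLC_DCB_MAX_TC when the map is empty, i.e. ffs(map) - 1 for a nonzero map:

    qlcnic_dcb_prio_count(0x00);    /* -> QLC_DCB_MAX_TC (8): no bit set */
    qlcnic_dcb_prio_count(0x08);    /* -> 3 */
    qlcnic_dcb_prio_count(0x0a);    /* -> 1: lowest set bit wins */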
+
+static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_adapter *adapter)
+{
+ if (test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ adapter->netdev->dcbnl_ops = &qlcnic_dcbnl_ops;
+}
+
+static void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_82xx_check(adapter))
+ adapter->dcb->ops = &qlcnic_82xx_dcb_ops;
+ else if (qlcnic_83xx_check(adapter))
+ adapter->dcb->ops = &qlcnic_83xx_dcb_ops;
+}
+
+int __qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb;
+
+ dcb = kzalloc(sizeof(struct qlcnic_dcb), GFP_ATOMIC);
+ if (!dcb)
+ return -ENOMEM;
+
+ adapter->dcb = dcb;
+ dcb->adapter = adapter;
+ qlcnic_set_dcb_ops(adapter);
+
+ return 0;
+}
+
+static void __qlcnic_dcb_free(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (!dcb)
+ return;
+
+ qlcnic_dcb_register_aen(adapter, 0);
+
+ while (test_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
+ usleep_range(10000, 11000);
+
+ cancel_delayed_work_sync(&dcb->aen_work);
+
+ if (dcb->wq) {
+ destroy_workqueue(dcb->wq);
+ dcb->wq = NULL;
+ }
+
+ kfree(dcb->cfg);
+ dcb->cfg = NULL;
+ kfree(dcb->param);
+ dcb->param = NULL;
+ kfree(dcb);
+ adapter->dcb = NULL;
+}
+
+static void __qlcnic_dcb_get_info(struct qlcnic_adapter *adapter)
+{
+ qlcnic_dcb_get_hw_capability(adapter);
+ qlcnic_dcb_get_cee_cfg(adapter);
+ qlcnic_dcb_register_aen(adapter, 1);
+}
+
+static int __qlcnic_dcb_attach(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+ int err = 0;
+
+ INIT_DELAYED_WORK(&dcb->aen_work, qlcnic_dcb_aen_work);
+
+ dcb->wq = create_singlethread_workqueue("qlcnic-dcb");
+ if (!dcb->wq) {
+ dev_err(&adapter->pdev->dev,
+ "DCB workqueue allocation failed. DCB will be disabled\n");
+ return -1;
+ }
+
+ dcb->cfg = kzalloc(sizeof(struct qlcnic_dcb_cfg), GFP_ATOMIC);
+ if (!dcb->cfg) {
+ err = -ENOMEM;
+ goto out_free_wq;
+ }
+
+ dcb->param = kzalloc(sizeof(struct qlcnic_dcb_mbx_params), GFP_ATOMIC);
+ if (!dcb->param) {
+ err = -ENOMEM;
+ goto out_free_cfg;
+ }
+
+ qlcnic_dcb_get_info(adapter);
+
+ return 0;
+out_free_cfg:
+ kfree(dcb->cfg);
+ dcb->cfg = NULL;
+
+out_free_wq:
+ destroy_workqueue(dcb->wq);
+ dcb->wq = NULL;
+
+ return err;
+}
+
+static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter,
+ char *buf)
+{
+ struct qlcnic_cmd_args cmd;
+ u32 mbx_out;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_CAP);
+ if (err)
+ return err;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to query DCBX capability, err %d\n", err);
+ } else {
+ mbx_out = cmd.rsp.arg[1];
+ if (buf)
+ memcpy(buf, &mbx_out, sizeof(u32));
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+static int __qlcnic_dcb_get_capability(struct qlcnic_adapter *adapter, u32 *val)
+{
+ struct qlcnic_dcb_capability *cap = &adapter->dcb->cfg->capability;
+ u32 mbx_out;
+ int err;
+
+ memset(cap, 0, sizeof(struct qlcnic_dcb_capability));
+
+ err = qlcnic_dcb_query_hw_capability(adapter, (char *)val);
+ if (err)
+ return err;
+
+ mbx_out = *val;
+ if (QLC_DCB_TSA_SUPPORT(mbx_out))
+ cap->tsa_capability = true;
+
+ if (QLC_DCB_ETS_SUPPORT(mbx_out))
+ cap->ets_capability = true;
+
+ cap->max_num_tc = QLC_DCB_MAX_NUM_TC(mbx_out);
+ cap->max_ets_tc = QLC_DCB_MAX_NUM_ETS_TC(mbx_out);
+ cap->max_pfc_tc = QLC_DCB_MAX_NUM_PFC_TC(mbx_out);
+
+ if (cap->max_num_tc > QLC_DCB_MAX_TC ||
+ cap->max_ets_tc > cap->max_num_tc ||
+ cap->max_pfc_tc > cap->max_num_tc) {
+ dev_err(&adapter->pdev->dev, "Invalid DCB configuration\n");
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+ struct qlcnic_dcb_capability *cap;
+ u32 mbx_out;
+ int err;
+
+ err = __qlcnic_dcb_get_capability(adapter, &mbx_out);
+ if (err)
+ return err;
+
+ cap = &cfg->capability;
+ cap->dcb_capability = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_LLD_MANAGED;
+
+ if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
+ set_bit(__QLCNIC_DCB_STATE, &adapter->state);
+
+ return err;
+}
+
+static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *adapter,
+ char *buf, u8 type)
+{
+ u16 size = sizeof(struct qlcnic_82xx_dcb_param_mbx_le);
+ struct qlcnic_82xx_dcb_param_mbx_le *prsp_le;
+ struct device *dev = &adapter->pdev->dev;
+ dma_addr_t cardrsp_phys_addr;
+ struct qlcnic_dcb_param rsp;
+ struct qlcnic_cmd_args cmd;
+ u64 phys_addr;
+ void *addr;
+ int err, i;
+
+ switch (type) {
+ case QLC_DCB_LOCAL_PARAM_FWID:
+ case QLC_DCB_OPER_PARAM_FWID:
+ case QLC_DCB_PEER_PARAM_FWID:
+ break;
+ default:
+ dev_err(dev, "Invalid parameter type %d\n", type);
+ return -EINVAL;
+ }
+
+ addr = dma_alloc_coherent(&adapter->pdev->dev, size, &cardrsp_phys_addr,
+ GFP_KERNEL);
+ if (addr == NULL)
+ return -ENOMEM;
+
+ prsp_le = addr;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_PARAM);
+ if (err)
+ goto out_free_rsp;
+
+ phys_addr = cardrsp_phys_addr;
+ cmd.req.arg[1] = size | (type << 16);
+ cmd.req.arg[2] = MSD(phys_addr);
+ cmd.req.arg[3] = LSD(phys_addr);
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(dev, "Failed to query DCBX parameter, err %d\n", err);
+ goto out;
+ }
+
+ memset(&rsp, 0, sizeof(struct qlcnic_dcb_param));
+ rsp.hdr_prio_pfc_map[0] = le32_to_cpu(prsp_le->hdr_prio_pfc_map[0]);
+ rsp.hdr_prio_pfc_map[1] = le32_to_cpu(prsp_le->hdr_prio_pfc_map[1]);
+ rsp.prio_pg_map[0] = le32_to_cpu(prsp_le->prio_pg_map[0]);
+ rsp.prio_pg_map[1] = le32_to_cpu(prsp_le->prio_pg_map[1]);
+ rsp.pg_bw_map[0] = le32_to_cpu(prsp_le->pg_bw_map[0]);
+ rsp.pg_bw_map[1] = le32_to_cpu(prsp_le->pg_bw_map[1]);
+ rsp.pg_tsa_map[0] = le32_to_cpu(prsp_le->pg_tsa_map[0]);
+ rsp.pg_tsa_map[1] = le32_to_cpu(prsp_le->pg_tsa_map[1]);
+
+ for (i = 0; i < QLC_DCB_MAX_APP; i++)
+ rsp.app[i] = le32_to_cpu(prsp_le->app[i]);
+
+ if (buf)
+ memcpy(buf, &rsp, size);
+out:
+ qlcnic_free_mbx_args(&cmd);
+
+out_free_rsp:
+ dma_free_coherent(&adapter->pdev->dev, size, addr, cardrsp_phys_addr);
+
+ return err;
+}
+
+static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb_mbx_params *mbx;
+ int err;
+
+ mbx = adapter->dcb->param;
+ if (!mbx)
+ return 0;
+
+ err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[0],
+ QLC_DCB_LOCAL_PARAM_FWID);
+ if (err)
+ return err;
+
+ err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[1],
+ QLC_DCB_OPER_PARAM_FWID);
+ if (err)
+ return err;
+
+ err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[2],
+ QLC_DCB_PEER_PARAM_FWID);
+ if (err)
+ return err;
+
+ mbx->prio_tc_map = QLC_82XX_DCB_PRIO_TC_MAP;
+
+ qlcnic_dcb_data_cee_param_map(adapter);
+
+ return err;
+}
+
+static void qlcnic_dcb_aen_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_dcb *dcb;
+
+ dcb = container_of(work, struct qlcnic_dcb, aen_work.work);
+ adapter = dcb->adapter;
+
+ qlcnic_dcb_get_cee_cfg(adapter);
+ clear_bit(__QLCNIC_DCB_IN_AEN, &adapter->state);
+}
+
+static void qlcnic_82xx_dcb_handle_aen(struct qlcnic_adapter *adapter,
+ void *data)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (test_and_set_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
+ return;
+
+ queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
+}
+
+static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb_capability *cap = &adapter->dcb->cfg->capability;
+ u32 mbx_out;
+ int err;
+
+ err = __qlcnic_dcb_get_capability(adapter, &mbx_out);
+ if (err)
+ return err;
+
+ if (mbx_out & BIT_2)
+ cap->dcb_capability = DCB_CAP_DCBX_VER_CEE;
+ if (mbx_out & BIT_3)
+ cap->dcb_capability |= DCB_CAP_DCBX_VER_IEEE;
+ if (cap->dcb_capability)
+ cap->dcb_capability |= DCB_CAP_DCBX_LLD_MANAGED;
+
+ if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
+ set_bit(__QLCNIC_DCB_STATE, &adapter->state);
+
+ return err;
+}
+
+static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_adapter *adapter,
+ char *buf, u8 idx)
+{
+ struct qlcnic_dcb_mbx_params mbx_out;
+ int err, i, j, k, max_app, size;
+ struct qlcnic_dcb_param *each;
+ struct qlcnic_cmd_args cmd;
+ u32 val;
+ char *p;
+
+ size = 0;
+ memset(&mbx_out, 0, sizeof(struct qlcnic_dcb_mbx_params));
+ memset(buf, 0, sizeof(struct qlcnic_dcb_mbx_params));
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_PARAM);
+ if (err)
+ return err;
+
+ cmd.req.arg[0] |= QLC_DCB_FW_VER << 29;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to query DCBX param, err %d\n", err);
+ goto out;
+ }
+
+ mbx_out.prio_tc_map = cmd.rsp.arg[1];
+ p = memcpy(buf, &mbx_out, sizeof(u32));
+ k = 2;
+ p += sizeof(u32);
+
+ for (j = 0; j < QLC_DCB_NUM_PARAM; j++) {
+ each = &mbx_out.type[j];
+
+ each->hdr_prio_pfc_map[0] = cmd.rsp.arg[k++];
+ each->hdr_prio_pfc_map[1] = cmd.rsp.arg[k++];
+ each->prio_pg_map[0] = cmd.rsp.arg[k++];
+ each->prio_pg_map[1] = cmd.rsp.arg[k++];
+ each->pg_bw_map[0] = cmd.rsp.arg[k++];
+ each->pg_bw_map[1] = cmd.rsp.arg[k++];
+ each->pg_tsa_map[0] = cmd.rsp.arg[k++];
+ each->pg_tsa_map[1] = cmd.rsp.arg[k++];
+ val = each->hdr_prio_pfc_map[0];
+
+ max_app = qlcnic_dcb_get_num_app(adapter, val);
+ for (i = 0; i < max_app; i++)
+ each->app[i] = cmd.rsp.arg[i + k];
+
+ size = 16 * sizeof(u32);
+ memcpy(p, &each->hdr_prio_pfc_map[0], size);
+ p += size;
+ if (j == 0)
+ k = 18;
+ else
+ k = 34;
+ }
+out:
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
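
The index arithmetic above (k starting at 2, then jumping to 18 and 34) follows the mailbox response layout, spelled out here as inferred from the code:

    /* rsp.arg[] layout as consumed above:
     *   arg[1]       prio_tc_map
     *   arg[2..17]   local parameter set   (16 words)
     *   arg[18..33]  operational parameter set
     *   arg[34..49]  peer parameter set
     * Each 16-word set: hdr_prio_pfc_map[2], prio_pg_map[2],
     * pg_bw_map[2], pg_tsa_map[2], app[QLC_DCB_MAX_APP].
     */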
+
+static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+ int err;
+
+ err = qlcnic_dcb_query_cee_param(adapter, (char *)dcb->param, 0);
+ if (err)
+ return err;
+
+ qlcnic_dcb_data_cee_param_map(adapter);
+
+ return err;
+}
+
+static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *adapter,
+ bool flag)
+{
+ u8 val = (flag ? QLCNIC_CMD_INIT_NIC_FUNC : QLCNIC_CMD_STOP_NIC_FUNC);
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, val);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = QLC_DCB_AEN_BIT;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev, "Failed to %s DCBX AEN, err %d\n",
+ (flag ? "register" : "unregister"), err);
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+static void qlcnic_83xx_dcb_handle_aen(struct qlcnic_adapter *adapter,
+ void *data)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+ u32 *val = data;
+
+ if (test_and_set_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
+ return;
+
+ if (*val & BIT_8)
+ set_bit(__QLCNIC_DCB_STATE, &adapter->state);
+ else
+ clear_bit(__QLCNIC_DCB_STATE, &adapter->state);
+
+ queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
+}
+
+static void qlcnic_dcb_fill_cee_tc_params(struct qlcnic_dcb_mbx_params *mbx,
+ struct qlcnic_dcb_param *each,
+ struct qlcnic_dcb_cee *type)
+{
+ struct qlcnic_dcb_tc_cfg *tc_cfg;
+ u8 i, tc, pgid;
+
+ for (i = 0; i < QLC_DCB_MAX_PRIO; i++) {
+ tc = QLC_DCB_GET_TC_PRIO(mbx->prio_tc_map, i);
+ tc_cfg = &type->tc_cfg[tc];
+ tc_cfg->valid = true;
+ tc_cfg->up_tc_map |= QLC_DCB_GET_MAP(i);
+
+ if (QLC_DCB_GET_PFC_PRIO(each->hdr_prio_pfc_map[1], i) &&
+ type->pfc_mode_enable) {
+ tc_cfg->prio_cfg[i].valid = true;
+ tc_cfg->prio_cfg[i].pfc_type = QLC_PFC_FULL;
+ }
+
+ if (i < 4)
+ pgid = QLC_DCB_GET_PGID_PRIO(each->prio_pg_map[0], i);
+ else
+ pgid = QLC_DCB_GET_PGID_PRIO(each->prio_pg_map[1], i);
+
+ tc_cfg->pgid = pgid;
+
+ tc_cfg->prio_type = QLC_PRIO_LINK;
+ type->pg_cfg[tc_cfg->pgid].prio_count++;
+ }
+}
+
+static void qlcnic_dcb_fill_cee_pg_params(struct qlcnic_dcb_param *each,
+ struct qlcnic_dcb_cee *type)
+{
+ struct qlcnic_dcb_pg_cfg *pg_cfg;
+ u8 i, tsa, bw_per;
+
+ for (i = 0; i < QLC_DCB_MAX_PG; i++) {
+ pg_cfg = &type->pg_cfg[i];
+ pg_cfg->valid = true;
+
+ if (i < 4) {
+ bw_per = QLC_DCB_GET_BWPER_PG(each->pg_bw_map[0], i);
+ tsa = QLC_DCB_GET_TSA_PG(each->pg_tsa_map[0], i);
+ } else {
+ bw_per = QLC_DCB_GET_BWPER_PG(each->pg_bw_map[1], i);
+ tsa = QLC_DCB_GET_TSA_PG(each->pg_tsa_map[1], i);
+ }
+
+ pg_cfg->total_bw_percent = bw_per;
+ pg_cfg->tsa_type = tsa;
+ }
+}
+
+static void
+qlcnic_dcb_fill_cee_app_params(struct qlcnic_adapter *adapter, u8 idx,
+ struct qlcnic_dcb_param *each,
+ struct qlcnic_dcb_cee *type)
+{
+ struct qlcnic_dcb_app *app;
+ u8 i, num_app, map, cnt;
+ struct dcb_app new_app;
+
+ num_app = qlcnic_dcb_get_num_app(adapter, each->hdr_prio_pfc_map[0]);
+ for (i = 0; i < num_app; i++) {
+ app = &type->app[i];
+ app->valid = true;
+
+ /* Firmware selector is 1-based; CEE selectors are 0-based, hence -1 */
+ app->selector = QLC_DCB_GET_SELECTOR_APP(each->app[i]) - 1;
+ new_app.selector = app->selector;
+ app->protocol = QLC_DCB_GET_PROTO_ID_APP(each->app[i]);
+ new_app.protocol = app->protocol;
+ map = qlcnic_dcb_get_prio_map_app(adapter, each->app[i]);
+ cnt = qlcnic_dcb_prio_count(map);
+
+ if (cnt >= QLC_DCB_MAX_TC)
+ cnt = 0;
+
+ app->priority = cnt;
+ new_app.priority = cnt;
+
+ if (idx == QLC_DCB_OPER_IDX && adapter->netdev->dcbnl_ops)
+ dcb_setapp(adapter->netdev, &new_app);
+ }
+}
+
+static void qlcnic_dcb_map_cee_params(struct qlcnic_adapter *adapter, u8 idx)
+{
+ struct qlcnic_dcb_mbx_params *mbx = adapter->dcb->param;
+ struct qlcnic_dcb_param *each = &mbx->type[idx];
+ struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+ struct qlcnic_dcb_cee *type = &cfg->type[idx];
+
+ type->tc_param_valid = false;
+ type->pfc_mode_enable = false;
+ memset(type->tc_cfg, 0,
+ sizeof(struct qlcnic_dcb_tc_cfg) * QLC_DCB_MAX_TC);
+ memset(type->pg_cfg, 0,
+ sizeof(struct qlcnic_dcb_pg_cfg) * QLC_DCB_MAX_TC);
+
+ if (qlcnic_dcb_pfc_hdr_valid(adapter, each->hdr_prio_pfc_map[0]) &&
+ cfg->capability.max_pfc_tc)
+ type->pfc_mode_enable = true;
+
+ if (qlcnic_dcb_tsa_hdr_valid(adapter, each->hdr_prio_pfc_map[0]) &&
+ cfg->capability.max_ets_tc)
+ type->tc_param_valid = true;
+
+ qlcnic_dcb_fill_cee_tc_params(mbx, each, type);
+ qlcnic_dcb_fill_cee_pg_params(each, type);
+ qlcnic_dcb_fill_cee_app_params(adapter, idx, each, type);
+}
+
+static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < QLC_DCB_NUM_PARAM; i++)
+ qlcnic_dcb_map_cee_params(adapter, i);
+
+ dcbnl_cee_notify(adapter->netdev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
+}
+
+static u8 qlcnic_dcb_get_state(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ return test_bit(__QLCNIC_DCB_STATE, &adapter->state);
+}
+
+static void qlcnic_dcb_get_perm_hw_addr(struct net_device *netdev, u8 *addr)
+{
+ memcpy(addr, netdev->dev_addr, netdev->addr_len);
+}
+
+static void
+qlcnic_dcb_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio,
+ u8 *pgid, u8 *bw_per, u8 *up_tc_map)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_tc_cfg *tc_cfg, *temp;
+ struct qlcnic_dcb_cee *type;
+ u8 i, cnt, pg;
+
+ type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
+ *prio = *pgid = *bw_per = *up_tc_map = 0;
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
+ !type->tc_param_valid)
+ return;
+
+ if (tc < 0 || (tc >= QLC_DCB_MAX_TC))
+ return;
+
+ tc_cfg = &type->tc_cfg[tc];
+ if (!tc_cfg->valid)
+ return;
+
+ *pgid = tc_cfg->pgid;
+ *prio = tc_cfg->prio_type;
+ *up_tc_map = tc_cfg->up_tc_map;
+ pg = *pgid;
+
+ for (i = 0, cnt = 0; i < QLC_DCB_MAX_TC; i++) {
+ temp = &type->tc_cfg[i];
+ if (temp->valid && (pg == temp->pgid))
+ cnt++;
+ }
+
+ tc_cfg->bwg_percent = (100 / cnt);
+ *bw_per = tc_cfg->bwg_percent;
+}
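
The final loop splits the group's bandwidth evenly among the traffic classes mapped to the same PG, e.g.:

    /* Four valid TCs sharing one PG -> cnt == 4 -> *bw_per == 25.
     * cnt is always >= 1 (the queried TC itself matches its own pgid),
     * so the division cannot be by zero.
     */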
+
+static void qlcnic_dcb_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid,
+ u8 *bw_pct)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_pg_cfg *pgcfg;
+ struct qlcnic_dcb_cee *type;
+
+ *bw_pct = 0;
+ type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
+ !type->tc_param_valid)
+ return;
+
+ if (pgid < 0 || pgid >= QLC_DCB_MAX_PG)
+ return;
+
+ pgcfg = &type->pg_cfg[pgid];
+ if (!pgcfg->valid)
+ return;
+
+ *bw_pct = pgcfg->total_bw_percent;
+}
+
+static void qlcnic_dcb_get_pfc_cfg(struct net_device *netdev, int prio,
+ u8 *setting)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_tc_cfg *tc_cfg;
+ u8 val = QLC_DCB_GET_MAP(prio);
+ struct qlcnic_dcb_cee *type;
+ u8 i;
+
+ *setting = 0;
+ type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
+ !type->pfc_mode_enable)
+ return;
+
+ for (i = 0; i < QLC_DCB_MAX_TC; i++) {
+ tc_cfg = &type->tc_cfg[i];
+ if (!tc_cfg->valid)
+ continue;
+
+ if ((val & tc_cfg->up_tc_map) && (tc_cfg->prio_cfg[prio].valid))
+ *setting = tc_cfg->prio_cfg[prio].pfc_type;
+ }
+}
+
+static u8 qlcnic_dcb_get_capability(struct net_device *netdev, int capid,
+ u8 *cap)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ return 0;
+
+ switch (capid) {
+ case DCB_CAP_ATTR_PG:
+ case DCB_CAP_ATTR_UP2TC:
+ case DCB_CAP_ATTR_PFC:
+ case DCB_CAP_ATTR_GSP:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PG_TCS:
+ case DCB_CAP_ATTR_PFC_TCS:
+ *cap = 0x80; /* 8 priorities for PGs */
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = adapter->dcb->cfg->capability.dcb_capability;
+ break;
+ default:
+ *cap = false;
+ }
+
+ return 0;
+}
+
+static int qlcnic_dcb_get_num_tcs(struct net_device *netdev, int attr, u8 *num)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ return -EINVAL;
+
+ switch (attr) {
+ case DCB_NUMTCS_ATTR_PG:
+ *num = cfg->capability.max_ets_tc;
+ return 0;
+ case DCB_NUMTCS_ATTR_PFC:
+ *num = cfg->capability.max_pfc_tc;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static u8 qlcnic_dcb_get_app(struct net_device *netdev, u8 idtype, u16 id)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct dcb_app app = {
+ .selector = idtype,
+ .protocol = id,
+ };
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ return 0;
+
+ return dcb_getapp(netdev, &app);
+}
+
+static u8 qlcnic_dcb_get_pfc_state(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ return 0;
+
+ return dcb->cfg->type[QLC_DCB_OPER_IDX].pfc_mode_enable;
+}
+
+static u8 qlcnic_dcb_get_dcbx(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ return 0;
+
+ return cfg->capability.dcb_capability;
+}
+
+static u8 qlcnic_dcb_get_feat_cfg(struct net_device *netdev, int fid, u8 *flag)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cee *type;
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ return 1;
+
+ type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
+ *flag = 0;
+
+ switch (fid) {
+ case DCB_FEATCFG_ATTR_PG:
+ if (type->tc_param_valid)
+ *flag |= DCB_FEATCFG_ENABLE;
+ else
+ *flag |= DCB_FEATCFG_ERROR;
+ break;
+ case DCB_FEATCFG_ATTR_PFC:
+ if (type->pfc_mode_enable) {
+ if (type->tc_cfg[0].prio_cfg[0].pfc_type)
+ *flag |= DCB_FEATCFG_ENABLE;
+ } else {
+ *flag |= DCB_FEATCFG_ERROR;
+ }
+ break;
+ case DCB_FEATCFG_ATTR_APP:
+ *flag |= DCB_FEATCFG_ENABLE;
+ break;
+ default:
+ netdev_err(netdev, "Invalid Feature ID %d\n", fid);
+ return 1;
+ }
+
+ return 0;
+}
+
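+/* Rx-direction PG config is not supported, so report zeroed values. */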
+static inline void
+qlcnic_dcb_get_pg_tc_cfg_rx(struct net_device *netdev, int prio, u8 *prio_type,
+ u8 *pgid, u8 *bw_pct, u8 *up_map)
+{
+ *prio_type = *pgid = *bw_pct = *up_map = 0;
+}
+
+static inline void
+qlcnic_dcb_get_pg_bwg_cfg_rx(struct net_device *netdev, int pgid, u8 *bw_pct)
+{
+ *bw_pct = 0;
+}
+
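+/* Report how many valid application entries the CEE peer advertised. */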
+static int qlcnic_dcb_peer_app_info(struct net_device *netdev,
+ struct dcb_peer_app_info *info,
+ u16 *app_count)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cee *peer;
+ int i;
+
+ *app_count = 0;
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ return 0;
+
+ peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
+
+ for (i = 0; i < QLC_DCB_MAX_APP; i++) {
+ if (peer->app[i].valid)
+ (*app_count)++;
+ }
+
+ return 0;
+}
+
+static int qlcnic_dcb_peer_app_table(struct net_device *netdev,
+ struct dcb_app *table)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cee *peer;
+ struct qlcnic_dcb_app *app;
+ int i, j;
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ return 0;
+
+ peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
+
+ for (i = 0, j = 0; i < QLC_DCB_MAX_APP; i++) {
+ app = &peer->app[i];
+ if (!app->valid)
+ continue;
+
+ table[j].selector = app->selector;
+ table[j].priority = app->priority;
+ table[j++].protocol = app->protocol;
+ }
+
+ return 0;
+}
+
+static int qlcnic_dcb_cee_peer_get_pg(struct net_device *netdev,
+ struct cee_pg *pg)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cee *peer;
+ u8 i, j, k, map;
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ return 0;
+
+ peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
+
+ for (i = 0, j = 0; i < QLC_DCB_MAX_PG; i++) {
+ if (!peer->pg_cfg[i].valid)
+ continue;
+
+ pg->pg_bw[j] = peer->pg_cfg[i].total_bw_percent;
+
+ for (k = 0; k < QLC_DCB_MAX_TC; k++) {
+ if (peer->tc_cfg[k].valid &&
+ (peer->tc_cfg[k].pgid == i)) {
+ map = peer->tc_cfg[k].up_tc_map;
+ pg->prio_pg[j++] = map;
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int qlcnic_dcb_cee_peer_get_pfc(struct net_device *netdev,
+ struct cee_pfc *pfc)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+ struct qlcnic_dcb_tc_cfg *tc;
+ struct qlcnic_dcb_cee *peer;
+ u8 i, setting, prio;
+
+ pfc->pfc_en = 0;
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ return 0;
+
+ peer = &cfg->type[QLC_DCB_PEER_IDX];
+
+ for (i = 0; i < QLC_DCB_MAX_TC; i++) {
+ tc = &peer->tc_cfg[i];
+ prio = qlcnic_dcb_prio_count(tc->up_tc_map);
+
+ setting = 0;
+ qlcnic_dcb_get_pfc_cfg(netdev, prio, &setting);
+ if (setting)
+ pfc->pfc_en |= QLC_DCB_GET_MAP(i);
+ }
+
+ pfc->tcs_supported = cfg->capability.max_pfc_tc;
+
+ return 0;
+}
+
+static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops = {
+ .getstate = qlcnic_dcb_get_state,
+ .getpermhwaddr = qlcnic_dcb_get_perm_hw_addr,
+ .getpgtccfgtx = qlcnic_dcb_get_pg_tc_cfg_tx,
+ .getpgbwgcfgtx = qlcnic_dcb_get_pg_bwg_cfg_tx,
+ .getpfccfg = qlcnic_dcb_get_pfc_cfg,
+ .getcap = qlcnic_dcb_get_capability,
+ .getnumtcs = qlcnic_dcb_get_num_tcs,
+ .getapp = qlcnic_dcb_get_app,
+ .getpfcstate = qlcnic_dcb_get_pfc_state,
+ .getdcbx = qlcnic_dcb_get_dcbx,
+ .getfeatcfg = qlcnic_dcb_get_feat_cfg,
+
+ .getpgtccfgrx = qlcnic_dcb_get_pg_tc_cfg_rx,
+ .getpgbwgcfgrx = qlcnic_dcb_get_pg_bwg_cfg_rx,
+
+ .peer_getappinfo = qlcnic_dcb_peer_app_info,
+ .peer_getapptable = qlcnic_dcb_peer_app_table,
+ .cee_peer_getpg = qlcnic_dcb_cee_peer_get_pg,
+ .cee_peer_getpfc = qlcnic_dcb_cee_peer_get_pfc,
+};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
new file mode 100644
index 000000000000..b87ce9fb503e
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
@@ -0,0 +1,41 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#ifndef __QLCNIC_DCBX_H
+#define __QLCNIC_DCBX_H
+
+void qlcnic_clear_dcb_ops(struct qlcnic_adapter *);
+
+#ifdef CONFIG_QLCNIC_DCB
+int __qlcnic_register_dcb(struct qlcnic_adapter *);
+#else
+static inline int __qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+{ return 0; }
+#endif
+
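+/* Chip-specific DCB operations, dispatched through adapter->dcb->ops. */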
+struct qlcnic_dcb_ops {
+ void (*init_dcbnl_ops) (struct qlcnic_adapter *);
+ void (*free) (struct qlcnic_adapter *);
+ int (*attach) (struct qlcnic_adapter *);
+ int (*query_hw_capability) (struct qlcnic_adapter *, char *);
+ int (*get_hw_capability) (struct qlcnic_adapter *);
+ void (*get_info) (struct qlcnic_adapter *);
+ int (*query_cee_param) (struct qlcnic_adapter *, char *, u8);
+ int (*get_cee_cfg) (struct qlcnic_adapter *);
+ int (*register_aen) (struct qlcnic_adapter *, bool);
+ void (*handle_aen) (struct qlcnic_adapter *, void *);
+};
+
+struct qlcnic_dcb {
+ struct qlcnic_dcb_mbx_params *param;
+ struct qlcnic_adapter *adapter;
+ struct delayed_work aen_work;
+ struct workqueue_struct *wq;
+ struct qlcnic_dcb_ops *ops;
+ struct qlcnic_dcb_cfg *cfg;
+};
+#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 7aac23ab31d1..4d7ad0074d1c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -125,6 +125,14 @@ static const char qlcnic_83xx_mac_stats_strings[][ETH_GSTRING_LEN] = {
};
#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
+
+static const char qlcnic_tx_ring_stats_strings[][ETH_GSTRING_LEN] = {
+ "xmit_on",
+ "xmit_off",
+ "xmit_called",
+ "xmit_finished",
+};
+
static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = {
"ctx_rx_bytes",
"ctx_rx_pkts",
@@ -630,15 +638,15 @@ qlcnic_set_ringparam(struct net_device *dev,
static void qlcnic_get_channels(struct net_device *dev,
struct ethtool_channels *channel)
{
- int min;
struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int min;
min = min_t(int, adapter->ahw->max_rx_ques, num_online_cpus());
channel->max_rx = rounddown_pow_of_two(min);
- channel->max_tx = adapter->ahw->max_tx_ques;
+ channel->max_tx = min_t(int, QLCNIC_MAX_TX_RINGS, num_online_cpus());
channel->rx_count = adapter->max_sds_rings;
- channel->tx_count = adapter->ahw->max_tx_ques;
+ channel->tx_count = adapter->max_drv_tx_rings;
}
static int qlcnic_set_channels(struct net_device *dev,
@@ -646,18 +654,27 @@ static int qlcnic_set_channels(struct net_device *dev,
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
int err;
+ int txq = 0;
- if (channel->other_count || channel->combined_count ||
- channel->tx_count != channel->max_tx)
+ if (channel->other_count || channel->combined_count)
return -EINVAL;
- err = qlcnic_validate_max_rss(adapter, channel->rx_count);
- if (err)
- return err;
+ if (channel->rx_count) {
+ err = qlcnic_validate_max_rss(adapter, channel->rx_count);
+ if (err)
+ return err;
+ }
+
+ if (channel->tx_count) {
+ err = qlcnic_validate_max_tx_rings(adapter, channel->tx_count);
+ if (err)
+ return err;
+ txq = channel->tx_count;
+ }
- err = qlcnic_set_max_rss(adapter, channel->rx_count, 0);
- netdev_info(dev, "allocated 0x%x sds rings\n",
- adapter->max_sds_rings);
+ err = qlcnic_set_max_rss(adapter, channel->rx_count, txq);
+ netdev_info(dev, "allocated 0x%x sds rings and 0x%x tx rings\n",
+ adapter->max_sds_rings, adapter->max_drv_tx_rings);
return err;
}
@@ -893,6 +910,7 @@ free_diag_res:
clear_diag_irq:
adapter->max_sds_rings = max_sds_rings;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
return ret;
}
@@ -966,6 +984,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int max_drv_tx_rings = adapter->max_drv_tx_rings;
int max_sds_rings = adapter->max_sds_rings;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -1006,9 +1025,9 @@ int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
msleep(500);
qlcnic_process_rcv_ring_diag(sds_ring);
if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
- netdev_info(netdev, "firmware didnt respond to loopback"
- " configure request\n");
- ret = -QLCNIC_FW_NOT_RESPOND;
+ netdev_info(netdev,
+ "Firmware didn't sent link up event to loopback request\n");
+ ret = -ETIMEDOUT;
goto free_res;
} else if (adapter->ahw->diag_cnt) {
ret = adapter->ahw->diag_cnt;
@@ -1025,6 +1044,7 @@ int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
clear_it:
adapter->max_sds_rings = max_sds_rings;
+ adapter->max_drv_tx_rings = max_drv_tx_rings;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return ret;
}
@@ -1077,11 +1097,21 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
break;
case ETH_SS_STATS:
+ num_stats = ARRAY_SIZE(qlcnic_tx_ring_stats_strings);
+ for (i = 0; i < adapter->max_drv_tx_rings; i++) {
+ for (index = 0; index < num_stats; index++) {
+ sprintf(data, "tx_ring_%d %s", i,
+ qlcnic_tx_ring_stats_strings[index]);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
for (index = 0; index < QLCNIC_STATS_LEN; index++) {
memcpy(data + index * ETH_GSTRING_LEN,
qlcnic_gstrings_stats[index].stat_string,
ETH_GSTRING_LEN);
}
+
if (qlcnic_83xx_check(adapter)) {
num_stats = ARRAY_SIZE(qlcnic_83xx_tx_stats_strings);
for (i = 0; i < num_stats; i++, index++)
@@ -1173,11 +1203,22 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
+ struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_esw_statistics port_stats;
struct qlcnic_mac_statistics mac_stats;
- int index, ret, length, size;
+ int index, ret, length, size, ring;
char *p;
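+ /* Four u64 counters per Tx ring: xmit_on, xmit_off, xmit_called,
+ * xmit_finished; zero them first in case the device is down.
+ */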
+ memset(data, 0, adapter->max_drv_tx_rings * 4 * sizeof(u64));
+ for (ring = 0, index = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ tx_ring = &adapter->tx_ring[ring];
+ *data++ = tx_ring->xmit_on;
+ *data++ = tx_ring->xmit_off;
+ *data++ = tx_ring->xmit_called;
+ *data++ = tx_ring->xmit_finished;
+ }
+ }
memset(data, 0, stats->n_stats * sizeof(u64));
length = QLCNIC_STATS_LEN;
for (index = 0; index < length; index++) {
@@ -1468,6 +1509,68 @@ static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl)
adapter->ahw->msg_enable = msglvl;
}
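+/* On 84xx the dump enable state lives in the shared IDC control
+ * register and is updated under the driver lock; other chips track it
+ * in the software fw_dump state.
+ */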
+int qlcnic_enable_fw_dump_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ u32 val;
+
+ if (qlcnic_84xx_check(adapter)) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val &= ~QLC_83XX_IDC_DISABLE_FW_DUMP;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+
+ qlcnic_83xx_unlock_driver(adapter);
+ } else {
+ fw_dump->enable = true;
+ }
+
+ dev_info(&adapter->pdev->dev, "FW dump enabled\n");
+
+ return 0;
+}
+
+static int qlcnic_disable_fw_dump_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ u32 val;
+
+ if (qlcnic_84xx_check(adapter)) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val |= QLC_83XX_IDC_DISABLE_FW_DUMP;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+
+ qlcnic_83xx_unlock_driver(adapter);
+ } else {
+ fw_dump->enable = false;
+ }
+
+ dev_info(&adapter->pdev->dev, "FW dump disabled\n");
+
+ return 0;
+}
+
+bool qlcnic_check_fw_dump_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ bool state;
+ u32 val;
+
+ if (qlcnic_84xx_check(adapter)) {
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ state = (val & QLC_83XX_IDC_DISABLE_FW_DUMP) ? false : true;
+ } else {
+ state = fw_dump->enable;
+ }
+
+ return state;
+}
+
static int
qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
{
@@ -1484,7 +1587,7 @@ qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
else
dump->len = 0;
- if (!fw_dump->enable)
+ if (!qlcnic_check_fw_dump_state(adapter))
dump->flag = ETH_FW_DUMP_DISABLE;
else
dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
@@ -1532,77 +1635,111 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
return 0;
}
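+/* Changing the driver capture mask is only allowed while the FW dump
+ * feature is enabled.
+ */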
+static int qlcnic_set_dump_mask(struct qlcnic_adapter *adapter, u32 mask)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ struct net_device *netdev = adapter->netdev;
+
+ if (!qlcnic_check_fw_dump_state(adapter)) {
+ netdev_info(netdev,
+ "Can not change driver mask to 0x%x. FW dump not enabled\n",
+ mask);
+ return -EOPNOTSUPP;
+ }
+
+ fw_dump->tmpl_hdr->drv_cap_mask = mask;
+ netdev_info(netdev, "Driver mask changed to: 0x%x\n", mask);
+ return 0;
+}
+
static int
qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
{
- int i;
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ bool valid_mask = false;
+ int i, ret = 0;
u32 state;
switch (val->flag) {
case QLCNIC_FORCE_FW_DUMP_KEY:
if (!fw_dump->tmpl_hdr) {
netdev_err(netdev, "FW dump not supported\n");
- return -ENOTSUPP;
+ ret = -EOPNOTSUPP;
+ break;
}
- if (!fw_dump->enable) {
+
+ if (!qlcnic_check_fw_dump_state(adapter)) {
netdev_info(netdev, "FW dump not enabled\n");
- return 0;
+ ret = -EOPNOTSUPP;
+ break;
}
+
if (fw_dump->clr) {
netdev_info(netdev,
- "Previous dump not cleared, not forcing dump\n");
- return 0;
+ "Previous dump not cleared, not forcing dump\n");
+ break;
}
+
netdev_info(netdev, "Forcing a FW dump\n");
qlcnic_dev_request_reset(adapter, val->flag);
break;
case QLCNIC_DISABLE_FW_DUMP:
- if (fw_dump->enable && fw_dump->tmpl_hdr) {
- netdev_info(netdev, "Disabling FW dump\n");
- fw_dump->enable = 0;
+ if (!fw_dump->tmpl_hdr) {
+ netdev_err(netdev, "FW dump not supported\n");
+ ret = -EOPNOTSUPP;
+ break;
}
- return 0;
+
+ ret = qlcnic_disable_fw_dump_state(adapter);
+ break;
+
case QLCNIC_ENABLE_FW_DUMP:
if (!fw_dump->tmpl_hdr) {
netdev_err(netdev, "FW dump not supported\n");
- return -ENOTSUPP;
- }
- if (!fw_dump->enable) {
- netdev_info(netdev, "Enabling FW dump\n");
- fw_dump->enable = 1;
+ ret = -EOPNOTSUPP;
+ break;
}
- return 0;
+
+ ret = qlcnic_enable_fw_dump_state(adapter);
+ break;
+
case QLCNIC_FORCE_FW_RESET:
netdev_info(netdev, "Forcing a FW reset\n");
qlcnic_dev_request_reset(adapter, val->flag);
adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
- return 0;
+ break;
+
case QLCNIC_SET_QUIESCENT:
case QLCNIC_RESET_QUIESCENT:
state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
netdev_info(netdev, "Device in FAILED state\n");
- return 0;
+ break;
+
default:
if (!fw_dump->tmpl_hdr) {
netdev_err(netdev, "FW dump not supported\n");
- return -ENOTSUPP;
+ ret = -EOPNOTSUPP;
+ break;
}
+
for (i = 0; i < ARRAY_SIZE(qlcnic_fw_dump_level); i++) {
if (val->flag == qlcnic_fw_dump_level[i]) {
- fw_dump->tmpl_hdr->drv_cap_mask =
- val->flag;
- netdev_info(netdev, "Driver mask changed to: 0x%x\n",
- fw_dump->tmpl_hdr->drv_cap_mask);
- return 0;
+ valid_mask = true;
+ break;
}
}
- netdev_info(netdev, "Invalid dump level: 0x%x\n", val->flag);
- return -EINVAL;
+
+ if (valid_mask) {
+ ret = qlcnic_set_dump_mask(adapter, val->flag);
+ } else {
+ netdev_info(netdev, "Invalid dump level: 0x%x\n",
+ val->flag);
+ ret = -EINVAL;
+ }
}
- return 0;
+ return ret;
}
const struct ethtool_ops qlcnic_ethtool_ops = {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 4d5f59b2d153..f8adc7b01f1f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -387,7 +387,7 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
return -EIO;
- tx_ring = adapter->tx_ring;
+ tx_ring = &adapter->tx_ring[0];
__netif_tx_lock_bh(tx_ring->txq);
producer = tx_ring->producer;
@@ -740,6 +740,22 @@ int qlcnic_82xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
return 0;
}
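+/* Cache the physical port's MAC address for ndo_get_phys_port_id. */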
+int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *adapter)
+{
+ u8 mac[ETH_ALEN];
+ int ret;
+
+ ret = qlcnic_get_mac_address(adapter, mac,
+ adapter->ahw->physical_port);
+ if (ret)
+ return ret;
+
+ memcpy(adapter->ahw->phys_port_id, mac, ETH_ALEN);
+ adapter->flags |= QLCNIC_HAS_PHYS_PORT_ID;
+
+ return 0;
+}
+
/*
* Send the interrupt coalescing parameter set by ethtool to the card.
*/
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 4a71b28effcb..272c356cf9b2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -85,8 +85,11 @@ enum qlcnic_regs {
#define QLCNIC_CMD_GET_TEMP_HDR 0x30
#define QLCNIC_CMD_BC_EVENT_SETUP 0x31
#define QLCNIC_CMD_CONFIG_VPORT 0x32
+#define QLCNIC_CMD_DCB_QUERY_CAP 0x34
+#define QLCNIC_CMD_DCB_QUERY_PARAM 0x35
#define QLCNIC_CMD_GET_MAC_STATS 0x37
#define QLCNIC_CMD_82XX_SET_DRV_VER 0x38
+#define QLCNIC_CMD_MQ_TX_CONFIG_INTR 0x39
#define QLCNIC_CMD_GET_LED_STATUS 0x3C
#define QLCNIC_CMD_CONFIGURE_RSS 0x41
#define QLCNIC_CMD_CONFIG_INTR_COAL 0x43
@@ -122,6 +125,7 @@ enum qlcnic_regs {
#define QLCNIC_MBX_COMP_EVENT 0x8100
#define QLCNIC_MBX_REQUEST_EVENT 0x8101
#define QLCNIC_MBX_TIME_EXTEND_EVENT 0x8102
+#define QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT 0x8110
#define QLCNIC_MBX_SFP_INSERT_EVENT 0x8130
#define QLCNIC_MBX_SFP_REMOVE_EVENT 0x8131
@@ -149,7 +153,6 @@ struct ethtool_stats;
struct pci_device_id;
struct qlcnic_host_sds_ring;
struct qlcnic_host_tx_ring;
-struct qlcnic_host_tx_ring;
struct qlcnic_hardware_context;
struct qlcnic_adapter;
@@ -173,10 +176,12 @@ int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *, u8);
void qlcnic_82xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
void qlcnic_82xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32);
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *, u8);
+int qlcnic_82xx_setup_intr(struct qlcnic_adapter *, u8, int);
irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *);
int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *);
+int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *, int);
+int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *, u8);
int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *);
int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *,
struct qlcnic_host_tx_ring *tx_ring, int);
@@ -184,7 +189,7 @@ void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *);
void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *,
struct qlcnic_host_tx_ring *);
int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
-int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*);
+int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*, u8);
int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index 974d62607e13..66c26cf7a2b8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -127,12 +127,12 @@ void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
}
}
-void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
{
struct qlcnic_cmd_buffer *cmd_buf;
struct qlcnic_skb_frag *buffrag;
int i, j;
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
cmd_buf = tx_ring->cmd_buf_arr;
for (i = 0; i < tx_ring->num_desc; i++) {
@@ -241,7 +241,13 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
sds_ring->irq = adapter->msix_entries[ring].vector;
sds_ring->adapter = adapter;
sds_ring->num_desc = adapter->num_rxd;
-
+ if (qlcnic_82xx_check(adapter)) {
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ sds_ring->tx_ring = &adapter->tx_ring[ring];
+ else
+ sds_ring->tx_ring = &adapter->tx_ring[0];
+ }
for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
INIT_LIST_HEAD(&sds_ring->free_list[i]);
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 6946d354f44f..b7b245b43b87 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -127,6 +127,23 @@
struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
struct qlcnic_host_rds_ring *, u16, u16);
+inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ writel(0x0, tx_ring->crb_intr_mask);
+}
+
+static inline void qlcnic_disable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ writel(1, tx_ring->crb_intr_mask);
+}
+
inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
struct qlcnic_host_tx_ring *tx_ring)
{
@@ -147,10 +164,7 @@ static inline u8 qlcnic_mac_hash(u64 mac)
static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
u16 handle, u8 ring_id)
{
- unsigned short device = adapter->pdev->device;
-
- if ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
- (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X))
+ if (qlcnic_83xx_check(adapter))
return handle | (ring_id << 15);
else
return handle;
@@ -357,14 +371,14 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
}
static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
- struct cmd_desc_type0 *first_desc, struct sk_buff *skb)
+ struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
+ struct qlcnic_host_tx_ring *tx_ring)
{
u8 l4proto, opcode = 0, hdr_len = 0;
u16 flags = 0, vlan_tci = 0;
int copied, offset, copy_len, size;
struct cmd_desc_type0 *hwdesc;
struct vlan_ethhdr *vh;
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
u16 protocol = ntohs(skb->protocol);
u32 producer = tx_ring->producer;
@@ -547,7 +561,7 @@ static inline void qlcnic_clear_cmddesc(u64 *desc)
netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_cmd_buffer *pbuf;
struct qlcnic_skb_frag *buffrag;
struct cmd_desc_type0 *hwdesc, *first_desc;
@@ -556,10 +570,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
int i, k, frag_count, delta = 0;
u32 producer, num_txd;
- num_txd = tx_ring->num_desc;
-
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
- netif_stop_queue(netdev);
+ netif_tx_stop_all_queues(netdev);
return NETDEV_TX_BUSY;
}
@@ -569,7 +581,14 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
goto drop_packet;
}
+ if (qlcnic_check_multi_tx(adapter))
+ tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
+ else
+ tx_ring = &adapter->tx_ring[0];
+ num_txd = tx_ring->num_desc;
+
frag_count = skb_shinfo(skb)->nr_frags + 1;
+
/* 14 frags supported for normal packet and
* 32 frags supported for TSO packet
*/
@@ -584,11 +603,12 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
- netif_stop_queue(netdev);
+ netif_tx_stop_queue(tx_ring->txq);
if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
- netif_start_queue(netdev);
+ netif_tx_start_queue(tx_ring->txq);
} else {
adapter->stats.xmit_off++;
+ tx_ring->xmit_off++;
return NETDEV_TX_BUSY;
}
}
@@ -643,7 +663,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tx_ring->producer = get_next_index(producer, num_txd);
smp_mb();
- if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
+ if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb, tx_ring)))
goto unwind_buff;
if (adapter->drv_mac_learn)
@@ -651,6 +671,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
adapter->stats.txbytes += skb->len;
adapter->stats.xmitcalled++;
+ tx_ring->xmit_called++;
qlcnic_update_cmd_producer(tx_ring);
@@ -673,7 +694,7 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
adapter->ahw->linkup = 0;
if (netif_running(netdev)) {
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
+ netif_tx_stop_all_queues(netdev);
}
} else if (!adapter->ahw->linkup && linkup) {
netdev_info(netdev, "NIC Link is up\n");
@@ -768,9 +789,6 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
struct net_device *netdev = adapter->netdev;
struct qlcnic_skb_frag *frag;
- if (!spin_trylock(&adapter->tx_clean_lock))
- return 1;
-
sw_consumer = tx_ring->sw_consumer;
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
@@ -788,6 +806,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
frag->dma = 0ULL;
}
adapter->stats.xmitfinished++;
+ tx_ring->xmit_finished++;
dev_kfree_skb_any(buffer->skb);
buffer->skb = NULL;
}
@@ -800,10 +819,12 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
if (count && netif_running(netdev)) {
tx_ring->sw_consumer = sw_consumer;
smp_mb();
- if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
+ if (netif_tx_queue_stopped(tx_ring->txq) &&
+ netif_carrier_ok(netdev)) {
if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
- netif_wake_queue(netdev);
+ netif_tx_wake_queue(tx_ring->txq);
adapter->stats.xmit_on++;
+ tx_ring->xmit_on++;
}
}
adapter->tx_timeo_cnt = 0;
@@ -823,7 +844,6 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
*/
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
done = (sw_consumer == hw_consumer);
- spin_unlock(&adapter->tx_clean_lock);
return done;
}
@@ -833,16 +853,40 @@ static int qlcnic_poll(struct napi_struct *napi, int budget)
int tx_complete, work_done;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_adapter *adapter;
+ struct qlcnic_host_tx_ring *tx_ring;
sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
adapter = sds_ring->adapter;
- tx_complete = qlcnic_process_cmd_ring(adapter, adapter->tx_ring,
+ tx_ring = sds_ring->tx_ring;
+
+ tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring,
budget);
work_done = qlcnic_process_rcv_ring(sds_ring, budget);
if ((work_done < budget) && tx_complete) {
napi_complete(&sds_ring->napi);
- if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
qlcnic_enable_int(sds_ring);
+ qlcnic_enable_tx_intr(adapter, tx_ring);
+ }
+ }
+
+ return work_done;
+}
+
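+/* NAPI poll for a Tx-only MSI-X vector: reap completions and unmask
+ * the Tx interrupt once the ring has been cleaned.
+ */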
+static int qlcnic_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_adapter *adapter;
+ int work_done;
+
+ tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
+ adapter = tx_ring->adapter;
+
+ work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+ if (work_done) {
+ napi_complete(&tx_ring->napi);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_enable_tx_intr(adapter, tx_ring);
}
return work_done;
@@ -952,20 +996,23 @@ static void qlcnic_handle_fw_message(int desc_cnt, int index,
break;
case 1:
dev_info(dev, "loopback already in progress\n");
- adapter->ahw->diag_cnt = -QLCNIC_TEST_IN_PROGRESS;
+ adapter->ahw->diag_cnt = -EINPROGRESS;
break;
case 2:
dev_info(dev, "loopback cable is not connected\n");
- adapter->ahw->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN;
+ adapter->ahw->diag_cnt = -ENODEV;
break;
default:
dev_info(dev,
"loopback configure request failed, err %x\n",
ret);
- adapter->ahw->diag_cnt = -QLCNIC_UNDEFINED_ERROR;
+ adapter->ahw->diag_cnt = -EIO;
break;
}
break;
+ case QLCNIC_C2H_OPCODE_GET_DCB_AEN:
+ qlcnic_dcb_handle_aen(adapter, (void *)&msg);
+ break;
default:
break;
}
@@ -1411,23 +1458,31 @@ void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
struct net_device *netdev)
{
- int ring, max_sds_rings;
+ int ring;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
return -ENOMEM;
- max_sds_rings = adapter->max_sds_rings;
-
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
- if (ring == adapter->max_sds_rings - 1)
- netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
- QLCNIC_NETDEV_WEIGHT / max_sds_rings);
- else
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test &&
+ (adapter->max_drv_tx_rings > 1)) {
netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
- QLCNIC_NETDEV_WEIGHT*2);
+ NAPI_POLL_WEIGHT);
+ } else {
+ if (ring == (adapter->max_sds_rings - 1))
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_poll,
+ NAPI_POLL_WEIGHT);
+ else
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_rx_poll,
+ NAPI_POLL_WEIGHT);
+ }
}
if (qlcnic_alloc_tx_rings(adapter, netdev)) {
@@ -1435,6 +1490,14 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
return -ENOMEM;
}
+ if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netif_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll,
+ NAPI_POLL_WEIGHT);
+ }
+ }
+
return 0;
}
@@ -1443,6 +1506,7 @@ void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
int ring;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
@@ -1450,6 +1514,14 @@ void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
}
qlcnic_free_sds_rings(adapter->recv_ctx);
+
+ if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netif_napi_del(&tx_ring->napi);
+ }
+ }
+
qlcnic_free_tx_rings(adapter);
}
@@ -1457,6 +1529,7 @@ void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
@@ -1467,12 +1540,24 @@ void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
napi_enable(&sds_ring->napi);
qlcnic_enable_int(sds_ring);
}
+
+ if (qlcnic_check_multi_tx(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !adapter->ahw->diag_test &&
+ (adapter->max_drv_tx_rings > 1)) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ napi_enable(&tx_ring->napi);
+ qlcnic_enable_tx_intr(adapter, tx_ring);
+ }
+ }
}
void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
@@ -1484,6 +1569,17 @@ void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
napi_synchronize(&sds_ring->napi);
napi_disable(&sds_ring->napi);
}
+
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !adapter->ahw->diag_test &&
+ qlcnic_check_multi_tx(adapter)) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ qlcnic_disable_tx_intr(adapter, tx_ring);
+ napi_synchronize(&tx_ring->napi);
+ napi_disable(&tx_ring->napi);
+ }
+ }
}
#define QLC_83XX_NORMAL_LB_PKT (1ULL << 36)
@@ -1864,7 +1960,7 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
struct net_device *netdev)
{
- int ring, max_sds_rings, temp;
+ int ring;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
@@ -1872,25 +1968,22 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
return -ENOMEM;
- max_sds_rings = adapter->max_sds_rings;
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (adapter->flags & QLCNIC_MSIX_ENABLED) {
- if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+ if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
netif_napi_add(netdev, &sds_ring->napi,
qlcnic_83xx_rx_poll,
- QLCNIC_NETDEV_WEIGHT * 2);
- } else {
- temp = QLCNIC_NETDEV_WEIGHT / max_sds_rings;
+ NAPI_POLL_WEIGHT);
+ else
netif_napi_add(netdev, &sds_ring->napi,
qlcnic_83xx_msix_sriov_vf_poll,
- temp);
- }
+ NAPI_POLL_WEIGHT);
} else {
netif_napi_add(netdev, &sds_ring->napi,
qlcnic_83xx_poll,
- QLCNIC_NETDEV_WEIGHT / max_sds_rings);
+ NAPI_POLL_WEIGHT);
}
}
@@ -1905,7 +1998,7 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
tx_ring = &adapter->tx_ring[ring];
netif_napi_add(netdev, &tx_ring->napi,
qlcnic_83xx_msix_tx_poll,
- QLCNIC_NETDEV_WEIGHT);
+ NAPI_POLL_WEIGHT);
}
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index bc05d016c859..c4c5023e1fdf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -100,6 +100,8 @@ static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X),
ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_QLE844X),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE844X),
{0,}
};
@@ -146,6 +148,11 @@ static const u32 qlcnic_reg_tbl[] = {
static const struct qlcnic_board_info qlcnic_boards[] = {
{ PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE844X,
+ 0x0,
+ 0x0,
+ "8400 series 10GbE Converged Network Adapter (TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
PCI_DEVICE_ID_QLOGIC_QLE834X,
PCI_VENDOR_ID_QLOGIC,
0x24e,
@@ -254,7 +261,6 @@ static const struct qlcnic_board_info qlcnic_boards[] = {
};
#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
-#define QLC_MAX_SDS_RINGS 8
static const
struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
@@ -278,12 +284,15 @@ void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
{
- u8 mac_addr[ETH_ALEN];
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
+ u8 mac_addr[ETH_ALEN];
+ int ret;
- if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
- return -EIO;
+ ret = qlcnic_get_mac_address(adapter, mac_addr,
+ adapter->ahw->pci_func);
+ if (ret)
+ return ret;
memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
@@ -425,6 +434,21 @@ static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter)
cancel_delayed_work_sync(&adapter->fw_work);
}
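+/* ndo_get_phys_port_id: expose the MAC of the underlying physical port
+ * so all functions on a port report the same ID.
+ */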
+static int qlcnic_get_phys_port_id(struct net_device *netdev,
+ struct netdev_phys_port_id *ppid)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (!(adapter->flags & QLCNIC_HAS_PHYS_PORT_ID))
+ return -EOPNOTSUPP;
+
+ ppid->id_len = sizeof(ahw->phys_port_id);
+ memcpy(ppid->id, ahw->phys_port_id, ppid->id_len);
+
+ return 0;
+}
+
static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_open = qlcnic_open,
.ndo_stop = qlcnic_close,
@@ -442,6 +466,7 @@ static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_fdb_add = qlcnic_fdb_add,
.ndo_fdb_del = qlcnic_fdb_del,
.ndo_fdb_dump = qlcnic_fdb_dump,
+ .ndo_get_phys_port_id = qlcnic_get_phys_port_id,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = qlcnic_poll_controller,
#endif
@@ -514,13 +539,36 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
.get_board_info = qlcnic_82xx_get_board_info,
.set_mac_filter_count = qlcnic_82xx_set_mac_filter_count,
.free_mac_list = qlcnic_82xx_free_mac_list,
+ .read_phys_port_id = qlcnic_82xx_read_phys_port_id,
+ .io_error_detected = qlcnic_82xx_io_error_detected,
+ .io_slot_reset = qlcnic_82xx_io_slot_reset,
+ .io_resume = qlcnic_82xx_io_resume,
};
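+/*
+ * Multi-Tx requires MSI-X plus the MULTI_TX firmware capability; the
+ * Tx ring count is capped at min(QLCNIC_DEF_NUM_TX_RINGS, online CPUs).
+ */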
+static void qlcnic_get_multiq_capability(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int num_tx_q;
+
+ if (ahw->msix_supported &&
+ (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_MULTI_TX)) {
+ num_tx_q = min_t(int, QLCNIC_DEF_NUM_TX_RINGS,
+ num_online_cpus());
+ if (num_tx_q > 1) {
+ test_and_set_bit(__QLCNIC_MULTI_TX_UNIQUE,
+ &adapter->state);
+ adapter->max_drv_tx_rings = num_tx_q;
+ }
+ } else {
+ adapter->max_drv_tx_rings = 1;
+ }
+}
+
int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
{
struct pci_dev *pdev = adapter->pdev;
+ int max_tx_rings, max_sds_rings, tx_vector;
int err = -1, i;
- int max_tx_rings, tx_vector;
if (adapter->flags & QLCNIC_TX_INTR_SHARED) {
max_tx_rings = 0;
@@ -554,7 +602,15 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
adapter->max_sds_rings = num_msix -
max_tx_rings - 1;
} else {
- adapter->max_sds_rings = num_msix;
+ adapter->ahw->num_msix = num_msix;
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test &&
+ (adapter->max_drv_tx_rings > 1))
+ max_sds_rings = num_msix - max_tx_rings;
+ else
+ max_sds_rings = num_msix;
+
+ adapter->max_sds_rings = max_sds_rings;
}
dev_info(&pdev->dev, "using msi-x interrupts\n");
return err;
@@ -570,6 +626,8 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
num_msix += (max_tx_rings + 1);
} else {
num_msix = rounddown_pow_of_two(err);
+ if (qlcnic_check_multi_tx(adapter))
+ num_msix += max_tx_rings;
}
if (num_msix) {
@@ -605,6 +663,7 @@ static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
adapter->msix_entries[0].vector = pdev->irq;
return err;
}
+
if (qlcnic_use_msi || qlcnic_use_msi_x)
return -EOPNOTSUPP;
@@ -621,28 +680,69 @@ static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
return err;
}
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
+int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr, int txq)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
int num_msix, err = 0;
if (!num_intr)
num_intr = QLCNIC_DEF_NUM_STS_DESC_RINGS;
- if (adapter->ahw->msix_supported)
+ if (ahw->msix_supported) {
num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
num_intr));
- else
+ if (qlcnic_check_multi_tx(adapter)) {
+ if (txq)
+ adapter->max_drv_tx_rings = txq;
+ num_msix += adapter->max_drv_tx_rings;
+ }
+ } else {
num_msix = 1;
+ }
err = qlcnic_enable_msix(adapter, num_msix);
- if (err == -ENOMEM || !err)
+ if (err == -ENOMEM)
return err;
- err = qlcnic_enable_msi_legacy(adapter);
- if (!err)
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ qlcnic_disable_multi_tx(adapter);
+
+ err = qlcnic_enable_msi_legacy(adapter);
+ if (!err)
+ return err;
+ }
+
+ return 0;
+}
+
+int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *adapter, int op_type)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err, i;
+
+ if (qlcnic_check_multi_tx(adapter) &&
+ !ahw->diag_test &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ ahw->intr_tbl = vzalloc(ahw->num_msix *
+ sizeof(struct qlcnic_intrpt_config));
+ if (!ahw->intr_tbl)
+ return -ENOMEM;
+
+ for (i = 0; i < ahw->num_msix; i++) {
+ ahw->intr_tbl[i].type = QLCNIC_INTRPT_MSIX;
+ ahw->intr_tbl[i].id = i;
+ ahw->intr_tbl[i].src = 0;
+ }
+
+ err = qlcnic_82xx_config_intrpt(adapter, 1);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to configure Interrupt for %d vector\n",
+ ahw->num_msix);
return err;
+ }
- return -EIO;
+ return 0;
}
void qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
@@ -696,6 +796,23 @@ static int qlcnic_get_act_pci_func(struct qlcnic_adapter *adapter)
return ret;
}
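+/* 84xx always supports per-port eswitch configuration; 83xx only when
+ * the firmware advertises PER_PORT_ESWITCH_CFG.
+ */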
+static bool qlcnic_port_eswitch_cfg_capability(struct qlcnic_adapter *adapter)
+{
+ bool ret = false;
+
+ if (qlcnic_84xx_check(adapter)) {
+ ret = true;
+ } else if (qlcnic_83xx_check(adapter)) {
+ if (adapter->ahw->extra_capability[0] &
+ QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG)
+ ret = true;
+ else
+ ret = false;
+ }
+
+ return ret;
+}
+
int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
{
struct qlcnic_pci_info *pci_info;
@@ -739,18 +856,30 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
(pci_info[i].type != QLCNIC_TYPE_NIC))
continue;
+ if (qlcnic_port_eswitch_cfg_capability(adapter)) {
+ if (!qlcnic_83xx_enable_port_eswitch(adapter, pfn))
+ adapter->npars[j].eswitch_status = true;
+ else
+ continue;
+ } else {
+ adapter->npars[j].eswitch_status = true;
+ }
+
adapter->npars[j].pci_func = pfn;
adapter->npars[j].active = (u8)pci_info[i].active;
adapter->npars[j].type = (u8)pci_info[i].type;
adapter->npars[j].phy_port = (u8)pci_info[i].default_port;
adapter->npars[j].min_bw = pci_info[i].tx_min_bw;
adapter->npars[j].max_bw = pci_info[i].tx_max_bw;
+
j++;
}
- for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++) {
- adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
- if (qlcnic_83xx_check(adapter))
+ if (qlcnic_82xx_check(adapter)) {
+ for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
+ adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
+ } else if (!qlcnic_port_eswitch_cfg_capability(adapter)) {
+ for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
qlcnic_enable_eswitch(adapter, i, 1);
}
@@ -829,7 +958,9 @@ static void qlcnic_get_bar_length(u32 dev_id, ulong *bar)
*bar = QLCNIC_82XX_BAR0_LENGTH;
break;
case PCI_DEVICE_ID_QLOGIC_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_QLE844X:
case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
*bar = QLCNIC_83XX_BAR0_LENGTH;
break;
default:
@@ -870,8 +1001,8 @@ static int qlcnic_setup_pci_map(struct pci_dev *pdev,
return 0;
}
-static inline bool qlcnic_validate_subsystem_id(struct qlcnic_adapter *adapter,
- int index)
+static bool qlcnic_validate_subsystem_id(struct qlcnic_adapter *adapter,
+ int index)
{
struct pci_dev *pdev = adapter->pdev;
unsigned short subsystem_vendor;
@@ -1173,6 +1304,9 @@ int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
return 0;
for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ if (!adapter->npars[i].eswitch_status)
+ continue;
+
memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
esw_cfg.pci_func = adapter->npars[i].pci_func;
esw_cfg.mac_override = BIT_0;
@@ -1235,6 +1369,9 @@ int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
for (i = 0; i < adapter->ahw->act_pci_func; i++) {
npar = &adapter->npars[i];
pci_func = npar->pci_func;
+ if (!adapter->npars[i].eswitch_status)
+ continue;
+
memset(&nic_info, 0, sizeof(struct qlcnic_info));
err = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
if (err)
@@ -1413,6 +1550,7 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
for (ring = 0; ring < num_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (qlcnic_82xx_check(adapter) &&
+ !qlcnic_check_multi_tx(adapter) &&
(ring == (num_sds_rings - 1))) {
if (!(adapter->flags &
QLCNIC_MSIX_ENABLED))
@@ -1436,9 +1574,11 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
return err;
}
}
- if (qlcnic_83xx_check(adapter) &&
- (adapter->flags & QLCNIC_MSIX_ENABLED) &&
- !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+ if ((qlcnic_82xx_check(adapter) &&
+ qlcnic_check_multi_tx(adapter)) ||
+ (qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED))) {
handler = qlcnic_msix_tx_intr;
for (ring = 0; ring < adapter->max_drv_tx_rings;
ring++) {
@@ -1473,8 +1613,10 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter)
free_irq(sds_ring->irq, sds_ring);
}
}
- if (qlcnic_83xx_check(adapter) &&
- !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+ if ((qlcnic_83xx_check(adapter) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
+ (qlcnic_82xx_check(adapter) &&
+ qlcnic_check_multi_tx(adapter))) {
for (ring = 0; ring < adapter->max_drv_tx_rings;
ring++) {
tx_ring = &adapter->tx_ring[ring];
@@ -1510,8 +1652,10 @@ int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
return 0;
+
if (qlcnic_set_eswitch_port_config(adapter))
return -EIO;
+
qlcnic_get_lro_mss_capability(adapter);
if (qlcnic_fw_create_ctx(adapter))
@@ -1558,6 +1702,8 @@ int qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
+ int ring;
+
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
@@ -1567,7 +1713,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (qlcnic_sriov_vf_check(adapter))
qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
smp_mb();
- spin_lock(&adapter->tx_clean_lock);
netif_carrier_off(netdev);
adapter->ahw->linkup = 0;
netif_tx_disable(netdev);
@@ -1585,8 +1730,9 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
adapter->flags &= ~QLCNIC_FW_LRO_MSS_CAP;
qlcnic_reset_rx_buffers_list(adapter);
- qlcnic_release_tx_buffers(adapter);
- spin_unlock(&adapter->tx_clean_lock);
+
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++)
+ qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
}
/* Usage: During suspend and firmware recovery module */
@@ -1666,6 +1812,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_host_sds_ring *sds_ring;
+ int max_tx_rings = adapter->max_drv_tx_rings;
int ring;
clear_bit(__QLCNIC_DEV_UP, &adapter->state);
@@ -1682,6 +1829,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
adapter->ahw->diag_test = 0;
adapter->max_sds_rings = max_sds_rings;
+ adapter->max_drv_tx_rings = max_tx_rings;
if (qlcnic_attach(adapter))
goto out;
@@ -1750,6 +1898,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
adapter->max_sds_rings = 1;
adapter->ahw->diag_test = test;
adapter->ahw->linkup = 0;
+ adapter->max_drv_tx_rings = 1;
ret = qlcnic_attach(adapter);
if (ret) {
@@ -1907,12 +2056,18 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->irq = adapter->msix_entries[0].vector;
+ err = qlcnic_set_real_num_queues(adapter, netdev);
+ if (err)
+ return err;
+
err = register_netdev(netdev);
if (err) {
dev_err(&pdev->dev, "failed to register net device\n");
return err;
}
+ qlcnic_dcb_init_dcbnl_ops(adapter);
+
return 0;
}
@@ -1975,7 +2130,8 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
tx_ring->cmd_buf_arr = cmd_buf_arr;
}
- if (qlcnic_83xx_check(adapter)) {
+ if (qlcnic_83xx_check(adapter) ||
+ (qlcnic_82xx_check(adapter) && qlcnic_check_multi_tx(adapter))) {
for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
tx_ring->adapter = adapter;
@@ -1986,6 +2142,7 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
}
}
}
+
return 0;
}
@@ -2004,6 +2161,17 @@ void qlcnic_set_drv_version(struct qlcnic_adapter *adapter)
qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd);
}
+static int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+{
+ return __qlcnic_register_dcb(adapter);
+}
+
+void qlcnic_clear_dcb_ops(struct qlcnic_adapter *adapter)
+{
+ kfree(adapter->dcb);
+ adapter->dcb = NULL;
+}
+
static int
qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -2048,9 +2216,11 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ahw->reg_tbl = (u32 *) qlcnic_reg_tbl;
break;
case PCI_DEVICE_ID_QLOGIC_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_QLE844X:
qlcnic_83xx_register_map(ahw);
break;
case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
qlcnic_sriov_vf_register_map(ahw);
break;
default:
@@ -2061,7 +2231,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_out_free_hw_res;
- netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
+ netdev = alloc_etherdev_mq(sizeof(struct qlcnic_adapter),
+ QLCNIC_MAX_TX_RINGS);
if (!netdev) {
err = -ENOMEM;
goto err_out_iounmap;
@@ -2091,14 +2262,14 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->fdb_mac_learn = true;
else if (qlcnic_mac_learn == DRV_MAC_LEARN)
adapter->drv_mac_learn = true;
- adapter->max_drv_tx_rings = 1;
rwlock_init(&adapter->ahw->crb_lock);
mutex_init(&adapter->ahw->mem_lock);
- spin_lock_init(&adapter->tx_clean_lock);
INIT_LIST_HEAD(&adapter->mac_list);
+ qlcnic_register_dcb(adapter);
+
if (qlcnic_82xx_check(adapter)) {
qlcnic_check_vf(adapter, ent);
adapter->portnum = adapter->ahw->pci_func;
@@ -2108,12 +2279,31 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_free_hw;
}
+ qlcnic_get_multiq_capability(adapter);
+
+ if ((adapter->ahw->act_pci_func > 2) &&
+ qlcnic_check_multi_tx(adapter)) {
+ adapter->max_drv_tx_rings = QLCNIC_DEF_NUM_TX_RINGS;
+ dev_info(&adapter->pdev->dev,
+ "vNIC mode enabled, Set max TX rings = %d\n",
+ adapter->max_drv_tx_rings);
+ }
+
+ if (!qlcnic_check_multi_tx(adapter)) {
+ clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+ adapter->max_drv_tx_rings = 1;
+ }
err = qlcnic_setup_idc_param(adapter);
if (err)
goto err_out_free_hw;
adapter->flags |= QLCNIC_NEED_FLR;
+
+ if (adapter->dcb && qlcnic_dcb_attach(adapter))
+ qlcnic_clear_dcb_ops(adapter);
+
} else if (qlcnic_83xx_check(adapter)) {
+ adapter->max_drv_tx_rings = 1;
qlcnic_83xx_check_vf(adapter, ent);
adapter->portnum = adapter->ahw->pci_func;
err = qlcnic_83xx_init(adapter, pci_using_dac);
@@ -2132,6 +2322,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (qlcnic_read_mac_addr(adapter))
dev_warn(&pdev->dev, "failed to read mac addr\n");
+ qlcnic_read_phys_port_id(adapter);
+
if (adapter->portnum == 0) {
qlcnic_get_board_name(adapter, board_name);
@@ -2145,16 +2337,12 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_warn(&pdev->dev,
"Device does not support MSI interrupts\n");
- err = qlcnic_setup_intr(adapter, 0);
- if (err) {
- dev_err(&pdev->dev, "Failed to setup interrupt\n");
- goto err_out_disable_msi;
- }
-
- if (qlcnic_83xx_check(adapter)) {
- err = qlcnic_83xx_setup_mbx_intr(adapter);
- if (err)
+ if (qlcnic_82xx_check(adapter)) {
+ err = qlcnic_setup_intr(adapter, 0, 0);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to setup interrupt\n");
goto err_out_disable_msi;
+ }
}
err = qlcnic_get_act_pci_func(adapter);
@@ -2238,13 +2426,18 @@ static void qlcnic_remove(struct pci_dev *pdev)
qlcnic_cancel_idc_work(adapter);
ahw = adapter->ahw;
+ qlcnic_dcb_free(adapter);
+
unregister_netdev(netdev);
qlcnic_sriov_cleanup(adapter);
if (qlcnic_83xx_check(adapter)) {
- qlcnic_83xx_free_mbx_intr(adapter);
qlcnic_83xx_register_nic_idc_func(adapter, 0);
cancel_delayed_work_sync(&adapter->idc_aen_work);
+ qlcnic_83xx_free_mbx_intr(adapter);
+ qlcnic_83xx_detach_mailbox_work(adapter);
+ qlcnic_83xx_free_mailbox(ahw->mailbox);
+ kfree(ahw->fw_info);
}
qlcnic_detach(adapter);
@@ -2278,6 +2471,7 @@ static void qlcnic_remove(struct pci_dev *pdev)
destroy_workqueue(adapter->qlcnic_wq);
adapter->qlcnic_wq = NULL;
}
+
qlcnic_free_adapter_resources(adapter);
kfree(ahw);
free_netdev(netdev);
@@ -2336,7 +2530,7 @@ static int qlcnic_open(struct net_device *netdev)
if (err)
goto err_out;
- netif_start_queue(netdev);
+ netif_tx_start_all_queues(netdev);
return 0;
@@ -2468,6 +2662,8 @@ int qlcnic_check_temp(struct qlcnic_adapter *adapter)
static void qlcnic_tx_timeout(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
if (test_bit(__QLCNIC_RESETTING, &adapter->state))
return;
@@ -2481,6 +2677,25 @@ static void qlcnic_tx_timeout(struct net_device *netdev)
QLCNIC_FORCE_FW_DUMP_KEY);
} else {
netdev_info(netdev, "Tx timeout, reset adapter context.\n");
+ if (qlcnic_82xx_check(adapter)) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings;
+ ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ dev_info(&netdev->dev, "ring=%d\n", ring);
+ dev_info(&netdev->dev, "crb_intr_mask=%d\n",
+ readl(tx_ring->crb_intr_mask));
+ dev_info(&netdev->dev, "producer=%d\n",
+ readl(tx_ring->crb_cmd_producer));
+ dev_info(&netdev->dev, "sw_consumer = %d\n",
+ tx_ring->sw_consumer);
+ dev_info(&netdev->dev, "hw_consumer = %d\n",
+ le32_to_cpu(*(tx_ring->hw_consumer)));
+ dev_info(&netdev->dev, "xmit-on=%llu\n",
+ tx_ring->xmit_on);
+ dev_info(&netdev->dev, "xmit-off=%llu\n",
+ tx_ring->xmit_off);
+ }
+ }
adapter->ahw->reset_context = 1;
}
}
@@ -2869,7 +3084,7 @@ skip_ack_check:
qlcnic_api_unlock(adapter);
rtnl_lock();
- if (adapter->ahw->fw_dump.enable &&
+ if (qlcnic_check_fw_dump_state(adapter) &&
(adapter->flags & QLCNIC_FW_RESET_OWNER)) {
QLCDB(adapter, DRV, "Take FW dump\n");
qlcnic_dump_fw(adapter);
@@ -3074,6 +3289,8 @@ qlcnic_attach_work(struct work_struct *work)
return;
}
attach:
+ qlcnic_dcb_get_info(adapter);
+
if (netif_running(netdev)) {
if (qlcnic_up(adapter, netdev))
goto done;
@@ -3245,7 +3462,7 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
qlcnic_clr_drv_state(adapter);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
- err = qlcnic_setup_intr(adapter, 0);
+ err = qlcnic_setup_intr(adapter, 0, 0);
if (err) {
kfree(adapter->msix_entries);
@@ -3253,19 +3470,6 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
return err;
}
- if (qlcnic_83xx_check(adapter)) {
- /* register for NIC IDC AEN Events */
- qlcnic_83xx_register_nic_idc_func(adapter, 1);
- err = qlcnic_83xx_setup_mbx_intr(adapter);
- if (err) {
- dev_err(&adapter->pdev->dev,
- "failed to setup mbx interrupt\n");
- qlcnic_clr_all_drv_state(adapter, 1);
- clear_bit(__QLCNIC_AER, &adapter->state);
- goto done;
- }
- }
-
if (netif_running(netdev)) {
err = qlcnic_attach(adapter);
if (err) {
@@ -3286,8 +3490,8 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
return err;
}
-static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
+pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
@@ -3306,12 +3510,6 @@ static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
if (netif_running(netdev))
qlcnic_down(adapter, netdev);
- if (qlcnic_83xx_check(adapter)) {
- qlcnic_83xx_free_mbx_intr(adapter);
- qlcnic_83xx_register_nic_idc_func(adapter, 0);
- cancel_delayed_work_sync(&adapter->idc_aen_work);
- }
-
qlcnic_detach(adapter);
qlcnic_teardown_intr(adapter);
@@ -3323,13 +3521,13 @@ static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_NEED_RESET;
}
-static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
+pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *pdev)
{
return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
PCI_ERS_RESULT_RECOVERED;
}
-static void qlcnic_io_resume(struct pci_dev *pdev)
+void qlcnic_82xx_io_resume(struct pci_dev *pdev)
{
u32 state;
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
@@ -3339,9 +3537,48 @@ static void qlcnic_io_resume(struct pci_dev *pdev)
if (state == QLCNIC_DEV_READY && test_and_clear_bit(__QLCNIC_AER,
&adapter->state))
qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
- FW_POLL_DELAY);
+ FW_POLL_DELAY);
+}
+
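+/* Generic AER callbacks that dispatch to the chip-specific hw_ops
+ * handlers registered above.
+ */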
+static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops;
+
+ if (hw_ops->io_error_detected) {
+ return hw_ops->io_error_detected(pdev, state);
+ } else {
+ dev_err(&pdev->dev, "AER error_detected handler not registered.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+}
+
+static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops;
+
+ if (hw_ops->io_slot_reset) {
+ return hw_ops->io_slot_reset(pdev);
+ } else {
+ dev_err(&pdev->dev, "AER slot_reset handler not registered.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+}
+
+static void qlcnic_io_resume(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops;
+
+ if (hw_ops->io_resume)
+ hw_ops->io_resume(pdev);
+ else
+ dev_err(&pdev->dev, "AER resume handler not registered.\n");
}
+
static int
qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
{
@@ -3370,16 +3607,65 @@ qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
return err;
}
+int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *adapter, u32 txq)
+{
+ struct net_device *netdev = adapter->netdev;
+ u8 max_hw = QLCNIC_MAX_TX_RINGS;
+ u32 max_allowed;
+
+ if (!qlcnic_82xx_check(adapter)) {
+ netdev_err(netdev, "No Multi TX-Q support\n");
+ return -EINVAL;
+ }
+
+ if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
+ netdev_err(netdev, "No Multi TX-Q support in INT-x mode\n");
+ return -EINVAL;
+ }
+
+ if (!qlcnic_check_multi_tx(adapter)) {
+ netdev_err(netdev, "No Multi TX-Q support\n");
+ return -EINVAL;
+ }
+
+ if (txq > QLCNIC_MAX_TX_RINGS) {
+ netdev_err(netdev, "Invalid ring count\n");
+ return -EINVAL;
+ }
+
+ max_allowed = rounddown_pow_of_two(min_t(int, max_hw,
+ num_online_cpus()));
+ if ((txq > max_allowed) || !is_power_of_2(txq)) {
+ if (!is_power_of_2(txq))
+ netdev_err(netdev,
+ "TX queue should be a power of 2\n");
+ if (txq > num_online_cpus())
+ netdev_err(netdev,
+ "Tx queue should not be higher than [%u], number of online CPUs in the system\n",
+ num_online_cpus());
+ netdev_err(netdev, "Unable to configure %u Tx rings\n", txq);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
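
qlcnic_validate_max_tx_rings above accepts a TX ring count only if it is a power of two and no larger than min(hardware maximum, online CPUs) rounded down to a power of two. A small stand-alone sketch of that clamping logic, with user-space stand-ins for the kernel's is_power_of_2() and rounddown_pow_of_two():

    #include <stdio.h>

    static int is_pow2(unsigned int n)
    {
        return n && !(n & (n - 1));
    }

    static unsigned int rounddown_pow2(unsigned int n)
    {
        unsigned int p = 1;

        while (p * 2 <= n)
            p *= 2;
        return p;
    }

    /* Mirrors the check above: reject counts that are not a power of
     * two or exceed min(max_hw, online_cpus) rounded down. */
    static int validate_tx_rings(unsigned int txq, unsigned int max_hw,
                                 unsigned int online_cpus)
    {
        unsigned int cap = max_hw < online_cpus ? max_hw : online_cpus;

        return (is_pow2(txq) && txq <= rounddown_pow2(cap)) ? 0 : -1;
    }

    int main(void)
    {
        /* 8 hardware rings, 6 online CPUs -> at most 4 rings */
        printf("%d\n", validate_tx_rings(4, 8, 6));  /* 0: accepted */
        printf("%d\n", validate_tx_rings(6, 8, 6));  /* -1: not a power of 2 */
        printf("%d\n", validate_tx_rings(8, 8, 6));  /* -1: exceeds the cap */
        return 0;
    }
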
int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter,
- __u32 val)
+ __u32 val)
{
struct net_device *netdev = adapter->netdev;
u8 max_hw = adapter->ahw->max_rx_ques;
u32 max_allowed;
- if (val > QLC_MAX_SDS_RINGS) {
+ if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x &&
+ !qlcnic_use_msi) {
+ netdev_err(netdev, "No RSS support in INT-x mode\n");
+ return -EINVAL;
+ }
+
+ if (val > QLCNIC_MAX_SDS_RINGS) {
netdev_err(netdev, "RSS value should not be higher than %u\n",
- QLC_MAX_SDS_RINGS);
+ QLCNIC_MAX_SDS_RINGS);
return -EINVAL;
}
@@ -3409,27 +3695,48 @@ int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter,
return 0;
}
-int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, size_t len)
+int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, int txq)
{
int err;
struct net_device *netdev = adapter->netdev;
+ int num_msix;
if (test_bit(__QLCNIC_RESETTING, &adapter->state))
return -EBUSY;
+ if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x &&
+ !qlcnic_use_msi) {
+ netdev_err(netdev, "No RSS support in INT-x mode\n");
+ return -EINVAL;
+ }
+
netif_device_detach(netdev);
if (netif_running(netdev))
__qlcnic_down(adapter, netdev);
qlcnic_detach(adapter);
+ if (qlcnic_82xx_check(adapter)) {
+ if (txq != 0)
+ adapter->max_drv_tx_rings = txq;
+
+ if (qlcnic_check_multi_tx(adapter) &&
+ (txq > adapter->max_drv_tx_rings))
+ num_msix = adapter->max_drv_tx_rings;
+ else
+ num_msix = data;
+ }
+
if (qlcnic_83xx_check(adapter)) {
qlcnic_83xx_free_mbx_intr(adapter);
qlcnic_83xx_enable_mbx_poll(adapter);
}
+ netif_set_real_num_tx_queues(netdev, adapter->max_drv_tx_rings);
+
qlcnic_teardown_intr(adapter);
- err = qlcnic_setup_intr(adapter, data);
+
+ err = qlcnic_setup_intr(adapter, data, txq);
if (err) {
kfree(adapter->msix_entries);
netdev_err(netdev, "failed to setup interrupt\n");
@@ -3457,8 +3764,7 @@ int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, size_t len)
goto done;
qlcnic_restore_indev_addr(netdev, NETDEV_UP);
}
- err = len;
- done:
+done:
netif_device_attach(netdev);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return err;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 79e54efe07b9..15513608d480 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -1082,14 +1082,17 @@ flash_temp:
}
tmpl_hdr = ahw->fw_dump.tmpl_hdr;
- tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
+ tmpl_hdr->drv_cap_mask = tmpl_hdr->cap_mask;
+ dev_info(&adapter->pdev->dev,
+ "Default minidump capture mask 0x%x\n",
+ tmpl_hdr->cap_mask);
if ((tmpl_hdr->version & 0xfffff) >= 0x20001)
ahw->fw_dump.use_pex_dma = true;
else
ahw->fw_dump.use_pex_dma = false;
- ahw->fw_dump.enable = 1;
+ qlcnic_enable_fw_dump_state(adapter);
return 0;
}
@@ -1112,7 +1115,11 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
ahw = adapter->ahw;
- if (!fw_dump->enable) {
+ /* Return if we don't have a firmware dump template header */
+ if (!tmpl_hdr)
+ return -EIO;
+
+ if (!qlcnic_check_fw_dump_state(adapter)) {
dev_info(&adapter->pdev->dev, "Dump not enabled\n");
return -EIO;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 5d40045b3cea..652cc13c5023 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -33,7 +33,7 @@ static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
-static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *,
+static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *,
struct qlcnic_cmd_args *);
static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
@@ -45,7 +45,7 @@ static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
.get_mac_address = qlcnic_83xx_get_mac_address,
.setup_intr = qlcnic_83xx_setup_intr,
.alloc_mbx_args = qlcnic_83xx_alloc_mbx_args,
- .mbx_cmd = qlcnic_sriov_vf_mbx_op,
+ .mbx_cmd = qlcnic_sriov_issue_cmd,
.get_func_no = qlcnic_83xx_get_func_no,
.api_lock = qlcnic_83xx_cam_lock,
.api_unlock = qlcnic_83xx_cam_unlock,
@@ -286,96 +286,38 @@ void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
u32 *pay, u8 pci_func, u8 size)
{
- u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val, wait_time = 0;
struct qlcnic_hardware_context *ahw = adapter->ahw;
- unsigned long flags;
- u16 opcode;
- u8 mbx_err_code;
- int i, j;
-
- opcode = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;
-
- if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
- dev_info(&adapter->pdev->dev,
- "Mailbox cmd attempted, 0x%x\n", opcode);
- dev_info(&adapter->pdev->dev, "Mailbox detached\n");
- return 0;
- }
-
- spin_lock_irqsave(&ahw->mbx_lock, flags);
-
- mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
- if (mbx_val) {
- QLCDB(adapter, DRV, "Mailbox cmd attempted, 0x%x\n", opcode);
- spin_unlock_irqrestore(&ahw->mbx_lock, flags);
- return QLCNIC_RCODE_TIMEOUT;
- }
- /* Fill in mailbox registers */
- val = size + (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
- mbx_cmd = 0x31 | (val << 16) | (adapter->ahw->fw_hal_version << 29);
-
- writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
- mbx_cmd = 0x1 | (1 << 4);
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
+ struct qlcnic_cmd_args cmd;
+ unsigned long timeout;
+ int err;
- if (qlcnic_sriov_pf_check(adapter))
- mbx_cmd |= (pci_func << 5);
+ memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
+ cmd.hdr = hdr;
+ cmd.pay = pay;
+ cmd.pay_size = size;
+ cmd.func_num = pci_func;
+ cmd.op_type = QLC_83XX_MBX_POST_BC_OP;
+ cmd.cmd_op = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;
- writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1));
- for (i = 2, j = 0; j < (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
- i++, j++) {
- writel(*(hdr++), QLCNIC_MBX_HOST(ahw, i));
+ err = mbx->ops->enqueue_cmd(adapter, &cmd, &timeout);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Mailbox not available, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, cmd.cmd_op, cmd.type, ahw->pci_func,
+ ahw->op_mode);
+ return err;
}
- for (j = 0; j < size; j++, i++)
- writel(*(pay++), QLCNIC_MBX_HOST(ahw, i));
- /* Signal FW about the impending command */
- QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
-
- /* Waiting for the mailbox cmd to complete and while waiting here
- * some AEN might arrive. If more than 5 seconds expire we can
- * assume something is wrong.
- */
-poll:
- rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time);
- if (rsp != QLCNIC_RCODE_TIMEOUT) {
- /* Get the FW response data */
- fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
- if (fw_data & QLCNIC_MBX_ASYNC_EVENT) {
- __qlcnic_83xx_process_aen(adapter);
- goto poll;
- }
- mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
- rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
- opcode = QLCNIC_MBX_RSP(fw_data);
-
- switch (mbx_err_code) {
- case QLCNIC_MBX_RSP_OK:
- case QLCNIC_MBX_PORT_RSP_OK:
- rsp = QLCNIC_RCODE_SUCCESS;
- break;
- default:
- if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) {
- rsp = qlcnic_83xx_mac_rcode(adapter);
- if (!rsp)
- goto out;
- }
- dev_err(&adapter->pdev->dev,
- "MBX command 0x%x failed with err:0x%x\n",
- opcode, mbx_err_code);
- rsp = mbx_err_code;
- break;
- }
- goto out;
+ if (!wait_for_completion_timeout(&cmd.completion, timeout)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, cmd.cmd_op, cmd.type, ahw->pci_func,
+ ahw->op_mode);
+ flush_workqueue(mbx->work_q);
}
- dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n",
- QLCNIC_MBX_RSP(mbx_cmd));
- rsp = QLCNIC_RCODE_TIMEOUT;
-out:
- /* clear fw mbx control register */
- QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
- spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
- return rsp;
+ return cmd.rsp_opcode;
}
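
The rewritten qlcnic_sriov_post_bc_msg no longer spins on mailbox registers under a spinlock; it packs the request into a qlcnic_cmd_args, hands it to the mailbox workqueue via enqueue_cmd, and sleeps on a completion with a timeout. A rough pthread-based sketch of that enqueue-and-wait shape (all names hypothetical; a worker thread plays the role of the mailbox workqueue):

    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    struct cmd {
        pthread_mutex_t lock;
        pthread_cond_t done_cv;
        int done;
        int rsp;
    };

    /* Plays the mailbox worker: completes the command after a delay. */
    static void *worker(void *arg)
    {
        struct cmd *c = arg;

        usleep(100 * 1000);
        pthread_mutex_lock(&c->lock);
        c->rsp = 0;                     /* success */
        c->done = 1;
        pthread_cond_signal(&c->done_cv);
        pthread_mutex_unlock(&c->lock);
        return NULL;
    }

    /* Mirrors wait_for_completion_timeout(); -1 plays the timeout branch. */
    static int wait_cmd_timeout(struct cmd *c, int timeout_sec)
    {
        struct timespec ts;
        int err = 0;

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += timeout_sec;
        pthread_mutex_lock(&c->lock);
        while (!c->done && !err)
            err = pthread_cond_timedwait(&c->done_cv, &c->lock, &ts);
        pthread_mutex_unlock(&c->lock);
        return err ? -1 : c->rsp;
    }

    int main(void)
    {
        struct cmd c = { PTHREAD_MUTEX_INITIALIZER,
                         PTHREAD_COND_INITIALIZER, 0, -1 };
        pthread_t t;

        pthread_create(&t, NULL, worker, &c);   /* "enqueue" the command */
        printf("rsp=%d\n", wait_cmd_timeout(&c, 5));
        pthread_join(&t, NULL);
        return 0;
    }
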
static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter)
@@ -458,7 +400,7 @@ int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter,
static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *cmd)
{
- adapter->rx_pvid = (cmd->rsp.arg[1] >> 16) & 0xffff;
+ adapter->rx_pvid = MSW(cmd->rsp.arg[1]) & 0xffff;
adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
return 0;
}
@@ -490,11 +432,12 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
return 0;
}
-static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
+static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *info)
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
struct qlcnic_cmd_args cmd;
- int ret;
+ int ret = 0;
ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
if (ret)
@@ -522,8 +465,8 @@ static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
{
- struct qlcnic_info nic_info;
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_info nic_info;
int err;
err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0);
@@ -534,7 +477,7 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
if (err)
return -EIO;
- err = qlcnic_sriov_get_vf_acl(adapter);
+ err = qlcnic_sriov_get_vf_acl(adapter, &nic_info);
if (err)
return err;
@@ -564,7 +507,7 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
dev_warn(&adapter->pdev->dev,
"Device does not support MSI interrupts\n");
- err = qlcnic_setup_intr(adapter, 1);
+ err = qlcnic_setup_intr(adapter, 1, 0);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
goto err_out_disable_msi;
@@ -590,6 +533,9 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
if (err)
goto err_out_send_channel_term;
+ if (adapter->dcb && qlcnic_dcb_attach(adapter))
+ qlcnic_clear_dcb_ops(adapter);
+
err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
if (err)
goto err_out_send_channel_term;
@@ -597,6 +543,7 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
pci_set_drvdata(adapter->pdev, adapter);
dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
adapter->netdev->name);
+
qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
adapter->ahw->idc.delay);
return 0;
@@ -637,8 +584,6 @@ int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
struct qlcnic_hardware_context *ahw = adapter->ahw;
int err;
- spin_lock_init(&ahw->mbx_lock);
- set_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status);
ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
ahw->reset_context = 0;
@@ -1085,6 +1030,7 @@ static void qlcnic_sriov_process_bc_cmd(struct work_struct *work)
if (test_bit(QLC_BC_VF_FLR, &vf->state))
return;
+ memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
trans = list_first_entry(&vf->rcv_act.wait_list,
struct qlcnic_bc_trans, list);
adapter = vf->adapter;
@@ -1234,6 +1180,7 @@ static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,
return;
}
+ memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
cmd_op = hdr->cmd_op;
if (qlcnic_sriov_alloc_bc_trans(&trans))
return;
@@ -1359,7 +1306,7 @@ int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable)
if (enable)
cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
- err = qlcnic_83xx_mbx_op(adapter, &cmd);
+ err = qlcnic_83xx_issue_cmd(adapter, &cmd);
if (err != QLCNIC_RCODE_SUCCESS) {
dev_err(&adapter->pdev->dev,
@@ -1391,10 +1338,11 @@ static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
return -EIO;
}
-static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *adapter,
+static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
struct device *dev = &adapter->pdev->dev;
struct qlcnic_bc_trans *trans;
int err;
@@ -1411,7 +1359,7 @@ static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *adapter,
goto cleanup_transaction;
retry:
- if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
+ if (!test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
rsp = -EIO;
QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
QLCNIC_MBX_RSP(cmd->req.arg[0]), func);
@@ -1454,7 +1402,7 @@ err_out:
if (rsp == QLCNIC_RCODE_TIMEOUT) {
ahw->reset_context = 1;
adapter->need_fw_reset = 1;
- clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
}
cleanup_transaction:
@@ -1613,8 +1561,8 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
{
int err;
- set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
- qlcnic_83xx_enable_mbx_intrpt(adapter);
+ qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
if (err)
@@ -1628,6 +1576,8 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
if (err)
goto err_out_term_channel;
+ qlcnic_dcb_get_info(adapter);
+
return 0;
err_out_term_channel:
@@ -1657,8 +1607,10 @@ static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter)
struct net_device *netdev = adapter->netdev;
u8 i, max_ints = ahw->num_msix - 1;
- qlcnic_83xx_disable_mbx_intr(adapter);
netif_device_detach(netdev);
+ qlcnic_83xx_detach_mailbox_work(adapter);
+ qlcnic_83xx_disable_mbx_intr(adapter);
+
if (netif_running(netdev))
qlcnic_down(adapter, netdev);
@@ -1702,6 +1654,7 @@ static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter)
static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
struct device *dev = &adapter->pdev->dev;
struct qlc_83xx_idc *idc = &ahw->idc;
u8 func = ahw->pci_func;
@@ -1712,7 +1665,7 @@ static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
/* Skip the context reset and check if FW is hung */
if (adapter->reset_ctx_cnt < 3) {
adapter->need_fw_reset = 1;
- clear_bit(QLC_83XX_MBX_READY, &idc->status);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
dev_info(dev,
"Resetting context, wait here to check if FW is in failed state\n");
return 0;
@@ -1737,7 +1690,7 @@ static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
__func__, adapter->reset_ctx_cnt, func);
set_bit(__QLCNIC_RESETTING, &adapter->state);
adapter->need_fw_reset = 1;
- clear_bit(QLC_83XX_MBX_READY, &idc->status);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
qlcnic_sriov_vf_detach(adapter);
adapter->need_fw_reset = 0;
@@ -1787,6 +1740,7 @@ static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter)
static int
qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
struct qlc_83xx_idc *idc = &adapter->ahw->idc;
dev_info(&adapter->pdev->dev, "Device is in quiescent state\n");
@@ -1794,7 +1748,7 @@ qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
set_bit(__QLCNIC_RESETTING, &adapter->state);
adapter->tx_timeo_cnt = 0;
adapter->reset_ctx_cnt = 0;
- clear_bit(QLC_83XX_MBX_READY, &idc->status);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
qlcnic_sriov_vf_detach(adapter);
}
@@ -1803,6 +1757,7 @@ qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
struct qlc_83xx_idc *idc = &adapter->ahw->idc;
u8 func = adapter->ahw->pci_func;
@@ -1812,7 +1767,7 @@ static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
set_bit(__QLCNIC_RESETTING, &adapter->state);
adapter->tx_timeo_cnt = 0;
adapter->reset_ctx_cnt = 0;
- clear_bit(QLC_83XX_MBX_READY, &idc->status);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
qlcnic_sriov_vf_detach(adapter);
}
return 0;
@@ -1990,7 +1945,7 @@ int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
int err;
set_bit(QLC_83XX_MODULE_LOADED, &idc->status);
- qlcnic_83xx_enable_mbx_intrpt(adapter);
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
if (err)
return err;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index eb49cd65378c..330d9a8774ad 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -1183,10 +1183,19 @@ static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_vf_info *vf = trans->vf;
struct qlcnic_vport *vp = vf->vp;
u8 cmd_op, mode = vp->vlan_mode;
+ struct qlcnic_adapter *adapter;
+
+ adapter = vf->adapter;
cmd_op = trans->req_hdr->cmd_op;
cmd->rsp.arg[0] |= 1 << 25;
+ /* For an 84xx adapter with PVID, the PF driver should send the
+ * vlan mode to the VF driver as QLC_NO_VLAN_MODE, which is zero
+ * in the mailbox response
+ */
+ if (qlcnic_84xx_check(adapter) && mode == QLC_PVID_MODE)
+ return 0;
+
switch (mode) {
case QLC_GUEST_VLAN_MODE:
cmd->rsp.arg[1] = mode | 1 << 8;
@@ -1284,6 +1293,10 @@ static const int qlcnic_pf_passthru_supp_cmds[] = {
QLCNIC_CMD_GET_STATISTICS,
QLCNIC_CMD_GET_PORT_CONFIG,
QLCNIC_CMD_GET_LINK_STATUS,
+ QLCNIC_CMD_DCB_QUERY_CAP,
+ QLCNIC_CMD_DCB_QUERY_PARAM,
+ QLCNIC_CMD_INIT_NIC_FUNC,
+ QLCNIC_CMD_STOP_NIC_FUNC,
};
static const struct qlcnic_sriov_cmd_handler qlcnic_pf_bc_cmd_hdlr[] = {
@@ -1639,14 +1652,14 @@ int qlcnic_sriov_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
if (!is_valid_ether_addr(mac) || vf >= num_vfs)
return -EINVAL;
- if (!compare_ether_addr(adapter->mac_addr, mac)) {
+ if (ether_addr_equal(adapter->mac_addr, mac)) {
netdev_err(netdev, "MAC address is already in use by the PF\n");
return -EINVAL;
}
for (i = 0; i < num_vfs; i++) {
vf_info = &sriov->vf_info[i];
- if (!compare_ether_addr(vf_info->vp->mac, mac)) {
+ if (ether_addr_equal(vf_info->vp->mac, mac)) {
netdev_err(netdev,
"MAC address is already in use by VF %d\n",
i);
@@ -1768,8 +1781,8 @@ int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
return 0;
}
-static inline __u32 qlcnic_sriov_get_vf_vlan(struct qlcnic_adapter *adapter,
- struct qlcnic_vport *vp, int vf)
+static __u32 qlcnic_sriov_get_vf_vlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_vport *vp, int vf)
{
__u32 vlan = 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 660c3f5b2237..c6165d05cc13 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -465,8 +465,14 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
memset(&pm_cfg, 0,
sizeof(struct qlcnic_pm_func_cfg) * QLCNIC_MAX_PCI_FUNC);
- for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
pci_func = adapter->npars[i].pci_func;
+ if (!adapter->npars[i].active)
+ continue;
+
+ if (!adapter->npars[i].eswitch_status)
+ continue;
+
pm_cfg[pci_func].action = adapter->npars[i].enable_pm;
pm_cfg[pci_func].dest_npar = 0;
pm_cfg[pci_func].pci_func = i;
@@ -632,8 +638,14 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
memset(&esw_cfg, 0,
sizeof(struct qlcnic_esw_func_cfg) * QLCNIC_MAX_PCI_FUNC);
- for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
pci_func = adapter->npars[i].pci_func;
+ if (!adapter->npars[i].active)
+ continue;
+
+ if (!adapter->npars[i].eswitch_status)
+ continue;
+
esw_cfg[pci_func].pci_func = pci_func;
if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func]))
return QL_STATUS_INVALID_PARAM;
@@ -732,6 +744,9 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
if (ret)
return ret;
+ if (!adapter->npars[i].eswitch_status)
+ continue;
+
np_cfg[i].pci_func = i;
np_cfg[i].op_mode = (u8)nic_info.op_mode;
np_cfg[i].port_num = nic_info.phys_port;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 7e8d68263963..899433778466 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -2149,7 +2149,7 @@ struct ql_adapter {
struct timer_list timer;
atomic_t lb_count;
/* Keep local copy of current mac address. */
- char current_mac_addr[6];
+ char current_mac_addr[ETH_ALEN];
};
/*
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 85e5c97191dd..6f87f2cde647 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1897,12 +1897,13 @@ static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *p)
{
struct rtl8169_private *tp = netdev_priv(dev);
-
- if (regs->len > R8169_REGS_SIZE)
- regs->len = R8169_REGS_SIZE;
+ u32 __iomem *data = tp->mmio_addr;
+ u32 *dw = p;
+ int i;
rtl_lock_work(tp);
- memcpy_fromio(p, tp->mmio_addr, regs->len);
+ for (i = 0; i < R8169_REGS_SIZE; i += 4)
+ memcpy_fromio(dw++, data++, 4);
rtl_unlock_work(tp);
}
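
The regs dump above switches from one arbitrary-length memcpy_fromio() to a loop of 4-byte copies, since some RTL8169 variants only behave correctly with dword-sized MMIO reads. A user-space sketch of the dword-at-a-time copy (a plain array stands in for the mapped BAR, which on real hardware must go through the io accessors):

    #include <stdio.h>

    #define REGS_SIZE 16

    /* One 32-bit read per register word, as in the loop above. */
    static void copy_regs(unsigned int *dst,
                          const volatile unsigned int *mmio)
    {
        int i;

        for (i = 0; i < REGS_SIZE; i += 4)
            *dst++ = *mmio++;
    }

    int main(void)
    {
        unsigned int fake_bar[REGS_SIZE / 4] = { 0xdeadbeef, 1, 2, 3 };
        unsigned int out[REGS_SIZE / 4];

        copy_regs(out, fake_bar);
        printf("reg0=0x%x\n", out[0]);
        return 0;
    }
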
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 19a8a045e077..a30c4395b232 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -13,4 +13,4 @@ config SH_ETH
Renesas SuperH Ethernet device driver.
The CPUs supported by this driver are:
- SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
- R8A7740 and R8A7779.
+ R8A7740, R8A777x and R8A7790.
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index a753928bab9c..5cd831ebfa83 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -189,6 +189,7 @@ static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
[RMCR] = 0x0258,
[TFUCR] = 0x0264,
[RFOCR] = 0x0268,
+ [RMIIMODE] = 0x026c,
[FCFTR] = 0x0270,
[TRIMD] = 0x027c,
};
@@ -377,6 +378,8 @@ static struct sh_eth_cpu_data r8a777x_data = {
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_r8a777x,
+ .register_type = SH_ETH_REG_FAST_RCAR,
+
.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
.eesipr_value = 0x01ff009f,
@@ -392,6 +395,30 @@ static struct sh_eth_cpu_data r8a777x_data = {
.hw_swap = 1,
};
+/* R8A7790 */
+static struct sh_eth_cpu_data r8a7790_data = {
+ .set_duplex = sh_eth_set_duplex,
+ .set_rate = sh_eth_set_rate_r8a777x,
+
+ .register_type = SH_ETH_REG_FAST_RCAR,
+
+ .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
+ .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
+ .eesipr_value = 0x01ff009f,
+
+ .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
+ .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
+ EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
+ EESR_ECI,
+
+ .apr = 1,
+ .mpr = 1,
+ .tpauser = 1,
+ .hw_swap = 1,
+ .rmiimode = 1,
+ .shift_rd0 = 1,
+};
+
static void sh_eth_set_rate_sh7724(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -413,6 +440,8 @@ static struct sh_eth_cpu_data sh7724_data = {
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_sh7724,
+ .register_type = SH_ETH_REG_FAST_SH4,
+
.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
.eesipr_value = 0x01ff009f,
@@ -451,6 +480,8 @@ static struct sh_eth_cpu_data sh7757_data = {
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_sh7757,
+ .register_type = SH_ETH_REG_FAST_SH4,
+
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.rmcr_value = 0x00000001,
@@ -519,6 +550,8 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_giga,
+ .register_type = SH_ETH_REG_GIGABIT,
+
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
@@ -577,6 +610,8 @@ static struct sh_eth_cpu_data sh7734_data = {
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_gether,
+ .register_type = SH_ETH_REG_GIGABIT,
+
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
@@ -604,6 +639,8 @@ static struct sh_eth_cpu_data sh7763_data = {
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_gether,
+ .register_type = SH_ETH_REG_GIGABIT,
+
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
@@ -641,6 +678,8 @@ static struct sh_eth_cpu_data r8a7740_data = {
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_gether,
+ .register_type = SH_ETH_REG_GIGABIT,
+
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
@@ -663,6 +702,8 @@ static struct sh_eth_cpu_data r8a7740_data = {
};
static struct sh_eth_cpu_data sh7619_data = {
+ .register_type = SH_ETH_REG_FAST_SH3_SH2,
+
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.apr = 1,
@@ -672,6 +713,8 @@ static struct sh_eth_cpu_data sh7619_data = {
};
static struct sh_eth_cpu_data sh771x_data = {
+ .register_type = SH_ETH_REG_FAST_SH3_SH2,
+
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.tsu = 1,
};
@@ -1124,6 +1167,9 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
if (ret)
goto out;
+ if (mdp->cd->rmiimode)
+ sh_eth_write(ndev, 0x1, RMIIMODE);
+
/* Descriptor format */
sh_eth_ring_format(ndev);
if (mdp->cd->rpadir)
@@ -1297,9 +1343,12 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
mdp->rx_skbuff[entry] = NULL;
if (mdp->cd->rpadir)
skb_reserve(skb, NET_IP_ALIGN);
+ dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
+ mdp->rx_buf_sz,
+ DMA_FROM_DEVICE);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
- netif_rx(skb);
+ netif_receive_skb(skb);
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += pkt_len;
}
@@ -1857,11 +1906,13 @@ static int sh_eth_open(struct net_device *ndev)
pm_runtime_get_sync(&mdp->pdev->dev);
+ napi_enable(&mdp->napi);
+
ret = request_irq(ndev->irq, sh_eth_interrupt,
mdp->cd->irq_flags, ndev->name, ndev);
if (ret) {
dev_err(&ndev->dev, "Can not assign IRQ number\n");
- return ret;
+ goto out_napi_off;
}
/* Descriptor set */
@@ -1879,12 +1930,12 @@ static int sh_eth_open(struct net_device *ndev)
if (ret)
goto out_free_irq;
- napi_enable(&mdp->napi);
-
return ret;
out_free_irq:
free_irq(ndev->irq, ndev);
+out_napi_off:
+ napi_disable(&mdp->napi);
pm_runtime_put_sync(&mdp->pdev->dev);
return ret;
}
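
The sh_eth_open changes above move napi_enable() ahead of request_irq() and add a matching napi_disable() to the error path, so the unwind order always mirrors the setup order. A toy sketch of that goto-based rollback pattern (all helpers hypothetical):

    #include <stdio.h>

    static void fake_napi_enable(void)  { puts("napi on"); }
    static void fake_napi_disable(void) { puts("napi off"); }
    static int fake_request_irq(int fail) { return fail ? -1 : 0; }

    /* Resources are released in reverse order of acquisition, so
     * enabling NAPI first means the IRQ failure path must undo it. */
    static int open_dev(int irq_fails)
    {
        int ret;

        fake_napi_enable();
        ret = fake_request_irq(irq_fails);
        if (ret)
            goto out_napi_off;
        puts("device up");
        return 0;

    out_napi_off:
        fake_napi_disable();
        return ret;
    }

    int main(void)
    {
        open_dev(1);    /* IRQ allocation fails: NAPI is rolled back */
        return 0;
    }
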
@@ -1976,8 +2027,6 @@ static int sh_eth_close(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- napi_disable(&mdp->napi);
-
netif_stop_queue(ndev);
/* Disable interrupts by clearing the interrupt mask. */
@@ -1995,6 +2044,8 @@ static int sh_eth_close(struct net_device *ndev)
free_irq(ndev->irq, ndev);
+ napi_disable(&mdp->napi);
+
/* Free all the skbuffs in the Rx queue. */
sh_eth_ring_free(ndev);
@@ -2561,7 +2612,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
struct resource *res;
struct net_device *ndev = NULL;
struct sh_eth_private *mdp = NULL;
- struct sh_eth_plat_data *pd = pdev->dev.platform_data;
+ struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
const struct platform_device_id *id = platform_get_device_id(pdev);
/* get base addr */
@@ -2594,9 +2645,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
SET_NETDEV_DEV(ndev, &pdev->dev);
- /* Fill in the fields of the device structure with ethernet values. */
- ether_setup(ndev);
-
mdp = netdev_priv(ndev);
mdp->num_tx_ring = TX_RING_SIZE;
mdp->num_rx_ring = RX_RING_SIZE;
@@ -2618,10 +2666,10 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
mdp->edmac_endian = pd->edmac_endian;
mdp->no_ether_link = pd->no_ether_link;
mdp->ether_link_active_low = pd->ether_link_active_low;
- mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);
/* set cpu data */
mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
+ mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
sh_eth_set_default_cpu_data(mdp->cd);
/* set function */
@@ -2749,6 +2797,7 @@ static struct platform_device_id sh_eth_id_table[] = {
{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
{ "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
{ "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
+ { "r8a7790-ether", (kernel_ulong_t)&r8a7790_data },
{ }
};
MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 99995bf38c40..a0db02c63b11 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -60,6 +60,7 @@ enum {
EDOCR,
TFUCR,
RFOCR,
+ RMIIMODE,
FCFTR,
RPADIR,
TRIMD,
@@ -156,6 +157,13 @@ enum {
SH_ETH_MAX_REGISTER_OFFSET,
};
+enum {
+ SH_ETH_REG_GIGABIT,
+ SH_ETH_REG_FAST_RCAR,
+ SH_ETH_REG_FAST_SH4,
+ SH_ETH_REG_FAST_SH3_SH2
+};
+
/* Driver's parameters */
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
#define SH4_SKB_RX_ALIGN 32
@@ -453,6 +461,7 @@ struct sh_eth_cpu_data {
void (*set_rate)(struct net_device *ndev);
/* mandatory initialize value */
+ int register_type;
unsigned long eesipr_value;
/* optional initialize value */
@@ -482,6 +491,7 @@ struct sh_eth_cpu_data {
unsigned hw_crc:1; /* E-DMAC have CSMR */
unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */
unsigned shift_rd0:1; /* shift Rx descriptor word 0 right by 16 */
+ unsigned rmiimode:1; /* EtherC has RMIIMODE register */
};
struct sh_eth_private {
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 856e523ac936..c76571886011 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -721,7 +721,7 @@ static const struct net_device_ops sgiseeq_netdev_ops = {
static int sgiseeq_probe(struct platform_device *pdev)
{
- struct sgiseeq_platform_data *pd = pdev->dev.platform_data;
+ struct sgiseeq_platform_data *pd = dev_get_platdata(&pdev->dev);
struct hpc3_regs *hpcregs = pd->hpc;
struct sgiseeq_init_block *sr;
unsigned int irq = pd->irq;
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 4136ccc4a954..8b7152565c5e 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -1,5 +1,5 @@
config SFC
- tristate "Solarflare SFC4000/SFC9000-family support"
+ tristate "Solarflare SFC4000/SFC9000/SFC9100-family support"
depends on PCI
select MDIO
select CRC32
@@ -8,12 +8,13 @@ config SFC
select PTP_1588_CLOCK
---help---
This driver supports 10-gigabit Ethernet cards based on
- the Solarflare SFC4000 and SFC9000-family controllers.
+ the Solarflare SFC4000, SFC9000-family and SFC9100-family
+ controllers.
To compile this driver as a module, choose M here. The module
will be called sfc.
config SFC_MTD
- bool "Solarflare SFC4000/SFC9000-family MTD support"
+ bool "Solarflare SFC4000/SFC9000/SFC9100-family MTD support"
depends on SFC && MTD && !(SFC=y && MTD=m)
default y
---help---
@@ -21,7 +22,7 @@ config SFC_MTD
(e.g. /dev/mtd1). This is required to update the firmware or
the boot configuration under Linux.
config SFC_MCDI_MON
- bool "Solarflare SFC9000-family hwmon support"
+ bool "Solarflare SFC9000/SFC9100-family hwmon support"
depends on SFC && HWMON && !(SFC=y && HWMON=m)
default y
---help---
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 945bf06e69ef..3a83c0dca8e6 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -1,8 +1,7 @@
-sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \
- falcon_xmac.o mcdi_mac.o \
- selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
+sfc-y += efx.o nic.o farch.o falcon.o siena.o ef10.o tx.o \
+ rx.o selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
tenxpress.o txc43128_phy.o falcon_boards.o \
- mcdi.o mcdi_phy.o mcdi_mon.o ptp.o
+ mcdi.o mcdi_port.o mcdi_mon.o ptp.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += siena_sriov.o
diff --git a/drivers/net/ethernet/sfc/bitfield.h b/drivers/net/ethernet/sfc/bitfield.h
index 5400a33f254f..17d83f37fbf2 100644
--- a/drivers/net/ethernet/sfc/bitfield.h
+++ b/drivers/net/ethernet/sfc/bitfield.h
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -29,6 +29,10 @@
/* Lowest bit numbers and widths */
#define EFX_DUMMY_FIELD_LBN 0
#define EFX_DUMMY_FIELD_WIDTH 0
+#define EFX_WORD_0_LBN 0
+#define EFX_WORD_0_WIDTH 16
+#define EFX_WORD_1_LBN 16
+#define EFX_WORD_1_WIDTH 16
#define EFX_DWORD_0_LBN 0
#define EFX_DWORD_0_WIDTH 32
#define EFX_DWORD_1_LBN 32
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
new file mode 100644
index 000000000000..5f42313b4965
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -0,0 +1,3043 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2012-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include "ef10_regs.h"
+#include "io.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "nic.h"
+#include "workarounds.h"
+#include <linux/in.h>
+#include <linux/jhash.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+/* Hardware control for EF10 architecture including 'Huntington'. */
+
+#define EFX_EF10_DRVGEN_EV 7
+enum {
+ EFX_EF10_TEST = 1,
+ EFX_EF10_REFILL,
+};
+
+/* The reserved RSS context value */
+#define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff
+
+/* The filter table(s) are managed by firmware and we have write-only
+ * access. When removing filters we must identify them to the
+ * firmware by a 64-bit handle, but this is too wide for Linux kernel
+ * interfaces (32-bit for RX NFC, 16-bit for RFS). Also, we need to
+ * be able to tell in advance whether a requested insertion will
+ * replace an existing filter. Therefore we maintain a software hash
+ * table, which should be at least as large as the hardware hash
+ * table.
+ *
+ * Huntington has a single 8K filter table shared between all filter
+ * types and both ports.
+ */
+#define HUNT_FILTER_TBL_ROWS 8192
+
+struct efx_ef10_filter_table {
+/* The RX match field masks supported by this fw & hw, in order of priority */
+ enum efx_filter_match_flags rx_match_flags[
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM];
+ unsigned int rx_match_count;
+
+ struct {
+ unsigned long spec; /* pointer to spec plus flag bits */
+/* BUSY flag indicates that an update is in progress. STACK_OLD is
+ * used to mark and sweep stack-owned MAC filters.
+ */
+#define EFX_EF10_FILTER_FLAG_BUSY 1UL
+#define EFX_EF10_FILTER_FLAG_STACK_OLD 2UL
+#define EFX_EF10_FILTER_FLAGS 3UL
+ u64 handle; /* firmware handle */
+ } *entry;
+ wait_queue_head_t waitq;
+/* Shadow of net_device address lists, guarded by mac_lock */
+#define EFX_EF10_FILTER_STACK_UC_MAX 32
+#define EFX_EF10_FILTER_STACK_MC_MAX 256
+ struct {
+ u8 addr[ETH_ALEN];
+ u16 id;
+ } stack_uc_list[EFX_EF10_FILTER_STACK_UC_MAX],
+ stack_mc_list[EFX_EF10_FILTER_STACK_MC_MAX];
+ int stack_uc_count; /* negative for PROMISC */
+ int stack_mc_count; /* negative for PROMISC/ALLMULTI */
+};
+
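
Each entry above packs a pointer to the filter spec and two flag bits into a single unsigned long: specs are at least word-aligned, so the low bits are free for BUSY/STACK_OLD style markers. A small sketch of that pointer-plus-flags encoding (struct filter_spec and the flag names here are hypothetical):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define FLAG_BUSY 1UL
    #define FLAG_OLD  2UL
    #define FLAG_MASK 3UL

    struct filter_spec { int match; };

    /* malloc() returns at least 4-byte-aligned memory, so the two low
     * bits of the pointer can carry the flags. */
    static uintptr_t pack(struct filter_spec *spec, unsigned long flags)
    {
        return (uintptr_t)spec | (flags & FLAG_MASK);
    }

    static struct filter_spec *unpack(uintptr_t entry)
    {
        return (struct filter_spec *)(entry & ~FLAG_MASK);
    }

    int main(void)
    {
        struct filter_spec *spec = malloc(sizeof(*spec));
        uintptr_t entry = pack(spec, FLAG_BUSY);

        printf("busy=%lu spec=%p\n",
               (unsigned long)(entry & FLAG_BUSY), (void *)unpack(entry));
        free(spec);
        return 0;
    }
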
+/* An arbitrary search limit for the software hash table */
+#define EFX_EF10_FILTER_SEARCH_LIMIT 200
+
+static void efx_ef10_rx_push_indir_table(struct efx_nic *efx);
+static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
+static void efx_ef10_filter_table_remove(struct efx_nic *efx);
+
+static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
+{
+ efx_dword_t reg;
+
+ efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
+ return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
+ EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
+}
+
+static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
+{
+ return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]);
+}
+
+static int efx_ef10_init_capabilities(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+
+ if (outlen >= sizeof(outbuf)) {
+ nic_data->datapath_caps =
+ MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
+ if (!(nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
+ netif_err(efx, drv, efx->net_dev,
+ "Capabilities don't indicate TSO support.\n");
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ return rc;
+ rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
+ return rc > 0 ? rc : -ERANGE;
+}
+
+static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
+ return -EIO;
+
+ memcpy(mac_address,
+ MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE), ETH_ALEN);
+ return 0;
+}
+
+static int efx_ef10_probe(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data;
+ int i, rc;
+
+ /* We can have one VI for each 8K region. However we need
+ * multiple TX queues per channel.
+ */
+ efx->max_channels =
+ min_t(unsigned int,
+ EFX_MAX_CHANNELS,
+ resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
+ (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
+ BUG_ON(efx->max_channels == 0);
+
+ nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
+ if (!nic_data)
+ return -ENOMEM;
+ efx->nic_data = nic_data;
+
+ rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
+ 8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
+ if (rc)
+ goto fail1;
+
+ /* Get the MC's warm boot count. In case it's rebooting right
+ * now, be prepared to retry.
+ */
+ i = 0;
+ for (;;) {
+ rc = efx_ef10_get_warm_boot_count(efx);
+ if (rc >= 0)
+ break;
+ if (++i == 5)
+ goto fail2;
+ ssleep(1);
+ }
+ nic_data->warm_boot_count = rc;
+
+ nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+
+ /* In case we're recovering from a crash (kexec), we want to
+ * cancel any outstanding request by the previous user of this
+ * function. We send a special message using the least
+ * significant bits of the 'high' (doorbell) register.
+ */
+ _efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);
+
+ rc = efx_mcdi_init(efx);
+ if (rc)
+ goto fail2;
+
+ /* Reset (most) configuration for this function */
+ rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
+ if (rc)
+ goto fail3;
+
+ /* Enable event logging */
+ rc = efx_mcdi_log_ctrl(efx, true, false, 0);
+ if (rc)
+ goto fail3;
+
+ rc = efx_ef10_init_capabilities(efx);
+ if (rc < 0)
+ goto fail3;
+
+ efx->rx_packet_len_offset =
+ ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
+
+ if (!(nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
+ netif_err(efx, probe, efx->net_dev,
+ "current firmware does not support an RX prefix\n");
+ rc = -ENODEV;
+ goto fail3;
+ }
+
+ rc = efx_mcdi_port_get_number(efx);
+ if (rc < 0)
+ goto fail3;
+ efx->port_num = rc;
+
+ rc = efx_ef10_get_mac_address(efx, efx->net_dev->perm_addr);
+ if (rc)
+ goto fail3;
+
+ rc = efx_ef10_get_sysclk_freq(efx);
+ if (rc < 0)
+ goto fail3;
+ efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */
+
+ /* Check whether firmware supports bug 35388 workaround */
+ rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
+ if (rc == 0)
+ nic_data->workaround_35388 = true;
+ else if (rc != -ENOSYS && rc != -ENOENT)
+ goto fail3;
+ netif_dbg(efx, probe, efx->net_dev,
+ "workaround for bug 35388 is %sabled\n",
+ nic_data->workaround_35388 ? "en" : "dis");
+
+ rc = efx_mcdi_mon_probe(efx);
+ if (rc)
+ goto fail3;
+
+ efx_ptp_probe(efx);
+
+ return 0;
+
+fail3:
+ efx_mcdi_fini(efx);
+fail2:
+ efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
+fail1:
+ kfree(nic_data);
+ efx->nic_data = NULL;
+ return rc;
+}
+
+static int efx_ef10_free_vis(struct efx_nic *efx)
+{
+ int rc = efx_mcdi_rpc(efx, MC_CMD_FREE_VIS, NULL, 0, NULL, 0, NULL);
+
+ /* -EALREADY means nothing to free, so ignore */
+ if (rc == -EALREADY)
+ rc = 0;
+ return rc;
+}
+
+static void efx_ef10_remove(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ efx_mcdi_mon_remove(efx);
+
+ /* This needs to be after efx_ptp_remove_channel() with no filters */
+ efx_ef10_rx_free_indir_table(efx);
+
+ rc = efx_ef10_free_vis(efx);
+ WARN_ON(rc != 0);
+
+ efx_mcdi_fini(efx);
+ efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
+ kfree(nic_data);
+}
+
+static int efx_ef10_alloc_vis(struct efx_nic *efx,
+ unsigned int min_vis, unsigned int max_vis)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
+ MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
+ rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
+ return -EIO;
+
+ netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n",
+ MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));
+
+ nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
+ nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
+ return 0;
+}
+
+static int efx_ef10_dimension_resources(struct efx_nic *efx)
+{
+ unsigned int n_vis =
+ max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+
+ return efx_ef10_alloc_vis(efx, n_vis, n_vis);
+}
+
+static int efx_ef10_init_nic(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ if (nic_data->must_realloc_vis) {
+ /* We cannot let the number of VIs change now */
+ rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
+ nic_data->n_allocated_vis);
+ if (rc)
+ return rc;
+ nic_data->must_realloc_vis = false;
+ }
+
+ efx_ef10_rx_push_indir_table(efx);
+ return 0;
+}
+
+static int efx_ef10_map_reset_flags(u32 *flags)
+{
+ enum {
+ EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
+ ETH_RESET_SHARED_SHIFT),
+ EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
+ ETH_RESET_OFFLOAD | ETH_RESET_MAC |
+ ETH_RESET_PHY | ETH_RESET_MGMT) <<
+ ETH_RESET_SHARED_SHIFT)
+ };
+
+ /* We assume for now that our PCI function is permitted to
+ * reset everything.
+ */
+
+ if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
+ *flags &= ~EF10_RESET_MC;
+ return RESET_TYPE_WORLD;
+ }
+
+ if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
+ *flags &= ~EF10_RESET_PORT;
+ return RESET_TYPE_ALL;
+ }
+
+ /* no invisible reset implemented */
+
+ return -EINVAL;
+}
+
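
efx_ef10_map_reset_flags above consumes the broadest fully-requested set of ethtool reset flags and returns the matching internal reset type, leaving unsatisfiable flags in place. A toy sketch of that flag-subset mapping (flag values and reset-type numbers are made up for illustration):

    #include <stdio.h>

    #define F_MAC  0x1
    #define F_PHY  0x2
    #define F_MGMT 0x4

    #define RESET_PORT  (F_MAC | F_PHY)
    #define RESET_WORLD (F_MAC | F_PHY | F_MGMT)

    /* Try the widest reset first; clear the flags it satisfies. */
    static int map_reset_flags(unsigned int *flags)
    {
        if ((*flags & RESET_WORLD) == RESET_WORLD) {
            *flags &= ~RESET_WORLD;
            return 2;           /* "world" reset */
        }
        if ((*flags & RESET_PORT) == RESET_PORT) {
            *flags &= ~RESET_PORT;
            return 1;           /* port-level reset */
        }
        return -1;              /* like -EINVAL above */
    }

    int main(void)
    {
        unsigned int flags = F_MAC | F_PHY;

        printf("type=%d leftover=0x%x\n", map_reset_flags(&flags), flags);
        return 0;
    }
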
+#define EF10_DMA_STAT(ext_name, mcdi_name) \
+ [EF10_STAT_ ## ext_name] = \
+ { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
+#define EF10_DMA_INVIS_STAT(int_name, mcdi_name) \
+ [EF10_STAT_ ## int_name] = \
+ { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
+#define EF10_OTHER_STAT(ext_name) \
+ [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }
+
+static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
+ EF10_DMA_STAT(tx_bytes, TX_BYTES),
+ EF10_DMA_STAT(tx_packets, TX_PKTS),
+ EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS),
+ EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS),
+ EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS),
+ EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS),
+ EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS),
+ EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS),
+ EF10_DMA_STAT(tx_64, TX_64_PKTS),
+ EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS),
+ EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS),
+ EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS),
+ EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS),
+ EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
+ EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
+ EF10_DMA_STAT(rx_bytes, RX_BYTES),
+ EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES),
+ EF10_OTHER_STAT(rx_good_bytes),
+ EF10_OTHER_STAT(rx_bad_bytes),
+ EF10_DMA_STAT(rx_packets, RX_PKTS),
+ EF10_DMA_STAT(rx_good, RX_GOOD_PKTS),
+ EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS),
+ EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS),
+ EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS),
+ EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS),
+ EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS),
+ EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS),
+ EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS),
+ EF10_DMA_STAT(rx_64, RX_64_PKTS),
+ EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
+ EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
+ EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
+ EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
+ EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
+ EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
+ EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
+ EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
+ EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
+ EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
+ EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
+ EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
+};
+
+#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \
+ (1ULL << EF10_STAT_tx_packets) | \
+ (1ULL << EF10_STAT_tx_pause) | \
+ (1ULL << EF10_STAT_tx_unicast) | \
+ (1ULL << EF10_STAT_tx_multicast) | \
+ (1ULL << EF10_STAT_tx_broadcast) | \
+ (1ULL << EF10_STAT_rx_bytes) | \
+ (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \
+ (1ULL << EF10_STAT_rx_good_bytes) | \
+ (1ULL << EF10_STAT_rx_bad_bytes) | \
+ (1ULL << EF10_STAT_rx_packets) | \
+ (1ULL << EF10_STAT_rx_good) | \
+ (1ULL << EF10_STAT_rx_bad) | \
+ (1ULL << EF10_STAT_rx_pause) | \
+ (1ULL << EF10_STAT_rx_control) | \
+ (1ULL << EF10_STAT_rx_unicast) | \
+ (1ULL << EF10_STAT_rx_multicast) | \
+ (1ULL << EF10_STAT_rx_broadcast) | \
+ (1ULL << EF10_STAT_rx_lt64) | \
+ (1ULL << EF10_STAT_rx_64) | \
+ (1ULL << EF10_STAT_rx_65_to_127) | \
+ (1ULL << EF10_STAT_rx_128_to_255) | \
+ (1ULL << EF10_STAT_rx_256_to_511) | \
+ (1ULL << EF10_STAT_rx_512_to_1023) | \
+ (1ULL << EF10_STAT_rx_1024_to_15xx) | \
+ (1ULL << EF10_STAT_rx_15xx_to_jumbo) | \
+ (1ULL << EF10_STAT_rx_gtjumbo) | \
+ (1ULL << EF10_STAT_rx_bad_gtjumbo) | \
+ (1ULL << EF10_STAT_rx_overflow) | \
+ (1ULL << EF10_STAT_rx_nodesc_drops))
+
+/* These statistics are only provided by the 10G MAC. For a 10G/40G
+ * switchable port we do not expose these because they might not
+ * include all the packets they should.
+ */
+#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) | \
+ (1ULL << EF10_STAT_tx_lt64) | \
+ (1ULL << EF10_STAT_tx_64) | \
+ (1ULL << EF10_STAT_tx_65_to_127) | \
+ (1ULL << EF10_STAT_tx_128_to_255) | \
+ (1ULL << EF10_STAT_tx_256_to_511) | \
+ (1ULL << EF10_STAT_tx_512_to_1023) | \
+ (1ULL << EF10_STAT_tx_1024_to_15xx) | \
+ (1ULL << EF10_STAT_tx_15xx_to_jumbo))
+
+/* These statistics are only provided by the 40G MAC. For a 10G/40G
+ * switchable port we do expose these because the errors will otherwise
+ * be silent.
+ */
+#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \
+ (1ULL << EF10_STAT_rx_length_error))
+
+#if BITS_PER_LONG == 64
+#define STAT_MASK_BITMAP(bits) (bits)
+#else
+#define STAT_MASK_BITMAP(bits) (bits) & 0xffffffff, (bits) >> 32
+#endif
+
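
STAT_MASK_BITMAP above lets one 64-bit constant initialize an unsigned long bitmap on both word sizes: it expands to a single element on 64-bit builds and to a low/high pair on 32-bit ones. A stand-alone sketch of the same trick (MASK and test_bit are local stand-ins):

    #include <limits.h>
    #include <stdio.h>

    #define MASK ((1ULL << 3) | (1ULL << 40))   /* hypothetical stat bits */

    #if ULONG_MAX > 0xffffffffUL
    #define STAT_MASK_BITMAP(bits) (bits)
    #else
    #define STAT_MASK_BITMAP(bits) (bits) & 0xffffffff, (bits) >> 32
    #endif

    static const unsigned long mask[] = { STAT_MASK_BITMAP(MASK) };

    static int test_bit(const unsigned long *map, unsigned int bit)
    {
        unsigned int width = CHAR_BIT * sizeof(unsigned long);

        return (map[bit / width] >> (bit % width)) & 1;
    }

    int main(void)
    {
        printf("bit 3: %d, bit 40: %d, bit 5: %d\n",
               test_bit(mask, 3), test_bit(mask, 40), test_bit(mask, 5));
        return 0;
    }
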
+static const unsigned long *efx_ef10_stat_mask(struct efx_nic *efx)
+{
+ static const unsigned long hunt_40g_stat_mask[] = {
+ STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
+ HUNT_40G_EXTRA_STAT_MASK)
+ };
+ static const unsigned long hunt_10g_only_stat_mask[] = {
+ STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
+ HUNT_10G_ONLY_STAT_MASK)
+ };
+ u32 port_caps = efx_mcdi_phy_get_caps(efx);
+
+ if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+ return hunt_40g_stat_mask;
+ else
+ return hunt_10g_only_stat_mask;
+}
+
+static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
+{
+ return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
+ efx_ef10_stat_mask(efx), names);
+}
+
+static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ const unsigned long *stats_mask = efx_ef10_stat_mask(efx);
+ __le64 generation_start, generation_end;
+ u64 *stats = nic_data->stats;
+ __le64 *dma_stats;
+
+ dma_stats = efx->stats_buffer.addr;
+ nic_data = efx->nic_data;
+
+ generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+ if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
+ return 0;
+ rmb();
+ efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, stats_mask,
+ stats, efx->stats_buffer.addr, false);
+ generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
+ if (generation_end != generation_start)
+ return -EAGAIN;
+
+ /* Update derived statistics */
+ stats[EF10_STAT_rx_good_bytes] =
+ stats[EF10_STAT_rx_bytes] -
+ stats[EF10_STAT_rx_bytes_minus_good_bytes];
+ efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes],
+ stats[EF10_STAT_rx_bytes_minus_good_bytes]);
+
+ return 0;
+}
+
+
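
try_update_nic_stats above reads firmware-DMAed statistics bracketed by generation words, seqlock-style: if the generation changed while copying, the snapshot is torn and the caller retries. A minimal single-threaded sketch of that consistency check (struct layout and names are illustrative only):

    #include <stdio.h>

    struct stats_buf {
        unsigned long long generation_end;
        unsigned long long counters[4];
        unsigned long long generation_start;
    };

    /* Read the end marker, copy, then compare against the start
     * marker; in the kernel an rmb() orders the loads between steps. */
    static int try_read_stats(const struct stats_buf *buf,
                              unsigned long long *out, int n)
    {
        unsigned long long gen_end = buf->generation_end;
        int i;

        for (i = 0; i < n; i++)
            out[i] = buf->counters[i];
        if (buf->generation_start != gen_end)
            return -1;          /* torn snapshot: caller retries */
        return 0;
    }

    int main(void)
    {
        struct stats_buf buf = { 7, { 1, 2, 3, 4 }, 7 };
        unsigned long long out[4];

        if (try_read_stats(&buf, out, 4) == 0)
            printf("consistent snapshot, counter0=%llu\n", out[0]);
        return 0;
    }
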
+static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
+{
+ const unsigned long *mask = efx_ef10_stat_mask(efx);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u64 *stats = nic_data->stats;
+ size_t stats_count = 0, index;
+ int retry;
+
+ /* If we're unlucky enough to read statistics during the DMA, wait
+ * up to 10ms for it to finish (typically takes <500us)
+ */
+ for (retry = 0; retry < 100; ++retry) {
+ if (efx_ef10_try_update_nic_stats(efx) == 0)
+ break;
+ udelay(100);
+ }
+
+ if (full_stats) {
+ for_each_set_bit(index, mask, EF10_STAT_COUNT) {
+ if (efx_ef10_stat_desc[index].name) {
+ *full_stats++ = stats[index];
+ ++stats_count;
+ }
+ }
+ }
+
+ if (core_stats) {
+ core_stats->rx_packets = stats[EF10_STAT_rx_packets];
+ core_stats->tx_packets = stats[EF10_STAT_tx_packets];
+ core_stats->rx_bytes = stats[EF10_STAT_rx_bytes];
+ core_stats->tx_bytes = stats[EF10_STAT_tx_bytes];
+ core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops];
+ core_stats->multicast = stats[EF10_STAT_rx_multicast];
+ core_stats->rx_length_errors =
+ stats[EF10_STAT_rx_gtjumbo] +
+ stats[EF10_STAT_rx_length_error];
+ core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
+ core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error];
+ core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
+ core_stats->rx_errors = (core_stats->rx_length_errors +
+ core_stats->rx_crc_errors +
+ core_stats->rx_frame_errors);
+ }
+
+ return stats_count;
+}
+
+static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned int mode, value;
+ efx_dword_t timer_cmd;
+
+ if (channel->irq_moderation) {
+ mode = 3;
+ value = channel->irq_moderation - 1;
+ } else {
+ mode = 0;
+ value = 0;
+ }
+
+ if (EFX_EF10_WORKAROUND_35388(efx)) {
+ EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
+ EFE_DD_EVQ_IND_TIMER_FLAGS,
+ ERF_DD_EVQ_IND_TIMER_MODE, mode,
+ ERF_DD_EVQ_IND_TIMER_VAL, value);
+ efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
+ channel->channel);
+ } else {
+ EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
+ ERF_DZ_TC_TIMER_VAL, value);
+ efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
+ channel->channel);
+ }
+}
+
+static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
+{
+ wol->supported = 0;
+ wol->wolopts = 0;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int efx_ef10_set_wol(struct efx_nic *efx, u32 type)
+{
+ if (type != 0)
+ return -EINVAL;
+ return 0;
+}
+
+static void efx_ef10_mcdi_request(struct efx_nic *efx,
+ const efx_dword_t *hdr, size_t hdr_len,
+ const efx_dword_t *sdu, size_t sdu_len)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u8 *pdu = nic_data->mcdi_buf.addr;
+
+ memcpy(pdu, hdr, hdr_len);
+ memcpy(pdu + hdr_len, sdu, sdu_len);
+ wmb();
+
+ /* The hardware provides 'low' and 'high' (doorbell) registers
+ * for passing the 64-bit address of an MCDI request to
+ * firmware. However the dwords are swapped by firmware. The
+ * least significant bits of the doorbell are then 0 for all
+ * MCDI requests due to alignment.
+ */
+ _efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
+ ER_DZ_MC_DB_LWRD);
+ _efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
+ ER_DZ_MC_DB_HWRD);
+}
+
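
efx_ef10_mcdi_request above kicks off an MCDI command by writing the 64-bit DMA address of the request buffer as two 32-bit doorbell writes, high half first because the firmware swaps the dwords. A trivial sketch of splitting a 64-bit address across two 32-bit registers (writed() just logs what a real register write would do):

    #include <stdint.h>
    #include <stdio.h>

    static void writed(uint32_t val, const char *reg)
    {
        printf("write 0x%08x -> %s\n", val, reg);
    }

    int main(void)
    {
        uint64_t dma_addr = 0x0000123456789000ULL;

        /* High dword to the "low" register, low dword to the "high"
         * (doorbell) register, matching the swap described above. */
        writed((uint32_t)(dma_addr >> 32), "MC_DB_LWRD");
        writed((uint32_t)dma_addr,         "MC_DB_HWRD");
        return 0;
    }
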
+static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr;
+
+ rmb();
+ return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
+}
+
+static void
+efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
+ size_t offset, size_t outlen)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ const u8 *pdu = nic_data->mcdi_buf.addr;
+
+ memcpy(outbuf, pdu + offset, outlen);
+}
+
+static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ rc = efx_ef10_get_warm_boot_count(efx);
+ if (rc < 0) {
+ /* The firmware is presumably in the process of
+ * rebooting. However, we are supposed to report each
+ * reboot just once, so we must only do that once we
+ * can read and store the updated warm boot count.
+ */
+ return 0;
+ }
+
+ if (rc == nic_data->warm_boot_count)
+ return 0;
+
+ nic_data->warm_boot_count = rc;
+
+ /* All our allocations have been reset */
+ nic_data->must_realloc_vis = true;
+ nic_data->must_restore_filters = true;
+ nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+
+ return -EIO;
+}
+
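
mcdi_poll_reboot above detects a firmware reboot by comparing the MC's warm boot count with the last value stored; a change is reported exactly once and marks all firmware-side allocations as stale. A compact sketch of that once-per-reboot detection (names and the -EIO constant are stand-ins):

    #include <stdio.h>

    struct dev_state {
        int warm_boot_count;
        int must_realloc;
    };

    static int poll_reboot(struct dev_state *st, int hw_count)
    {
        if (hw_count < 0)       /* still rebooting: report later */
            return 0;
        if (hw_count == st->warm_boot_count)
            return 0;
        st->warm_boot_count = hw_count;
        st->must_realloc = 1;   /* firmware handles are now stale */
        return -5;              /* -EIO: reported exactly once */
    }

    int main(void)
    {
        struct dev_state st = { .warm_boot_count = 3 };

        printf("%d %d %d\n",
               poll_reboot(&st, 3),    /* 0: unchanged */
               poll_reboot(&st, 4),    /* -5: reboot detected */
               poll_reboot(&st, 4));   /* 0: already reported */
        return 0;
    }
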
+/* Handle an MSI interrupt
+ *
+ * Handle an MSI hardware interrupt. This routine schedules event
+ * queue processing. No interrupt acknowledgement cycle is necessary.
+ * Also, we never need to check that the interrupt is for us, since
+ * MSI interrupts cannot be shared.
+ */
+static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
+{
+ struct efx_msi_context *context = dev_id;
+ struct efx_nic *efx = context->efx;
+
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());
+
+ if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) {
+ /* Note test interrupts */
+ if (context->index == efx->irq_level)
+ efx->last_irq_cpu = raw_smp_processor_id();
+
+ /* Schedule processing of the channel */
+ efx_schedule_channel_irq(efx->channel[context->index]);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
+{
+ struct efx_nic *efx = dev_id;
+ bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+ struct efx_channel *channel;
+ efx_dword_t reg;
+ u32 queues;
+
+ /* Read the ISR which also ACKs the interrupts */
+ efx_readd(efx, &reg, ER_DZ_BIU_INT_ISR);
+ queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG);
+
+ if (queues == 0)
+ return IRQ_NONE;
+
+ if (likely(soft_enabled)) {
+ /* Note test interrupts */
+ if (queues & (1U << efx->irq_level))
+ efx->last_irq_cpu = raw_smp_processor_id();
+
+ efx_for_each_channel(channel, efx) {
+ if (queues & 1)
+ efx_schedule_channel_irq(channel);
+ queues >>= 1;
+ }
+ }
+
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
+
+ return IRQ_HANDLED;
+}
+
+static void efx_ef10_irq_test_generate(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);
+
+ BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);
+
+ MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
+ (void) efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+}
+
+static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
+{
+ return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
+ (tx_queue->ptr_mask + 1) *
+ sizeof(efx_qword_t),
+ GFP_KERNEL);
+}
+
+/* This writes to the TX_DESC_WPTR and also pushes data */
+static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
+ const efx_qword_t *txd)
+{
+ unsigned int write_ptr;
+ efx_oword_t reg;
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
+ reg.qword[0] = *txd;
+ efx_writeo_page(tx_queue->efx, &reg,
+ ER_DZ_TX_DESC_UPD, tx_queue->queue);
+}
+
+static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
+ EFX_BUF_SIZE));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_TXQ_OUT_LEN);
+ bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
+ size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
+ struct efx_channel *channel = tx_queue->channel;
+ struct efx_nic *efx = tx_queue->efx;
+ size_t inlen, outlen;
+ dma_addr_t dma_addr;
+ efx_qword_t *txd;
+ int rc;
+ int i;
+
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
+ MCDI_POPULATE_DWORD_2(inbuf, INIT_TXQ_IN_FLAGS,
+ INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
+ INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+ dma_addr = tx_queue->txd.buf.dma_addr;
+
+ netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
+ tx_queue->queue, entries, (u64)dma_addr);
+
+ for (i = 0; i < entries; ++i) {
+ MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
+ dma_addr += EFX_BUF_SIZE;
+ }
+
+ inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ /* A previous user of this TX queue might have set us up the
+ * bomb by writing a descriptor to the TX push collector but
+ * not the doorbell. (Each collector belongs to a port, not a
+ * queue or function, so cannot easily be reset.) We must
+ * attempt to push a no-op descriptor in its place.
+ */
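+	/* The no-op is a CRC/checksum option descriptor, which also
+	 * carries the queue's checksum-offload settings (the
+	 * ESF_DZ_TX_OPTION_*_CSUM fields below).
+	 */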
+ tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
+ tx_queue->insert_count = 1;
+ txd = efx_tx_desc(tx_queue, 0);
+ EFX_POPULATE_QWORD_4(*txd,
+ ESF_DZ_TX_DESC_IS_OPT, true,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
+ ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
+ ESF_DZ_TX_OPTION_IP_CSUM, csum_offload);
+ tx_queue->write_count = 1;
+ wmb();
+ efx_ef10_push_tx_desc(tx_queue, txd);
+
+ return;
+
+fail:
+ WARN_ON(true);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_TXQ_OUT_LEN);
+ struct efx_nic *efx = tx_queue->efx;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
+ tx_queue->queue);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+
+ if (rc && rc != -EALREADY)
+ goto fail;
+
+ return;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
+{
+ efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
+}
+
+/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
+static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
+{
+ unsigned int write_ptr;
+ efx_dword_t reg;
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr);
+ efx_writed_page(tx_queue->efx, &reg,
+ ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
+}
+
+static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
+{
+ unsigned int old_write_count = tx_queue->write_count;
+ struct efx_tx_buffer *buffer;
+ unsigned int write_ptr;
+ efx_qword_t *txd;
+
+ BUG_ON(tx_queue->write_count == tx_queue->insert_count);
+
+ do {
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ buffer = &tx_queue->buffer[write_ptr];
+ txd = efx_tx_desc(tx_queue, write_ptr);
+ ++tx_queue->write_count;
+
+ /* Create TX descriptor ring entry */
+ if (buffer->flags & EFX_TX_BUF_OPTION) {
+ *txd = buffer->option;
+ } else {
+ BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
+ EFX_POPULATE_QWORD_3(
+ *txd,
+ ESF_DZ_TX_KER_CONT,
+ buffer->flags & EFX_TX_BUF_CONT,
+ ESF_DZ_TX_KER_BYTE_CNT, buffer->len,
+ ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr);
+ }
+ } while (tx_queue->write_count != tx_queue->insert_count);
+
+ wmb(); /* Ensure descriptors are written before they are fetched */
+
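+	/* Either push the first new descriptor along with the doorbell
+	 * write, or just notify the hardware of the new write pointer.
+	 */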
+ if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
+ txd = efx_tx_desc(tx_queue,
+ old_write_count & tx_queue->ptr_mask);
+ efx_ef10_push_tx_desc(tx_queue, txd);
+ ++tx_queue->pushes;
+ } else {
+ efx_ef10_notify_tx_desc(tx_queue);
+ }
+}
+
+static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
+ EVB_PORT_ID_ASSIGNED);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE,
+ MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES,
+ EFX_MAX_CHANNELS);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
+ return -EIO;
+
+ *context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
+
+ return 0;
+}
+
+static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
+ context);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ WARN_ON(rc != 0);
+}
+
+static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context)
+{
+ MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
+ MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
+ int i, rc;
+
+ MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
+ context);
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
+ MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
+
+ for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
+ MCDI_PTR(tablebuf,
+ RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
+ (u8) efx->rx_indir_table[i];
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
+ sizeof(tablebuf), NULL, 0, NULL);
+ if (rc != 0)
+ return rc;
+
+ MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
+ context);
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
+ MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
+ for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
+ MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] =
+ efx->rx_hash_key[i];
+
+ return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
+ sizeof(keybuf), NULL, 0, NULL);
+}
+
+static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
+ efx_ef10_free_rss_context(efx, nic_data->rx_rss_context);
+ nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+}
+
+static void efx_ef10_rx_push_indir_table(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ netif_dbg(efx, drv, efx->net_dev, "pushing RX indirection table\n");
+
+ if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) {
+ rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context);
+ if (rc != 0)
+ goto fail;
+ }
+
+ rc = efx_ef10_populate_rss_table(efx, nic_data->rx_rss_context);
+ if (rc != 0)
+ goto fail;
+
+ return;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
+{
+ return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
+ (rx_queue->ptr_mask + 1) *
+ sizeof(efx_qword_t),
+ GFP_KERNEL);
+}
+
+static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
+{
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
+ EFX_BUF_SIZE));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_RXQ_OUT_LEN);
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
+ struct efx_nic *efx = rx_queue->efx;
+ size_t inlen, outlen;
+ dma_addr_t dma_addr;
+ int rc;
+ int i;
+
+ rx_queue->scatter_n = 0;
+ rx_queue->scatter_len = 0;
+
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
+ efx_rx_queue_index(rx_queue));
+ MCDI_POPULATE_DWORD_1(inbuf, INIT_RXQ_IN_FLAGS,
+ INIT_RXQ_IN_FLAG_PREFIX, 1);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+ dma_addr = rx_queue->rxd.buf.dma_addr;
+
+ netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
+ efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);
+
+ for (i = 0; i < entries; ++i) {
+ MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
+ dma_addr += EFX_BUF_SIZE;
+ }
+
+ inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ return;
+
+fail:
+ WARN_ON(true);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_RXQ_OUT_LEN);
+ struct efx_nic *efx = rx_queue->efx;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
+ efx_rx_queue_index(rx_queue));
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+
+ if (rc && rc != -EALREADY)
+ goto fail;
+
+ return;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
+{
+ efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
+}
+
+/* This creates an entry in the RX descriptor queue */
+static inline void
+efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
+{
+ struct efx_rx_buffer *rx_buf;
+ efx_qword_t *rxd;
+
+ rxd = efx_rx_desc(rx_queue, index);
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ EFX_POPULATE_QWORD_2(*rxd,
+ ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len,
+ ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
+}
+
+static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned int write_count;
+ efx_dword_t reg;
+
+ /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */
+ write_count = rx_queue->added_count & ~7;
+ if (rx_queue->notified_count == write_count)
+ return;
+
+ do
+ efx_ef10_build_rx_desc(
+ rx_queue,
+ rx_queue->notified_count & rx_queue->ptr_mask);
+ while (++rx_queue->notified_count != write_count);
+
+ wmb();
+ EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR,
+ write_count & rx_queue->ptr_mask);
+ efx_writed_page(efx, &reg, ER_DZ_RX_DESC_UPD,
+ efx_rx_queue_index(rx_queue));
+}
+
+static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete;
+
+static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue)
+{
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
+ efx_qword_t event;
+
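+	/* Post a driver-generated event to this channel's event queue;
+	 * the refill then runs from event-processing context in
+	 * efx_ef10_handle_driver_generated_event().
+	 */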
+ EFX_POPULATE_QWORD_2(event,
+ ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
+ ESF_DZ_EV_DATA, EFX_EF10_REFILL);
+
+ MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
+
+ /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
+ * already swapped the data to little-endian order.
+ */
+ memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
+ sizeof(efx_qword_t));
+
+ efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT,
+ inbuf, sizeof(inbuf), 0,
+ efx_ef10_rx_defer_refill_complete, 0);
+}
+
+static void
+efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
+ int rc, efx_dword_t *outbuf,
+ size_t outlen_actual)
+{
+ /* nothing to do */
+}
+
+static int efx_ef10_ev_probe(struct efx_channel *channel)
+{
+ return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
+ (channel->eventq_mask + 1) *
+ sizeof(efx_qword_t),
+ GFP_KERNEL);
+}
+
+static int efx_ef10_ev_init(struct efx_channel *channel)
+{
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_INIT_EVQ_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
+ EFX_BUF_SIZE));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN);
+ size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
+ struct efx_nic *efx = channel->efx;
+ struct efx_ef10_nic_data *nic_data;
+ bool supports_rx_merge;
+ size_t inlen, outlen;
+ dma_addr_t dma_addr;
+ int rc;
+ int i;
+
+ nic_data = efx->nic_data;
+ supports_rx_merge =
+ !!(nic_data->datapath_caps &
+ 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
+
+ /* Fill event queue with all ones (i.e. empty events) */
+ memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
+
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
+ /* INIT_EVQ expects index in vector table, not absolute */
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
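+	/* Request RX and TX event merging; request cut-through only when
+	 * the datapath cannot batch RX events.
+	 */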
+ MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
+ INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
+ INIT_EVQ_IN_FLAG_RX_MERGE, 1,
+ INIT_EVQ_IN_FLAG_TX_MERGE, 1,
+ INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_merge);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
+ MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
+ MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
+
+ dma_addr = channel->eventq.buf.dma_addr;
+ for (i = 0; i < entries; ++i) {
+ MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
+ dma_addr += EFX_BUF_SIZE;
+ }
+
+ inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+	/* The IRQ number returned by INIT_EVQ is ignored */
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static void efx_ef10_ev_fini(struct efx_channel *channel)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_EVQ_OUT_LEN);
+ struct efx_nic *efx = channel->efx;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+
+ if (rc && rc != -EALREADY)
+ goto fail;
+
+ return;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+static void efx_ef10_ev_remove(struct efx_channel *channel)
+{
+ efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
+}
+
+static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
+ unsigned int rx_queue_label)
+{
+ struct efx_nic *efx = rx_queue->efx;
+
+ netif_info(efx, hw, efx->net_dev,
+ "rx event arrived on queue %d labeled as queue %u\n",
+ efx_rx_queue_index(rx_queue), rx_queue_label);
+
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+}
+
+static void
+efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue,
+ unsigned int actual, unsigned int expected)
+{
+ unsigned int dropped = (actual - expected) & rx_queue->ptr_mask;
+ struct efx_nic *efx = rx_queue->efx;
+
+ netif_info(efx, hw, efx->net_dev,
+ "dropped %d events (index=%d expected=%d)\n",
+ dropped, actual, expected);
+
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+}
+
+/* A partially received RX packet was aborted; clean up. */
+static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
+{
+ unsigned int rx_desc_ptr;
+
+ WARN_ON(rx_queue->scatter_n == 0);
+
+ netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
+ "scattered RX aborted (dropping %u buffers)\n",
+ rx_queue->scatter_n);
+
+ rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
+
+ efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n,
+ 0, EFX_RX_PKT_DISCARD);
+
+ rx_queue->removed_count += rx_queue->scatter_n;
+ rx_queue->scatter_n = 0;
+ rx_queue->scatter_len = 0;
+ ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
+}
+
+static int efx_ef10_handle_rx_event(struct efx_channel *channel,
+ const efx_qword_t *event)
+{
+ unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class;
+ unsigned int n_descs, n_packets, i;
+ struct efx_nic *efx = channel->efx;
+ struct efx_rx_queue *rx_queue;
+ bool rx_cont;
+ u16 flags = 0;
+
+ if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ return 0;
+
+ /* Basic packet information */
+ rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
+ next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
+ rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
+ rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
+ rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
+
+ WARN_ON(EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT));
+
+ rx_queue = efx_channel_get_rx_queue(channel);
+
+ if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue)))
+ efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label);
+
+ n_descs = ((next_ptr_lbits - rx_queue->removed_count) &
+ ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
+
+ if (n_descs != rx_queue->scatter_n + 1) {
+		/* Detect RX abort */
+ if (unlikely(n_descs == rx_queue->scatter_n)) {
+ WARN_ON(rx_bytes != 0);
+ efx_ef10_handle_rx_abort(rx_queue);
+ return 0;
+ }
+
+ if (unlikely(rx_queue->scatter_n != 0)) {
+ /* Scattered packet completions cannot be
+ * merged, so something has gone wrong.
+ */
+ efx_ef10_handle_rx_bad_lbits(
+ rx_queue, next_ptr_lbits,
+ (rx_queue->removed_count +
+ rx_queue->scatter_n + 1) &
+ ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
+ return 0;
+ }
+
+ /* Merged completion for multiple non-scattered packets */
+ rx_queue->scatter_n = 1;
+ rx_queue->scatter_len = 0;
+ n_packets = n_descs;
+ ++channel->n_rx_merge_events;
+ channel->n_rx_merge_packets += n_packets;
+ flags |= EFX_RX_PKT_PREFIX_LEN;
+ } else {
+ ++rx_queue->scatter_n;
+ rx_queue->scatter_len += rx_bytes;
+ if (rx_cont)
+ return 0;
+ n_packets = 1;
+ }
+
+ if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)))
+ flags |= EFX_RX_PKT_DISCARD;
+
+ if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) {
+ channel->n_rx_ip_hdr_chksum_err += n_packets;
+ } else if (unlikely(EFX_QWORD_FIELD(*event,
+ ESF_DZ_RX_TCPUDP_CKSUM_ERR))) {
+ channel->n_rx_tcp_udp_chksum_err += n_packets;
+ } else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP ||
+ rx_l4_class == ESE_DZ_L4_CLASS_UDP) {
+ flags |= EFX_RX_PKT_CSUMMED;
+ }
+
+ if (rx_l4_class == ESE_DZ_L4_CLASS_TCP)
+ flags |= EFX_RX_PKT_TCP;
+
+ channel->irq_mod_score += 2 * n_packets;
+
+ /* Handle received packet(s) */
+ for (i = 0; i < n_packets; i++) {
+ efx_rx_packet(rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ rx_queue->scatter_n, rx_queue->scatter_len,
+ flags);
+ rx_queue->removed_count += rx_queue->scatter_n;
+ }
+
+ rx_queue->scatter_n = 0;
+ rx_queue->scatter_len = 0;
+
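+	/* The return value is counted against the event-processing quota */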
+ return n_packets;
+}
+
+static int
+efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ struct efx_tx_queue *tx_queue;
+ unsigned int tx_ev_desc_ptr;
+ unsigned int tx_ev_q_label;
+ int tx_descs = 0;
+
+ if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ return 0;
+
+ if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
+ return 0;
+
+ /* Transmit completion */
+ tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
+ tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
+ tx_queue = efx_channel_get_tx_queue(channel,
+ tx_ev_q_label % EFX_TXQ_TYPES);
+ tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) &
+ tx_queue->ptr_mask);
+ efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
+
+ return tx_descs;
+}
+
+static void
+efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ int subcode;
+
+ subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE);
+
+ switch (subcode) {
+ case ESE_DZ_DRV_TIMER_EV:
+ case ESE_DZ_DRV_WAKE_UP_EV:
+ break;
+ case ESE_DZ_DRV_START_UP_EV:
+		/* Event queue initialisation complete; nothing more to do. */
+ break;
+ default:
+ netif_err(efx, hw, efx->net_dev,
+ "channel %d unknown driver event type %d"
+ " (data " EFX_QWORD_FMT ")\n",
+ channel->channel, subcode,
+ EFX_QWORD_VAL(*event));
+	}
+}
+
+static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
+ efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ u32 subcode;
+
+ subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0);
+
+ switch (subcode) {
+ case EFX_EF10_TEST:
+ channel->event_test_cpu = raw_smp_processor_id();
+ break;
+ case EFX_EF10_REFILL:
+		/* The queue must be empty, so we won't receive any RX
+		 * events, so efx_process_channel() won't refill the
+		 * queue. Refill it here.
+		 */
+ efx_fast_push_rx_descriptors(&channel->rx_queue);
+ break;
+ default:
+ netif_err(efx, hw, efx->net_dev,
+ "channel %d unknown driver event type %u"
+ " (data " EFX_QWORD_FMT ")\n",
+ channel->channel, (unsigned) subcode,
+ EFX_QWORD_VAL(*event));
+ }
+}
+
+static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
+{
+ struct efx_nic *efx = channel->efx;
+ efx_qword_t event, *p_event;
+ unsigned int read_ptr;
+ int ev_code;
+ int tx_descs = 0;
+ int spent = 0;
+
+ read_ptr = channel->eventq_read_ptr;
+
+ for (;;) {
+ p_event = efx_event(channel, read_ptr);
+ event = *p_event;
+
+ if (!efx_event_present(&event))
+ break;
+
+ EFX_SET_QWORD(*p_event);
+
+ ++read_ptr;
+
+ ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE);
+
+ netif_vdbg(efx, drv, efx->net_dev,
+ "processing event on %d " EFX_QWORD_FMT "\n",
+ channel->channel, EFX_QWORD_VAL(event));
+
+ switch (ev_code) {
+ case ESE_DZ_EV_CODE_MCDI_EV:
+ efx_mcdi_process_event(channel, &event);
+ break;
+ case ESE_DZ_EV_CODE_RX_EV:
+ spent += efx_ef10_handle_rx_event(channel, &event);
+ if (spent >= quota) {
+ /* XXX can we split a merged event to
+ * avoid going over-quota?
+ */
+ spent = quota;
+ goto out;
+ }
+ break;
+ case ESE_DZ_EV_CODE_TX_EV:
+ tx_descs += efx_ef10_handle_tx_event(channel, &event);
+ if (tx_descs > efx->txq_entries) {
+ spent = quota;
+ goto out;
+ } else if (++spent == quota) {
+ goto out;
+ }
+ break;
+ case ESE_DZ_EV_CODE_DRIVER_EV:
+ efx_ef10_handle_driver_event(channel, &event);
+ if (++spent == quota)
+ goto out;
+ break;
+ case EFX_EF10_DRVGEN_EV:
+ efx_ef10_handle_driver_generated_event(channel, &event);
+ break;
+ default:
+ netif_err(efx, hw, efx->net_dev,
+ "channel %d unknown event type %d"
+ " (data " EFX_QWORD_FMT ")\n",
+ channel->channel, ev_code,
+ EFX_QWORD_VAL(event));
+ }
+ }
+
+out:
+ channel->eventq_read_ptr = read_ptr;
+ return spent;
+}
+
+static void efx_ef10_ev_read_ack(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ efx_dword_t rptr;
+
+ if (EFX_EF10_WORKAROUND_35388(efx)) {
+ BUILD_BUG_ON(EFX_MIN_EVQ_SIZE <
+ (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
+ BUILD_BUG_ON(EFX_MAX_EVQ_SIZE >
+ (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
+
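+		/* The read pointer is wider than the indirect register's
+		 * RPTR field, so write it in two halves: high bits first,
+		 * then low bits.
+		 */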
+ EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
+ EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
+ ERF_DD_EVQ_IND_RPTR,
+ (channel->eventq_read_ptr &
+ channel->eventq_mask) >>
+ ERF_DD_EVQ_IND_RPTR_WIDTH);
+ efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
+ channel->channel);
+ EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
+ EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
+ ERF_DD_EVQ_IND_RPTR,
+ channel->eventq_read_ptr &
+ ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
+ efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
+ channel->channel);
+ } else {
+ EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR,
+ channel->eventq_read_ptr &
+ channel->eventq_mask);
+ efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel);
+ }
+}
+
+static void efx_ef10_ev_test_generate(struct efx_channel *channel)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
+ struct efx_nic *efx = channel->efx;
+ efx_qword_t event;
+ int rc;
+
+ EFX_POPULATE_QWORD_2(event,
+ ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
+ ESF_DZ_EV_DATA, EFX_EF10_TEST);
+
+ MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
+
+ /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
+ * already swapped the data to little-endian order.
+ */
+ memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
+ sizeof(efx_qword_t));
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc != 0)
+ goto fail;
+
+ return;
+
+fail:
+ WARN_ON(true);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+void efx_ef10_handle_drain_event(struct efx_nic *efx)
+{
+ if (atomic_dec_and_test(&efx->active_queues))
+ wake_up(&efx->flush_wq);
+
+ WARN_ON(atomic_read(&efx->active_queues) < 0);
+}
+
+static int efx_ef10_fini_dmaq(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ int pending;
+
+ /* If the MC has just rebooted, the TX/RX queues will have already been
+ * torn down, but efx->active_queues needs to be set to zero.
+ */
+ if (nic_data->must_realloc_vis) {
+ atomic_set(&efx->active_queues, 0);
+ return 0;
+ }
+
+ /* Do not attempt to write to the NIC during EEH recovery */
+ if (efx->state != STATE_RECOVERY) {
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_ef10_rx_fini(rx_queue);
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_ef10_tx_fini(tx_queue);
+ }
+
+ wait_event_timeout(efx->flush_wq,
+ atomic_read(&efx->active_queues) == 0,
+ msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
+ pending = atomic_read(&efx->active_queues);
+ if (pending) {
+ netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
+ pending);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
+ const struct efx_filter_spec *right)
+{
+ if ((left->match_flags ^ right->match_flags) |
+ ((left->flags ^ right->flags) &
+ (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
+ return false;
+
+ return memcmp(&left->outer_vid, &right->outer_vid,
+ sizeof(struct efx_filter_spec) -
+ offsetof(struct efx_filter_spec, outer_vid)) == 0;
+}
+
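+/* Hash over the same tail of the spec (outer_vid onwards) that
+ * efx_ef10_filter_equal() compares, so equal filters hash equally.
+ */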
+static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
+{
+ BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
+ return jhash2((const u32 *)&spec->outer_vid,
+ (sizeof(struct efx_filter_spec) -
+ offsetof(struct efx_filter_spec, outer_vid)) / 4,
+ 0);
+ /* XXX should we randomise the initval? */
+}
+
+/* Decide whether a filter should be exclusive or else should allow
+ * delivery to additional recipients. Currently we decide that
+ * filters for specific local unicast MAC and IP addresses are
+ * exclusive.
+ */
+static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec)
+{
+ if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
+ !is_multicast_ether_addr(spec->loc_mac))
+ return true;
+
+ if ((spec->match_flags &
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
+ if (spec->ether_type == htons(ETH_P_IP) &&
+ !ipv4_is_multicast(spec->loc_host[0]))
+ return true;
+ if (spec->ether_type == htons(ETH_P_IPV6) &&
+ ((const u8 *)spec->loc_host)[0] != 0xff)
+ return true;
+ }
+
+ return false;
+}
+
+static struct efx_filter_spec *
+efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table,
+ unsigned int filter_idx)
+{
+ return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
+ ~EFX_EF10_FILTER_FLAGS);
+}
+
+static unsigned int
+efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table,
+ unsigned int filter_idx)
+{
+ return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
+}
+
+static void
+efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
+ unsigned int filter_idx,
+ const struct efx_filter_spec *spec,
+ unsigned int flags)
+{
+ table->entry[filter_idx].spec = (unsigned long)spec | flags;
+}
+
+static void efx_ef10_filter_push_prep(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ efx_dword_t *inbuf, u64 handle,
+ bool replacing)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);
+
+ if (replacing) {
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_REPLACE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
+ } else {
+ u32 match_fields = 0;
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ efx_ef10_filter_is_exclusive(spec) ?
+ MC_CMD_FILTER_OP_IN_OP_INSERT :
+ MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
+
+ /* Convert match flags and values. Unlike almost
+ * everything else in MCDI, these fields are in
+ * network byte order.
+ */
+ if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
+ match_fields |=
+ is_multicast_ether_addr(spec->loc_mac) ?
+ 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN :
+ 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
+#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \
+ if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \
+ match_fields |= \
+ 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
+ mcdi_field ## _LBN; \
+ BUILD_BUG_ON( \
+ MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
+ sizeof(spec->gen_field)); \
+ memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
+ &spec->gen_field, sizeof(spec->gen_field)); \
+ }
+ COPY_FIELD(REM_HOST, rem_host, SRC_IP);
+ COPY_FIELD(LOC_HOST, loc_host, DST_IP);
+ COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
+ COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
+ COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
+ COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
+ COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
+ COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
+ COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
+ COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
+#undef COPY_FIELD
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
+ match_fields);
+ }
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
+ spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
+ MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
+ MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
+ MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, spec->dmaq_id);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
+ (spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
+ MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
+ MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
+ if (spec->flags & EFX_FILTER_FLAG_RX_RSS)
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
+ spec->rss_context !=
+ EFX_FILTER_RSS_CONTEXT_DEFAULT ?
+ spec->rss_context : nic_data->rx_rss_context);
+}
+
+static int efx_ef10_filter_push(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ u64 *handle, bool replacing)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN);
+ int rc;
+
+ efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ if (rc == 0)
+ *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
+ return rc;
+}
+
+static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table,
+ enum efx_filter_match_flags match_flags)
+{
+ unsigned int match_pri;
+
+ for (match_pri = 0;
+ match_pri < table->rx_match_count;
+ match_pri++)
+ if (table->rx_match_flags[match_pri] == match_flags)
+ return match_pri;
+
+ return -EPROTONOSUPPORT;
+}
+
+static s32 efx_ef10_filter_insert(struct efx_nic *efx,
+ struct efx_filter_spec *spec,
+ bool replace_equal)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
+ struct efx_filter_spec *saved_spec;
+ unsigned int match_pri, hash;
+ unsigned int priv_flags;
+ bool replacing = false;
+ int ins_index = -1;
+ DEFINE_WAIT(wait);
+ bool is_mc_recip;
+ s32 rc;
+
+ /* For now, only support RX filters */
+ if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
+ EFX_FILTER_FLAG_RX)
+ return -EINVAL;
+
+ rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags);
+ if (rc < 0)
+ return rc;
+ match_pri = rc;
+
+ hash = efx_ef10_filter_hash(spec);
+ is_mc_recip = efx_filter_is_mc_recipient(spec);
+ if (is_mc_recip)
+ bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
+
+ /* Find any existing filters with the same match tuple or
+ * else a free slot to insert at. If any of them are busy,
+ * we have to wait and retry.
+ */
+ for (;;) {
+ unsigned int depth = 1;
+ unsigned int i;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (;;) {
+ i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
+ saved_spec = efx_ef10_filter_entry_spec(table, i);
+
+ if (!saved_spec) {
+ if (ins_index < 0)
+ ins_index = i;
+ } else if (efx_ef10_filter_equal(spec, saved_spec)) {
+ if (table->entry[i].spec &
+ EFX_EF10_FILTER_FLAG_BUSY)
+ break;
+ if (spec->priority < saved_spec->priority &&
+ !(saved_spec->priority ==
+ EFX_FILTER_PRI_REQUIRED &&
+ saved_spec->flags &
+ EFX_FILTER_FLAG_RX_STACK)) {
+ rc = -EPERM;
+ goto out_unlock;
+ }
+ if (!is_mc_recip) {
+ /* This is the only one */
+ if (spec->priority ==
+ saved_spec->priority &&
+ !replace_equal) {
+ rc = -EEXIST;
+ goto out_unlock;
+ }
+ ins_index = i;
+ goto found;
+ } else if (spec->priority >
+ saved_spec->priority ||
+ (spec->priority ==
+ saved_spec->priority &&
+ replace_equal)) {
+ if (ins_index < 0)
+ ins_index = i;
+ else
+ __set_bit(depth, mc_rem_map);
+ }
+ }
+
+ /* Once we reach the maximum search depth, use
+ * the first suitable slot or return -EBUSY if
+ * there was none
+ */
+ if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
+ if (ins_index < 0) {
+ rc = -EBUSY;
+ goto out_unlock;
+ }
+ goto found;
+ }
+
+ ++depth;
+ }
+
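+		/* An entry we must examine is busy: wait for it to be
+		 * released, then rescan from the start.
+		 */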
+ prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
+ spin_unlock_bh(&efx->filter_lock);
+ schedule();
+ }
+
+found:
+ /* Create a software table entry if necessary, and mark it
+ * busy. We might yet fail to insert, but any attempt to
+ * insert a conflicting filter while we're waiting for the
+ * firmware must find the busy entry.
+ */
+ saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
+ if (saved_spec) {
+ if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
+ /* Just make sure it won't be removed */
+ saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
+ table->entry[ins_index].spec &=
+ ~EFX_EF10_FILTER_FLAG_STACK_OLD;
+ rc = ins_index;
+ goto out_unlock;
+ }
+ replacing = true;
+ priv_flags = efx_ef10_filter_entry_flags(table, ins_index);
+ } else {
+ saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
+ if (!saved_spec) {
+ rc = -ENOMEM;
+ goto out_unlock;
+ }
+ *saved_spec = *spec;
+ priv_flags = 0;
+ }
+ efx_ef10_filter_set_entry(table, ins_index, saved_spec,
+ priv_flags | EFX_EF10_FILTER_FLAG_BUSY);
+
+ /* Mark lower-priority multicast recipients busy prior to removal */
+ if (is_mc_recip) {
+ unsigned int depth, i;
+
+ for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
+ i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
+ if (test_bit(depth, mc_rem_map))
+ table->entry[i].spec |=
+ EFX_EF10_FILTER_FLAG_BUSY;
+ }
+ }
+
+ spin_unlock_bh(&efx->filter_lock);
+
+ rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
+ replacing);
+
+ /* Finalise the software table entry */
+ spin_lock_bh(&efx->filter_lock);
+ if (rc == 0) {
+ if (replacing) {
+ /* Update the fields that may differ */
+ saved_spec->priority = spec->priority;
+ saved_spec->flags &= EFX_FILTER_FLAG_RX_STACK;
+ saved_spec->flags |= spec->flags;
+ saved_spec->rss_context = spec->rss_context;
+ saved_spec->dmaq_id = spec->dmaq_id;
+ }
+ } else if (!replacing) {
+ kfree(saved_spec);
+ saved_spec = NULL;
+ }
+ efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
+
+ /* Remove and finalise entries for lower-priority multicast
+ * recipients
+ */
+ if (is_mc_recip) {
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+ unsigned int depth, i;
+
+ memset(inbuf, 0, sizeof(inbuf));
+
+ for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
+ if (!test_bit(depth, mc_rem_map))
+ continue;
+
+ i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
+ saved_spec = efx_ef10_filter_entry_spec(table, i);
+ priv_flags = efx_ef10_filter_entry_flags(table, i);
+
+ if (rc == 0) {
+ spin_unlock_bh(&efx->filter_lock);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
+ table->entry[i].handle);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
+ inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ spin_lock_bh(&efx->filter_lock);
+ }
+
+ if (rc == 0) {
+ kfree(saved_spec);
+ saved_spec = NULL;
+ priv_flags = 0;
+ } else {
+ priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY;
+ }
+ efx_ef10_filter_set_entry(table, i, saved_spec,
+ priv_flags);
+ }
+ }
+
+ /* If successful, return the inserted filter ID */
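+	/* The ID encodes both the match priority and the table row; it is
+	 * decoded with / and % HUNT_FILTER_TBL_ROWS in
+	 * efx_ef10_filter_remove_internal() and efx_ef10_filter_get_safe().
+	 */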
+ if (rc == 0)
+ rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index;
+
+ wake_up_all(&table->waitq);
+out_unlock:
+ spin_unlock_bh(&efx->filter_lock);
+ finish_wait(&table->waitq, &wait);
+ return rc;
+}
+
+void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
+{
+ /* no need to do anything here on EF10 */
+}
+
+/* Remove a filter.
+ * If !stack_requested, remove by ID.
+ * If stack_requested, remove by index.
+ * The filter ID may come from userland and must be range-checked.
+ */
+static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, bool stack_requested)
+{
+ unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_FILTER_OP_IN_HANDLE_OFST +
+ MC_CMD_FILTER_OP_IN_HANDLE_LEN);
+ struct efx_filter_spec *spec;
+ DEFINE_WAIT(wait);
+ int rc;
+
+ /* Find the software table entry and mark it busy. Don't
+ * remove it yet; any attempt to update while we're waiting
+ * for the firmware must find the busy entry.
+ */
+ for (;;) {
+ spin_lock_bh(&efx->filter_lock);
+ if (!(table->entry[filter_idx].spec &
+ EFX_EF10_FILTER_FLAG_BUSY))
+ break;
+ prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
+ spin_unlock_bh(&efx->filter_lock);
+ schedule();
+ }
+ spec = efx_ef10_filter_entry_spec(table, filter_idx);
+ if (!spec || spec->priority > priority ||
+ (!stack_requested &&
+ efx_ef10_filter_rx_match_pri(table, spec->match_flags) !=
+ filter_id / HUNT_FILTER_TBL_ROWS)) {
+ rc = -ENOENT;
+ goto out_unlock;
+ }
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
+ spin_unlock_bh(&efx->filter_lock);
+
+ if (spec->flags & EFX_FILTER_FLAG_RX_STACK && !stack_requested) {
+ /* Reset steering of a stack-owned filter */
+
+ struct efx_filter_spec new_spec = *spec;
+
+ new_spec.priority = EFX_FILTER_PRI_REQUIRED;
+ new_spec.flags = (EFX_FILTER_FLAG_RX |
+ EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_STACK);
+ new_spec.dmaq_id = 0;
+ new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
+ rc = efx_ef10_filter_push(efx, &new_spec,
+ &table->entry[filter_idx].handle,
+ true);
+
+ spin_lock_bh(&efx->filter_lock);
+ if (rc == 0)
+ *spec = new_spec;
+ } else {
+ /* Really remove the filter */
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ efx_ef10_filter_is_exclusive(spec) ?
+ MC_CMD_FILTER_OP_IN_OP_REMOVE :
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
+ table->entry[filter_idx].handle);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+
+ spin_lock_bh(&efx->filter_lock);
+ if (rc == 0) {
+ kfree(spec);
+ efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
+ }
+ }
+ table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
+ wake_up_all(&table->waitq);
+out_unlock:
+ spin_unlock_bh(&efx->filter_lock);
+ finish_wait(&table->waitq, &wait);
+ return rc;
+}
+
+static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
+{
+ return efx_ef10_filter_remove_internal(efx, priority, filter_id, false);
+}
+
+static int efx_ef10_filter_get_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *spec)
+{
+ unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ const struct efx_filter_spec *saved_spec;
+ int rc;
+
+ spin_lock_bh(&efx->filter_lock);
+ saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
+ if (saved_spec && saved_spec->priority == priority &&
+ efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) ==
+ filter_id / HUNT_FILTER_TBL_ROWS) {
+ *spec = *saved_spec;
+ rc = 0;
+ } else {
+ rc = -ENOENT;
+ }
+ spin_unlock_bh(&efx->filter_lock);
+ return rc;
+}
+
+static void efx_ef10_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ /* TODO */
+}
+
+static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ unsigned int filter_idx;
+ s32 count = 0;
+
+ spin_lock_bh(&efx->filter_lock);
+ for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
+ if (table->entry[filter_idx].spec &&
+ efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
+ priority)
+ ++count;
+ }
+ spin_unlock_bh(&efx->filter_lock);
+ return count;
+}
+
+static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+
+ return table->rx_match_count * HUNT_FILTER_TBL_ROWS;
+}
+
+static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_filter_spec *spec;
+ unsigned int filter_idx;
+ s32 count = 0;
+
+ spin_lock_bh(&efx->filter_lock);
+ for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
+ spec = efx_ef10_filter_entry_spec(table, filter_idx);
+ if (spec && spec->priority == priority) {
+ if (count == size) {
+ count = -EMSGSIZE;
+ break;
+ }
+ buf[count++] = (efx_ef10_filter_rx_match_pri(
+ table, spec->match_flags) *
+ HUNT_FILTER_TBL_ROWS +
+ filter_idx);
+ }
+ }
+ spin_unlock_bh(&efx->filter_lock);
+ return count;
+}
+
+#ifdef CONFIG_RFS_ACCEL
+
+static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete;
+
+static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
+ struct efx_filter_spec *spec)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+ struct efx_filter_spec *saved_spec;
+ unsigned int hash, i, depth = 1;
+ bool replacing = false;
+ int ins_index = -1;
+ u64 cookie;
+ s32 rc;
+
+ /* Must be an RX filter without RSS and not for a multicast
+ * destination address (RFS only works for connected sockets).
+ * These restrictions allow us to pass only a tiny amount of
+ * data through to the completion function.
+ */
+ EFX_WARN_ON_PARANOID(spec->flags !=
+ (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER));
+ EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT);
+ EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec));
+
+ hash = efx_ef10_filter_hash(spec);
+
+ spin_lock_bh(&efx->filter_lock);
+
+ /* Find any existing filter with the same match tuple or else
+ * a free slot to insert at. If an existing filter is busy,
+ * we have to give up.
+ */
+ for (;;) {
+ i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
+ saved_spec = efx_ef10_filter_entry_spec(table, i);
+
+ if (!saved_spec) {
+ if (ins_index < 0)
+ ins_index = i;
+ } else if (efx_ef10_filter_equal(spec, saved_spec)) {
+ if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) {
+ rc = -EBUSY;
+ goto fail_unlock;
+ }
+ EFX_WARN_ON_PARANOID(saved_spec->flags &
+ EFX_FILTER_FLAG_RX_STACK);
+ if (spec->priority < saved_spec->priority) {
+ rc = -EPERM;
+ goto fail_unlock;
+ }
+ ins_index = i;
+ break;
+ }
+
+ /* Once we reach the maximum search depth, use the
+ * first suitable slot or return -EBUSY if there was
+ * none
+ */
+ if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
+ if (ins_index < 0) {
+ rc = -EBUSY;
+ goto fail_unlock;
+ }
+ break;
+ }
+
+ ++depth;
+ }
+
+ /* Create a software table entry if necessary, and mark it
+ * busy. We might yet fail to insert, but any attempt to
+ * insert a conflicting filter while we're waiting for the
+ * firmware must find the busy entry.
+ */
+ saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
+ if (saved_spec) {
+ replacing = true;
+ } else {
+ saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
+ if (!saved_spec) {
+ rc = -ENOMEM;
+ goto fail_unlock;
+ }
+ *saved_spec = *spec;
+ }
+ efx_ef10_filter_set_entry(table, ins_index, saved_spec,
+ EFX_EF10_FILTER_FLAG_BUSY);
+
+ spin_unlock_bh(&efx->filter_lock);
+
+ /* Pack up the variables needed on completion */
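+	/* Cookie layout (unpacked in efx_ef10_filter_rfs_insert_complete()):
+	 * bit 31 = replacing flag, ins_index from bit 16, dmaq_id in the
+	 * low 16 bits.
+	 */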
+ cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id;
+
+ efx_ef10_filter_push_prep(efx, spec, inbuf,
+ table->entry[ins_index].handle, replacing);
+ efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
+ MC_CMD_FILTER_OP_OUT_LEN,
+ efx_ef10_filter_rfs_insert_complete, cookie);
+
+ return ins_index;
+
+fail_unlock:
+ spin_unlock_bh(&efx->filter_lock);
+ return rc;
+}
+
+static void
+efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie,
+ int rc, efx_dword_t *outbuf,
+ size_t outlen_actual)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ unsigned int ins_index, dmaq_id;
+ struct efx_filter_spec *spec;
+ bool replacing;
+
+ /* Unpack the cookie */
+ replacing = cookie >> 31;
+ ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1);
+ dmaq_id = cookie & 0xffff;
+
+ spin_lock_bh(&efx->filter_lock);
+ spec = efx_ef10_filter_entry_spec(table, ins_index);
+ if (rc == 0) {
+ table->entry[ins_index].handle =
+ MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
+ if (replacing)
+ spec->dmaq_id = dmaq_id;
+ } else if (!replacing) {
+ kfree(spec);
+ spec = NULL;
+ }
+ efx_ef10_filter_set_entry(table, ins_index, spec, 0);
+ spin_unlock_bh(&efx->filter_lock);
+
+ wake_up_all(&table->waitq);
+}
+
+static void
+efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
+ unsigned long filter_idx,
+ int rc, efx_dword_t *outbuf,
+ size_t outlen_actual);
+
+static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
+ unsigned int filter_idx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_filter_spec *spec =
+ efx_ef10_filter_entry_spec(table, filter_idx);
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_FILTER_OP_IN_HANDLE_OFST +
+ MC_CMD_FILTER_OP_IN_HANDLE_LEN);
+
+ if (!spec ||
+ (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) ||
+ spec->priority != EFX_FILTER_PRI_HINT ||
+ !rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
+ flow_id, filter_idx))
+ return false;
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_REMOVE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
+ table->entry[filter_idx].handle);
+ if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0,
+ efx_ef10_filter_rfs_expire_complete, filter_idx))
+ return false;
+
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
+ return true;
+}
+
+static void
+efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
+ unsigned long filter_idx,
+ int rc, efx_dword_t *outbuf,
+ size_t outlen_actual)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_filter_spec *spec =
+ efx_ef10_filter_entry_spec(table, filter_idx);
+
+ spin_lock_bh(&efx->filter_lock);
+ if (rc == 0) {
+ kfree(spec);
+ efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
+ }
+ table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
+ wake_up_all(&table->waitq);
+ spin_unlock_bh(&efx->filter_lock);
+}
+
+#endif /* CONFIG_RFS_ACCEL */
+
+static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags)
+{
+ int match_flags = 0;
+
+#define MAP_FLAG(gen_flag, mcdi_field) { \
+ u32 old_mcdi_flags = mcdi_flags; \
+ mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
+ mcdi_field ## _LBN); \
+ if (mcdi_flags != old_mcdi_flags) \
+ match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \
+ }
+ MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
+ MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
+ MAP_FLAG(REM_HOST, SRC_IP);
+ MAP_FLAG(LOC_HOST, DST_IP);
+ MAP_FLAG(REM_MAC, SRC_MAC);
+ MAP_FLAG(REM_PORT, SRC_PORT);
+ MAP_FLAG(LOC_MAC, DST_MAC);
+ MAP_FLAG(LOC_PORT, DST_PORT);
+ MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
+ MAP_FLAG(INNER_VID, INNER_VLAN);
+ MAP_FLAG(OUTER_VID, OUTER_VLAN);
+ MAP_FLAG(IP_PROTO, IP_PROTO);
+#undef MAP_FLAG
+
+ /* Did we map them all? */
+ if (mcdi_flags)
+ return -EINVAL;
+
+ return match_flags;
+}
+
+static int efx_ef10_filter_table_probe(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
+ unsigned int pd_match_pri, pd_match_count;
+ struct efx_ef10_filter_table *table;
+ size_t outlen;
+ int rc;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ /* Find out which RX filter types are supported, and their priorities */
+ MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
+ MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
+ inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
+ &outlen);
+ if (rc)
+ goto fail;
+ pd_match_count = MCDI_VAR_ARRAY_LEN(
+ outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
+ table->rx_match_count = 0;
+
+ for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
+ u32 mcdi_flags =
+ MCDI_ARRAY_DWORD(
+ outbuf,
+ GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
+ pd_match_pri);
+ rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags);
+ if (rc < 0) {
+ netif_dbg(efx, probe, efx->net_dev,
+ "%s: fw flags %#x pri %u not supported in driver\n",
+ __func__, mcdi_flags, pd_match_pri);
+ } else {
+ netif_dbg(efx, probe, efx->net_dev,
+ "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
+ __func__, mcdi_flags, pd_match_pri,
+ rc, table->rx_match_count);
+ table->rx_match_flags[table->rx_match_count++] = rc;
+ }
+ }
+
+ table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry));
+ if (!table->entry) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ efx->filter_state = table;
+ init_waitqueue_head(&table->waitq);
+ return 0;
+
+fail:
+ kfree(table);
+ return rc;
+}
+
+static void efx_ef10_filter_table_restore(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_filter_spec *spec;
+ unsigned int filter_idx;
+ bool failed = false;
+ int rc;
+
+ if (!nic_data->must_restore_filters)
+ return;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
+ spec = efx_ef10_filter_entry_spec(table, filter_idx);
+ if (!spec)
+ continue;
+
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
+ spin_unlock_bh(&efx->filter_lock);
+
+ rc = efx_ef10_filter_push(efx, spec,
+ &table->entry[filter_idx].handle,
+ false);
+ if (rc)
+ failed = true;
+
+ spin_lock_bh(&efx->filter_lock);
+ if (rc) {
+ kfree(spec);
+ efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
+ } else {
+ table->entry[filter_idx].spec &=
+ ~EFX_EF10_FILTER_FLAG_BUSY;
+ }
+ }
+
+ spin_unlock_bh(&efx->filter_lock);
+
+ if (failed)
+ netif_err(efx, hw, efx->net_dev,
+ "unable to restore all filters\n");
+ else
+ nic_data->must_restore_filters = false;
+}
+
+static void efx_ef10_filter_table_remove(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+ struct efx_filter_spec *spec;
+ unsigned int filter_idx;
+ int rc;
+
+ for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
+ spec = efx_ef10_filter_entry_spec(table, filter_idx);
+ if (!spec)
+ continue;
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ efx_ef10_filter_is_exclusive(spec) ?
+ MC_CMD_FILTER_OP_IN_OP_REMOVE :
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
+ table->entry[filter_idx].handle);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+
+ WARN_ON(rc != 0);
+ kfree(spec);
+ }
+
+ vfree(table->entry);
+ kfree(table);
+}
+
+static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct net_device *net_dev = efx->net_dev;
+ struct efx_filter_spec spec;
+ bool remove_failed = false;
+ struct netdev_hw_addr *uc;
+ struct netdev_hw_addr *mc;
+ unsigned int filter_idx;
+ int i, n, rc;
+
+ if (!efx_dev_registered(efx))
+ return;
+
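+	/* Three phases: mark the currently installed stack filters as old,
+	 * insert or renew filters for the current address lists, then
+	 * remove any filters still marked as old.
+	 */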
+ /* Mark old filters that may need to be removed */
+ spin_lock_bh(&efx->filter_lock);
+ n = table->stack_uc_count < 0 ? 1 : table->stack_uc_count;
+ for (i = 0; i < n; i++) {
+ filter_idx = table->stack_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
+ }
+ n = table->stack_mc_count < 0 ? 1 : table->stack_mc_count;
+ for (i = 0; i < n; i++) {
+ filter_idx = table->stack_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
+ }
+ spin_unlock_bh(&efx->filter_lock);
+
+ /* Copy/convert the address lists; add the primary station
+ * address and broadcast address
+ */
+ netif_addr_lock_bh(net_dev);
+ if (net_dev->flags & IFF_PROMISC ||
+ netdev_uc_count(net_dev) >= EFX_EF10_FILTER_STACK_UC_MAX) {
+ table->stack_uc_count = -1;
+ } else {
+ table->stack_uc_count = 1 + netdev_uc_count(net_dev);
+ memcpy(table->stack_uc_list[0].addr, net_dev->dev_addr,
+ ETH_ALEN);
+ i = 1;
+ netdev_for_each_uc_addr(uc, net_dev) {
+ memcpy(table->stack_uc_list[i].addr,
+ uc->addr, ETH_ALEN);
+ i++;
+ }
+ }
+ if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
+ netdev_mc_count(net_dev) >= EFX_EF10_FILTER_STACK_MC_MAX) {
+ table->stack_mc_count = -1;
+ } else {
+ table->stack_mc_count = 1 + netdev_mc_count(net_dev);
+ eth_broadcast_addr(table->stack_mc_list[0].addr);
+ i = 1;
+ netdev_for_each_mc_addr(mc, net_dev) {
+ memcpy(table->stack_mc_list[i].addr,
+ mc->addr, ETH_ALEN);
+ i++;
+ }
+ }
+ netif_addr_unlock_bh(net_dev);
+
+ /* Insert/renew unicast filters */
+ if (table->stack_uc_count >= 0) {
+ for (i = 0; i < table->stack_uc_count; i++) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
+ EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_STACK,
+ 0);
+ efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
+ table->stack_uc_list[i].addr);
+ rc = efx_ef10_filter_insert(efx, &spec, true);
+ if (rc < 0) {
+ /* Fall back to unicast-promisc */
+ while (i--)
+ efx_ef10_filter_remove_safe(
+ efx, EFX_FILTER_PRI_REQUIRED,
+ table->stack_uc_list[i].id);
+ table->stack_uc_count = -1;
+ break;
+ }
+ table->stack_uc_list[i].id = rc;
+ }
+ }
+ if (table->stack_uc_count < 0) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
+ EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_STACK,
+ 0);
+ efx_filter_set_uc_def(&spec);
+ rc = efx_ef10_filter_insert(efx, &spec, true);
+ if (rc < 0) {
+ WARN_ON(1);
+ table->stack_uc_count = 0;
+ } else {
+ table->stack_uc_list[0].id = rc;
+ }
+ }
+
+ /* Insert/renew multicast filters */
+ if (table->stack_mc_count >= 0) {
+ for (i = 0; i < table->stack_mc_count; i++) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
+ EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_STACK,
+ 0);
+ efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
+ table->stack_mc_list[i].addr);
+ rc = efx_ef10_filter_insert(efx, &spec, true);
+ if (rc < 0) {
+ /* Fall back to multicast-promisc */
+ while (i--)
+ efx_ef10_filter_remove_safe(
+ efx, EFX_FILTER_PRI_REQUIRED,
+ table->stack_mc_list[i].id);
+ table->stack_mc_count = -1;
+ break;
+ }
+ table->stack_mc_list[i].id = rc;
+ }
+ }
+ if (table->stack_mc_count < 0) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
+ EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_STACK,
+ 0);
+ efx_filter_set_mc_def(&spec);
+ rc = efx_ef10_filter_insert(efx, &spec, true);
+ if (rc < 0) {
+ WARN_ON(1);
+ table->stack_mc_count = 0;
+ } else {
+ table->stack_mc_list[0].id = rc;
+ }
+ }
+
+ /* Remove filters that weren't renewed. Since nothing else
+ * changes the STACK_OLD flag or removes these filters, we
+ * don't need to hold the filter_lock while scanning for
+ * these filters.
+ */
+ for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
+ if (ACCESS_ONCE(table->entry[i].spec) &
+ EFX_EF10_FILTER_FLAG_STACK_OLD) {
+ if (efx_ef10_filter_remove_internal(efx,
+ EFX_FILTER_PRI_REQUIRED,
+ i, true) < 0)
+ remove_failed = true;
+ }
+ }
+ WARN_ON(remove_failed);
+}
+
+static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
+{
+ efx_ef10_filter_sync_rx_mode(efx);
+
+ return efx_mcdi_set_mac(efx);
+}
+
+#ifdef CONFIG_SFC_MTD
+
+struct efx_ef10_nvram_type_info {
+ u16 type, type_mask;
+ u8 port;
+ const char *name;
+};
+
+static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
+ { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" },
+ { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" },
+ { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" },
+ { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" },
+ { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" },
+ { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" },
+ { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" },
+ { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" },
+ { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" },
+ { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
+};
+
+static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
+ struct efx_mcdi_mtd_partition *part,
+ unsigned int type)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
+ const struct efx_ef10_nvram_type_info *info;
+ size_t size, erase_size, outlen;
+ bool protected;
+ int rc;
+
+ for (info = efx_ef10_nvram_types; ; info++) {
+ if (info ==
+ efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types))
+ return -ENODEV;
+ if ((type & ~info->type_mask) == info->type)
+ break;
+ }
+ if (info->port != efx_port_num(efx))
+ return -ENODEV;
+
+ rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
+ if (rc)
+ return rc;
+ if (protected)
+ return -ENODEV; /* hide it */
+
+ part->nvram_type = type;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
+ return -EIO;
+ if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) &
+ (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
+ part->fw_subtype = MCDI_DWORD(outbuf,
+ NVRAM_METADATA_OUT_SUBTYPE);
+
+ part->common.dev_type_name = "EF10 NVRAM manager";
+ part->common.type_name = info->name;
+
+ part->common.mtd.type = MTD_NORFLASH;
+ part->common.mtd.flags = MTD_CAP_NORFLASH;
+ part->common.mtd.size = size;
+ part->common.mtd.erasesize = erase_size;
+
+ return 0;
+}
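
The lookup loop above treats type_mask as "don't care" bits when matching a
partition type against the table. A standalone sketch of the predicate
(editorial; the helper name is hypothetical) shows why the PHY entry, with a
mask of 0xff, covers a whole range of partition types:

	static bool efx_ef10_nvram_type_matches(unsigned int type,
					const struct efx_ef10_nvram_type_info *info)
	{
		/* Bits set in type_mask are ignored, so {PHY_MIN, 0xff}
		 * matches every type from PHY_MIN to PHY_MIN + 0xff.
		 */
		return (type & ~info->type_mask) == info->type;
	}
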
+
+static int efx_ef10_mtd_probe(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
+ struct efx_mcdi_mtd_partition *parts;
+ size_t outlen, n_parts_total, i, n_parts;
+ unsigned int type;
+ int rc;
+
+ ASSERT_RTNL();
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
+ return -EIO;
+
+ n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
+ if (n_parts_total >
+ MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID))
+ return -EIO;
+
+ parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+ n_parts = 0;
+ for (i = 0; i < n_parts_total; i++) {
+ type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
+ i);
+ rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type);
+ if (rc == 0)
+ n_parts++;
+ else if (rc != -ENODEV)
+ goto fail;
+ }
+
+ rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
+fail:
+ if (rc)
+ kfree(parts);
+ return rc;
+}
+
+#endif /* CONFIG_SFC_MTD */
+
+static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
+{
+ _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
+}
+
+const struct efx_nic_type efx_hunt_a0_nic_type = {
+ .mem_map_size = efx_ef10_mem_map_size,
+ .probe = efx_ef10_probe,
+ .remove = efx_ef10_remove,
+ .dimension_resources = efx_ef10_dimension_resources,
+ .init = efx_ef10_init_nic,
+ .fini = efx_port_dummy_op_void,
+ .map_reset_reason = efx_mcdi_map_reset_reason,
+ .map_reset_flags = efx_ef10_map_reset_flags,
+ .reset = efx_mcdi_reset,
+ .probe_port = efx_mcdi_port_probe,
+ .remove_port = efx_mcdi_port_remove,
+ .fini_dmaq = efx_ef10_fini_dmaq,
+ .describe_stats = efx_ef10_describe_stats,
+ .update_stats = efx_ef10_update_stats,
+ .start_stats = efx_mcdi_mac_start_stats,
+ .stop_stats = efx_mcdi_mac_stop_stats,
+ .set_id_led = efx_mcdi_set_id_led,
+ .push_irq_moderation = efx_ef10_push_irq_moderation,
+ .reconfigure_mac = efx_ef10_mac_reconfigure,
+ .check_mac_fault = efx_mcdi_mac_check_fault,
+ .reconfigure_port = efx_mcdi_port_reconfigure,
+ .get_wol = efx_ef10_get_wol,
+ .set_wol = efx_ef10_set_wol,
+ .resume_wol = efx_port_dummy_op_void,
+ /* TODO: test_chip */
+ .test_nvram = efx_mcdi_nvram_test_all,
+ .mcdi_request = efx_ef10_mcdi_request,
+ .mcdi_poll_response = efx_ef10_mcdi_poll_response,
+ .mcdi_read_response = efx_ef10_mcdi_read_response,
+ .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
+ .irq_enable_master = efx_port_dummy_op_void,
+ .irq_test_generate = efx_ef10_irq_test_generate,
+ .irq_disable_non_ev = efx_port_dummy_op_void,
+ .irq_handle_msi = efx_ef10_msi_interrupt,
+ .irq_handle_legacy = efx_ef10_legacy_interrupt,
+ .tx_probe = efx_ef10_tx_probe,
+ .tx_init = efx_ef10_tx_init,
+ .tx_remove = efx_ef10_tx_remove,
+ .tx_write = efx_ef10_tx_write,
+ .rx_push_indir_table = efx_ef10_rx_push_indir_table,
+ .rx_probe = efx_ef10_rx_probe,
+ .rx_init = efx_ef10_rx_init,
+ .rx_remove = efx_ef10_rx_remove,
+ .rx_write = efx_ef10_rx_write,
+ .rx_defer_refill = efx_ef10_rx_defer_refill,
+ .ev_probe = efx_ef10_ev_probe,
+ .ev_init = efx_ef10_ev_init,
+ .ev_fini = efx_ef10_ev_fini,
+ .ev_remove = efx_ef10_ev_remove,
+ .ev_process = efx_ef10_ev_process,
+ .ev_read_ack = efx_ef10_ev_read_ack,
+ .ev_test_generate = efx_ef10_ev_test_generate,
+ .filter_table_probe = efx_ef10_filter_table_probe,
+ .filter_table_restore = efx_ef10_filter_table_restore,
+ .filter_table_remove = efx_ef10_filter_table_remove,
+ .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
+ .filter_insert = efx_ef10_filter_insert,
+ .filter_remove_safe = efx_ef10_filter_remove_safe,
+ .filter_get_safe = efx_ef10_filter_get_safe,
+ .filter_clear_rx = efx_ef10_filter_clear_rx,
+ .filter_count_rx_used = efx_ef10_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_insert = efx_ef10_filter_rfs_insert,
+ .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
+#endif
+#ifdef CONFIG_SFC_MTD
+ .mtd_probe = efx_ef10_mtd_probe,
+ .mtd_rename = efx_mcdi_mtd_rename,
+ .mtd_read = efx_mcdi_mtd_read,
+ .mtd_erase = efx_mcdi_mtd_erase,
+ .mtd_write = efx_mcdi_mtd_write,
+ .mtd_sync = efx_mcdi_mtd_sync,
+#endif
+ .ptp_write_host_time = efx_ef10_ptp_write_host_time,
+
+ .revision = EFX_REV_HUNT_A0,
+ .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
+ .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
+ .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
+ .can_rx_scatter = true,
+ .always_rx_scatter = true,
+ .max_interrupt_mode = EFX_INT_MODE_MSIX,
+ .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
+ .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXHASH | NETIF_F_NTUPLE),
+ .mcdi_max_ver = 2,
+ .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
+};
diff --git a/drivers/net/ethernet/sfc/ef10_regs.h b/drivers/net/ethernet/sfc/ef10_regs.h
new file mode 100644
index 000000000000..b3f4e3755fd9
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef10_regs.h
@@ -0,0 +1,415 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2012-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_EF10_REGS_H
+#define EFX_EF10_REGS_H
+
+/* EF10 hardware architecture definitions have a name prefix following
+ * the format:
+ *
+ * E<type>_<min-rev><max-rev>_
+ *
+ * The following <type> strings are used:
+ *
+ *               MMIO register  Host memory structure
+ * -------------------------------------------------------------
+ * Address       R
+ * Bitfield      RF             SF
+ * Enumerator    FE             SE
+ *
+ * <min-rev> is the first revision to which the definition applies:
+ *
+ * D: Huntington A0
+ *
+ * If the definition has been changed or removed in later revisions
+ * then <max-rev> is the last revision to which the definition applies;
+ * otherwise it is "Z".
+ */
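
As a worked example of this convention (editorial, not part of the file):
ERF_DZ_EVQ_RPTR_VLD below is a bitfield ("RF") of an MMIO register, valid
from Huntington A0 ("D") onwards ("Z"). Each _LBN/_WIDTH pair feeds
mask-and-shift code; the driver's own bitfield.h supplies EFX_*_FIELD macros
for this, but a minimal sketch of the idea is:

	static inline u64 ef10_field(u64 val, unsigned int lbn,
				     unsigned int width)
	{
		/* LBN is the least-significant bit number of the field */
		u64 mask = (width < 64) ? (1ULL << width) - 1 : ~0ULL;

		return (val >> lbn) & mask;
	}
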
+
+/**************************************************************************
+ *
+ * EF10 registers and descriptors
+ *
+ **************************************************************************
+ */
+
+/* BIU_HW_REV_ID_REG: */
+#define ER_DZ_BIU_HW_REV_ID 0x00000000
+#define ERF_DZ_HW_REV_ID_LBN 0
+#define ERF_DZ_HW_REV_ID_WIDTH 32
+
+/* BIU_MC_SFT_STATUS_REG: */
+#define ER_DZ_BIU_MC_SFT_STATUS 0x00000010
+#define ER_DZ_BIU_MC_SFT_STATUS_STEP 4
+#define ER_DZ_BIU_MC_SFT_STATUS_ROWS 8
+#define ERF_DZ_MC_SFT_STATUS_LBN 0
+#define ERF_DZ_MC_SFT_STATUS_WIDTH 32
+
+/* BIU_INT_ISR_REG: */
+#define ER_DZ_BIU_INT_ISR 0x00000090
+#define ERF_DZ_ISR_REG_LBN 0
+#define ERF_DZ_ISR_REG_WIDTH 32
+
+/* MC_DB_LWRD_REG: */
+#define ER_DZ_MC_DB_LWRD 0x00000200
+#define ERF_DZ_MC_DOORBELL_L_LBN 0
+#define ERF_DZ_MC_DOORBELL_L_WIDTH 32
+
+/* MC_DB_HWRD_REG: */
+#define ER_DZ_MC_DB_HWRD 0x00000204
+#define ERF_DZ_MC_DOORBELL_H_LBN 0
+#define ERF_DZ_MC_DOORBELL_H_WIDTH 32
+
+/* EVQ_RPTR_REG: */
+#define ER_DZ_EVQ_RPTR 0x00000400
+#define ER_DZ_EVQ_RPTR_STEP 8192
+#define ER_DZ_EVQ_RPTR_ROWS 2048
+#define ERF_DZ_EVQ_RPTR_VLD_LBN 15
+#define ERF_DZ_EVQ_RPTR_VLD_WIDTH 1
+#define ERF_DZ_EVQ_RPTR_LBN 0
+#define ERF_DZ_EVQ_RPTR_WIDTH 15
+
+/* EVQ_TMR_REG: */
+#define ER_DZ_EVQ_TMR 0x00000420
+#define ER_DZ_EVQ_TMR_STEP 8192
+#define ER_DZ_EVQ_TMR_ROWS 2048
+#define ERF_DZ_TC_TIMER_MODE_LBN 14
+#define ERF_DZ_TC_TIMER_MODE_WIDTH 2
+#define ERF_DZ_TC_TIMER_VAL_LBN 0
+#define ERF_DZ_TC_TIMER_VAL_WIDTH 14
+
+/* RX_DESC_UPD_REG: */
+#define ER_DZ_RX_DESC_UPD 0x00000830
+#define ER_DZ_RX_DESC_UPD_STEP 8192
+#define ER_DZ_RX_DESC_UPD_ROWS 2048
+#define ERF_DZ_RX_DESC_WPTR_LBN 0
+#define ERF_DZ_RX_DESC_WPTR_WIDTH 12
+
+/* TX_DESC_UPD_REG: */
+#define ER_DZ_TX_DESC_UPD 0x00000a10
+#define ER_DZ_TX_DESC_UPD_STEP 8192
+#define ER_DZ_TX_DESC_UPD_ROWS 2048
+#define ERF_DZ_RSVD_LBN 76
+#define ERF_DZ_RSVD_WIDTH 20
+#define ERF_DZ_TX_DESC_WPTR_LBN 64
+#define ERF_DZ_TX_DESC_WPTR_WIDTH 12
+#define ERF_DZ_TX_DESC_HWORD_LBN 32
+#define ERF_DZ_TX_DESC_HWORD_WIDTH 32
+#define ERF_DZ_TX_DESC_LWORD_LBN 0
+#define ERF_DZ_TX_DESC_LWORD_WIDTH 32
+
+/* DRIVER_EV */
+#define ESF_DZ_DRV_CODE_LBN 60
+#define ESF_DZ_DRV_CODE_WIDTH 4
+#define ESF_DZ_DRV_SUB_CODE_LBN 56
+#define ESF_DZ_DRV_SUB_CODE_WIDTH 4
+#define ESE_DZ_DRV_TIMER_EV 3
+#define ESE_DZ_DRV_START_UP_EV 2
+#define ESE_DZ_DRV_WAKE_UP_EV 1
+#define ESF_DZ_DRV_SUB_DATA_LBN 0
+#define ESF_DZ_DRV_SUB_DATA_WIDTH 56
+#define ESF_DZ_DRV_EVQ_ID_LBN 0
+#define ESF_DZ_DRV_EVQ_ID_WIDTH 14
+#define ESF_DZ_DRV_TMR_ID_LBN 0
+#define ESF_DZ_DRV_TMR_ID_WIDTH 14
+
+/* EVENT_ENTRY */
+#define ESF_DZ_EV_CODE_LBN 60
+#define ESF_DZ_EV_CODE_WIDTH 4
+#define ESE_DZ_EV_CODE_MCDI_EV 12
+#define ESE_DZ_EV_CODE_DRIVER_EV 5
+#define ESE_DZ_EV_CODE_TX_EV 2
+#define ESE_DZ_EV_CODE_RX_EV 0
+#define ESE_DZ_OTHER other
+#define ESF_DZ_EV_DATA_LBN 0
+#define ESF_DZ_EV_DATA_WIDTH 60
+
+/* MC_EVENT */
+#define ESF_DZ_MC_CODE_LBN 60
+#define ESF_DZ_MC_CODE_WIDTH 4
+#define ESF_DZ_MC_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_MC_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_MC_DROP_EVENT_LBN 58
+#define ESF_DZ_MC_DROP_EVENT_WIDTH 1
+#define ESF_DZ_MC_SOFT_LBN 0
+#define ESF_DZ_MC_SOFT_WIDTH 58
+
+/* RX_EVENT */
+#define ESF_DZ_RX_CODE_LBN 60
+#define ESF_DZ_RX_CODE_WIDTH 4
+#define ESF_DZ_RX_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_RX_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_RX_DROP_EVENT_LBN 58
+#define ESF_DZ_RX_DROP_EVENT_WIDTH 1
+#define ESF_DZ_RX_EV_RSVD2_LBN 54
+#define ESF_DZ_RX_EV_RSVD2_WIDTH 4
+#define ESF_DZ_RX_EV_SOFT2_LBN 52
+#define ESF_DZ_RX_EV_SOFT2_WIDTH 2
+#define ESF_DZ_RX_DSC_PTR_LBITS_LBN 48
+#define ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4
+#define ESF_DZ_RX_L4_CLASS_LBN 45
+#define ESF_DZ_RX_L4_CLASS_WIDTH 3
+#define ESE_DZ_L4_CLASS_RSVD7 7
+#define ESE_DZ_L4_CLASS_RSVD6 6
+#define ESE_DZ_L4_CLASS_RSVD5 5
+#define ESE_DZ_L4_CLASS_RSVD4 4
+#define ESE_DZ_L4_CLASS_RSVD3 3
+#define ESE_DZ_L4_CLASS_UDP 2
+#define ESE_DZ_L4_CLASS_TCP 1
+#define ESE_DZ_L4_CLASS_UNKNOWN 0
+#define ESF_DZ_RX_L3_CLASS_LBN 42
+#define ESF_DZ_RX_L3_CLASS_WIDTH 3
+#define ESE_DZ_L3_CLASS_RSVD7 7
+#define ESE_DZ_L3_CLASS_IP6_FRAG 6
+#define ESE_DZ_L3_CLASS_ARP 5
+#define ESE_DZ_L3_CLASS_IP4_FRAG 4
+#define ESE_DZ_L3_CLASS_FCOE 3
+#define ESE_DZ_L3_CLASS_IP6 2
+#define ESE_DZ_L3_CLASS_IP4 1
+#define ESE_DZ_L3_CLASS_UNKNOWN 0
+#define ESF_DZ_RX_ETH_TAG_CLASS_LBN 39
+#define ESF_DZ_RX_ETH_TAG_CLASS_WIDTH 3
+#define ESE_DZ_ETH_TAG_CLASS_RSVD7 7
+#define ESE_DZ_ETH_TAG_CLASS_RSVD6 6
+#define ESE_DZ_ETH_TAG_CLASS_RSVD5 5
+#define ESE_DZ_ETH_TAG_CLASS_RSVD4 4
+#define ESE_DZ_ETH_TAG_CLASS_RSVD3 3
+#define ESE_DZ_ETH_TAG_CLASS_VLAN2 2
+#define ESE_DZ_ETH_TAG_CLASS_VLAN1 1
+#define ESE_DZ_ETH_TAG_CLASS_NONE 0
+#define ESF_DZ_RX_ETH_BASE_CLASS_LBN 36
+#define ESF_DZ_RX_ETH_BASE_CLASS_WIDTH 3
+#define ESE_DZ_ETH_BASE_CLASS_LLC_SNAP 2
+#define ESE_DZ_ETH_BASE_CLASS_LLC 1
+#define ESE_DZ_ETH_BASE_CLASS_ETH2 0
+#define ESF_DZ_RX_MAC_CLASS_LBN 35
+#define ESF_DZ_RX_MAC_CLASS_WIDTH 1
+#define ESE_DZ_MAC_CLASS_MCAST 1
+#define ESE_DZ_MAC_CLASS_UCAST 0
+#define ESF_DZ_RX_EV_SOFT1_LBN 32
+#define ESF_DZ_RX_EV_SOFT1_WIDTH 3
+#define ESF_DZ_RX_EV_RSVD1_LBN 31
+#define ESF_DZ_RX_EV_RSVD1_WIDTH 1
+#define ESF_DZ_RX_ABORT_LBN 30
+#define ESF_DZ_RX_ABORT_WIDTH 1
+#define ESF_DZ_RX_ECC_ERR_LBN 29
+#define ESF_DZ_RX_ECC_ERR_WIDTH 1
+#define ESF_DZ_RX_CRC1_ERR_LBN 28
+#define ESF_DZ_RX_CRC1_ERR_WIDTH 1
+#define ESF_DZ_RX_CRC0_ERR_LBN 27
+#define ESF_DZ_RX_CRC0_ERR_WIDTH 1
+#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN 26
+#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_WIDTH 1
+#define ESF_DZ_RX_IPCKSUM_ERR_LBN 25
+#define ESF_DZ_RX_IPCKSUM_ERR_WIDTH 1
+#define ESF_DZ_RX_ECRC_ERR_LBN 24
+#define ESF_DZ_RX_ECRC_ERR_WIDTH 1
+#define ESF_DZ_RX_QLABEL_LBN 16
+#define ESF_DZ_RX_QLABEL_WIDTH 5
+#define ESF_DZ_RX_PARSE_INCOMPLETE_LBN 15
+#define ESF_DZ_RX_PARSE_INCOMPLETE_WIDTH 1
+#define ESF_DZ_RX_CONT_LBN 14
+#define ESF_DZ_RX_CONT_WIDTH 1
+#define ESF_DZ_RX_BYTES_LBN 0
+#define ESF_DZ_RX_BYTES_WIDTH 14
+
+/* RX_KER_DESC */
+#define ESF_DZ_RX_KER_RESERVED_LBN 62
+#define ESF_DZ_RX_KER_RESERVED_WIDTH 2
+#define ESF_DZ_RX_KER_BYTE_CNT_LBN 48
+#define ESF_DZ_RX_KER_BYTE_CNT_WIDTH 14
+#define ESF_DZ_RX_KER_BUF_ADDR_LBN 0
+#define ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48
+
+/* RX_USER_DESC */
+#define ESF_DZ_RX_USR_RESERVED_LBN 62
+#define ESF_DZ_RX_USR_RESERVED_WIDTH 2
+#define ESF_DZ_RX_USR_BYTE_CNT_LBN 48
+#define ESF_DZ_RX_USR_BYTE_CNT_WIDTH 14
+#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_LBN 44
+#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_WIDTH 4
+#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
+#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
+#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
+#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
+#define ESF_DZ_RX_USR_BUF_ID_OFFSET_LBN 0
+#define ESF_DZ_RX_USR_BUF_ID_OFFSET_WIDTH 44
+#define ESF_DZ_RX_USR_4KBPS_BUF_ID_LBN 12
+#define ESF_DZ_RX_USR_4KBPS_BUF_ID_WIDTH 32
+#define ESF_DZ_RX_USR_64KBPS_BUF_ID_LBN 16
+#define ESF_DZ_RX_USR_64KBPS_BUF_ID_WIDTH 28
+#define ESF_DZ_RX_USR_1MBPS_BUF_ID_LBN 20
+#define ESF_DZ_RX_USR_1MBPS_BUF_ID_WIDTH 24
+#define ESF_DZ_RX_USR_4MBPS_BUF_ID_LBN 22
+#define ESF_DZ_RX_USR_4MBPS_BUF_ID_WIDTH 22
+#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
+#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
+#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
+#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
+
+/* TX_CSUM_TSTAMP_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_TIMESTAMP_LBN 5
+#define ESF_DZ_TX_TIMESTAMP_WIDTH 1
+#define ESF_DZ_TX_OPTION_CRC_MODE_LBN 2
+#define ESF_DZ_TX_OPTION_CRC_MODE_WIDTH 3
+#define ESE_DZ_TX_OPTION_CRC_FCOIP_MPA 5
+#define ESE_DZ_TX_OPTION_CRC_FCOIP_FCOE 4
+#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR_AND_PYLD 3
+#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR 2
+#define ESE_DZ_TX_OPTION_CRC_FCOE 1
+#define ESE_DZ_TX_OPTION_CRC_OFF 0
+#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_LBN 1
+#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_WIDTH 1
+#define ESF_DZ_TX_OPTION_IP_CSUM_LBN 0
+#define ESF_DZ_TX_OPTION_IP_CSUM_WIDTH 1
+
+/* TX_EVENT */
+#define ESF_DZ_TX_CODE_LBN 60
+#define ESF_DZ_TX_CODE_WIDTH 4
+#define ESF_DZ_TX_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_TX_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_TX_DROP_EVENT_LBN 58
+#define ESF_DZ_TX_DROP_EVENT_WIDTH 1
+#define ESF_DZ_TX_EV_RSVD_LBN 48
+#define ESF_DZ_TX_EV_RSVD_WIDTH 10
+#define ESF_DZ_TX_SOFT2_LBN 32
+#define ESF_DZ_TX_SOFT2_WIDTH 16
+#define ESF_DZ_TX_CAN_MERGE_LBN 31
+#define ESF_DZ_TX_CAN_MERGE_WIDTH 1
+#define ESF_DZ_TX_SOFT1_LBN 24
+#define ESF_DZ_TX_SOFT1_WIDTH 7
+#define ESF_DZ_TX_QLABEL_LBN 16
+#define ESF_DZ_TX_QLABEL_WIDTH 5
+#define ESF_DZ_TX_DESCR_INDX_LBN 0
+#define ESF_DZ_TX_DESCR_INDX_WIDTH 16
+
+/* TX_KER_DESC */
+#define ESF_DZ_TX_KER_TYPE_LBN 63
+#define ESF_DZ_TX_KER_TYPE_WIDTH 1
+#define ESF_DZ_TX_KER_CONT_LBN 62
+#define ESF_DZ_TX_KER_CONT_WIDTH 1
+#define ESF_DZ_TX_KER_BYTE_CNT_LBN 48
+#define ESF_DZ_TX_KER_BYTE_CNT_WIDTH 14
+#define ESF_DZ_TX_KER_BUF_ADDR_LBN 0
+#define ESF_DZ_TX_KER_BUF_ADDR_WIDTH 48
+
+/* TX_PIO_DESC */
+#define ESF_DZ_TX_PIO_TYPE_LBN 63
+#define ESF_DZ_TX_PIO_TYPE_WIDTH 1
+#define ESF_DZ_TX_PIO_OPT_LBN 60
+#define ESF_DZ_TX_PIO_OPT_WIDTH 3
+#define ESF_DZ_TX_PIO_CONT_LBN 59
+#define ESF_DZ_TX_PIO_CONT_WIDTH 1
+#define ESF_DZ_TX_PIO_BYTE_CNT_LBN 32
+#define ESF_DZ_TX_PIO_BYTE_CNT_WIDTH 12
+#define ESF_DZ_TX_PIO_BUF_ADDR_LBN 0
+#define ESF_DZ_TX_PIO_BUF_ADDR_WIDTH 12
+
+/* TX_TSO_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48
+#define ESF_DZ_TX_TSO_TCP_FLAGS_WIDTH 8
+#define ESF_DZ_TX_TSO_IP_ID_LBN 32
+#define ESF_DZ_TX_TSO_IP_ID_WIDTH 16
+#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
+#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
+
+/* TX_USER_DESC */
+#define ESF_DZ_TX_USR_TYPE_LBN 63
+#define ESF_DZ_TX_USR_TYPE_WIDTH 1
+#define ESF_DZ_TX_USR_CONT_LBN 62
+#define ESF_DZ_TX_USR_CONT_WIDTH 1
+#define ESF_DZ_TX_USR_BYTE_CNT_LBN 48
+#define ESF_DZ_TX_USR_BYTE_CNT_WIDTH 14
+#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_LBN 44
+#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_WIDTH 4
+#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
+#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
+#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
+#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
+#define ESF_DZ_TX_USR_BUF_ID_OFFSET_LBN 0
+#define ESF_DZ_TX_USR_BUF_ID_OFFSET_WIDTH 44
+#define ESF_DZ_TX_USR_4KBPS_BUF_ID_LBN 12
+#define ESF_DZ_TX_USR_4KBPS_BUF_ID_WIDTH 32
+#define ESF_DZ_TX_USR_64KBPS_BUF_ID_LBN 16
+#define ESF_DZ_TX_USR_64KBPS_BUF_ID_WIDTH 28
+#define ESF_DZ_TX_USR_1MBPS_BUF_ID_LBN 20
+#define ESF_DZ_TX_USR_1MBPS_BUF_ID_WIDTH 24
+#define ESF_DZ_TX_USR_4MBPS_BUF_ID_LBN 22
+#define ESF_DZ_TX_USR_4MBPS_BUF_ID_WIDTH 22
+#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
+#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
+#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
+#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
+/*************************************************************************/
+
+/* TX_DESC_UPD_REG: Transmit descriptor update register.
+ * We may write just one dword of these registers.
+ */
+#define ER_DZ_TX_DESC_UPD_DWORD (ER_DZ_TX_DESC_UPD + 2 * 4)
+#define ERF_DZ_TX_DESC_WPTR_DWORD_LBN (ERF_DZ_TX_DESC_WPTR_LBN - 2 * 32)
+#define ERF_DZ_TX_DESC_WPTR_DWORD_WIDTH ERF_DZ_TX_DESC_WPTR_WIDTH
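
The full TX_DESC_UPD register is wider than 64 bits and the write pointer
sits at bits 64..95, so "+ 2 * 4" selects the third dword by byte offset and
"- 2 * 32" rebases the field's bit number into that dword. A sketch of the
resulting doorbell write (assumed helper name; ordering/barrier concerns
ignored):

	static void ef10_tx_push_wptr(void __iomem *membase,
				      unsigned int queue, unsigned int wptr)
	{
		/* One 32-bit write stands in for a full-width update */
		writel(wptr << ERF_DZ_TX_DESC_WPTR_DWORD_LBN,
		       membase + ER_DZ_TX_DESC_UPD_DWORD +
		       queue * ER_DZ_TX_DESC_UPD_STEP);
	}
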
+
+/* The workaround for bug 35388 requires multiplexing writes through
+ * the TX_DESC_UPD_DWORD address.
+ * TX_DESC_UPD: 0ppppppppppp (bit 11 lost)
+ * EVQ_RPTR: 1000hhhhhhhh, 1001llllllll (split into high and low bits)
+ * EVQ_TMR: 11mmvvvvvvvv (bits 8:13 of value lost)
+ */
+#define ER_DD_EVQ_INDIRECT ER_DZ_TX_DESC_UPD_DWORD
+#define ERF_DD_EVQ_IND_RPTR_FLAGS_LBN 8
+#define ERF_DD_EVQ_IND_RPTR_FLAGS_WIDTH 4
+#define EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH 8
+#define EFE_DD_EVQ_IND_RPTR_FLAGS_LOW 9
+#define ERF_DD_EVQ_IND_RPTR_LBN 0
+#define ERF_DD_EVQ_IND_RPTR_WIDTH 8
+#define ERF_DD_EVQ_IND_TIMER_FLAGS_LBN 10
+#define ERF_DD_EVQ_IND_TIMER_FLAGS_WIDTH 2
+#define EFE_DD_EVQ_IND_TIMER_FLAGS 3
+#define ERF_DD_EVQ_IND_TIMER_MODE_LBN 8
+#define ERF_DD_EVQ_IND_TIMER_MODE_WIDTH 2
+#define ERF_DD_EVQ_IND_TIMER_VAL_LBN 0
+#define ERF_DD_EVQ_IND_TIMER_VAL_WIDTH 8
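
A sketch of driving the encoding above for an event-queue read-pointer
update (simplified: the real driver builds these values with its
EFX_POPULATE_DWORD macros and applies per-queue register stepping):

	static void ef10_evq_rptr_indirect(void __iomem *membase,
					   unsigned int rptr)
	{
		/* High 8 bits first (flags 1000), then low 8 (flags 1001) */
		writel((EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH <<
			ERF_DD_EVQ_IND_RPTR_FLAGS_LBN) | ((rptr >> 8) & 0xff),
		       membase + ER_DD_EVQ_INDIRECT);
		writel((EFE_DD_EVQ_IND_RPTR_FLAGS_LOW <<
			ERF_DD_EVQ_IND_RPTR_FLAGS_LBN) | (rptr & 0xff),
		       membase + ER_DD_EVQ_INDIRECT);
	}
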
+
+/* TX_PIOBUF
+ * PIO buffer aperture (paged)
+ */
+#define ER_DZ_TX_PIOBUF 4096
+#define ER_DZ_TX_PIOBUF_SIZE 2048
+
+/* RX packet prefix */
+#define ES_DZ_RX_PREFIX_HASH_OFST 0
+#define ES_DZ_RX_PREFIX_VLAN1_OFST 4
+#define ES_DZ_RX_PREFIX_VLAN2_OFST 6
+#define ES_DZ_RX_PREFIX_PKTLEN_OFST 8
+#define ES_DZ_RX_PREFIX_TSTAMP_OFST 10
+#define ES_DZ_RX_PREFIX_SIZE 14
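
Hardware prepends this 14-byte prefix to every received packet; all fields
are little-endian. A sketch of pulling two of them out (illustrative helpers
only; the driver itself reads the hash via efx->rx_packet_hash_offset):

	static u32 ef10_rx_prefix_hash(const u8 *prefix)
	{
		return le32_to_cpup((const __le32 *)
				    (prefix + ES_DZ_RX_PREFIX_HASH_OFST));
	}

	static u16 ef10_rx_prefix_pktlen(const u8 *prefix)
	{
		return le16_to_cpup((const __le16 *)
				    (prefix + ES_DZ_RX_PREFIX_PKTLEN_OFST));
	}
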
+
+#endif /* EFX_EF10_REGS_H */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index c72968840f1a..07c9bc4c61bc 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2011 Solarflare Communications Inc.
+ * Copyright 2005-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -17,7 +17,6 @@
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
-#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
@@ -81,8 +80,7 @@ const char *const efx_reset_type_names[] = {
[RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
[RESET_TYPE_INT_ERROR] = "INT_ERROR",
[RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
- [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
- [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
+ [RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
[RESET_TYPE_TX_SKIP] = "TX_SKIP",
[RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
};
@@ -191,8 +189,8 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
*
*************************************************************************/
-static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq);
-static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq);
+static int efx_soft_enable_interrupts(struct efx_nic *efx);
+static void efx_soft_disable_interrupts(struct efx_nic *efx);
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_channels(struct efx_nic *efx);
static const struct efx_channel_type efx_default_channel_type;
@@ -248,30 +246,12 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
efx_channel_get_rx_queue(channel);
efx_rx_flush_packet(channel);
- if (rx_queue->enabled)
- efx_fast_push_rx_descriptors(rx_queue);
+ efx_fast_push_rx_descriptors(rx_queue);
}
return spent;
}
-/* Mark channel as finished processing
- *
- * Note that since we will not receive further interrupts for this
- * channel before we finish processing and call the eventq_read_ack()
- * method, there is no need to use the interrupt hold-off timers.
- */
-static inline void efx_channel_processed(struct efx_channel *channel)
-{
- /* The interrupt handler for this channel may set work_pending
- * as soon as we acknowledge the events we've seen. Make sure
- * it's cleared before then. */
- channel->work_pending = false;
- smp_wmb();
-
- efx_nic_eventq_read_ack(channel);
-}
-
/* NAPI poll handler
*
* NAPI guarantees serialisation of polls of the same device, which
@@ -316,58 +296,16 @@ static int efx_poll(struct napi_struct *napi, int budget)
/* There is no race here; although napi_disable() will
* only wait for napi_complete(), this isn't a problem
- * since efx_channel_processed() will have no effect if
+ * since efx_nic_eventq_read_ack() will have no effect if
* interrupts have already been disabled.
*/
napi_complete(napi);
- efx_channel_processed(channel);
+ efx_nic_eventq_read_ack(channel);
}
return spent;
}
-/* Process the eventq of the specified channel immediately on this CPU
- *
- * Disable hardware generated interrupts, wait for any existing
- * processing to finish, then directly poll (and ack) the eventq.
- * Finally reenable NAPI and interrupts.
- *
- * This is for use only during a loopback self-test. It must not
- * deliver any packets up the stack as this can result in deadlock.
- */
-void efx_process_channel_now(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
-
- BUG_ON(channel->channel >= efx->n_channels);
- BUG_ON(!channel->enabled);
- BUG_ON(!efx->loopback_selftest);
-
- /* Disable interrupts and wait for ISRs to complete */
- efx_nic_disable_interrupts(efx);
- if (efx->legacy_irq) {
- synchronize_irq(efx->legacy_irq);
- efx->legacy_irq_enabled = false;
- }
- if (channel->irq)
- synchronize_irq(channel->irq);
-
- /* Wait for any NAPI processing to complete */
- napi_disable(&channel->napi_str);
-
- /* Poll the channel */
- efx_process_channel(channel, channel->eventq_mask + 1);
-
- /* Ack the eventq. This may cause an interrupt to be generated
- * when they are reenabled */
- efx_channel_processed(channel);
-
- napi_enable(&channel->napi_str);
- if (efx->legacy_irq)
- efx->legacy_irq_enabled = true;
- efx_nic_enable_interrupts(efx);
-}
-
/* Create event queue
* Event queue memory allocations are done only once. If the channel
* is reset, the memory buffer will be reused; this guards against
@@ -391,14 +329,23 @@ static int efx_probe_eventq(struct efx_channel *channel)
}
/* Prepare channel's event queue */
-static void efx_init_eventq(struct efx_channel *channel)
+static int efx_init_eventq(struct efx_channel *channel)
{
- netif_dbg(channel->efx, drv, channel->efx->net_dev,
- "chan %d init event queue\n", channel->channel);
+ struct efx_nic *efx = channel->efx;
+ int rc;
- channel->eventq_read_ptr = 0;
+ EFX_WARN_ON_PARANOID(channel->eventq_init);
- efx_nic_init_eventq(channel);
+ netif_dbg(efx, drv, efx->net_dev,
+ "chan %d init event queue\n", channel->channel);
+
+ rc = efx_nic_init_eventq(channel);
+ if (rc == 0) {
+ efx->type->push_irq_moderation(channel);
+ channel->eventq_read_ptr = 0;
+ channel->eventq_init = true;
+ }
+ return rc;
}
/* Enable event queue processing and NAPI */
@@ -407,11 +354,7 @@ static void efx_start_eventq(struct efx_channel *channel)
netif_dbg(channel->efx, ifup, channel->efx->net_dev,
"chan %d start event queue\n", channel->channel);
- /* The interrupt handler for this channel may set work_pending
- * as soon as we enable it. Make sure it's cleared before
- * then. Similarly, make sure it sees the enabled flag set.
- */
- channel->work_pending = false;
+ /* Make sure the NAPI handler sees the enabled flag set */
channel->enabled = true;
smp_wmb();
@@ -431,10 +374,14 @@ static void efx_stop_eventq(struct efx_channel *channel)
static void efx_fini_eventq(struct efx_channel *channel)
{
+ if (!channel->eventq_init)
+ return;
+
netif_dbg(channel->efx, drv, channel->efx->net_dev,
"chan %d fini event queue\n", channel->channel);
efx_nic_fini_eventq(channel);
+ channel->eventq_init = false;
}
static void efx_remove_eventq(struct efx_channel *channel)
@@ -583,8 +530,8 @@ static void efx_set_channel_names(struct efx_nic *efx)
efx_for_each_channel(channel, efx)
channel->type->get_name(channel,
- efx->channel_name[channel->channel],
- sizeof(efx->channel_name[0]));
+ efx->msi_context[channel->channel].name,
+ sizeof(efx->msi_context[0].name));
}
static int efx_probe_channels(struct efx_nic *efx)
@@ -634,13 +581,13 @@ static void efx_start_datapath(struct efx_nic *efx)
* support the current MTU, including padding for header
* alignment and overruns.
*/
- efx->rx_dma_len = (efx->type->rx_buffer_hash_size +
+ efx->rx_dma_len = (efx->rx_prefix_size +
EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
efx->type->rx_buffer_padding);
rx_buf_len = (sizeof(struct efx_rx_page_state) +
NET_IP_ALIGN + efx->rx_dma_len);
if (rx_buf_len <= PAGE_SIZE) {
- efx->rx_scatter = false;
+ efx->rx_scatter = efx->type->always_rx_scatter;
efx->rx_buffer_order = 0;
} else if (efx->type->can_rx_scatter) {
BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
@@ -668,9 +615,9 @@ static void efx_start_datapath(struct efx_nic *efx)
efx->rx_dma_len, efx->rx_page_buf_step,
efx->rx_bufs_per_page, efx->rx_pages_per_batch);
- /* RX filters also have scatter-enabled flags */
+ /* RX filters may also have scatter-enabled flags */
if (efx->rx_scatter != old_rx_scatter)
- efx_filter_update_rx_scatter(efx);
+ efx->type->filter_update_rx_scatter(efx);
/* We must keep at least one descriptor in a TX ring empty.
* We could avoid this when the queue size does not exactly
@@ -684,11 +631,14 @@ static void efx_start_datapath(struct efx_nic *efx)
/* Initialise the channels */
efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
efx_init_tx_queue(tx_queue);
+ atomic_inc(&efx->active_queues);
+ }
efx_for_each_channel_rx_queue(rx_queue, channel) {
efx_init_rx_queue(rx_queue);
+ atomic_inc(&efx->active_queues);
efx_nic_generate_fill_event(rx_queue);
}
@@ -704,30 +654,15 @@ static void efx_stop_datapath(struct efx_nic *efx)
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
- struct pci_dev *dev = efx->pci_dev;
int rc;
EFX_ASSERT_RESET_SERIALISED(efx);
BUG_ON(efx->port_enabled);
- /* Only perform flush if dma is enabled */
- if (dev->is_busmaster && efx->state != STATE_RECOVERY) {
- rc = efx_nic_flush_queues(efx);
-
- if (rc && EFX_WORKAROUND_7803(efx)) {
- /* Schedule a reset to recover from the flush failure. The
- * descriptor caches reference memory we're about to free,
- * but falcon_reconfigure_mac_wrapper() won't reconnect
- * the MACs because of the pending reset. */
- netif_err(efx, drv, efx->net_dev,
- "Resetting to recover from flush failure\n");
- efx_schedule_reset(efx, RESET_TYPE_ALL);
- } else if (rc) {
- netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
- } else {
- netif_dbg(efx, drv, efx->net_dev,
- "successfully flushed all queues\n");
- }
+ /* Stop RX refill */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ rx_queue->refill_enabled = false;
}
efx_for_each_channel(channel, efx) {
@@ -741,7 +676,26 @@ static void efx_stop_datapath(struct efx_nic *efx)
efx_stop_eventq(channel);
efx_start_eventq(channel);
}
+ }
+
+ rc = efx->type->fini_dmaq(efx);
+ if (rc && EFX_WORKAROUND_7803(efx)) {
+ /* Schedule a reset to recover from the flush failure. The
+ * descriptor caches reference memory we're about to free,
+ * but falcon_reconfigure_mac_wrapper() won't reconnect
+ * the MACs because of the pending reset.
+ */
+ netif_err(efx, drv, efx->net_dev,
+ "Resetting to recover from flush failure\n");
+ efx_schedule_reset(efx, RESET_TYPE_ALL);
+ } else if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
+ } else {
+ netif_dbg(efx, drv, efx->net_dev,
+ "successfully flushed all queues\n");
+ }
+ efx_for_each_channel(channel, efx) {
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_fini_rx_queue(rx_queue);
efx_for_each_possible_channel_tx_queue(tx_queue, channel)
@@ -779,7 +733,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
u32 old_rxq_entries, old_txq_entries;
unsigned i, next_buffer_table = 0;
- int rc;
+ int rc, rc2;
rc = efx_check_disabled(efx);
if (rc)
@@ -809,7 +763,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
efx_device_detach_sync(efx);
efx_stop_all(efx);
- efx_stop_interrupts(efx, true);
+ efx_soft_disable_interrupts(efx);
/* Clone channels (where possible) */
memset(other_channel, 0, sizeof(other_channel));
@@ -859,9 +813,16 @@ out:
}
}
- efx_start_interrupts(efx, true);
- efx_start_all(efx);
- netif_device_attach(efx->net_dev);
+ rc2 = efx_soft_enable_interrupts(efx);
+ if (rc2) {
+ rc = rc ? rc : rc2;
+ netif_err(efx, drv, efx->net_dev,
+ "unable to restart interrupts on channel reallocation\n");
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ } else {
+ efx_start_all(efx);
+ netif_device_attach(efx->net_dev);
+ }
return rc;
rollback:
@@ -931,10 +892,9 @@ void efx_link_status_changed(struct efx_nic *efx)
/* Status message for kernel log */
if (link_state->up)
netif_info(efx, link, efx->net_dev,
- "link up at %uMbps %s-duplex (MTU %d)%s\n",
+ "link up at %uMbps %s-duplex (MTU %d)\n",
link_state->speed, link_state->fd ? "full" : "half",
- efx->net_dev->mtu,
- (efx->promiscuous ? " [PROMISC]" : ""));
+ efx->net_dev->mtu);
else
netif_info(efx, link, efx->net_dev, "link down\n");
}
@@ -983,10 +943,6 @@ int __efx_reconfigure_port(struct efx_nic *efx)
WARN_ON(!mutex_is_locked(&efx->mac_lock));
- /* Serialise the promiscuous flag with efx_set_rx_mode. */
- netif_addr_lock_bh(efx->net_dev);
- netif_addr_unlock_bh(efx->net_dev);
-
/* Disable PHY transmit in mac level loopbacks */
phy_mode = efx->phy_mode;
if (LOOPBACK_INTERNAL(efx))
@@ -1144,6 +1100,7 @@ static int efx_init_io(struct efx_nic *efx)
{
struct pci_dev *pci_dev = efx->pci_dev;
dma_addr_t dma_mask = efx->type->max_dma_mask;
+ unsigned int mem_map_size = efx->type->mem_map_size(efx);
int rc;
netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
@@ -1196,20 +1153,18 @@ static int efx_init_io(struct efx_nic *efx)
rc = -EIO;
goto fail3;
}
- efx->membase = ioremap_nocache(efx->membase_phys,
- efx->type->mem_map_size);
+ efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
if (!efx->membase) {
netif_err(efx, probe, efx->net_dev,
"could not map memory BAR at %llx+%x\n",
- (unsigned long long)efx->membase_phys,
- efx->type->mem_map_size);
+ (unsigned long long)efx->membase_phys, mem_map_size);
rc = -ENOMEM;
goto fail4;
}
netif_dbg(efx, probe, efx->net_dev,
"memory BAR at %llx+%x (virtual %p)\n",
- (unsigned long long)efx->membase_phys,
- efx->type->mem_map_size, efx->membase);
+ (unsigned long long)efx->membase_phys, mem_map_size,
+ efx->membase);
return 0;
@@ -1288,8 +1243,6 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
*/
static int efx_probe_interrupts(struct efx_nic *efx)
{
- unsigned int max_channels =
- min(efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
unsigned int extra_channels = 0;
unsigned int i, j;
int rc;
@@ -1306,7 +1259,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
if (separate_tx_channels)
n_channels *= 2;
n_channels += extra_channels;
- n_channels = min(n_channels, max_channels);
+ n_channels = min(n_channels, efx->max_channels);
for (i = 0; i < n_channels; i++)
xentries[i].entry = i;
@@ -1392,31 +1345,42 @@ static int efx_probe_interrupts(struct efx_nic *efx)
return 0;
}
-/* Enable interrupts, then probe and start the event queues */
-static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
+static int efx_soft_enable_interrupts(struct efx_nic *efx)
{
- struct efx_channel *channel;
+ struct efx_channel *channel, *end_channel;
+ int rc;
BUG_ON(efx->state == STATE_DISABLED);
- if (efx->eeh_disabled_legacy_irq) {
- enable_irq(efx->legacy_irq);
- efx->eeh_disabled_legacy_irq = false;
- }
- if (efx->legacy_irq)
- efx->legacy_irq_enabled = true;
- efx_nic_enable_interrupts(efx);
+ efx->irq_soft_enabled = true;
+ smp_wmb();
efx_for_each_channel(channel, efx) {
- if (!channel->type->keep_eventq || !may_keep_eventq)
- efx_init_eventq(channel);
+ if (!channel->type->keep_eventq) {
+ rc = efx_init_eventq(channel);
+ if (rc)
+ goto fail;
+ }
efx_start_eventq(channel);
}
efx_mcdi_mode_event(efx);
+
+ return 0;
+fail:
+ end_channel = channel;
+ efx_for_each_channel(channel, efx) {
+ if (channel == end_channel)
+ break;
+ efx_stop_eventq(channel);
+ if (!channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ return rc;
}
-static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
+static void efx_soft_disable_interrupts(struct efx_nic *efx)
{
struct efx_channel *channel;
@@ -1425,20 +1389,79 @@ static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
efx_mcdi_mode_poll(efx);
- efx_nic_disable_interrupts(efx);
- if (efx->legacy_irq) {
+ efx->irq_soft_enabled = false;
+ smp_wmb();
+
+ if (efx->legacy_irq)
synchronize_irq(efx->legacy_irq);
- efx->legacy_irq_enabled = false;
- }
efx_for_each_channel(channel, efx) {
if (channel->irq)
synchronize_irq(channel->irq);
efx_stop_eventq(channel);
- if (!channel->type->keep_eventq || !may_keep_eventq)
+ if (!channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ /* Flush the asynchronous MCDI request queue */
+ efx_mcdi_flush_async(efx);
+}
+
+static int efx_enable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel, *end_channel;
+ int rc;
+
+ BUG_ON(efx->state == STATE_DISABLED);
+
+ if (efx->eeh_disabled_legacy_irq) {
+ enable_irq(efx->legacy_irq);
+ efx->eeh_disabled_legacy_irq = false;
+ }
+
+ efx->type->irq_enable_master(efx);
+
+ efx_for_each_channel(channel, efx) {
+ if (channel->type->keep_eventq) {
+ rc = efx_init_eventq(channel);
+ if (rc)
+ goto fail;
+ }
+ }
+
+ rc = efx_soft_enable_interrupts(efx);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ end_channel = channel;
+ efx_for_each_channel(channel, efx) {
+ if (channel == end_channel)
+ break;
+ if (channel->type->keep_eventq)
efx_fini_eventq(channel);
}
+
+ efx->type->irq_disable_non_ev(efx);
+
+ return rc;
+}
+
+static void efx_disable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_soft_disable_interrupts(efx);
+
+ efx_for_each_channel(channel, efx) {
+ if (channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ efx->type->irq_disable_non_ev(efx);
}
static void efx_remove_interrupts(struct efx_nic *efx)
@@ -1495,9 +1518,11 @@ static int efx_probe_nic(struct efx_nic *efx)
* in MSI-X interrupts. */
rc = efx_probe_interrupts(efx);
if (rc)
- goto fail;
+ goto fail1;
- efx->type->dimension_resources(efx);
+ rc = efx->type->dimension_resources(efx);
+ if (rc)
+ goto fail2;
if (efx->n_channels > 1)
get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
@@ -1515,7 +1540,9 @@ static int efx_probe_nic(struct efx_nic *efx)
return 0;
-fail:
+fail2:
+ efx_remove_interrupts(efx);
+fail1:
efx->type->remove(efx);
return rc;
}
@@ -1528,6 +1555,44 @@ static void efx_remove_nic(struct efx_nic *efx)
efx->type->remove(efx);
}
+static int efx_probe_filters(struct efx_nic *efx)
+{
+ int rc;
+
+ spin_lock_init(&efx->filter_lock);
+
+ rc = efx->type->filter_table_probe(efx);
+ if (rc)
+ return rc;
+
+#ifdef CONFIG_RFS_ACCEL
+ if (efx->type->offload_features & NETIF_F_NTUPLE) {
+ efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters,
+ sizeof(*efx->rps_flow_id),
+ GFP_KERNEL);
+ if (!efx->rps_flow_id) {
+ efx->type->filter_table_remove(efx);
+ return -ENOMEM;
+ }
+ }
+#endif
+
+ return 0;
+}
+
+static void efx_remove_filters(struct efx_nic *efx)
+{
+#ifdef CONFIG_RFS_ACCEL
+ kfree(efx->rps_flow_id);
+#endif
+ efx->type->filter_table_remove(efx);
+}
+
+static void efx_restore_filters(struct efx_nic *efx)
+{
+ efx->type->filter_table_restore(efx);
+}
+
/**************************************************************************
*
* NIC startup/shutdown
@@ -1917,34 +1982,9 @@ static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev,
struct rtnl_link_stats64 *stats)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_mac_stats *mac_stats = &efx->mac_stats;
spin_lock_bh(&efx->stats_lock);
-
- efx->type->update_stats(efx);
-
- stats->rx_packets = mac_stats->rx_packets;
- stats->tx_packets = mac_stats->tx_packets;
- stats->rx_bytes = mac_stats->rx_bytes;
- stats->tx_bytes = mac_stats->tx_bytes;
- stats->rx_dropped = efx->n_rx_nodesc_drop_cnt;
- stats->multicast = mac_stats->rx_multicast;
- stats->collisions = mac_stats->tx_collision;
- stats->rx_length_errors = (mac_stats->rx_gtjumbo +
- mac_stats->rx_length_error);
- stats->rx_crc_errors = mac_stats->rx_bad;
- stats->rx_frame_errors = mac_stats->rx_align_error;
- stats->rx_fifo_errors = mac_stats->rx_overflow;
- stats->rx_missed_errors = mac_stats->rx_missed;
- stats->tx_window_errors = mac_stats->tx_late_collision;
-
- stats->rx_errors = (stats->rx_length_errors +
- stats->rx_crc_errors +
- stats->rx_frame_errors +
- mac_stats->rx_symbol_error);
- stats->tx_errors = (stats->tx_window_errors +
- mac_stats->tx_bad);
-
+ efx->type->update_stats(efx, NULL, stats);
spin_unlock_bh(&efx->stats_lock);
return stats;
@@ -2018,30 +2058,6 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
static void efx_set_rx_mode(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct netdev_hw_addr *ha;
- union efx_multicast_hash *mc_hash = &efx->multicast_hash;
- u32 crc;
- int bit;
-
- efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);
-
- /* Build multicast hash table */
- if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
- memset(mc_hash, 0xff, sizeof(*mc_hash));
- } else {
- memset(mc_hash, 0x00, sizeof(*mc_hash));
- netdev_for_each_mc_addr(ha, net_dev) {
- crc = ether_crc_le(ETH_ALEN, ha->addr);
- bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
- __set_bit_le(bit, mc_hash);
- }
-
- /* Broadcast packets go through the multicast hash filter.
- * ether_crc_le() of the broadcast address is 0xbe2612ff
- * so we always add bit 0xff to the mask.
- */
- __set_bit_le(0xff, mc_hash);
- }
if (efx->port_enabled)
queue_work(efx->workqueue, &efx->mac_work);
@@ -2059,7 +2075,7 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
return 0;
}
-static const struct net_device_ops efx_netdev_ops = {
+static const struct net_device_ops efx_farch_netdev_ops = {
.ndo_open = efx_net_open,
.ndo_stop = efx_net_stop,
.ndo_get_stats64 = efx_net_stats,
@@ -2086,6 +2102,26 @@ static const struct net_device_ops efx_netdev_ops = {
#endif
};
+static const struct net_device_ops efx_ef10_netdev_ops = {
+ .ndo_open = efx_net_open,
+ .ndo_stop = efx_net_stop,
+ .ndo_get_stats64 = efx_net_stats,
+ .ndo_tx_timeout = efx_watchdog,
+ .ndo_start_xmit = efx_hard_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = efx_ioctl,
+ .ndo_change_mtu = efx_change_mtu,
+ .ndo_set_mac_address = efx_set_mac_address,
+ .ndo_set_rx_mode = efx_set_rx_mode,
+ .ndo_set_features = efx_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = efx_netpoll,
+#endif
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = efx_filter_rfs,
+#endif
+};
+
static void efx_update_name(struct efx_nic *efx)
{
strcpy(efx->name, efx->net_dev->name);
@@ -2098,7 +2134,8 @@ static int efx_netdev_event(struct notifier_block *this,
{
struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
- if (net_dev->netdev_ops == &efx_netdev_ops &&
+ if ((net_dev->netdev_ops == &efx_farch_netdev_ops ||
+ net_dev->netdev_ops == &efx_ef10_netdev_ops) &&
event == NETDEV_CHANGENAME)
efx_update_name(netdev_priv(net_dev));
@@ -2125,7 +2162,12 @@ static int efx_register_netdev(struct efx_nic *efx)
net_dev->watchdog_timeo = 5 * HZ;
net_dev->irq = efx->pci_dev->irq;
- net_dev->netdev_ops = &efx_netdev_ops;
+ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
+ net_dev->netdev_ops = &efx_ef10_netdev_ops;
+ net_dev->priv_flags |= IFF_UNICAST_FLT;
+ } else {
+ net_dev->netdev_ops = &efx_farch_netdev_ops;
+ }
SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
@@ -2185,22 +2227,11 @@ fail_locked:
static void efx_unregister_netdev(struct efx_nic *efx)
{
- struct efx_channel *channel;
- struct efx_tx_queue *tx_queue;
-
if (!efx->net_dev)
return;
BUG_ON(netdev_priv(efx->net_dev) != efx);
- /* Free up any skbs still remaining. This has to happen before
- * we try to unregister the netdev as running their destructors
- * may be needed to get the device ref. count to 0. */
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel)
- efx_release_tx_buffers(tx_queue);
- }
-
strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -2223,7 +2254,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
EFX_ASSERT_RESET_SERIALISED(efx);
efx_stop_all(efx);
- efx_stop_interrupts(efx, false);
+ efx_disable_interrupts(efx);
mutex_lock(&efx->mac_lock);
if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
@@ -2260,9 +2291,9 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
"could not restore PHY settings\n");
}
- efx->type->reconfigure_mac(efx);
-
- efx_start_interrupts(efx, false);
+ rc = efx_enable_interrupts(efx);
+ if (rc)
+ goto fail;
efx_restore_filters(efx);
efx_sriov_reset(efx);
@@ -2458,6 +2489,8 @@ static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
.driver_data = (unsigned long) &siena_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */
.driver_data = (unsigned long) &siena_a0_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */
+ .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
{0} /* end of list */
};
@@ -2516,6 +2549,9 @@ static int efx_init_struct(struct efx_nic *efx,
strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
efx->net_dev = net_dev;
+ efx->rx_prefix_size = efx->type->rx_prefix_size;
+ efx->rx_packet_hash_offset =
+ efx->type->rx_hash_offset - efx->type->rx_prefix_size;
spin_lock_init(&efx->stats_lock);
mutex_init(&efx->mac_lock);
efx->phy_op = &efx_dummy_phy_operations;
@@ -2527,10 +2563,10 @@ static int efx_init_struct(struct efx_nic *efx,
efx->channel[i] = efx_alloc_channel(efx, i, NULL);
if (!efx->channel[i])
goto fail;
+ efx->msi_context[i].efx = efx;
+ efx->msi_context[i].index = i;
}
- EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
-
/* Higher numbered interrupt modes are less capable! */
efx->interrupt_mode = max(efx->type->max_interrupt_mode,
interrupt_mode);
@@ -2579,7 +2615,7 @@ static void efx_pci_remove_main(struct efx_nic *efx)
BUG_ON(efx->state == STATE_READY);
cancel_work_sync(&efx->reset_work);
- efx_stop_interrupts(efx, false);
+ efx_disable_interrupts(efx);
efx_nic_fini_interrupt(efx);
efx_fini_port(efx);
efx->type->fini(efx);
@@ -2601,7 +2637,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
/* Mark the NIC as fini, then stop the interface */
rtnl_lock();
dev_close(efx->net_dev);
- efx_stop_interrupts(efx, false);
+ efx_disable_interrupts(efx);
rtnl_unlock();
efx_sriov_fini(efx);
@@ -2703,10 +2739,14 @@ static int efx_pci_probe_main(struct efx_nic *efx)
rc = efx_nic_init_interrupt(efx);
if (rc)
goto fail5;
- efx_start_interrupts(efx, false);
+ rc = efx_enable_interrupts(efx);
+ if (rc)
+ goto fail6;
return 0;
+ fail6:
+ efx_nic_fini_interrupt(efx);
fail5:
efx_fini_port(efx);
fail4:
@@ -2824,7 +2864,7 @@ static int efx_pm_freeze(struct device *dev)
efx_device_detach_sync(efx);
efx_stop_all(efx);
- efx_stop_interrupts(efx, false);
+ efx_disable_interrupts(efx);
}
rtnl_unlock();
@@ -2834,12 +2874,15 @@ static int efx_pm_freeze(struct device *dev)
static int efx_pm_thaw(struct device *dev)
{
+ int rc;
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
rtnl_lock();
if (efx->state != STATE_DISABLED) {
- efx_start_interrupts(efx, false);
+ rc = efx_enable_interrupts(efx);
+ if (rc)
+ goto fail;
mutex_lock(&efx->mac_lock);
efx->phy_op->reconfigure(efx);
@@ -2860,6 +2903,11 @@ static int efx_pm_thaw(struct device *dev)
queue_work(reset_workqueue, &efx->reset_work);
return 0;
+
+fail:
+ rtnl_unlock();
+
+ return rc;
}
static int efx_pm_poweroff(struct device *dev)
@@ -2896,8 +2944,8 @@ static int efx_pm_resume(struct device *dev)
rc = efx->type->init(efx);
if (rc)
return rc;
- efx_pm_thaw(dev);
- return 0;
+ rc = efx_pm_thaw(dev);
+ return rc;
}
static int efx_pm_suspend(struct device *dev)
@@ -2942,7 +2990,7 @@ static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
efx_device_detach_sync(efx);
efx_stop_all(efx);
- efx_stop_interrupts(efx, false);
+ efx_disable_interrupts(efx);
status = PCI_ERS_RESULT_NEED_RESET;
} else {
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index bdb30bbb0c97..34d00f5771fe 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -23,7 +23,6 @@ extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
-extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
extern netdev_tx_t
efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
extern netdev_tx_t
@@ -69,27 +68,99 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx))
/* Filters */
-extern int efx_probe_filters(struct efx_nic *efx);
-extern void efx_restore_filters(struct efx_nic *efx);
-extern void efx_remove_filters(struct efx_nic *efx);
-extern void efx_filter_update_rx_scatter(struct efx_nic *efx);
-extern s32 efx_filter_insert_filter(struct efx_nic *efx,
- struct efx_filter_spec *spec,
- bool replace);
-extern int efx_filter_remove_id_safe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id);
-extern int efx_filter_get_filter_safe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id, struct efx_filter_spec *);
-extern void efx_filter_clear_rx(struct efx_nic *efx,
- enum efx_filter_priority priority);
-extern u32 efx_filter_count_rx_used(struct efx_nic *efx,
- enum efx_filter_priority priority);
-extern u32 efx_filter_get_rx_id_limit(struct efx_nic *efx);
-extern s32 efx_filter_get_rx_ids(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 *buf, u32 size);
+
+/**
+ * efx_filter_insert_filter - add or replace a filter
+ * @efx: NIC in which to insert the filter
+ * @spec: Specification for the filter
+ * @replace_equal: Flag for whether the specified filter may replace an
+ * existing filter with equal priority
+ *
+ * On success, return the filter ID.
+ * On failure, return a negative error code.
+ *
+ * If existing filters have equal match values to the new filter spec,
+ * then the new filter might replace them or the function might fail,
+ * as follows.
+ *
+ * 1. If the existing filters have lower priority, or @replace_equal
+ * is set and they have equal priority, replace them.
+ *
+ * 2. If the existing filters have higher priority, return -%EPERM.
+ *
+ * 3. If !efx_filter_is_mc_recipient(@spec), or the NIC does not
+ * support delivery to multiple recipients, return -%EEXIST.
+ *
+ * This implies that filters for multiple multicast recipients must
+ * all be inserted with the same priority and @replace_equal = %false.
+ */
+static inline s32 efx_filter_insert_filter(struct efx_nic *efx,
+ struct efx_filter_spec *spec,
+ bool replace_equal)
+{
+ return efx->type->filter_insert(efx, spec, replace_equal);
+}
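
A usage sketch for the hook above (hypothetical values, error handling
trimmed): insert a manual-priority filter steering local TCP port 80 traffic
to RX queue 1, using the existing spec helpers from filter.h. The ID
returned on success is what efx_filter_remove_id_safe() below takes to undo
the insertion.

	static int example_steer_http(struct efx_nic *efx)
	{
		struct efx_filter_spec spec;
		s32 rc;

		efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, 1);
		efx_filter_set_ipv4_local(&spec, IPPROTO_TCP,
					  htonl(0xc0a80001), htons(80));

		rc = efx_filter_insert_filter(efx, &spec, false);
		return rc < 0 ? rc : 0;	/* rc is the filter ID on success */
	}
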
+
+/**
+ * efx_filter_remove_id_safe - remove a filter by ID, carefully
+ * @efx: NIC from which to remove the filter
+ * @priority: Priority of filter, as passed to @efx_filter_insert_filter
+ * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
+ *
+ * This function will range-check @filter_id, so it is safe to call
+ * with a value passed from userland.
+ */
+static inline int efx_filter_remove_id_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
+{
+ return efx->type->filter_remove_safe(efx, priority, filter_id);
+}
+
+/**
+ * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
+ * @efx: NIC from which to retrieve the filter
+ * @priority: Priority of filter, as passed to @efx_filter_insert_filter
+ * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
+ * @spec: Buffer in which to store filter specification
+ *
+ * This function will range-check @filter_id, so it is safe to call
+ * with a value passed from userland.
+ */
+static inline int
+efx_filter_get_filter_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *spec)
+{
+ return efx->type->filter_get_safe(efx, priority, filter_id, spec);
+}
+
+/**
+ * efx_filter_clear_rx - remove RX filters by priority
+ * @efx: NIC from which to remove the filters
+ * @priority: Maximum priority to remove
+ */
+static inline void efx_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ return efx->type->filter_clear_rx(efx, priority);
+}
+
+static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ return efx->type->filter_count_rx_used(efx, priority);
+}
+static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
+{
+ return efx->type->filter_get_rx_id_limit(efx);
+}
+static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size)
+{
+ return efx->type->filter_get_rx_ids(efx, priority, buf, size);
+}
#ifdef CONFIG_RFS_ACCEL
extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
@@ -105,11 +176,11 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel)
static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
#define efx_filter_rfs_enabled() 0
#endif
+extern bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
/* Channels */
extern int efx_channel_dummy_op_int(struct efx_channel *channel);
extern void efx_channel_dummy_op_void(struct efx_channel *channel);
-extern void efx_process_channel_now(struct efx_channel *channel);
extern int
efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
@@ -141,7 +212,12 @@ extern void efx_port_dummy_op_void(struct efx_nic *efx);
/* MTD */
#ifdef CONFIG_SFC_MTD
-extern int efx_mtd_probe(struct efx_nic *efx);
+extern int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
+ size_t n_parts, size_t sizeof_part);
+static inline int efx_mtd_probe(struct efx_nic *efx)
+{
+ return efx->type->mtd_probe(efx);
+}
extern void efx_mtd_rename(struct efx_nic *efx);
extern void efx_mtd_remove(struct efx_nic *efx);
#else
@@ -155,7 +231,6 @@ static inline void efx_schedule_channel(struct efx_channel *channel)
netif_vdbg(channel->efx, intr, channel->efx->net_dev,
"channel %d scheduling NAPI poll on CPU%d\n",
channel->channel, raw_smp_processor_id());
- channel->work_pending = true;
napi_schedule(&channel->napi_str);
}
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index ab8fb5889e55..7fdfee019092 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2009 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2007-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -147,8 +147,7 @@ enum efx_loopback_mode {
* @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
* @RESET_TYPE_INT_ERROR: reset due to internal error
* @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors
- * @RESET_TYPE_RX_DESC_FETCH: pcie error during rx descriptor fetch
- * @RESET_TYPE_TX_DESC_FETCH: pcie error during tx descriptor fetch
+ * @RESET_TYPE_DMA_ERROR: DMA error
* @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
* @RESET_TYPE_MC_FAILURE: MC reboot/assertion
*/
@@ -163,8 +162,7 @@ enum reset_type {
RESET_TYPE_TX_WATCHDOG,
RESET_TYPE_INT_ERROR,
RESET_TYPE_RX_RECOVERY,
- RESET_TYPE_RX_DESC_FETCH,
- RESET_TYPE_TX_DESC_FETCH,
+ RESET_TYPE_DMA_ERROR,
RESET_TYPE_TX_SKIP,
RESET_TYPE_MC_FAILURE,
RESET_TYPE_MAX,
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 1fc21458413d..5b471cf5c323 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -19,14 +19,9 @@
#include "filter.h"
#include "nic.h"
-struct ethtool_string {
- char name[ETH_GSTRING_LEN];
-};
-
-struct efx_ethtool_stat {
+struct efx_sw_stat_desc {
const char *name;
enum {
- EFX_ETHTOOL_STAT_SOURCE_mac_stats,
EFX_ETHTOOL_STAT_SOURCE_nic,
EFX_ETHTOOL_STAT_SOURCE_channel,
EFX_ETHTOOL_STAT_SOURCE_tx_queue
@@ -35,7 +30,7 @@ struct efx_ethtool_stat {
u64(*get_stat) (void *field); /* Reader function */
};
-/* Initialiser for a struct #efx_ethtool_stat with type-checking */
+/* Initialiser for a struct efx_sw_stat_desc with type-checking */
#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
get_stat_function) { \
.name = #stat_name, \
@@ -52,24 +47,11 @@ static u64 efx_get_uint_stat(void *field)
return *(unsigned int *)field;
}
-static u64 efx_get_u64_stat(void *field)
-{
- return *(u64 *) field;
-}
-
static u64 efx_get_atomic_stat(void *field)
{
return atomic_read((atomic_t *) field);
}
-#define EFX_ETHTOOL_U64_MAC_STAT(field) \
- EFX_ETHTOOL_STAT(field, mac_stats, field, \
- u64, efx_get_u64_stat)
-
-#define EFX_ETHTOOL_UINT_NIC_STAT(name) \
- EFX_ETHTOOL_STAT(name, nic, n_##name, \
- unsigned int, efx_get_uint_stat)
-
#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \
EFX_ETHTOOL_STAT(field, nic, field, \
atomic_t, efx_get_atomic_stat)
@@ -82,72 +64,12 @@ static u64 efx_get_atomic_stat(void *field)
EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
unsigned int, efx_get_uint_stat)
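The "type-checking" these initialisers advertise is the classic null-pointer-comparison trick: comparing a (field_type *)0 against the address of the named field makes the compiler warn if the field's actual type differs from the one declared in the descriptor. A self-contained sketch of the idea (all names hypothetical, not from this driver):

#include <stddef.h>

struct demo_channel { unsigned int n_rx_events; };

/* Compiles without warning only if @field really has type @field_type */
#define DEMO_TYPED_OFFSET(type, field_type, field)		\
	((field_type *)0 == &((struct type *)0)->field ?	\
	 offsetof(struct type, field) :				\
	 offsetof(struct type, field))

static size_t demo_offset(void)
{
	return DEMO_TYPED_OFFSET(demo_channel, unsigned int, n_rx_events);
}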
-static const struct efx_ethtool_stat efx_ethtool_stats[] = {
- EFX_ETHTOOL_U64_MAC_STAT(tx_bytes),
- EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes),
- EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes),
- EFX_ETHTOOL_U64_MAC_STAT(tx_packets),
- EFX_ETHTOOL_U64_MAC_STAT(tx_bad),
- EFX_ETHTOOL_U64_MAC_STAT(tx_pause),
- EFX_ETHTOOL_U64_MAC_STAT(tx_control),
- EFX_ETHTOOL_U64_MAC_STAT(tx_unicast),
- EFX_ETHTOOL_U64_MAC_STAT(tx_multicast),
- EFX_ETHTOOL_U64_MAC_STAT(tx_broadcast),
- EFX_ETHTOOL_U64_MAC_STAT(tx_lt64),
- EFX_ETHTOOL_U64_MAC_STAT(tx_64),
- EFX_ETHTOOL_U64_MAC_STAT(tx_65_to_127),
- EFX_ETHTOOL_U64_MAC_STAT(tx_128_to_255),
- EFX_ETHTOOL_U64_MAC_STAT(tx_256_to_511),
- EFX_ETHTOOL_U64_MAC_STAT(tx_512_to_1023),
- EFX_ETHTOOL_U64_MAC_STAT(tx_1024_to_15xx),
- EFX_ETHTOOL_U64_MAC_STAT(tx_15xx_to_jumbo),
- EFX_ETHTOOL_U64_MAC_STAT(tx_gtjumbo),
- EFX_ETHTOOL_U64_MAC_STAT(tx_collision),
- EFX_ETHTOOL_U64_MAC_STAT(tx_single_collision),
- EFX_ETHTOOL_U64_MAC_STAT(tx_multiple_collision),
- EFX_ETHTOOL_U64_MAC_STAT(tx_excessive_collision),
- EFX_ETHTOOL_U64_MAC_STAT(tx_deferred),
- EFX_ETHTOOL_U64_MAC_STAT(tx_late_collision),
- EFX_ETHTOOL_U64_MAC_STAT(tx_excessive_deferred),
- EFX_ETHTOOL_U64_MAC_STAT(tx_non_tcpudp),
- EFX_ETHTOOL_U64_MAC_STAT(tx_mac_src_error),
- EFX_ETHTOOL_U64_MAC_STAT(tx_ip_src_error),
+static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
+ EFX_ETHTOOL_UINT_TXQ_STAT(merge_events),
EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
- EFX_ETHTOOL_U64_MAC_STAT(rx_bytes),
- EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes),
- EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes),
- EFX_ETHTOOL_U64_MAC_STAT(rx_packets),
- EFX_ETHTOOL_U64_MAC_STAT(rx_good),
- EFX_ETHTOOL_U64_MAC_STAT(rx_bad),
- EFX_ETHTOOL_U64_MAC_STAT(rx_pause),
- EFX_ETHTOOL_U64_MAC_STAT(rx_control),
- EFX_ETHTOOL_U64_MAC_STAT(rx_unicast),
- EFX_ETHTOOL_U64_MAC_STAT(rx_multicast),
- EFX_ETHTOOL_U64_MAC_STAT(rx_broadcast),
- EFX_ETHTOOL_U64_MAC_STAT(rx_lt64),
- EFX_ETHTOOL_U64_MAC_STAT(rx_64),
- EFX_ETHTOOL_U64_MAC_STAT(rx_65_to_127),
- EFX_ETHTOOL_U64_MAC_STAT(rx_128_to_255),
- EFX_ETHTOOL_U64_MAC_STAT(rx_256_to_511),
- EFX_ETHTOOL_U64_MAC_STAT(rx_512_to_1023),
- EFX_ETHTOOL_U64_MAC_STAT(rx_1024_to_15xx),
- EFX_ETHTOOL_U64_MAC_STAT(rx_15xx_to_jumbo),
- EFX_ETHTOOL_U64_MAC_STAT(rx_gtjumbo),
- EFX_ETHTOOL_U64_MAC_STAT(rx_bad_lt64),
- EFX_ETHTOOL_U64_MAC_STAT(rx_bad_64_to_15xx),
- EFX_ETHTOOL_U64_MAC_STAT(rx_bad_15xx_to_jumbo),
- EFX_ETHTOOL_U64_MAC_STAT(rx_bad_gtjumbo),
- EFX_ETHTOOL_U64_MAC_STAT(rx_overflow),
- EFX_ETHTOOL_U64_MAC_STAT(rx_missed),
- EFX_ETHTOOL_U64_MAC_STAT(rx_false_carrier),
- EFX_ETHTOOL_U64_MAC_STAT(rx_symbol_error),
- EFX_ETHTOOL_U64_MAC_STAT(rx_align_error),
- EFX_ETHTOOL_U64_MAC_STAT(rx_length_error),
- EFX_ETHTOOL_U64_MAC_STAT(rx_internal_error),
- EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt),
EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
@@ -155,10 +77,11 @@ static const struct efx_ethtool_stat efx_ethtool_stats[] = {
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_nodesc_trunc),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
};
-/* Number of ethtool statistics */
-#define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats)
+#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)
#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
@@ -205,8 +128,6 @@ static int efx_ethtool_get_settings(struct net_device *net_dev,
efx->phy_op->get_settings(efx, ecmd);
mutex_unlock(&efx->mac_lock);
- /* GMAC does not support 1000Mbps HD */
- ecmd->supported &= ~SUPPORTED_1000baseT_Half;
/* Both MACs support pause frames (bidirectional and respond-only) */
ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
@@ -291,12 +212,11 @@ static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
*
* Fill in an individual self-test entry.
*/
-static void efx_fill_test(unsigned int test_index,
- struct ethtool_string *strings, u64 *data,
+static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
int *test, const char *unit_format, int unit_id,
const char *test_format, const char *test_id)
{
- struct ethtool_string unit_str, test_str;
+ char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN];
/* Fill data value, if applicable */
if (data)
@@ -305,15 +225,14 @@ static void efx_fill_test(unsigned int test_index,
/* Fill string, if applicable */
if (strings) {
if (strchr(unit_format, '%'))
- snprintf(unit_str.name, sizeof(unit_str.name),
+ snprintf(unit_str, sizeof(unit_str),
unit_format, unit_id);
else
- strcpy(unit_str.name, unit_format);
- snprintf(test_str.name, sizeof(test_str.name),
- test_format, test_id);
- snprintf(strings[test_index].name,
- sizeof(strings[test_index].name),
- "%-6s %-24s", unit_str.name, test_str.name);
+ strcpy(unit_str, unit_format);
+ snprintf(test_str, sizeof(test_str), test_format, test_id);
+ snprintf(strings + test_index * ETH_GSTRING_LEN,
+ ETH_GSTRING_LEN,
+ "%-6s %-24s", unit_str, test_str);
}
}
@@ -336,7 +255,7 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
struct efx_loopback_self_tests *lb_tests,
enum efx_loopback_mode mode,
unsigned int test_index,
- struct ethtool_string *strings, u64 *data)
+ u8 *strings, u64 *data)
{
struct efx_channel *channel =
efx_get_channel(efx, efx->tx_channel_offset);
@@ -373,8 +292,7 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
*/
static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
struct efx_self_tests *tests,
- struct ethtool_string *strings,
- u64 *data)
+ u8 *strings, u64 *data)
{
struct efx_channel *channel;
unsigned int n = 0, i;
@@ -433,12 +351,14 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
static int efx_ethtool_get_sset_count(struct net_device *net_dev,
int string_set)
{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
switch (string_set) {
case ETH_SS_STATS:
- return EFX_ETHTOOL_NUM_STATS;
+ return efx->type->describe_stats(efx, NULL) +
+ EFX_ETHTOOL_SW_STAT_COUNT;
case ETH_SS_TEST:
- return efx_ethtool_fill_self_tests(netdev_priv(net_dev),
- NULL, NULL, NULL);
+ return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
default:
return -EINVAL;
}
@@ -448,20 +368,18 @@ static void efx_ethtool_get_strings(struct net_device *net_dev,
u32 string_set, u8 *strings)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct ethtool_string *ethtool_strings =
- (struct ethtool_string *)strings;
int i;
switch (string_set) {
case ETH_SS_STATS:
- for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++)
- strlcpy(ethtool_strings[i].name,
- efx_ethtool_stats[i].name,
- sizeof(ethtool_strings[i].name));
+ strings += (efx->type->describe_stats(efx, strings) *
+ ETH_GSTRING_LEN);
+ for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
+ strlcpy(strings + i * ETH_GSTRING_LEN,
+ efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
break;
case ETH_SS_TEST:
- efx_ethtool_fill_self_tests(efx, NULL,
- ethtool_strings, NULL);
+ efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
break;
default:
/* No other string sets */
@@ -474,27 +392,20 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
u64 *data)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_mac_stats *mac_stats = &efx->mac_stats;
- const struct efx_ethtool_stat *stat;
+ const struct efx_sw_stat_desc *stat;
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
int i;
- EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS);
-
spin_lock_bh(&efx->stats_lock);
- /* Update MAC and NIC statistics */
- efx->type->update_stats(efx);
+ /* Get NIC statistics */
+ data += efx->type->update_stats(efx, data, NULL);
- /* Fill detailed statistics buffer */
- for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) {
- stat = &efx_ethtool_stats[i];
+ /* Get software statistics */
+ for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) {
+ stat = &efx_sw_stat_desc[i];
switch (stat->source) {
- case EFX_ETHTOOL_STAT_SOURCE_mac_stats:
- data[i] = stat->get_stat((void *)mac_stats +
- stat->offset);
- break;
case EFX_ETHTOOL_STAT_SOURCE_nic:
data[i] = stat->get_stat((void *)efx + stat->offset);
break;
@@ -709,7 +620,6 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
struct efx_nic *efx = netdev_priv(net_dev);
u8 wanted_fc, old_fc;
u32 old_adv;
- bool reset;
int rc = 0;
mutex_lock(&efx->mac_lock);
@@ -732,24 +642,10 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
goto out;
}
- /* TX flow control may automatically turn itself off if the
- * link partner (intermittently) stops responding to pause
- * frames. There isn't any indication that this has happened,
- * so the best we do is leave it up to the user to spot this
- * and fix it be cycling transmit flow control on this end. */
- reset = (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX);
- if (EFX_WORKAROUND_11482(efx) && reset) {
- if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) {
- /* Recover by resetting the EM block */
- falcon_stop_nic_stats(efx);
- falcon_drain_tx_fifo(efx);
- falcon_reconfigure_xmac(efx);
- falcon_start_nic_stats(efx);
- } else {
- /* Schedule a reset to recover */
- efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
- }
- }
+ /* Hook for Falcon bug 11482 workaround */
+ if (efx->type->prepare_enable_fc_tx &&
+ (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX))
+ efx->type->prepare_enable_fc_tx(efx);
old_adv = efx->link_advertising;
old_fc = efx->wanted_fc;
@@ -814,11 +710,12 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
return efx_reset(efx, rc);
}
-/* MAC address mask including only MC flag */
-static const u8 mac_addr_mc_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
+/* MAC address mask including only I/G bit */
+static const u8 mac_addr_ig_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
#define IP4_ADDR_FULL_MASK ((__force __be32)~0)
#define PORT_FULL_MASK ((__force __be16)~0)
+#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
static int efx_ethtool_get_class_rule(struct efx_nic *efx,
struct ethtool_rx_flow_spec *rule)
@@ -828,8 +725,6 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
struct ethhdr *mac_entry = &rule->h_u.ether_spec;
struct ethhdr *mac_mask = &rule->m_u.ether_spec;
struct efx_filter_spec spec;
- u16 vid;
- u8 proto;
int rc;
rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL,
@@ -837,44 +732,72 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
if (rc)
return rc;
- if (spec.dmaq_id == 0xfff)
+ if (spec.dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
rule->ring_cookie = RX_CLS_FLOW_DISC;
else
rule->ring_cookie = spec.dmaq_id;
- if (spec.type == EFX_FILTER_MC_DEF || spec.type == EFX_FILTER_UC_DEF) {
- rule->flow_type = ETHER_FLOW;
- memcpy(mac_mask->h_dest, mac_addr_mc_mask, ETH_ALEN);
- if (spec.type == EFX_FILTER_MC_DEF)
- memcpy(mac_entry->h_dest, mac_addr_mc_mask, ETH_ALEN);
- return 0;
- }
-
- rc = efx_filter_get_eth_local(&spec, &vid, mac_entry->h_dest);
- if (rc == 0) {
+ if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) &&
+ spec.ether_type == htons(ETH_P_IP) &&
+ (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) &&
+ (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
+ !(spec.match_flags &
+ ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
+ EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) {
+ rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
+ TCP_V4_FLOW : UDP_V4_FLOW);
+ if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
+ ip_entry->ip4dst = spec.loc_host[0];
+ ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
+ ip_entry->ip4src = spec.rem_host[0];
+ ip_mask->ip4src = IP4_ADDR_FULL_MASK;
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) {
+ ip_entry->pdst = spec.loc_port;
+ ip_mask->pdst = PORT_FULL_MASK;
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) {
+ ip_entry->psrc = spec.rem_port;
+ ip_mask->psrc = PORT_FULL_MASK;
+ }
+ } else if (!(spec.match_flags &
+ ~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG |
+ EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE |
+ EFX_FILTER_MATCH_OUTER_VID))) {
rule->flow_type = ETHER_FLOW;
- memset(mac_mask->h_dest, ~0, ETH_ALEN);
- if (vid != EFX_FILTER_VID_UNSPEC) {
- rule->flow_type |= FLOW_EXT;
- rule->h_ext.vlan_tci = htons(vid);
- rule->m_ext.vlan_tci = htons(0xfff);
+ if (spec.match_flags &
+ (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) {
+ memcpy(mac_entry->h_dest, spec.loc_mac, ETH_ALEN);
+ if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC)
+ memset(mac_mask->h_dest, ~0, ETH_ALEN);
+ else
+ memcpy(mac_mask->h_dest, mac_addr_ig_mask,
+ ETH_ALEN);
}
- return 0;
+ if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) {
+ memcpy(mac_entry->h_source, spec.rem_mac, ETH_ALEN);
+ memset(mac_mask->h_source, ~0, ETH_ALEN);
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
+ mac_entry->h_proto = spec.ether_type;
+ mac_mask->h_proto = ETHER_TYPE_FULL_MASK;
+ }
+ } else {
+ /* The above should handle all filters that we insert */
+ WARN_ON(1);
+ return -EINVAL;
}
- rc = efx_filter_get_ipv4_local(&spec, &proto,
- &ip_entry->ip4dst, &ip_entry->pdst);
- if (rc != 0) {
- rc = efx_filter_get_ipv4_full(
- &spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
- &ip_entry->ip4src, &ip_entry->psrc);
- EFX_WARN_ON_PARANOID(rc);
- ip_mask->ip4src = IP4_ADDR_FULL_MASK;
- ip_mask->psrc = PORT_FULL_MASK;
+ if (spec.match_flags & EFX_FILTER_MATCH_OUTER_VID) {
+ rule->flow_type |= FLOW_EXT;
+ rule->h_ext.vlan_tci = spec.outer_vid;
+ rule->m_ext.vlan_tci = htons(0xfff);
}
- rule->flow_type = (proto == IPPROTO_TCP) ? TCP_V4_FLOW : UDP_V4_FLOW;
- ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
- ip_mask->pdst = PORT_FULL_MASK;
+
return rc;
}
@@ -982,82 +905,80 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
(rule->ring_cookie == RX_CLS_FLOW_DISC) ?
- 0xfff : rule->ring_cookie);
+ EFX_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie);
- switch (rule->flow_type) {
+ switch (rule->flow_type & ~FLOW_EXT) {
case TCP_V4_FLOW:
- case UDP_V4_FLOW: {
- u8 proto = (rule->flow_type == TCP_V4_FLOW ?
- IPPROTO_TCP : IPPROTO_UDP);
-
- /* Must match all of destination, */
- if (!(ip_mask->ip4dst == IP4_ADDR_FULL_MASK &&
- ip_mask->pdst == PORT_FULL_MASK))
- return -EINVAL;
- /* all or none of source, */
- if ((ip_mask->ip4src || ip_mask->psrc) &&
- !(ip_mask->ip4src == IP4_ADDR_FULL_MASK &&
- ip_mask->psrc == PORT_FULL_MASK))
- return -EINVAL;
- /* and nothing else */
- if (ip_mask->tos || rule->m_ext.vlan_tci)
+ case UDP_V4_FLOW:
+ spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
+ EFX_FILTER_MATCH_IP_PROTO);
+ spec.ether_type = htons(ETH_P_IP);
+ spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V4_FLOW ?
+ IPPROTO_TCP : IPPROTO_UDP);
+ if (ip_mask->ip4dst) {
+ if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
+ spec.loc_host[0] = ip_entry->ip4dst;
+ }
+ if (ip_mask->ip4src) {
+ if (ip_mask->ip4src != IP4_ADDR_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
+ spec.rem_host[0] = ip_entry->ip4src;
+ }
+ if (ip_mask->pdst) {
+ if (ip_mask->pdst != PORT_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT;
+ spec.loc_port = ip_entry->pdst;
+ }
+ if (ip_mask->psrc) {
+ if (ip_mask->psrc != PORT_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_REM_PORT;
+ spec.rem_port = ip_entry->psrc;
+ }
+ if (ip_mask->tos)
return -EINVAL;
-
- if (ip_mask->ip4src)
- rc = efx_filter_set_ipv4_full(&spec, proto,
- ip_entry->ip4dst,
- ip_entry->pdst,
- ip_entry->ip4src,
- ip_entry->psrc);
- else
- rc = efx_filter_set_ipv4_local(&spec, proto,
- ip_entry->ip4dst,
- ip_entry->pdst);
- if (rc)
- return rc;
break;
- }
-
- case ETHER_FLOW | FLOW_EXT:
- case ETHER_FLOW: {
- u16 vlan_tag_mask = (rule->flow_type & FLOW_EXT ?
- ntohs(rule->m_ext.vlan_tci) : 0);
-
- /* Must not match on source address or Ethertype */
- if (!is_zero_ether_addr(mac_mask->h_source) ||
- mac_mask->h_proto)
- return -EINVAL;
- /* Is it a default UC or MC filter? */
- if (ether_addr_equal(mac_mask->h_dest, mac_addr_mc_mask) &&
- vlan_tag_mask == 0) {
- if (is_multicast_ether_addr(mac_entry->h_dest))
- rc = efx_filter_set_mc_def(&spec);
+ case ETHER_FLOW:
+ if (!is_zero_ether_addr(mac_mask->h_dest)) {
+ if (ether_addr_equal(mac_mask->h_dest,
+ mac_addr_ig_mask))
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
+ else if (is_broadcast_ether_addr(mac_mask->h_dest))
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC;
else
- rc = efx_filter_set_uc_def(&spec);
+ return -EINVAL;
+ memcpy(spec.loc_mac, mac_entry->h_dest, ETH_ALEN);
}
- /* Otherwise, it must match all of destination and all
- * or none of VID.
- */
- else if (is_broadcast_ether_addr(mac_mask->h_dest) &&
- (vlan_tag_mask == 0xfff || vlan_tag_mask == 0)) {
- rc = efx_filter_set_eth_local(
- &spec,
- vlan_tag_mask ?
- ntohs(rule->h_ext.vlan_tci) : EFX_FILTER_VID_UNSPEC,
- mac_entry->h_dest);
- } else {
- rc = -EINVAL;
+ if (!is_zero_ether_addr(mac_mask->h_source)) {
+ if (!is_broadcast_ether_addr(mac_mask->h_source))
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_REM_MAC;
+ memcpy(spec.rem_mac, mac_entry->h_source, ETH_ALEN);
+ }
+ if (mac_mask->h_proto) {
+ if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+ spec.ether_type = mac_entry->h_proto;
}
- if (rc)
- return rc;
break;
- }
default:
return -EINVAL;
}
+ if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) {
+ if (rule->m_ext.vlan_tci != htons(0xfff))
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+ spec.outer_vid = rule->h_ext.vlan_tci;
+ }
+
rc = efx_filter_insert_filter(efx, &spec, true);
if (rc < 0)
return rc;
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 71998e7995d9..ff5d322b9b49 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -19,17 +19,284 @@
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
-#include "spi.h"
#include "nic.h"
-#include "regs.h"
+#include "farch_regs.h"
#include "io.h"
#include "phy.h"
#include "workarounds.h"
#include "selftest.h"
+#include "mdio_10g.h"
/* Hardware control for SFC4000 (aka Falcon). */
+/**************************************************************************
+ *
+ * NIC stats
+ *
+ **************************************************************************
+ */
+
+#define FALCON_MAC_STATS_SIZE 0x100
+
+#define XgRxOctets_offset 0x0
+#define XgRxOctets_WIDTH 48
+#define XgRxOctetsOK_offset 0x8
+#define XgRxOctetsOK_WIDTH 48
+#define XgRxPkts_offset 0x10
+#define XgRxPkts_WIDTH 32
+#define XgRxPktsOK_offset 0x14
+#define XgRxPktsOK_WIDTH 32
+#define XgRxBroadcastPkts_offset 0x18
+#define XgRxBroadcastPkts_WIDTH 32
+#define XgRxMulticastPkts_offset 0x1C
+#define XgRxMulticastPkts_WIDTH 32
+#define XgRxUnicastPkts_offset 0x20
+#define XgRxUnicastPkts_WIDTH 32
+#define XgRxUndersizePkts_offset 0x24
+#define XgRxUndersizePkts_WIDTH 32
+#define XgRxOversizePkts_offset 0x28
+#define XgRxOversizePkts_WIDTH 32
+#define XgRxJabberPkts_offset 0x2C
+#define XgRxJabberPkts_WIDTH 32
+#define XgRxUndersizeFCSerrorPkts_offset 0x30
+#define XgRxUndersizeFCSerrorPkts_WIDTH 32
+#define XgRxDropEvents_offset 0x34
+#define XgRxDropEvents_WIDTH 32
+#define XgRxFCSerrorPkts_offset 0x38
+#define XgRxFCSerrorPkts_WIDTH 32
+#define XgRxAlignError_offset 0x3C
+#define XgRxAlignError_WIDTH 32
+#define XgRxSymbolError_offset 0x40
+#define XgRxSymbolError_WIDTH 32
+#define XgRxInternalMACError_offset 0x44
+#define XgRxInternalMACError_WIDTH 32
+#define XgRxControlPkts_offset 0x48
+#define XgRxControlPkts_WIDTH 32
+#define XgRxPausePkts_offset 0x4C
+#define XgRxPausePkts_WIDTH 32
+#define XgRxPkts64Octets_offset 0x50
+#define XgRxPkts64Octets_WIDTH 32
+#define XgRxPkts65to127Octets_offset 0x54
+#define XgRxPkts65to127Octets_WIDTH 32
+#define XgRxPkts128to255Octets_offset 0x58
+#define XgRxPkts128to255Octets_WIDTH 32
+#define XgRxPkts256to511Octets_offset 0x5C
+#define XgRxPkts256to511Octets_WIDTH 32
+#define XgRxPkts512to1023Octets_offset 0x60
+#define XgRxPkts512to1023Octets_WIDTH 32
+#define XgRxPkts1024to15xxOctets_offset 0x64
+#define XgRxPkts1024to15xxOctets_WIDTH 32
+#define XgRxPkts15xxtoMaxOctets_offset 0x68
+#define XgRxPkts15xxtoMaxOctets_WIDTH 32
+#define XgRxLengthError_offset 0x6C
+#define XgRxLengthError_WIDTH 32
+#define XgTxPkts_offset 0x80
+#define XgTxPkts_WIDTH 32
+#define XgTxOctets_offset 0x88
+#define XgTxOctets_WIDTH 48
+#define XgTxMulticastPkts_offset 0x90
+#define XgTxMulticastPkts_WIDTH 32
+#define XgTxBroadcastPkts_offset 0x94
+#define XgTxBroadcastPkts_WIDTH 32
+#define XgTxUnicastPkts_offset 0x98
+#define XgTxUnicastPkts_WIDTH 32
+#define XgTxControlPkts_offset 0x9C
+#define XgTxControlPkts_WIDTH 32
+#define XgTxPausePkts_offset 0xA0
+#define XgTxPausePkts_WIDTH 32
+#define XgTxPkts64Octets_offset 0xA4
+#define XgTxPkts64Octets_WIDTH 32
+#define XgTxPkts65to127Octets_offset 0xA8
+#define XgTxPkts65to127Octets_WIDTH 32
+#define XgTxPkts128to255Octets_offset 0xAC
+#define XgTxPkts128to255Octets_WIDTH 32
+#define XgTxPkts256to511Octets_offset 0xB0
+#define XgTxPkts256to511Octets_WIDTH 32
+#define XgTxPkts512to1023Octets_offset 0xB4
+#define XgTxPkts512to1023Octets_WIDTH 32
+#define XgTxPkts1024to15xxOctets_offset 0xB8
+#define XgTxPkts1024to15xxOctets_WIDTH 32
+#define XgTxPkts1519toMaxOctets_offset 0xBC
+#define XgTxPkts1519toMaxOctets_WIDTH 32
+#define XgTxUndersizePkts_offset 0xC0
+#define XgTxUndersizePkts_WIDTH 32
+#define XgTxOversizePkts_offset 0xC4
+#define XgTxOversizePkts_WIDTH 32
+#define XgTxNonTcpUdpPkt_offset 0xC8
+#define XgTxNonTcpUdpPkt_WIDTH 16
+#define XgTxMacSrcErrPkt_offset 0xCC
+#define XgTxMacSrcErrPkt_WIDTH 16
+#define XgTxIpSrcErrPkt_offset 0xD0
+#define XgTxIpSrcErrPkt_WIDTH 16
+#define XgDmaDone_offset 0xD4
+#define XgDmaDone_WIDTH 32
+
+#define FALCON_XMAC_STATS_DMA_FLAG(efx) \
+ (*(u32 *)((efx)->stats_buffer.addr + XgDmaDone_offset))
+
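XgDmaDone is the last word the MAC DMAs into the statistics buffer, so a nonzero value means the rest of the buffer is complete. A sketch of the check that falcon_stats_complete() performs further down in this patch:

static bool example_stats_ready(struct efx_nic *efx)
{
	if (!FALCON_XMAC_STATS_DMA_FLAG(efx))
		return false;	/* DMA still in flight; flag was cleared at request */
	rmb();	/* read the done flag before reading the stats themselves */
	return true;
}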
+#define FALCON_DMA_STAT(ext_name, hw_name) \
+ [FALCON_STAT_ ## ext_name] = \
+ { #ext_name, \
+ /* 48-bit stats are zero-padded to 64 on DMA */ \
+ hw_name ## _ ## WIDTH == 48 ? 64 : hw_name ## _ ## WIDTH, \
+ hw_name ## _ ## offset }
+#define FALCON_OTHER_STAT(ext_name) \
+ [FALCON_STAT_ ## ext_name] = { #ext_name, 0, 0 }
+
+static const struct efx_hw_stat_desc falcon_stat_desc[FALCON_STAT_COUNT] = {
+ FALCON_DMA_STAT(tx_bytes, XgTxOctets),
+ FALCON_DMA_STAT(tx_packets, XgTxPkts),
+ FALCON_DMA_STAT(tx_pause, XgTxPausePkts),
+ FALCON_DMA_STAT(tx_control, XgTxControlPkts),
+ FALCON_DMA_STAT(tx_unicast, XgTxUnicastPkts),
+ FALCON_DMA_STAT(tx_multicast, XgTxMulticastPkts),
+ FALCON_DMA_STAT(tx_broadcast, XgTxBroadcastPkts),
+ FALCON_DMA_STAT(tx_lt64, XgTxUndersizePkts),
+ FALCON_DMA_STAT(tx_64, XgTxPkts64Octets),
+ FALCON_DMA_STAT(tx_65_to_127, XgTxPkts65to127Octets),
+ FALCON_DMA_STAT(tx_128_to_255, XgTxPkts128to255Octets),
+ FALCON_DMA_STAT(tx_256_to_511, XgTxPkts256to511Octets),
+ FALCON_DMA_STAT(tx_512_to_1023, XgTxPkts512to1023Octets),
+ FALCON_DMA_STAT(tx_1024_to_15xx, XgTxPkts1024to15xxOctets),
+ FALCON_DMA_STAT(tx_15xx_to_jumbo, XgTxPkts1519toMaxOctets),
+ FALCON_DMA_STAT(tx_gtjumbo, XgTxOversizePkts),
+ FALCON_DMA_STAT(tx_non_tcpudp, XgTxNonTcpUdpPkt),
+ FALCON_DMA_STAT(tx_mac_src_error, XgTxMacSrcErrPkt),
+ FALCON_DMA_STAT(tx_ip_src_error, XgTxIpSrcErrPkt),
+ FALCON_DMA_STAT(rx_bytes, XgRxOctets),
+ FALCON_DMA_STAT(rx_good_bytes, XgRxOctetsOK),
+ FALCON_OTHER_STAT(rx_bad_bytes),
+ FALCON_DMA_STAT(rx_packets, XgRxPkts),
+ FALCON_DMA_STAT(rx_good, XgRxPktsOK),
+ FALCON_DMA_STAT(rx_bad, XgRxFCSerrorPkts),
+ FALCON_DMA_STAT(rx_pause, XgRxPausePkts),
+ FALCON_DMA_STAT(rx_control, XgRxControlPkts),
+ FALCON_DMA_STAT(rx_unicast, XgRxUnicastPkts),
+ FALCON_DMA_STAT(rx_multicast, XgRxMulticastPkts),
+ FALCON_DMA_STAT(rx_broadcast, XgRxBroadcastPkts),
+ FALCON_DMA_STAT(rx_lt64, XgRxUndersizePkts),
+ FALCON_DMA_STAT(rx_64, XgRxPkts64Octets),
+ FALCON_DMA_STAT(rx_65_to_127, XgRxPkts65to127Octets),
+ FALCON_DMA_STAT(rx_128_to_255, XgRxPkts128to255Octets),
+ FALCON_DMA_STAT(rx_256_to_511, XgRxPkts256to511Octets),
+ FALCON_DMA_STAT(rx_512_to_1023, XgRxPkts512to1023Octets),
+ FALCON_DMA_STAT(rx_1024_to_15xx, XgRxPkts1024to15xxOctets),
+ FALCON_DMA_STAT(rx_15xx_to_jumbo, XgRxPkts15xxtoMaxOctets),
+ FALCON_DMA_STAT(rx_gtjumbo, XgRxOversizePkts),
+ FALCON_DMA_STAT(rx_bad_lt64, XgRxUndersizeFCSerrorPkts),
+ FALCON_DMA_STAT(rx_bad_gtjumbo, XgRxJabberPkts),
+ FALCON_DMA_STAT(rx_overflow, XgRxDropEvents),
+ FALCON_DMA_STAT(rx_symbol_error, XgRxSymbolError),
+ FALCON_DMA_STAT(rx_align_error, XgRxAlignError),
+ FALCON_DMA_STAT(rx_length_error, XgRxLengthError),
+ FALCON_DMA_STAT(rx_internal_error, XgRxInternalMACError),
+ FALCON_OTHER_STAT(rx_nodesc_drop_cnt),
+};
+static const unsigned long falcon_stat_mask[] = {
+ [0 ... BITS_TO_LONGS(FALCON_STAT_COUNT) - 1] = ~0UL,
+};
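The all-ones mask says every descriptor is valid on Falcon; NIC generations with conditional stats clear individual bits. A sketch of how such a mask gates the update loop, assuming the kernel's bitmap iterator (efx_nic_update_stats() itself is added elsewhere in this series):

#include <linux/bitops.h>

static void example_walk_stats(const unsigned long *mask, unsigned int count)
{
	unsigned int index;

	for_each_set_bit(index, mask, count) {
		/* Stat @index exists on this NIC: a nonzero DMA width
		 * means the MAC writes it into the stats buffer, while
		 * FALCON_OTHER_STAT entries (width 0) are derived by
		 * the driver. */
	}
}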
+
+/**************************************************************************
+ *
+ * Basic SPI command set and bit definitions
+ *
+ *************************************************************************/
+
+#define SPI_WRSR 0x01 /* Write status register */
+#define SPI_WRITE 0x02 /* Write data to memory array */
+#define SPI_READ 0x03 /* Read data from memory array */
+#define SPI_WRDI 0x04 /* Reset write enable latch */
+#define SPI_RDSR 0x05 /* Read status register */
+#define SPI_WREN 0x06 /* Set write enable latch */
+#define SPI_SST_EWSR 0x50 /* SST: Enable write to status register */
+
+#define SPI_STATUS_WPEN 0x80 /* Write-protect pin enabled */
+#define SPI_STATUS_BP2 0x10 /* Block protection bit 2 */
+#define SPI_STATUS_BP1 0x08 /* Block protection bit 1 */
+#define SPI_STATUS_BP0 0x04 /* Block protection bit 0 */
+#define SPI_STATUS_WEN 0x02 /* State of the write enable latch */
+#define SPI_STATUS_NRDY 0x01 /* Device busy flag */
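These opcodes imply the usual serial-flash/EEPROM protocol: every write-class command must be preceded by WREN, and completion is signalled by NRDY clearing in the status register. A simplified single-byte sketch using the helpers defined later in this file (it skips the address munging and block-size limits the real falcon_spi_write() applies):

static int example_spi_write_byte(struct efx_nic *efx,
				  const struct falcon_spi_device *spi,
				  int address, u8 byte)
{
	int rc;

	rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
	if (rc)
		return rc;
	rc = falcon_spi_cmd(efx, spi, SPI_WRITE, address, &byte, NULL, 1);
	if (rc)
		return rc;
	/* Poll SPI_RDSR until SPI_STATUS_NRDY clears */
	return falcon_spi_wait_write(efx, spi);
}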
+
+/**************************************************************************
+ *
+ * Non-volatile memory layout
+ *
+ **************************************************************************
+ */
+
+/* SFC4000 flash is partitioned into:
+ *     0-0x400       chip and board config (see struct falcon_nvconfig)
+ *     0x400-0x8000  unused (or may contain VPD if EEPROM not present)
+ *     0x8000-end    boot code (mapped to PCI expansion ROM)
+ * SFC4000 small EEPROM (size < 0x400) is used for VPD only.
+ * SFC4000 large EEPROM (size >= 0x400) is partitioned into:
+ *     0-0x400       chip and board config
+ *     configurable  VPD
+ *     0x800-0x1800  boot config
+ * Aside from the chip and board config, all of these are optional and may
+ * be absent or truncated depending on the devices used.
+ */
+#define FALCON_NVCONFIG_END 0x400U
+#define FALCON_FLASH_BOOTCODE_START 0x8000U
+#define FALCON_EEPROM_BOOTCONFIG_START 0x800U
+#define FALCON_EEPROM_BOOTCONFIG_END 0x1800U
+
+/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
+struct falcon_nvconfig_board_v2 {
+ __le16 nports;
+ u8 port0_phy_addr;
+ u8 port0_phy_type;
+ u8 port1_phy_addr;
+ u8 port1_phy_type;
+ __le16 asic_sub_revision;
+ __le16 board_revision;
+} __packed;
+
+/* Board configuration v3 extra information */
+struct falcon_nvconfig_board_v3 {
+ __le32 spi_device_type[2];
+} __packed;
+
+/* Bit numbers for spi_device_type */
+#define SPI_DEV_TYPE_SIZE_LBN 0
+#define SPI_DEV_TYPE_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
+#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
+#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
+#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
+#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
+#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
+#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_FIELD(type, field) \
+ (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
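A hedged decoding example for one spi_device_type word; the power-of-two convention and the falcon_spi_device field names are assumed from their use elsewhere in this file:

static void example_decode_spi_type(u32 device_type,
				    struct falcon_spi_device *spi)
{
	spi->size = 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
	spi->addr_len =
		SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
	spi->erase_command =
		SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
	spi->erase_size =
		1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_SIZE);
	spi->block_size =
		1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_BLOCK_SIZE);
}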
+
+#define FALCON_NVCONFIG_OFFSET 0x300
+
+#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
+struct falcon_nvconfig {
+ efx_oword_t ee_vpd_cfg_reg; /* 0x300 */
+ u8 mac_address[2][8]; /* 0x310 */
+ efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */
+ efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */
+ efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */
+ efx_oword_t hw_init_reg; /* 0x350 */
+ efx_oword_t nic_stat_reg; /* 0x360 */
+ efx_oword_t glb_ctl_reg; /* 0x370 */
+ efx_oword_t srm_cfg_reg; /* 0x380 */
+ efx_oword_t spare_reg; /* 0x390 */
+ __le16 board_magic_num; /* 0x3A0 */
+ __le16 board_struct_ver;
+ __le16 board_checksum;
+ struct falcon_nvconfig_board_v2 board_v2;
+ efx_oword_t ee_base_page_reg; /* 0x3B0 */
+ struct falcon_nvconfig_board_v3 board_v3; /* 0x3C0 */
+} __packed;
+
+/*************************************************************************/
+
static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
+static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
static const unsigned int
/* "Large" EEPROM device: Atmel AT25640 or similar
@@ -146,7 +413,7 @@ static void falcon_prepare_flush(struct efx_nic *efx)
*
* NB most hardware supports MSI interrupts
*/
-inline void falcon_irq_ack_a1(struct efx_nic *efx)
+static inline void falcon_irq_ack_a1(struct efx_nic *efx)
{
efx_dword_t reg;
@@ -156,7 +423,7 @@ inline void falcon_irq_ack_a1(struct efx_nic *efx)
}
-irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
+static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
struct efx_nic *efx = dev_id;
efx_oword_t *int_ker = efx->irq_status.addr;
@@ -177,10 +444,13 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
"IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
+ if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+ return IRQ_HANDLED;
+
/* Check to see if we have a serious error condition */
syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
if (unlikely(syserr))
- return efx_nic_fatal_interrupt(efx);
+ return efx_farch_fatal_interrupt(efx);
/* Determine interrupting queues, clear interrupt status
* register and acknowledge the device interrupt.
@@ -241,9 +511,10 @@ static int falcon_spi_wait(struct efx_nic *efx)
}
}
-int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
- unsigned int command, int address,
- const void *in, void *out, size_t len)
+static int
+falcon_spi_cmd(struct efx_nic *efx, const struct falcon_spi_device *spi,
+ unsigned int command, int address,
+ const void *in, void *out, size_t len)
{
bool addressed = (address >= 0);
bool reading = (out != NULL);
@@ -297,48 +568,16 @@ int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
return 0;
}
-static size_t
-falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start)
-{
- return min(FALCON_SPI_MAX_LEN,
- (spi->block_size - (start & (spi->block_size - 1))));
-}
-
static inline u8
-efx_spi_munge_command(const struct efx_spi_device *spi,
- const u8 command, const unsigned int address)
+falcon_spi_munge_command(const struct falcon_spi_device *spi,
+ const u8 command, const unsigned int address)
{
return command | (((address >> 8) & spi->munge_address) << 3);
}
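A hedged worked example of the munge above: on a small EEPROM with 9-bit addressing, munge_address is 1 and the ninth address bit rides in bit 3 of the opcode, so SPI_READ (0x03) at address 0x1A5 is issued as command (0x03 | ((0x1A5 >> 8) & 1) << 3) = 0x0B, with the low 8 address bits sent separately. On larger devices munge_address is 0 and the command byte is unchanged.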
-/* Wait up to 10 ms for buffered write completion */
-int
-falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi)
-{
- unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
- u8 status;
- int rc;
-
- for (;;) {
- rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
- &status, sizeof(status));
- if (rc)
- return rc;
- if (!(status & SPI_STATUS_NRDY))
- return 0;
- if (time_after_eq(jiffies, timeout)) {
- netif_err(efx, hw, efx->net_dev,
- "SPI write timeout on device %d"
- " last status=0x%02x\n",
- spi->device_id, status);
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- }
-}
-
-int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
- loff_t start, size_t len, size_t *retlen, u8 *buffer)
+static int
+falcon_spi_read(struct efx_nic *efx, const struct falcon_spi_device *spi,
+ loff_t start, size_t len, size_t *retlen, u8 *buffer)
{
size_t block_len, pos = 0;
unsigned int command;
@@ -347,7 +586,7 @@ int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
while (pos < len) {
block_len = min(len - pos, FALCON_SPI_MAX_LEN);
- command = efx_spi_munge_command(spi, SPI_READ, start + pos);
+ command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL,
buffer + pos, block_len);
if (rc)
@@ -367,8 +606,52 @@ int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
return rc;
}
-int
-falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
+#ifdef CONFIG_SFC_MTD
+
+struct falcon_mtd_partition {
+ struct efx_mtd_partition common;
+ const struct falcon_spi_device *spi;
+ size_t offset;
+};
+
+#define to_falcon_mtd_partition(mtd) \
+ container_of(mtd, struct falcon_mtd_partition, common.mtd)
+
+static size_t
+falcon_spi_write_limit(const struct falcon_spi_device *spi, size_t start)
+{
+ return min(FALCON_SPI_MAX_LEN,
+ (spi->block_size - (start & (spi->block_size - 1))));
+}
+
+/* Wait up to 10 ms for buffered write completion */
+static int
+falcon_spi_wait_write(struct efx_nic *efx, const struct falcon_spi_device *spi)
+{
+ unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
+ u8 status;
+ int rc;
+
+ for (;;) {
+ rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
+ &status, sizeof(status));
+ if (rc)
+ return rc;
+ if (!(status & SPI_STATUS_NRDY))
+ return 0;
+ if (time_after_eq(jiffies, timeout)) {
+ netif_err(efx, hw, efx->net_dev,
+ "SPI write timeout on device %d"
+ " last status=0x%02x\n",
+ spi->device_id, status);
+ return -ETIMEDOUT;
+ }
+ schedule_timeout_uninterruptible(1);
+ }
+}
+
+static int
+falcon_spi_write(struct efx_nic *efx, const struct falcon_spi_device *spi,
loff_t start, size_t len, size_t *retlen, const u8 *buffer)
{
u8 verify_buffer[FALCON_SPI_MAX_LEN];
@@ -383,7 +666,7 @@ falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
block_len = min(len - pos,
falcon_spi_write_limit(spi, start + pos));
- command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
+ command = falcon_spi_munge_command(spi, SPI_WRITE, start + pos);
rc = falcon_spi_cmd(efx, spi, command, start + pos,
buffer + pos, NULL, block_len);
if (rc)
@@ -393,7 +676,7 @@ falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
if (rc)
break;
- command = efx_spi_munge_command(spi, SPI_READ, start + pos);
+ command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
rc = falcon_spi_cmd(efx, spi, command, start + pos,
NULL, verify_buffer, block_len);
if (memcmp(verify_buffer, buffer + pos, block_len)) {
@@ -416,6 +699,520 @@ falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
return rc;
}
+static int
+falcon_spi_slow_wait(struct falcon_mtd_partition *part, bool uninterruptible)
+{
+ const struct falcon_spi_device *spi = part->spi;
+ struct efx_nic *efx = part->common.mtd.priv;
+ u8 status;
+ int rc, i;
+
+ /* Wait up to 4s for flash/EEPROM to finish a slow operation. */
+ for (i = 0; i < 40; i++) {
+ __set_current_state(uninterruptible ?
+ TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ / 10);
+ rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
+ &status, sizeof(status));
+ if (rc)
+ return rc;
+ if (!(status & SPI_STATUS_NRDY))
+ return 0;
+ if (signal_pending(current))
+ return -EINTR;
+ }
+ pr_err("%s: timed out waiting for %s\n",
+ part->common.name, part->common.dev_type_name);
+ return -ETIMEDOUT;
+}
+
+static int
+falcon_spi_unlock(struct efx_nic *efx, const struct falcon_spi_device *spi)
+{
+ const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
+ SPI_STATUS_BP0);
+ u8 status;
+ int rc;
+
+ rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
+ &status, sizeof(status));
+ if (rc)
+ return rc;
+
+ if (!(status & unlock_mask))
+ return 0; /* already unlocked */
+
+ rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
+ if (rc)
+ return rc;
+ rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
+ if (rc)
+ return rc;
+
+ status &= ~unlock_mask;
+ rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
+ NULL, sizeof(status));
+ if (rc)
+ return rc;
+ rc = falcon_spi_wait_write(efx, spi);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+#define FALCON_SPI_VERIFY_BUF_LEN 16
+
+static int
+falcon_spi_erase(struct falcon_mtd_partition *part, loff_t start, size_t len)
+{
+ const struct falcon_spi_device *spi = part->spi;
+ struct efx_nic *efx = part->common.mtd.priv;
+ unsigned pos, block_len;
+ u8 empty[FALCON_SPI_VERIFY_BUF_LEN];
+ u8 buffer[FALCON_SPI_VERIFY_BUF_LEN];
+ int rc;
+
+ if (len != spi->erase_size)
+ return -EINVAL;
+
+ if (spi->erase_command == 0)
+ return -EOPNOTSUPP;
+
+ rc = falcon_spi_unlock(efx, spi);
+ if (rc)
+ return rc;
+ rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
+ if (rc)
+ return rc;
+ rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
+ NULL, 0);
+ if (rc)
+ return rc;
+ rc = falcon_spi_slow_wait(part, false);
+
+ /* Verify the entire region has been wiped */
+ memset(empty, 0xff, sizeof(empty));
+ for (pos = 0; pos < len; pos += block_len) {
+ block_len = min(len - pos, sizeof(buffer));
+ rc = falcon_spi_read(efx, spi, start + pos, block_len,
+ NULL, buffer);
+ if (rc)
+ return rc;
+ if (memcmp(empty, buffer, block_len))
+ return -EIO;
+
+ /* Avoid locking up the system */
+ cond_resched();
+ if (signal_pending(current))
+ return -EINTR;
+ }
+
+ return rc;
+}
+
+static void falcon_mtd_rename(struct efx_mtd_partition *part)
+{
+ struct efx_nic *efx = part->mtd.priv;
+
+ snprintf(part->name, sizeof(part->name), "%s %s",
+ efx->name, part->type_name);
+}
+
+static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, u8 *buffer)
+{
+ struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ rc = mutex_lock_interruptible(&nic_data->spi_lock);
+ if (rc)
+ return rc;
+ rc = falcon_spi_read(efx, part->spi, part->offset + start,
+ len, retlen, buffer);
+ mutex_unlock(&nic_data->spi_lock);
+ return rc;
+}
+
+static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
+{
+ struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ rc = mutex_lock_interruptible(&nic_data->spi_lock);
+ if (rc)
+ return rc;
+ rc = falcon_spi_erase(part, part->offset + start, len);
+ mutex_unlock(&nic_data->spi_lock);
+ return rc;
+}
+
+static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, const u8 *buffer)
+{
+ struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ rc = mutex_lock_interruptible(&nic_data->spi_lock);
+ if (rc)
+ return rc;
+ rc = falcon_spi_write(efx, part->spi, part->offset + start,
+ len, retlen, buffer);
+ mutex_unlock(&nic_data->spi_lock);
+ return rc;
+}
+
+static int falcon_mtd_sync(struct mtd_info *mtd)
+{
+ struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ mutex_lock(&nic_data->spi_lock);
+ rc = falcon_spi_slow_wait(part, true);
+ mutex_unlock(&nic_data->spi_lock);
+ return rc;
+}
+
+static int falcon_mtd_probe(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ struct falcon_mtd_partition *parts;
+ struct falcon_spi_device *spi;
+ size_t n_parts;
+ int rc = -ENODEV;
+
+ ASSERT_RTNL();
+
+ /* Allocate space for maximum number of partitions */
+ parts = kcalloc(2, sizeof(*parts), GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+ n_parts = 0;
+
+ spi = &nic_data->spi_flash;
+ if (falcon_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
+ parts[n_parts].spi = spi;
+ parts[n_parts].offset = FALCON_FLASH_BOOTCODE_START;
+ parts[n_parts].common.dev_type_name = "flash";
+ parts[n_parts].common.type_name = "sfc_flash_bootrom";
+ parts[n_parts].common.mtd.type = MTD_NORFLASH;
+ parts[n_parts].common.mtd.flags = MTD_CAP_NORFLASH;
+ parts[n_parts].common.mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
+ parts[n_parts].common.mtd.erasesize = spi->erase_size;
+ n_parts++;
+ }
+
+ spi = &nic_data->spi_eeprom;
+ if (falcon_spi_present(spi) && spi->size > FALCON_EEPROM_BOOTCONFIG_START) {
+ parts[n_parts].spi = spi;
+ parts[n_parts].offset = FALCON_EEPROM_BOOTCONFIG_START;
+ parts[n_parts].common.dev_type_name = "EEPROM";
+ parts[n_parts].common.type_name = "sfc_bootconfig";
+ parts[n_parts].common.mtd.type = MTD_RAM;
+ parts[n_parts].common.mtd.flags = MTD_CAP_RAM;
+ parts[n_parts].common.mtd.size =
+ min(spi->size, FALCON_EEPROM_BOOTCONFIG_END) -
+ FALCON_EEPROM_BOOTCONFIG_START;
+ parts[n_parts].common.mtd.erasesize = spi->erase_size;
+ n_parts++;
+ }
+
+ rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
+ if (rc)
+ kfree(parts);
+ return rc;
+}
+
+#endif /* CONFIG_SFC_MTD */
+
+/**************************************************************************
+ *
+ * XMAC operations
+ *
+ **************************************************************************
+ */
+
+/* Configure the XAUI driver that is an output from Falcon */
+static void falcon_setup_xaui(struct efx_nic *efx)
+{
+ efx_oword_t sdctl, txdrv;
+
+ /* Move the XAUI into low power, unless there is no PHY, in
+ * which case the XAUI will have to drive a cable. */
+ if (efx->phy_type == PHY_TYPE_NONE)
+ return;
+
+ efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
+ efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);
+
+ EFX_POPULATE_OWORD_8(txdrv,
+ FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
+ FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
+ FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
+ FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
+ FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
+ FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
+ FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
+ FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
+ efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
+}
+
+int falcon_reset_xaui(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t reg;
+ int count;
+
+ /* Don't fetch MAC statistics over an XMAC reset */
+ WARN_ON(nic_data->stats_disable_count == 0);
+
+ /* Start reset sequence */
+ EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
+ efx_writeo(efx, &reg, FR_AB_XX_PWR_RST);
+
+ /* Wait up to 10 ms for completion, then reinitialise */
+ for (count = 0; count < 1000; count++) {
+ efx_reado(efx, &reg, FR_AB_XX_PWR_RST);
+ if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
+ EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
+ falcon_setup_xaui(efx);
+ return 0;
+ }
+ udelay(10);
+ }
+ netif_err(efx, hw, efx->net_dev,
+ "timed out waiting for XAUI/XGXS reset\n");
+ return -ETIMEDOUT;
+}
+
+static void falcon_ack_status_intr(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t reg;
+
+ if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
+ return;
+
+ /* We expect xgmii faults if the wireside link is down */
+ if (!efx->link_state.up)
+ return;
+
+ /* We can only use this interrupt to signal the negative edge of
+ * xaui_align [we have to poll the positive edge]. */
+ if (nic_data->xmac_poll_required)
+ return;
+
+ efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
+}
+
+static bool falcon_xgxs_link_ok(struct efx_nic *efx)
+{
+ efx_oword_t reg;
+ bool align_done, link_ok = false;
+ int sync_status;
+
+ /* Read link status */
+ efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
+
+ align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
+ sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
+ if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
+ link_ok = true;
+
+ /* Clear link status ready for next read */
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
+ efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
+
+ return link_ok;
+}
+
+static bool falcon_xmac_link_ok(struct efx_nic *efx)
+{
+ /*
+ * Check MAC's XGXS link status except when using XGMII loopback
+ * which bypasses the XGXS block.
+ * If possible, check PHY's XGXS link status except when using
+ * MAC loopback.
+ */
+ return (efx->loopback_mode == LOOPBACK_XGMII ||
+ falcon_xgxs_link_ok(efx)) &&
+ (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
+ LOOPBACK_INTERNAL(efx) ||
+ efx_mdio_phyxgxs_lane_sync(efx));
+}
+
+static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
+{
+ unsigned int max_frame_len;
+ efx_oword_t reg;
+ bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX);
+ bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX);
+
+ /* Configure MAC - cut-through mode is hard-wired on */
+ EFX_POPULATE_OWORD_3(reg,
+ FRF_AB_XM_RX_JUMBO_MODE, 1,
+ FRF_AB_XM_TX_STAT_EN, 1,
+ FRF_AB_XM_RX_STAT_EN, 1);
+ efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
+
+ /* Configure TX */
+ EFX_POPULATE_OWORD_6(reg,
+ FRF_AB_XM_TXEN, 1,
+ FRF_AB_XM_TX_PRMBL, 1,
+ FRF_AB_XM_AUTO_PAD, 1,
+ FRF_AB_XM_TXCRC, 1,
+ FRF_AB_XM_FCNTL, tx_fc,
+ FRF_AB_XM_IPG, 0x3);
+ efx_writeo(efx, &reg, FR_AB_XM_TX_CFG);
+
+ /* Configure RX */
+ EFX_POPULATE_OWORD_5(reg,
+ FRF_AB_XM_RXEN, 1,
+ FRF_AB_XM_AUTO_DEPAD, 0,
+ FRF_AB_XM_ACPT_ALL_MCAST, 1,
+ FRF_AB_XM_ACPT_ALL_UCAST, !efx->unicast_filter,
+ FRF_AB_XM_PASS_CRC_ERR, 1);
+ efx_writeo(efx, &reg, FR_AB_XM_RX_CFG);
+
+ /* Set frame length */
+ max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
+ EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
+ efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
+ EFX_POPULATE_OWORD_2(reg,
+ FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
+ FRF_AB_XM_TX_JUMBO_MODE, 1);
+ efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM);
+
+ EFX_POPULATE_OWORD_2(reg,
+ FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
+ FRF_AB_XM_DIS_FCNTL, !rx_fc);
+ efx_writeo(efx, &reg, FR_AB_XM_FC);
+
+ /* Set MAC address */
+ memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
+ efx_writeo(efx, &reg, FR_AB_XM_ADR_LO);
+ memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
+ efx_writeo(efx, &reg, FR_AB_XM_ADR_HI);
+}
+
+static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
+{
+ efx_oword_t reg;
+ bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
+ bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
+ bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
+ bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
+
+ /* XGXS block is flaky and will need to be reset if moving
+ * into or out of XGMII, XGXS or XAUI loopbacks. */
+ efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
+ old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
+ old_xgmii_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);
+
+ efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
+ old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);
+
+ /* The PHY driver may have turned XAUI off */
+ if ((xgxs_loopback != old_xgxs_loopback) ||
+ (xaui_loopback != old_xaui_loopback) ||
+ (xgmii_loopback != old_xgmii_loopback))
+ falcon_reset_xaui(efx);
+
+ efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
+ (xgxs_loopback || xaui_loopback) ?
+ FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
+ efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
+
+ efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
+ efx_writeo(efx, &reg, FR_AB_XX_SD_CTL);
+}
+
+
+/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
+static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
+{
+ bool mac_up = falcon_xmac_link_ok(efx);
+
+ if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
+ efx_phy_mode_disabled(efx->phy_mode))
+ /* XAUI link is expected to be down */
+ return mac_up;
+
+ falcon_stop_nic_stats(efx);
+
+ while (!mac_up && tries) {
+ netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n");
+ falcon_reset_xaui(efx);
+ udelay(200);
+
+ mac_up = falcon_xmac_link_ok(efx);
+ --tries;
+ }
+
+ falcon_start_nic_stats(efx);
+
+ return mac_up;
+}
+
+static bool falcon_xmac_check_fault(struct efx_nic *efx)
+{
+ return !falcon_xmac_link_ok_retry(efx, 5);
+}
+
+static int falcon_reconfigure_xmac(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+
+ efx_farch_filter_sync_rx_mode(efx);
+
+ falcon_reconfigure_xgxs_core(efx);
+ falcon_reconfigure_xmac_core(efx);
+
+ falcon_reconfigure_mac_wrapper(efx);
+
+ nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
+ falcon_ack_status_intr(efx);
+
+ return 0;
+}
+
+static void falcon_poll_xmac(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+
+ /* We expect xgmii faults if the wireside link is down */
+ if (!efx->link_state.up || !nic_data->xmac_poll_required)
+ return;
+
+ nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
+ falcon_ack_status_intr(efx);
+}
+
/**************************************************************************
*
* MAC wrapper
@@ -497,7 +1294,7 @@ static void falcon_reset_macs(struct efx_nic *efx)
falcon_setup_xaui(efx);
}
-void falcon_drain_tx_fifo(struct efx_nic *efx)
+static void falcon_drain_tx_fifo(struct efx_nic *efx)
{
efx_oword_t reg;
@@ -529,7 +1326,7 @@ static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
falcon_drain_tx_fifo(efx);
}
-void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
+static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
{
struct efx_link_state *link_state = &efx->link_state;
efx_oword_t reg;
@@ -550,7 +1347,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
EFX_POPULATE_OWORD_5(reg,
FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
FRF_AB_MAC_BCAD_ACPT, 1,
- FRF_AB_MAC_UC_PROM, efx->promiscuous,
+ FRF_AB_MAC_UC_PROM, !efx->unicast_filter,
FRF_AB_MAC_LINK_STATUS, 1, /* always set */
FRF_AB_MAC_SPEED, link_speed);
/* On B0, MAC backpressure can be disabled and packets get
@@ -583,10 +1380,7 @@ static void falcon_stats_request(struct efx_nic *efx)
WARN_ON(nic_data->stats_pending);
WARN_ON(nic_data->stats_disable_count);
- if (nic_data->stats_dma_done == NULL)
- return; /* no mac selected */
-
- *nic_data->stats_dma_done = FALCON_STATS_NOT_DONE;
+ FALCON_XMAC_STATS_DMA_FLAG(efx) = 0;
nic_data->stats_pending = true;
wmb(); /* ensure done flag is clear */
@@ -608,9 +1402,11 @@ static void falcon_stats_complete(struct efx_nic *efx)
return;
nic_data->stats_pending = false;
- if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
+ if (FALCON_XMAC_STATS_DMA_FLAG(efx)) {
rmb(); /* read the done flag before the stats */
- falcon_update_stats_xmac(efx);
+ efx_nic_update_stats(falcon_stat_desc, FALCON_STAT_COUNT,
+ falcon_stat_mask, nic_data->stats,
+ efx->stats_buffer.addr, true);
} else {
netif_err(efx, hw, efx->net_dev,
"timed out waiting for statistics\n");
@@ -678,6 +1474,28 @@ static int falcon_reconfigure_port(struct efx_nic *efx)
return 0;
}
+/* TX flow control may automatically turn itself off if the link
+ * partner (intermittently) stops responding to pause frames. There
+ * isn't any indication that this has happened, so the best we can do is
+ * leave it up to the user to spot this and fix it by cycling transmit
+ * flow control on this end.
+ */
+
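+/* For example, with an interface named eth0 (the name is illustrative,
+ * not taken from the driver), TX flow control can be cycled from
+ * userspace with:
+ *
+ *	ethtool -A eth0 tx off
+ *	ethtool -A eth0 tx on
+ */
+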
+static void falcon_a1_prepare_enable_fc_tx(struct efx_nic *efx)
+{
+ /* Schedule a reset to recover */
+ efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
+}
+
+static void falcon_b0_prepare_enable_fc_tx(struct efx_nic *efx)
+{
+ /* Recover by resetting the EM block */
+ falcon_stop_nic_stats(efx);
+ falcon_drain_tx_fifo(efx);
+ falcon_reconfigure_xmac(efx);
+ falcon_start_nic_stats(efx);
+}
+
/**************************************************************************
*
* PHY access via GMII
@@ -861,7 +1679,7 @@ static int falcon_probe_port(struct efx_nic *efx)
/* Allocate buffer for stats */
rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
- FALCON_MAC_STATS_SIZE);
+ FALCON_MAC_STATS_SIZE, GFP_KERNEL);
if (rc)
return rc;
netif_dbg(efx, probe, efx->net_dev,
@@ -869,7 +1687,6 @@ static int falcon_probe_port(struct efx_nic *efx)
(u64)efx->stats_buffer.dma_addr,
efx->stats_buffer.addr,
(u64)virt_to_phys(efx->stats_buffer.addr));
- nic_data->stats_dma_done = efx->stats_buffer.addr + XgDmaDone_offset;
return 0;
}
@@ -926,15 +1743,15 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
{
struct falcon_nic_data *nic_data = efx->nic_data;
struct falcon_nvconfig *nvconfig;
- struct efx_spi_device *spi;
+ struct falcon_spi_device *spi;
void *region;
int rc, magic_num, struct_ver;
__le16 *word, *limit;
u32 csum;
- if (efx_spi_present(&nic_data->spi_flash))
+ if (falcon_spi_present(&nic_data->spi_flash))
spi = &nic_data->spi_flash;
- else if (efx_spi_present(&nic_data->spi_eeprom))
+ else if (falcon_spi_present(&nic_data->spi_eeprom))
spi = &nic_data->spi_eeprom;
else
return -EINVAL;
@@ -949,7 +1766,7 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
mutex_unlock(&nic_data->spi_lock);
if (rc) {
netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
- efx_spi_present(&nic_data->spi_flash) ?
+ falcon_spi_present(&nic_data->spi_flash) ?
"flash" : "EEPROM");
rc = -EIO;
goto out;
@@ -998,7 +1815,7 @@ static int falcon_test_nvram(struct efx_nic *efx)
return falcon_read_nvram(efx, NULL);
}
-static const struct efx_nic_register_test falcon_b0_register_tests[] = {
+static const struct efx_farch_register_test falcon_b0_register_tests[] = {
{ FR_AZ_ADR_REGION,
EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
{ FR_AZ_RX_CFG,
@@ -1058,8 +1875,8 @@ falcon_b0_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
efx_reset_down(efx, reset_method);
tests->registers =
- efx_nic_test_registers(efx, falcon_b0_register_tests,
- ARRAY_SIZE(falcon_b0_register_tests))
+ efx_farch_test_registers(efx, falcon_b0_register_tests,
+ ARRAY_SIZE(falcon_b0_register_tests))
? -1 : 1;
rc = falcon_reset_hw(efx, reset_method);
@@ -1078,8 +1895,7 @@ static enum reset_type falcon_map_reset_reason(enum reset_type reason)
{
switch (reason) {
case RESET_TYPE_RX_RECOVERY:
- case RESET_TYPE_RX_DESC_FETCH:
- case RESET_TYPE_TX_DESC_FETCH:
+ case RESET_TYPE_DMA_ERROR:
case RESET_TYPE_TX_SKIP:
/* These can occasionally occur due to hardware bugs.
* We try to reset without disrupting the link.
@@ -1294,7 +2110,7 @@ static int falcon_reset_sram(struct efx_nic *efx)
}
static void falcon_spi_device_init(struct efx_nic *efx,
- struct efx_spi_device *spi_device,
+ struct falcon_spi_device *spi_device,
unsigned int device_id, u32 device_type)
{
if (device_type != 0) {
@@ -1360,10 +2176,11 @@ out:
return rc;
}
-static void falcon_dimension_resources(struct efx_nic *efx)
+static int falcon_dimension_resources(struct efx_nic *efx)
{
efx->rx_dc_base = 0x20000;
efx->tx_dc_base = 0x26000;
+ return 0;
}
/* Probe all SPI devices on the NIC */
@@ -1410,6 +2227,20 @@ static void falcon_probe_spi_devices(struct efx_nic *efx)
large_eeprom_type);
}
+static unsigned int falcon_a1_mem_map_size(struct efx_nic *efx)
+{
+ return 0x20000;
+}
+
+static unsigned int falcon_b0_mem_map_size(struct efx_nic *efx)
+{
+ /* Map everything up to and including the RSS indirection table.
+ * The PCI core takes care of mapping the MSI-X tables.
+ */
+ return FR_BZ_RX_INDIRECTION_TBL +
+ FR_BZ_RX_INDIRECTION_TBL_STEP * FR_BZ_RX_INDIRECTION_TBL_ROWS;
+}
+
static int falcon_probe_nic(struct efx_nic *efx)
{
struct falcon_nic_data *nic_data;
@@ -1424,7 +2255,7 @@ static int falcon_probe_nic(struct efx_nic *efx)
rc = -ENODEV;
- if (efx_nic_fpga_ver(efx) != 0) {
+ if (efx_farch_fpga_ver(efx) != 0) {
netif_err(efx, probe, efx->net_dev,
"Falcon FPGA not supported\n");
goto fail1;
@@ -1478,7 +2309,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
}
/* Allocate memory for INT_KER */
- rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
+ rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
+ GFP_KERNEL);
if (rc)
goto fail4;
BUG_ON(efx->irq_status.dma_addr & 0x0f);
@@ -1499,6 +2331,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
goto fail5;
}
+ efx->max_channels = (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? 4 :
+ EFX_MAX_CHANNELS);
efx->timer_quantum_ns = 4968; /* 621 cycles */
/* Initialise I2C adapter */
@@ -1657,7 +2491,7 @@ static int falcon_init_nic(struct efx_nic *efx)
efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
}
- efx_nic_init_common(efx);
+ efx_farch_init_common(efx);
return 0;
}
@@ -1688,24 +2522,65 @@ static void falcon_remove_nic(struct efx_nic *efx)
efx->nic_data = NULL;
}
-static void falcon_update_nic_stats(struct efx_nic *efx)
+static size_t falcon_describe_nic_stats(struct efx_nic *efx, u8 *names)
+{
+ return efx_nic_describe_stats(falcon_stat_desc, FALCON_STAT_COUNT,
+ falcon_stat_mask, names);
+}
+
+static size_t falcon_update_nic_stats(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
{
struct falcon_nic_data *nic_data = efx->nic_data;
+ u64 *stats = nic_data->stats;
efx_oword_t cnt;
- if (nic_data->stats_disable_count)
- return;
+ if (!nic_data->stats_disable_count) {
+ efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
+ stats[FALCON_STAT_rx_nodesc_drop_cnt] +=
+ EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
+
+ if (nic_data->stats_pending &&
+ FALCON_XMAC_STATS_DMA_FLAG(efx)) {
+ nic_data->stats_pending = false;
+ rmb(); /* read the done flag before the stats */
+ efx_nic_update_stats(
+ falcon_stat_desc, FALCON_STAT_COUNT,
+ falcon_stat_mask,
+ stats, efx->stats_buffer.addr, true);
+ }
- efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
- efx->n_rx_nodesc_drop_cnt +=
- EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
+ /* Update derived statistic */
+ efx_update_diff_stat(&stats[FALCON_STAT_rx_bad_bytes],
+ stats[FALCON_STAT_rx_bytes] -
+ stats[FALCON_STAT_rx_good_bytes] -
+ stats[FALCON_STAT_rx_control] * 64);
+ }
- if (nic_data->stats_pending &&
- *nic_data->stats_dma_done == FALCON_STATS_DONE) {
- nic_data->stats_pending = false;
- rmb(); /* read the done flag before the stats */
- falcon_update_stats_xmac(efx);
+ if (full_stats)
+ memcpy(full_stats, stats, sizeof(u64) * FALCON_STAT_COUNT);
+
+ if (core_stats) {
+ core_stats->rx_packets = stats[FALCON_STAT_rx_packets];
+ core_stats->tx_packets = stats[FALCON_STAT_tx_packets];
+ core_stats->rx_bytes = stats[FALCON_STAT_rx_bytes];
+ core_stats->tx_bytes = stats[FALCON_STAT_tx_bytes];
+ core_stats->rx_dropped = stats[FALCON_STAT_rx_nodesc_drop_cnt];
+ core_stats->multicast = stats[FALCON_STAT_rx_multicast];
+ core_stats->rx_length_errors =
+ stats[FALCON_STAT_rx_gtjumbo] +
+ stats[FALCON_STAT_rx_length_error];
+ core_stats->rx_crc_errors = stats[FALCON_STAT_rx_bad];
+ core_stats->rx_frame_errors = stats[FALCON_STAT_rx_align_error];
+ core_stats->rx_fifo_errors = stats[FALCON_STAT_rx_overflow];
+
+ core_stats->rx_errors = (core_stats->rx_length_errors +
+ core_stats->rx_crc_errors +
+ core_stats->rx_frame_errors +
+ stats[FALCON_STAT_rx_symbol_error]);
}
+
+ return FALCON_STAT_COUNT;
}
void falcon_start_nic_stats(struct efx_nic *efx)
@@ -1734,7 +2609,7 @@ void falcon_stop_nic_stats(struct efx_nic *efx)
/* Wait enough time for the most recent transfer to
* complete. */
for (i = 0; i < 4 && nic_data->stats_pending; i++) {
- if (*nic_data->stats_dma_done == FALCON_STATS_DONE)
+ if (FALCON_XMAC_STATS_DMA_FLAG(efx))
break;
msleep(1);
}
@@ -1778,11 +2653,12 @@ static int falcon_set_wol(struct efx_nic *efx, u32 type)
*/
const struct efx_nic_type falcon_a1_nic_type = {
+ .mem_map_size = falcon_a1_mem_map_size,
.probe = falcon_probe_nic,
.remove = falcon_remove_nic,
.init = falcon_init_nic,
.dimension_resources = falcon_dimension_resources,
- .fini = efx_port_dummy_op_void,
+ .fini = falcon_irq_ack_a1,
.monitor = falcon_monitor,
.map_reset_reason = falcon_map_reset_reason,
.map_reset_flags = falcon_map_reset_flags,
@@ -1790,23 +2666,71 @@ const struct efx_nic_type falcon_a1_nic_type = {
.probe_port = falcon_probe_port,
.remove_port = falcon_remove_port,
.handle_global_event = falcon_handle_global_event,
+ .fini_dmaq = efx_farch_fini_dmaq,
.prepare_flush = falcon_prepare_flush,
.finish_flush = efx_port_dummy_op_void,
+ .describe_stats = falcon_describe_nic_stats,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
.stop_stats = falcon_stop_nic_stats,
.set_id_led = falcon_set_id_led,
.push_irq_moderation = falcon_push_irq_moderation,
.reconfigure_port = falcon_reconfigure_port,
+ .prepare_enable_fc_tx = falcon_a1_prepare_enable_fc_tx,
.reconfigure_mac = falcon_reconfigure_xmac,
.check_mac_fault = falcon_xmac_check_fault,
.get_wol = falcon_get_wol,
.set_wol = falcon_set_wol,
.resume_wol = efx_port_dummy_op_void,
.test_nvram = falcon_test_nvram,
+ .irq_enable_master = efx_farch_irq_enable_master,
+ .irq_test_generate = efx_farch_irq_test_generate,
+ .irq_disable_non_ev = efx_farch_irq_disable_master,
+ .irq_handle_msi = efx_farch_msi_interrupt,
+ .irq_handle_legacy = falcon_legacy_interrupt_a1,
+ .tx_probe = efx_farch_tx_probe,
+ .tx_init = efx_farch_tx_init,
+ .tx_remove = efx_farch_tx_remove,
+ .tx_write = efx_farch_tx_write,
+ .rx_push_indir_table = efx_farch_rx_push_indir_table,
+ .rx_probe = efx_farch_rx_probe,
+ .rx_init = efx_farch_rx_init,
+ .rx_remove = efx_farch_rx_remove,
+ .rx_write = efx_farch_rx_write,
+ .rx_defer_refill = efx_farch_rx_defer_refill,
+ .ev_probe = efx_farch_ev_probe,
+ .ev_init = efx_farch_ev_init,
+ .ev_fini = efx_farch_ev_fini,
+ .ev_remove = efx_farch_ev_remove,
+ .ev_process = efx_farch_ev_process,
+ .ev_read_ack = efx_farch_ev_read_ack,
+ .ev_test_generate = efx_farch_ev_test_generate,
+
+ /* We don't expose the filter table on Falcon A1 as it is not
+ * mapped into function 0, but these implementations still
+ * work with a degenerate case of all tables set to size 0.
+ */
+ .filter_table_probe = efx_farch_filter_table_probe,
+ .filter_table_restore = efx_farch_filter_table_restore,
+ .filter_table_remove = efx_farch_filter_table_remove,
+ .filter_insert = efx_farch_filter_insert,
+ .filter_remove_safe = efx_farch_filter_remove_safe,
+ .filter_get_safe = efx_farch_filter_get_safe,
+ .filter_clear_rx = efx_farch_filter_clear_rx,
+ .filter_count_rx_used = efx_farch_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_farch_filter_get_rx_ids,
+
+#ifdef CONFIG_SFC_MTD
+ .mtd_probe = falcon_mtd_probe,
+ .mtd_rename = falcon_mtd_rename,
+ .mtd_read = falcon_mtd_read,
+ .mtd_erase = falcon_mtd_erase,
+ .mtd_write = falcon_mtd_write,
+ .mtd_sync = falcon_mtd_sync,
+#endif
.revision = EFX_REV_FALCON_A1,
- .mem_map_size = 0x20000,
.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
.rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
.buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
@@ -1816,12 +2740,13 @@ const struct efx_nic_type falcon_a1_nic_type = {
.rx_buffer_padding = 0x24,
.can_rx_scatter = false,
.max_interrupt_mode = EFX_INT_MODE_MSI,
- .phys_addr_channels = 4,
.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
.offload_features = NETIF_F_IP_CSUM,
+ .mcdi_max_ver = -1,
};
const struct efx_nic_type falcon_b0_nic_type = {
+ .mem_map_size = falcon_b0_mem_map_size,
.probe = falcon_probe_nic,
.remove = falcon_remove_nic,
.init = falcon_init_nic,
@@ -1834,14 +2759,17 @@ const struct efx_nic_type falcon_b0_nic_type = {
.probe_port = falcon_probe_port,
.remove_port = falcon_remove_port,
.handle_global_event = falcon_handle_global_event,
+ .fini_dmaq = efx_farch_fini_dmaq,
.prepare_flush = falcon_prepare_flush,
.finish_flush = efx_port_dummy_op_void,
+ .describe_stats = falcon_describe_nic_stats,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
.stop_stats = falcon_stop_nic_stats,
.set_id_led = falcon_set_id_led,
.push_irq_moderation = falcon_push_irq_moderation,
.reconfigure_port = falcon_reconfigure_port,
+ .prepare_enable_fc_tx = falcon_b0_prepare_enable_fc_tx,
.reconfigure_mac = falcon_reconfigure_xmac,
.check_mac_fault = falcon_xmac_check_fault,
.get_wol = falcon_get_wol,
@@ -1849,28 +2777,67 @@ const struct efx_nic_type falcon_b0_nic_type = {
.resume_wol = efx_port_dummy_op_void,
.test_chip = falcon_b0_test_chip,
.test_nvram = falcon_test_nvram,
+ .irq_enable_master = efx_farch_irq_enable_master,
+ .irq_test_generate = efx_farch_irq_test_generate,
+ .irq_disable_non_ev = efx_farch_irq_disable_master,
+ .irq_handle_msi = efx_farch_msi_interrupt,
+ .irq_handle_legacy = efx_farch_legacy_interrupt,
+ .tx_probe = efx_farch_tx_probe,
+ .tx_init = efx_farch_tx_init,
+ .tx_remove = efx_farch_tx_remove,
+ .tx_write = efx_farch_tx_write,
+ .rx_push_indir_table = efx_farch_rx_push_indir_table,
+ .rx_probe = efx_farch_rx_probe,
+ .rx_init = efx_farch_rx_init,
+ .rx_remove = efx_farch_rx_remove,
+ .rx_write = efx_farch_rx_write,
+ .rx_defer_refill = efx_farch_rx_defer_refill,
+ .ev_probe = efx_farch_ev_probe,
+ .ev_init = efx_farch_ev_init,
+ .ev_fini = efx_farch_ev_fini,
+ .ev_remove = efx_farch_ev_remove,
+ .ev_process = efx_farch_ev_process,
+ .ev_read_ack = efx_farch_ev_read_ack,
+ .ev_test_generate = efx_farch_ev_test_generate,
+ .filter_table_probe = efx_farch_filter_table_probe,
+ .filter_table_restore = efx_farch_filter_table_restore,
+ .filter_table_remove = efx_farch_filter_table_remove,
+ .filter_update_rx_scatter = efx_farch_filter_update_rx_scatter,
+ .filter_insert = efx_farch_filter_insert,
+ .filter_remove_safe = efx_farch_filter_remove_safe,
+ .filter_get_safe = efx_farch_filter_get_safe,
+ .filter_clear_rx = efx_farch_filter_clear_rx,
+ .filter_count_rx_used = efx_farch_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_farch_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_insert = efx_farch_filter_rfs_insert,
+ .filter_rfs_expire_one = efx_farch_filter_rfs_expire_one,
+#endif
+#ifdef CONFIG_SFC_MTD
+ .mtd_probe = falcon_mtd_probe,
+ .mtd_rename = falcon_mtd_rename,
+ .mtd_read = falcon_mtd_read,
+ .mtd_erase = falcon_mtd_erase,
+ .mtd_write = falcon_mtd_write,
+ .mtd_sync = falcon_mtd_sync,
+#endif
.revision = EFX_REV_FALCON_B0,
- /* Map everything up to and including the RSS indirection
- * table. Don't map MSI-X table, MSI-X PBA since Linux
- * requires that they not be mapped. */
- .mem_map_size = (FR_BZ_RX_INDIRECTION_TBL +
- FR_BZ_RX_INDIRECTION_TBL_STEP *
- FR_BZ_RX_INDIRECTION_TBL_ROWS),
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
- .rx_buffer_hash_size = 0x10,
+ .rx_prefix_size = FS_BZ_RX_PREFIX_SIZE,
+ .rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST,
.rx_buffer_padding = 0,
.can_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
- .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
- * interrupt handler only supports 32
- * channels */
.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
+ .mcdi_max_ver = -1,
+ .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
};
diff --git a/drivers/net/ethernet/sfc/falcon_boards.c b/drivers/net/ethernet/sfc/falcon_boards.c
index ec1e99d0dcad..1736f4b806af 100644
--- a/drivers/net/ethernet/sfc/falcon_boards.c
+++ b/drivers/net/ethernet/sfc/falcon_boards.c
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2010 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2007-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/ethernet/sfc/falcon_xmac.c b/drivers/net/ethernet/sfc/falcon_xmac.c
deleted file mode 100644
index 8333865d4c95..000000000000
--- a/drivers/net/ethernet/sfc/falcon_xmac.c
+++ /dev/null
@@ -1,362 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#include <linux/delay.h>
-#include "net_driver.h"
-#include "efx.h"
-#include "nic.h"
-#include "regs.h"
-#include "io.h"
-#include "mdio_10g.h"
-#include "workarounds.h"
-
-/**************************************************************************
- *
- * MAC operations
- *
- *************************************************************************/
-
-/* Configure the XAUI driver that is an output from Falcon */
-void falcon_setup_xaui(struct efx_nic *efx)
-{
- efx_oword_t sdctl, txdrv;
-
- /* Move the XAUI into low power, unless there is no PHY, in
- * which case the XAUI will have to drive a cable. */
- if (efx->phy_type == PHY_TYPE_NONE)
- return;
-
- efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
- efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);
-
- EFX_POPULATE_OWORD_8(txdrv,
- FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
- FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
- FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
- FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
- FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
- FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
- FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
- FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
- efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
-}
-
-int falcon_reset_xaui(struct efx_nic *efx)
-{
- struct falcon_nic_data *nic_data = efx->nic_data;
- efx_oword_t reg;
- int count;
-
- /* Don't fetch MAC statistics over an XMAC reset */
- WARN_ON(nic_data->stats_disable_count == 0);
-
- /* Start reset sequence */
- EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
- efx_writeo(efx, &reg, FR_AB_XX_PWR_RST);
-
- /* Wait up to 10 ms for completion, then reinitialise */
- for (count = 0; count < 1000; count++) {
- efx_reado(efx, &reg, FR_AB_XX_PWR_RST);
- if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
- EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
- falcon_setup_xaui(efx);
- return 0;
- }
- udelay(10);
- }
- netif_err(efx, hw, efx->net_dev,
- "timed out waiting for XAUI/XGXS reset\n");
- return -ETIMEDOUT;
-}
-
-static void falcon_ack_status_intr(struct efx_nic *efx)
-{
- struct falcon_nic_data *nic_data = efx->nic_data;
- efx_oword_t reg;
-
- if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
- return;
-
- /* We expect xgmii faults if the wireside link is down */
- if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up)
- return;
-
- /* We can only use this interrupt to signal the negative edge of
- * xaui_align [we have to poll the positive edge]. */
- if (nic_data->xmac_poll_required)
- return;
-
- efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
-}
-
-static bool falcon_xgxs_link_ok(struct efx_nic *efx)
-{
- efx_oword_t reg;
- bool align_done, link_ok = false;
- int sync_status;
-
- /* Read link status */
- efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
-
- align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
- sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
- if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
- link_ok = true;
-
- /* Clear link status ready for next read */
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
- efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
-
- return link_ok;
-}
-
-static bool falcon_xmac_link_ok(struct efx_nic *efx)
-{
- /*
- * Check MAC's XGXS link status except when using XGMII loopback
- * which bypasses the XGXS block.
- * If possible, check PHY's XGXS link status except when using
- * MAC loopback.
- */
- return (efx->loopback_mode == LOOPBACK_XGMII ||
- falcon_xgxs_link_ok(efx)) &&
- (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
- LOOPBACK_INTERNAL(efx) ||
- efx_mdio_phyxgxs_lane_sync(efx));
-}
-
-static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
-{
- unsigned int max_frame_len;
- efx_oword_t reg;
- bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX);
- bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX);
-
- /* Configure MAC - cut-thru mode is hard wired on */
- EFX_POPULATE_OWORD_3(reg,
- FRF_AB_XM_RX_JUMBO_MODE, 1,
- FRF_AB_XM_TX_STAT_EN, 1,
- FRF_AB_XM_RX_STAT_EN, 1);
- efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
-
- /* Configure TX */
- EFX_POPULATE_OWORD_6(reg,
- FRF_AB_XM_TXEN, 1,
- FRF_AB_XM_TX_PRMBL, 1,
- FRF_AB_XM_AUTO_PAD, 1,
- FRF_AB_XM_TXCRC, 1,
- FRF_AB_XM_FCNTL, tx_fc,
- FRF_AB_XM_IPG, 0x3);
- efx_writeo(efx, &reg, FR_AB_XM_TX_CFG);
-
- /* Configure RX */
- EFX_POPULATE_OWORD_5(reg,
- FRF_AB_XM_RXEN, 1,
- FRF_AB_XM_AUTO_DEPAD, 0,
- FRF_AB_XM_ACPT_ALL_MCAST, 1,
- FRF_AB_XM_ACPT_ALL_UCAST, efx->promiscuous,
- FRF_AB_XM_PASS_CRC_ERR, 1);
- efx_writeo(efx, &reg, FR_AB_XM_RX_CFG);
-
- /* Set frame length */
- max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
- EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
- efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
- EFX_POPULATE_OWORD_2(reg,
- FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
- FRF_AB_XM_TX_JUMBO_MODE, 1);
- efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM);
-
- EFX_POPULATE_OWORD_2(reg,
- FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
- FRF_AB_XM_DIS_FCNTL, !rx_fc);
- efx_writeo(efx, &reg, FR_AB_XM_FC);
-
- /* Set MAC address */
- memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
- efx_writeo(efx, &reg, FR_AB_XM_ADR_LO);
- memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
- efx_writeo(efx, &reg, FR_AB_XM_ADR_HI);
-}
-
-static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
-{
- efx_oword_t reg;
- bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
- bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
- bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
-
- /* XGXS block is flaky and will need to be reset if moving
- * into our out of XGMII, XGXS or XAUI loopbacks. */
- if (EFX_WORKAROUND_5147(efx)) {
- bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
- bool reset_xgxs;
-
- efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
- old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
- old_xgmii_loopback =
- EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);
-
- efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
- old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);
-
- /* The PHY driver may have turned XAUI off */
- reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) ||
- (xaui_loopback != old_xaui_loopback) ||
- (xgmii_loopback != old_xgmii_loopback));
-
- if (reset_xgxs)
- falcon_reset_xaui(efx);
- }
-
- efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
- (xgxs_loopback || xaui_loopback) ?
- FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
- efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
-
- efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
- efx_writeo(efx, &reg, FR_AB_XX_SD_CTL);
-}
-
-
-/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
-static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
-{
- bool mac_up = falcon_xmac_link_ok(efx);
-
- if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
- efx_phy_mode_disabled(efx->phy_mode))
- /* XAUI link is expected to be down */
- return mac_up;
-
- falcon_stop_nic_stats(efx);
-
- while (!mac_up && tries) {
- netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n");
- falcon_reset_xaui(efx);
- udelay(200);
-
- mac_up = falcon_xmac_link_ok(efx);
- --tries;
- }
-
- falcon_start_nic_stats(efx);
-
- return mac_up;
-}
-
-bool falcon_xmac_check_fault(struct efx_nic *efx)
-{
- return !falcon_xmac_link_ok_retry(efx, 5);
-}
-
-int falcon_reconfigure_xmac(struct efx_nic *efx)
-{
- struct falcon_nic_data *nic_data = efx->nic_data;
-
- falcon_reconfigure_xgxs_core(efx);
- falcon_reconfigure_xmac_core(efx);
-
- falcon_reconfigure_mac_wrapper(efx);
-
- nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
- falcon_ack_status_intr(efx);
-
- return 0;
-}
-
-void falcon_update_stats_xmac(struct efx_nic *efx)
-{
- struct efx_mac_stats *mac_stats = &efx->mac_stats;
-
- /* Update MAC stats from DMAed values */
- FALCON_STAT(efx, XgRxOctets, rx_bytes);
- FALCON_STAT(efx, XgRxOctetsOK, rx_good_bytes);
- FALCON_STAT(efx, XgRxPkts, rx_packets);
- FALCON_STAT(efx, XgRxPktsOK, rx_good);
- FALCON_STAT(efx, XgRxBroadcastPkts, rx_broadcast);
- FALCON_STAT(efx, XgRxMulticastPkts, rx_multicast);
- FALCON_STAT(efx, XgRxUnicastPkts, rx_unicast);
- FALCON_STAT(efx, XgRxUndersizePkts, rx_lt64);
- FALCON_STAT(efx, XgRxOversizePkts, rx_gtjumbo);
- FALCON_STAT(efx, XgRxJabberPkts, rx_bad_gtjumbo);
- FALCON_STAT(efx, XgRxUndersizeFCSerrorPkts, rx_bad_lt64);
- FALCON_STAT(efx, XgRxDropEvents, rx_overflow);
- FALCON_STAT(efx, XgRxFCSerrorPkts, rx_bad);
- FALCON_STAT(efx, XgRxAlignError, rx_align_error);
- FALCON_STAT(efx, XgRxSymbolError, rx_symbol_error);
- FALCON_STAT(efx, XgRxInternalMACError, rx_internal_error);
- FALCON_STAT(efx, XgRxControlPkts, rx_control);
- FALCON_STAT(efx, XgRxPausePkts, rx_pause);
- FALCON_STAT(efx, XgRxPkts64Octets, rx_64);
- FALCON_STAT(efx, XgRxPkts65to127Octets, rx_65_to_127);
- FALCON_STAT(efx, XgRxPkts128to255Octets, rx_128_to_255);
- FALCON_STAT(efx, XgRxPkts256to511Octets, rx_256_to_511);
- FALCON_STAT(efx, XgRxPkts512to1023Octets, rx_512_to_1023);
- FALCON_STAT(efx, XgRxPkts1024to15xxOctets, rx_1024_to_15xx);
- FALCON_STAT(efx, XgRxPkts15xxtoMaxOctets, rx_15xx_to_jumbo);
- FALCON_STAT(efx, XgRxLengthError, rx_length_error);
- FALCON_STAT(efx, XgTxPkts, tx_packets);
- FALCON_STAT(efx, XgTxOctets, tx_bytes);
- FALCON_STAT(efx, XgTxMulticastPkts, tx_multicast);
- FALCON_STAT(efx, XgTxBroadcastPkts, tx_broadcast);
- FALCON_STAT(efx, XgTxUnicastPkts, tx_unicast);
- FALCON_STAT(efx, XgTxControlPkts, tx_control);
- FALCON_STAT(efx, XgTxPausePkts, tx_pause);
- FALCON_STAT(efx, XgTxPkts64Octets, tx_64);
- FALCON_STAT(efx, XgTxPkts65to127Octets, tx_65_to_127);
- FALCON_STAT(efx, XgTxPkts128to255Octets, tx_128_to_255);
- FALCON_STAT(efx, XgTxPkts256to511Octets, tx_256_to_511);
- FALCON_STAT(efx, XgTxPkts512to1023Octets, tx_512_to_1023);
- FALCON_STAT(efx, XgTxPkts1024to15xxOctets, tx_1024_to_15xx);
- FALCON_STAT(efx, XgTxPkts1519toMaxOctets, tx_15xx_to_jumbo);
- FALCON_STAT(efx, XgTxUndersizePkts, tx_lt64);
- FALCON_STAT(efx, XgTxOversizePkts, tx_gtjumbo);
- FALCON_STAT(efx, XgTxNonTcpUdpPkt, tx_non_tcpudp);
- FALCON_STAT(efx, XgTxMacSrcErrPkt, tx_mac_src_error);
- FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error);
-
- /* Update derived statistics */
- efx_update_diff_stat(&mac_stats->tx_good_bytes,
- mac_stats->tx_bytes - mac_stats->tx_bad_bytes -
- mac_stats->tx_control * 64);
- efx_update_diff_stat(&mac_stats->rx_bad_bytes,
- mac_stats->rx_bytes - mac_stats->rx_good_bytes -
- mac_stats->rx_control * 64);
-}
-
-void falcon_poll_xmac(struct efx_nic *efx)
-{
- struct falcon_nic_data *nic_data = efx->nic_data;
-
- if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up ||
- !nic_data->xmac_poll_required)
- return;
-
- nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
- falcon_ack_status_intr(efx);
-}
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
new file mode 100644
index 000000000000..c0907d884d75
--- /dev/null
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -0,0 +1,2942 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/crc32.h>
+#include "net_driver.h"
+#include "bitfield.h"
+#include "efx.h"
+#include "nic.h"
+#include "farch_regs.h"
+#include "io.h"
+#include "workarounds.h"
+
+/* Falcon-architecture (SFC4000 and SFC9000-family) support */
+
+/**************************************************************************
+ *
+ * Configurable values
+ *
+ **************************************************************************
+ */
+
+/* This is set to 16 for a good reason. In summary, if larger than
+ * 16, the descriptor cache holds more than a default socket
+ * buffer's worth of packets (for UDP we can only have at most one
+ * socket buffer's worth outstanding). This combined with the fact
+ * that we only get 1 TX event per descriptor cache means the NIC
+ * goes idle.
+ */
+#define TX_DC_ENTRIES 16
+#define TX_DC_ENTRIES_ORDER 1
+
+#define RX_DC_ENTRIES 64
+#define RX_DC_ENTRIES_ORDER 3
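+
+/* The _ORDER values encode the cache sizes for the corresponding
+ * descriptor-cache size register fields as (8 << order) entries:
+ * 8 << 1 == 16 and 8 << 3 == 64.
+ */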
+
+/* If EFX_MAX_INT_ERRORS internal errors occur within
+ * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
+ * disable it.
+ */
+#define EFX_INT_ERROR_EXPIRE 3600
+#define EFX_MAX_INT_ERRORS 5
+
+/* Depth of RX flush request fifo */
+#define EFX_RX_FLUSH_COUNT 4
+
+/* Driver generated events */
+#define _EFX_CHANNEL_MAGIC_TEST 0x000101
+#define _EFX_CHANNEL_MAGIC_FILL 0x000102
+#define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103
+#define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104
+
+#define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data))
+#define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8)
+
+#define EFX_CHANNEL_MAGIC_TEST(_channel) \
+ _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
+#define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \
+ _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \
+ efx_rx_queue_index(_rx_queue))
+#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \
+ _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \
+ efx_rx_queue_index(_rx_queue))
+#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \
+ _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
+ (_tx_queue)->queue)
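+
+/* e.g. EFX_CHANNEL_MAGIC_TEST() on channel 3 encodes to
+ * (0x000101 << 8) | 3 == 0x00010103, from which
+ * _EFX_CHANNEL_MAGIC_CODE() recovers the code 0x000101.
+ */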
+
+static void efx_farch_magic_event(struct efx_channel *channel, u32 magic);
+
+/**************************************************************************
+ *
+ * Hardware access
+ *
+ **************************************************************************/
+
+static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
+ unsigned int index)
+{
+ efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
+ value, index);
+}
+
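+/* Return true if a and b differ in any bit position covered by mask */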
+static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
+ const efx_oword_t *mask)
+{
+ return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
+ ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
+}
+
+int efx_farch_test_registers(struct efx_nic *efx,
+ const struct efx_farch_register_test *regs,
+ size_t n_regs)
+{
+ unsigned address = 0, i, j;
+ efx_oword_t mask, imask, original, reg, buf;
+
+ for (i = 0; i < n_regs; ++i) {
+ address = regs[i].address;
+ mask = imask = regs[i].mask;
+ EFX_INVERT_OWORD(imask);
+
+ efx_reado(efx, &original, address);
+
+ /* bit sweep on and off */
+ for (j = 0; j < 128; j++) {
+ if (!EFX_EXTRACT_OWORD32(mask, j, j))
+ continue;
+
+ /* Test this testable bit can be set in isolation */
+ EFX_AND_OWORD(reg, original, mask);
+ EFX_SET_OWORD32(reg, j, j, 1);
+
+ efx_writeo(efx, &reg, address);
+ efx_reado(efx, &buf, address);
+
+ if (efx_masked_compare_oword(&reg, &buf, &mask))
+ goto fail;
+
+ /* Test this testable bit can be cleared in isolation */
+ EFX_OR_OWORD(reg, original, mask);
+ EFX_SET_OWORD32(reg, j, j, 0);
+
+ efx_writeo(efx, &reg, address);
+ efx_reado(efx, &buf, address);
+
+ if (efx_masked_compare_oword(&reg, &buf, &mask))
+ goto fail;
+ }
+
+ efx_writeo(efx, &original, address);
+ }
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev,
+ "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
+ " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
+ EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
+ return -EIO;
+}
+
+/**************************************************************************
+ *
+ * Special buffer handling
+ * Special buffers are used for event queues and the TX and RX
+ * descriptor rings.
+ *
+ *************************************************************************/
+
+/*
+ * Initialise a special buffer
+ *
+ * This will define a buffer (previously allocated via
+ * efx_alloc_special_buffer()) in the buffer table, allowing
+ * it to be used for event queues, descriptor rings etc.
+ */
+static void
+efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
+{
+ efx_qword_t buf_desc;
+ unsigned int index;
+ dma_addr_t dma_addr;
+ int i;
+
+ EFX_BUG_ON_PARANOID(!buffer->buf.addr);
+
+ /* Write buffer descriptors to NIC */
+ for (i = 0; i < buffer->entries; i++) {
+ index = buffer->index + i;
+ dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE);
+ netif_dbg(efx, probe, efx->net_dev,
+ "mapping special buffer %d at %llx\n",
+ index, (unsigned long long)dma_addr);
+ EFX_POPULATE_QWORD_3(buf_desc,
+ FRF_AZ_BUF_ADR_REGION, 0,
+ FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
+ FRF_AZ_BUF_OWNER_ID_FBUF, 0);
+ efx_write_buf_tbl(efx, &buf_desc, index);
+ }
+}
+
+/* Unmaps a buffer and clears the buffer table entries */
+static void
+efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
+{
+ efx_oword_t buf_tbl_upd;
+ unsigned int start = buffer->index;
+ unsigned int end = (buffer->index + buffer->entries - 1);
+
+ if (!buffer->entries)
+ return;
+
+ netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
+ buffer->index, buffer->index + buffer->entries - 1);
+
+ EFX_POPULATE_OWORD_4(buf_tbl_upd,
+ FRF_AZ_BUF_UPD_CMD, 0,
+ FRF_AZ_BUF_CLR_CMD, 1,
+ FRF_AZ_BUF_CLR_END_ID, end,
+ FRF_AZ_BUF_CLR_START_ID, start);
+ efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
+}
+
+/*
+ * Allocate a new special buffer
+ *
+ * This allocates memory for a new buffer, clears it and allocates a
+ * new buffer ID range. It does not write into the buffer table.
+ *
+ * This call will allocate 4KB buffers, since 8KB buffers can't be
+ * used for event queues and descriptor rings.
+ */
+static int efx_alloc_special_buffer(struct efx_nic *efx,
+ struct efx_special_buffer *buffer,
+ unsigned int len)
+{
+ len = ALIGN(len, EFX_BUF_SIZE);
+
+ if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
+ return -ENOMEM;
+ buffer->entries = len / EFX_BUF_SIZE;
+ BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1));
+
+ /* Select new buffer ID */
+ buffer->index = efx->next_buffer_table;
+ efx->next_buffer_table += buffer->entries;
+#ifdef CONFIG_SFC_SRIOV
+ BUG_ON(efx_sriov_enabled(efx) &&
+ efx->vf_buftbl_base < efx->next_buffer_table);
+#endif
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "allocating special buffers %d-%d at %llx+%x "
+ "(virt %p phys %llx)\n", buffer->index,
+ buffer->index + buffer->entries - 1,
+ (u64)buffer->buf.dma_addr, len,
+ buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
+
+ return 0;
+}
+
+static void
+efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
+{
+ if (!buffer->buf.addr)
+ return;
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "deallocating special buffers %d-%d at %llx+%x "
+ "(virt %p phys %llx)\n", buffer->index,
+ buffer->index + buffer->entries - 1,
+ (u64)buffer->buf.dma_addr, buffer->buf.len,
+ buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
+
+ efx_nic_free_buffer(efx, &buffer->buf);
+ buffer->entries = 0;
+}
+
+/**************************************************************************
+ *
+ * TX path
+ *
+ **************************************************************************/
+
+/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
+static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue)
+{
+ unsigned write_ptr;
+ efx_dword_t reg;
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
+ efx_writed_page(tx_queue->efx, &reg,
+ FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
+}
+
+/* Write pointer and first descriptor for TX descriptor ring */
+static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
+ const efx_qword_t *txd)
+{
+ unsigned write_ptr;
+ efx_oword_t reg;
+
+ BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
+ BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
+ FRF_AZ_TX_DESC_WPTR, write_ptr);
+ reg.qword[0] = *txd;
+ efx_writeo_page(tx_queue->efx, &reg,
+ FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
+}
+
+
+/* For each entry inserted into the software descriptor ring, create a
+ * descriptor in the hardware TX descriptor ring (in host memory), and
+ * write a doorbell.
+ */
+void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
+{
+ struct efx_tx_buffer *buffer;
+ efx_qword_t *txd;
+ unsigned write_ptr;
+ unsigned old_write_count = tx_queue->write_count;
+
+ BUG_ON(tx_queue->write_count == tx_queue->insert_count);
+
+ do {
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ buffer = &tx_queue->buffer[write_ptr];
+ txd = efx_tx_desc(tx_queue, write_ptr);
+ ++tx_queue->write_count;
+
+ EFX_BUG_ON_PARANOID(buffer->flags & EFX_TX_BUF_OPTION);
+
+ /* Create TX descriptor ring entry */
+ BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
+ EFX_POPULATE_QWORD_4(*txd,
+ FSF_AZ_TX_KER_CONT,
+ buffer->flags & EFX_TX_BUF_CONT,
+ FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
+ FSF_AZ_TX_KER_BUF_REGION, 0,
+ FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
+ } while (tx_queue->write_count != tx_queue->insert_count);
+
+ wmb(); /* Ensure descriptors are written before they are fetched */
+
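+ /* Pushing the first descriptor inline with the doorbell write
+  * saves the NIC a separate descriptor fetch when the queue was
+  * previously empty; otherwise just advance the write pointer.
+  */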
+ if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
+ txd = efx_tx_desc(tx_queue,
+ old_write_count & tx_queue->ptr_mask);
+ efx_farch_push_tx_desc(tx_queue, txd);
+ ++tx_queue->pushes;
+ } else {
+ efx_farch_notify_tx_desc(tx_queue);
+ }
+}
+
+/* Allocate hardware resources for a TX queue */
+int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ unsigned entries;
+
+ entries = tx_queue->ptr_mask + 1;
+ return efx_alloc_special_buffer(efx, &tx_queue->txd,
+ entries * sizeof(efx_qword_t));
+}
+
+void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ efx_oword_t reg;
+
+ /* Pin TX descriptor ring */
+ efx_init_special_buffer(efx, &tx_queue->txd);
+
+ /* Push TX descriptor ring to card */
+ EFX_POPULATE_OWORD_10(reg,
+ FRF_AZ_TX_DESCQ_EN, 1,
+ FRF_AZ_TX_ISCSI_DDIG_EN, 0,
+ FRF_AZ_TX_ISCSI_HDIG_EN, 0,
+ FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
+ FRF_AZ_TX_DESCQ_EVQ_ID,
+ tx_queue->channel->channel,
+ FRF_AZ_TX_DESCQ_OWNER_ID, 0,
+ FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
+ FRF_AZ_TX_DESCQ_SIZE,
+ __ffs(tx_queue->txd.entries),
+ FRF_AZ_TX_DESCQ_TYPE, 0,
+ FRF_BZ_TX_NON_IP_DROP_DIS, 1);
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
+ !csum);
+ }
+
+ efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
+ tx_queue->queue);
+
+ if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
+ /* Only 128 bits in this register */
+ BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
+
+ efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
+ if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
+ __clear_bit_le(tx_queue->queue, &reg);
+ else
+ __set_bit_le(tx_queue->queue, &reg);
+ efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
+ }
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ EFX_POPULATE_OWORD_1(reg,
+ FRF_BZ_TX_PACE,
+ (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+ FFE_BZ_TX_PACE_OFF :
+ FFE_BZ_TX_PACE_RESERVED);
+ efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
+ tx_queue->queue);
+ }
+}
+
+static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ efx_oword_t tx_flush_descq;
+
+ WARN_ON(atomic_read(&tx_queue->flush_outstanding));
+ atomic_set(&tx_queue->flush_outstanding, 1);
+
+ EFX_POPULATE_OWORD_2(tx_flush_descq,
+ FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
+ efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
+}
+
+void efx_farch_tx_fini(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ efx_oword_t tx_desc_ptr;
+
+ /* Remove TX descriptor ring from card */
+ EFX_ZERO_OWORD(tx_desc_ptr);
+ efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
+ tx_queue->queue);
+
+ /* Unpin TX descriptor ring */
+ efx_fini_special_buffer(efx, &tx_queue->txd);
+}
+
+/* Free buffers backing TX queue */
+void efx_farch_tx_remove(struct efx_tx_queue *tx_queue)
+{
+ efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
+}
+
+/**************************************************************************
+ *
+ * RX path
+ *
+ **************************************************************************/
+
+/* This creates an entry in the RX descriptor queue */
+static inline void
+efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
+{
+ struct efx_rx_buffer *rx_buf;
+ efx_qword_t *rxd;
+
+ rxd = efx_rx_desc(rx_queue, index);
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ EFX_POPULATE_QWORD_3(*rxd,
+ FSF_AZ_RX_KER_BUF_SIZE,
+ rx_buf->len -
+ rx_queue->efx->type->rx_buffer_padding,
+ FSF_AZ_RX_KER_BUF_REGION, 0,
+ FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
+}
+
+/* This writes to the RX_DESC_WPTR register for the specified receive
+ * descriptor ring.
+ */
+void efx_farch_rx_write(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ efx_dword_t reg;
+ unsigned write_ptr;
+
+ while (rx_queue->notified_count != rx_queue->added_count) {
+ efx_farch_build_rx_desc(
+ rx_queue,
+ rx_queue->notified_count & rx_queue->ptr_mask);
+ ++rx_queue->notified_count;
+ }
+
+ wmb();
+ write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
+ EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
+ efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
+ efx_rx_queue_index(rx_queue));
+}
+
+int efx_farch_rx_probe(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned entries;
+
+ entries = rx_queue->ptr_mask + 1;
+ return efx_alloc_special_buffer(efx, &rx_queue->rxd,
+ entries * sizeof(efx_qword_t));
+}
+
+void efx_farch_rx_init(struct efx_rx_queue *rx_queue)
+{
+ efx_oword_t rx_desc_ptr;
+ struct efx_nic *efx = rx_queue->efx;
+ bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
+ bool iscsi_digest_en = is_b0;
+ bool jumbo_en;
+
+ /* For kernel-mode queues in Falcon A1, the JUMBO flag enables
+ * DMA to continue after a PCIe page boundary (and scattering
+ * is not possible). In Falcon B0 and Siena, it enables
+ * scatter.
+ */
+ jumbo_en = !is_b0 || efx->rx_scatter;
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "RX queue %d ring in special buffers %d-%d\n",
+ efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
+ rx_queue->rxd.index + rx_queue->rxd.entries - 1);
+
+ rx_queue->scatter_n = 0;
+
+ /* Pin RX descriptor ring */
+ efx_init_special_buffer(efx, &rx_queue->rxd);
+
+ /* Push RX descriptor ring to card */
+ EFX_POPULATE_OWORD_10(rx_desc_ptr,
+ FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
+ FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
+ FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
+ FRF_AZ_RX_DESCQ_EVQ_ID,
+ efx_rx_queue_channel(rx_queue)->channel,
+ FRF_AZ_RX_DESCQ_OWNER_ID, 0,
+ FRF_AZ_RX_DESCQ_LABEL,
+ efx_rx_queue_index(rx_queue),
+ FRF_AZ_RX_DESCQ_SIZE,
+ __ffs(rx_queue->rxd.entries),
+ FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
+ FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
+ FRF_AZ_RX_DESCQ_EN, 1);
+ efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
+ efx_rx_queue_index(rx_queue));
+}
+
+static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ efx_oword_t rx_flush_descq;
+
+ EFX_POPULATE_OWORD_2(rx_flush_descq,
+ FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_RX_FLUSH_DESCQ,
+ efx_rx_queue_index(rx_queue));
+ efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
+}
+
+void efx_farch_rx_fini(struct efx_rx_queue *rx_queue)
+{
+ efx_oword_t rx_desc_ptr;
+ struct efx_nic *efx = rx_queue->efx;
+
+ /* Remove RX descriptor ring from card */
+ EFX_ZERO_OWORD(rx_desc_ptr);
+ efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
+ efx_rx_queue_index(rx_queue));
+
+ /* Unpin RX descriptor ring */
+ efx_fini_special_buffer(efx, &rx_queue->rxd);
+}
+
+/* Free buffers backing RX queue */
+void efx_farch_rx_remove(struct efx_rx_queue *rx_queue)
+{
+ efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
+}
+
+/**************************************************************************
+ *
+ * Flush handling
+ *
+ **************************************************************************/
+
+/* efx_farch_do_flush() must be woken up when all flushes are completed,
+ * or when more RX flushes can be kicked off.
+ */
+static bool efx_farch_flush_wake(struct efx_nic *efx)
+{
+ /* Ensure that all updates are visible to efx_farch_do_flush() */
+ smp_mb();
+
+ return (atomic_read(&efx->active_queues) == 0 ||
+ (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
+ && atomic_read(&efx->rxq_flush_pending) > 0));
+}
+
+static bool efx_check_tx_flush_complete(struct efx_nic *efx)
+{
+ bool i = true;
+ efx_oword_t txd_ptr_tbl;
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ efx_reado_table(efx, &txd_ptr_tbl,
+ FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
+ if (EFX_OWORD_FIELD(txd_ptr_tbl,
+ FRF_AZ_TX_DESCQ_FLUSH) ||
+ EFX_OWORD_FIELD(txd_ptr_tbl,
+ FRF_AZ_TX_DESCQ_EN)) {
+ netif_dbg(efx, hw, efx->net_dev,
+ "flush did not complete on TXQ %d\n",
+ tx_queue->queue);
+ i = false;
+ } else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
+ 1, 0)) {
+ /* The flush is complete, but we didn't
+ * receive a flush completion event
+ */
+ netif_dbg(efx, hw, efx->net_dev,
+ "flush complete on TXQ %d, so drain "
+ "the queue\n", tx_queue->queue);
+ /* Don't need to increment active_queues as it
+ * has already been incremented for the queues
+ * which did not drain
+ */
+ efx_farch_magic_event(channel,
+ EFX_CHANNEL_MAGIC_TX_DRAIN(
+ tx_queue));
+ }
+ }
+ }
+
+ return i;
+}
+
+/* Flush all the transmit queues, and continue flushing receive queues until
+ * they're all flushed. Wait for the DRAIN events to be received so that there
+ * are no more RX and TX events left on any channel. */
+static int efx_farch_do_flush(struct efx_nic *efx)
+{
+ unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
+ struct efx_channel *channel;
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+ int rc = 0;
+
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ efx_farch_flush_tx_queue(tx_queue);
+ }
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ rx_queue->flush_pending = true;
+ atomic_inc(&efx->rxq_flush_pending);
+ }
+ }
+
+ while (timeout && atomic_read(&efx->active_queues) > 0) {
+ /* If SRIOV is enabled, then offload receive queue flushing to
+ * the firmware (though we will still have to poll for
+ * completion). If that fails, fall back to the old scheme.
+ */
+ if (efx_sriov_enabled(efx)) {
+ rc = efx_mcdi_flush_rxqs(efx);
+ if (!rc)
+ goto wait;
+ }
+
+ /* The hardware supports four concurrent rx flushes, each of
+ * which may need to be retried if there is an outstanding
+ * descriptor fetch
+ */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ if (atomic_read(&efx->rxq_flush_outstanding) >=
+ EFX_RX_FLUSH_COUNT)
+ break;
+
+ if (rx_queue->flush_pending) {
+ rx_queue->flush_pending = false;
+ atomic_dec(&efx->rxq_flush_pending);
+ atomic_inc(&efx->rxq_flush_outstanding);
+ efx_farch_flush_rx_queue(rx_queue);
+ }
+ }
+ }
+
+ wait:
+ timeout = wait_event_timeout(efx->flush_wq,
+ efx_farch_flush_wake(efx),
+ timeout);
+ }
+
+ if (atomic_read(&efx->active_queues) &&
+ !efx_check_tx_flush_complete(efx)) {
+ netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
+ "(rx %d+%d)\n", atomic_read(&efx->active_queues),
+ atomic_read(&efx->rxq_flush_outstanding),
+ atomic_read(&efx->rxq_flush_pending));
+ rc = -ETIMEDOUT;
+
+ atomic_set(&efx->active_queues, 0);
+ atomic_set(&efx->rxq_flush_pending, 0);
+ atomic_set(&efx->rxq_flush_outstanding, 0);
+ }
+
+ return rc;
+}
+
+int efx_farch_fini_dmaq(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ int rc = 0;
+
+ /* Do not attempt to write to the NIC during EEH recovery */
+ if (efx->state != STATE_RECOVERY) {
+ /* Only perform flush if DMA is enabled */
+ if (efx->pci_dev->is_busmaster) {
+ efx->type->prepare_flush(efx);
+ rc = efx_farch_do_flush(efx);
+ efx->type->finish_flush(efx);
+ }
+
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_farch_rx_fini(rx_queue);
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_farch_tx_fini(tx_queue);
+ }
+ }
+
+ return rc;
+}
+
+/**************************************************************************
+ *
+ * Event queue processing
+ * Event queues are processed by per-channel tasklets.
+ *
+ **************************************************************************/
+
+/* Update a channel's event queue's read pointer (RPTR) register
+ *
+ * This writes the EVQ_RPTR_REG register for the specified channel's
+ * event queue.
+ */
+void efx_farch_ev_read_ack(struct efx_channel *channel)
+{
+ efx_dword_t reg;
+ struct efx_nic *efx = channel->efx;
+
+ EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
+ channel->eventq_read_ptr & channel->eventq_mask);
+
+ /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
+ * of 4 bytes, but it is really 16 bytes just like later revisions.
+ */
+ efx_writed(efx, &reg,
+ efx->type->evq_rptr_tbl_base +
+ FR_BZ_EVQ_RPTR_STEP * channel->channel);
+}
+
+/* Use HW to insert a SW defined event */
+void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
+ efx_qword_t *event)
+{
+ efx_oword_t drv_ev_reg;
+
+ BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
+ FRF_AZ_DRV_EV_DATA_WIDTH != 64);
+ drv_ev_reg.u32[0] = event->u32[0];
+ drv_ev_reg.u32[1] = event->u32[1];
+ drv_ev_reg.u32[2] = 0;
+ drv_ev_reg.u32[3] = 0;
+ EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
+ efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
+}
+
+static void efx_farch_magic_event(struct efx_channel *channel, u32 magic)
+{
+ efx_qword_t event;
+
+ EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
+ FSE_AZ_EV_CODE_DRV_GEN_EV,
+ FSF_AZ_DRV_GEN_EV_MAGIC, magic);
+ efx_farch_generate_event(channel->efx, channel->channel, &event);
+}
+
+/* Handle a transmit completion event
+ *
+ * The NIC batches TX completion events; the message we receive is of
+ * the form "complete all TX events up to this index".
+ */
+static int
+efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ unsigned int tx_ev_desc_ptr;
+ unsigned int tx_ev_q_label;
+ struct efx_tx_queue *tx_queue;
+ struct efx_nic *efx = channel->efx;
+ int tx_packets = 0;
+
+ if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ return 0;
+
+ if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
+ /* Transmit completion */
+ tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
+ tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
+ tx_queue = efx_channel_get_tx_queue(
+ channel, tx_ev_q_label % EFX_TXQ_TYPES);
+ tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
+ tx_queue->ptr_mask);
+ efx_xmit_done(tx_queue, tx_ev_desc_ptr);
+ } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
+ /* Rewrite the FIFO write pointer */
+ tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
+ tx_queue = efx_channel_get_tx_queue(
+ channel, tx_ev_q_label % EFX_TXQ_TYPES);
+
+ netif_tx_lock(efx->net_dev);
+ efx_farch_notify_tx_desc(tx_queue);
+ netif_tx_unlock(efx->net_dev);
+ } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
+ efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ } else {
+ netif_err(efx, tx_err, efx->net_dev,
+ "channel %d unexpected TX event "
+ EFX_QWORD_FMT"\n", channel->channel,
+ EFX_QWORD_VAL(*event));
+ }
+
+ return tx_packets;
+}
+
+/* Detect errors included in the rx_evt_pkt_ok bit. */
+static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
+ const efx_qword_t *event)
+{
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ struct efx_nic *efx = rx_queue->efx;
+ bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
+ bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
+ bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
+ bool rx_ev_other_err, rx_ev_pause_frm;
+ bool rx_ev_hdr_type, rx_ev_mcast_pkt;
+ unsigned rx_ev_pkt_type;
+
+ rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
+ rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
+ rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
+ rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
+ rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
+ FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
+ rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
+ FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
+ rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
+ FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
+ rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
+ rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
+ rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
+ 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
+ rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
+
+ /* Every error apart from tobe_disc and pause_frm */
+ rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
+ rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
+ rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
+
+ /* Count errors that are not in MAC stats. Ignore expected
+ * checksum errors during self-test. */
+ if (rx_ev_frm_trunc)
+ ++channel->n_rx_frm_trunc;
+ else if (rx_ev_tobe_disc)
+ ++channel->n_rx_tobe_disc;
+ else if (!efx->loopback_selftest) {
+ if (rx_ev_ip_hdr_chksum_err)
+ ++channel->n_rx_ip_hdr_chksum_err;
+ else if (rx_ev_tcp_udp_chksum_err)
+ ++channel->n_rx_tcp_udp_chksum_err;
+ }
+
+ /* TOBE_DISC is expected on unicast mismatches; don't print out an
+ * error message. FRM_TRUNC indicates RXDP dropped the packet due
+ * to a FIFO overflow.
+ */
+#ifdef DEBUG
+ if (rx_ev_other_err && net_ratelimit()) {
+ netif_dbg(efx, rx_err, efx->net_dev,
+ " RX queue %d unexpected RX event "
+ EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
+ efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
+ rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
+ rx_ev_ip_hdr_chksum_err ?
+ " [IP_HDR_CHKSUM_ERR]" : "",
+ rx_ev_tcp_udp_chksum_err ?
+ " [TCP_UDP_CHKSUM_ERR]" : "",
+ rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
+ rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
+ rx_ev_drib_nib ? " [DRIB_NIB]" : "",
+ rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
+ rx_ev_pause_frm ? " [PAUSE]" : "");
+ }
+#endif
+
+ /* The frame must be discarded if any of these are true. */
+ return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
+ rx_ev_tobe_disc | rx_ev_pause_frm) ?
+ EFX_RX_PKT_DISCARD : 0;
+}
+
+/* Handle receive events that are not in-order. Return true if this
+ * can be handled as a partial packet discard, false if it's more
+ * serious.
+ */
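+/* Illustrative example: with ptr_mask = 4095, expected = 4090 and a
+ * bad index of 10, dropped = (10 - 4090) & 4095 = 16 events were lost
+ * across the ring wrap.
+ */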
+static bool
+efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
+{
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned expected, dropped;
+
+ if (rx_queue->scatter_n &&
+ index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
+ rx_queue->ptr_mask)) {
+ ++channel->n_rx_nodesc_trunc;
+ return true;
+ }
+
+ expected = rx_queue->removed_count & rx_queue->ptr_mask;
+ dropped = (index - expected) & rx_queue->ptr_mask;
+ netif_info(efx, rx_err, efx->net_dev,
+ "dropped %d events (index=%d expected=%d)\n",
+ dropped, index, expected);
+
+ efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
+ RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
+ return false;
+}
+
+/* Handle a packet received event
+ *
+ * The NIC gives a "discard" flag if it's a unicast packet with the
+ * wrong destination address.
+ * The "is multicast" and "matches multicast filter" flags can also be
+ * used to discard non-matching multicast packets.
+ */
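+/* Summary of the scatter logic below: the NIC raises one event per
+ * fragment of a scattered packet, with JUMBO_CONT set on all but the
+ * last; rx_queue->scatter_n counts the fragments received so far and
+ * the packet is only passed to efx_rx_packet() once the final
+ * (non-continuation) event arrives.
+ */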
+static void
+efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
+{
+ unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
+ unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
+ unsigned expected_ptr;
+ bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
+ u16 flags;
+ struct efx_rx_queue *rx_queue;
+ struct efx_nic *efx = channel->efx;
+
+ if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ return;
+
+ rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
+ rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
+ WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
+ channel->channel);
+
+ rx_queue = efx_channel_get_rx_queue(channel);
+
+ rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
+ expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
+ rx_queue->ptr_mask);
+
+ /* Check for partial drops and other errors */
+ if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
+ unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
+ if (rx_ev_desc_ptr != expected_ptr &&
+ !efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
+ return;
+
+ /* Discard all pending fragments */
+ if (rx_queue->scatter_n) {
+ efx_rx_packet(
+ rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
+ rx_queue->removed_count += rx_queue->scatter_n;
+ rx_queue->scatter_n = 0;
+ }
+
+ /* Return if there is no new fragment */
+ if (rx_ev_desc_ptr != expected_ptr)
+ return;
+
+ /* Discard new fragment if not SOP */
+ if (!rx_ev_sop) {
+ efx_rx_packet(
+ rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ 1, 0, EFX_RX_PKT_DISCARD);
+ ++rx_queue->removed_count;
+ return;
+ }
+ }
+
+ ++rx_queue->scatter_n;
+ if (rx_ev_cont)
+ return;
+
+ rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
+ rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
+ rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
+
+ if (likely(rx_ev_pkt_ok)) {
+ /* If packet is marked as OK then we can rely on the
+ * hardware checksum and classification.
+ */
+ flags = 0;
+ switch (rx_ev_hdr_type) {
+ case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
+ flags |= EFX_RX_PKT_TCP;
+ /* fall through */
+ case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
+ flags |= EFX_RX_PKT_CSUMMED;
+ /* fall through */
+ case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
+ case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
+ break;
+ }
+ } else {
+ flags = efx_farch_handle_rx_not_ok(rx_queue, event);
+ }
+
+ /* Detect multicast packets that didn't match the filter */
+ rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
+ if (rx_ev_mcast_pkt) {
+ unsigned int rx_ev_mcast_hash_match =
+ EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
+
+ if (unlikely(!rx_ev_mcast_hash_match)) {
+ ++channel->n_rx_mcast_mismatch;
+ flags |= EFX_RX_PKT_DISCARD;
+ }
+ }
+
+ channel->irq_mod_score += 2;
+
+ /* Handle received packet */
+ efx_rx_packet(rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ rx_queue->scatter_n, rx_ev_byte_cnt, flags);
+ rx_queue->removed_count += rx_queue->scatter_n;
+ rx_queue->scatter_n = 0;
+}
+
+/* If this flush done event corresponds to a &struct efx_tx_queue, then
+ * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
+ * of all transmit completions.
+ */
+static void
+efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
+{
+ struct efx_tx_queue *tx_queue;
+ int qid;
+
+ qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
+ if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
+ tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
+ qid % EFX_TXQ_TYPES);
+ if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
+ efx_farch_magic_event(tx_queue->channel,
+ EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
+ }
+ }
+}
+
+/* If this flush done event corresponds to a &struct efx_rx_queue: if the
+ * flush was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise
+ * add the RX queue back to the mask of RX queues in need of flushing.
+ */
+static void
+efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
+{
+ struct efx_channel *channel;
+ struct efx_rx_queue *rx_queue;
+ int qid;
+ bool failed;
+
+ qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
+ failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
+ if (qid >= efx->n_channels)
+ return;
+ channel = efx_get_channel(efx, qid);
+ if (!efx_channel_has_rx_queue(channel))
+ return;
+ rx_queue = efx_channel_get_rx_queue(channel);
+
+ if (failed) {
+ netif_info(efx, hw, efx->net_dev,
+ "RXQ %d flush retry\n", qid);
+ rx_queue->flush_pending = true;
+ atomic_inc(&efx->rxq_flush_pending);
+ } else {
+ efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
+ EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
+ }
+ atomic_dec(&efx->rxq_flush_outstanding);
+ if (efx_farch_flush_wake(efx))
+ wake_up(&efx->flush_wq);
+}
+
+static void
+efx_farch_handle_drain_event(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+
+ WARN_ON(atomic_read(&efx->active_queues) == 0);
+ atomic_dec(&efx->active_queues);
+ if (efx_farch_flush_wake(efx))
+ wake_up(&efx->flush_wq);
+}
+
+static void efx_farch_handle_generated_event(struct efx_channel *channel,
+ efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ struct efx_rx_queue *rx_queue =
+ efx_channel_has_rx_queue(channel) ?
+ efx_channel_get_rx_queue(channel) : NULL;
+ unsigned magic, code;
+
+ magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
+ code = _EFX_CHANNEL_MAGIC_CODE(magic);
+
+ if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
+ channel->event_test_cpu = raw_smp_processor_id();
+ } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
+ /* The queue must be empty, so we won't receive any rx
+ * events, and hence efx_process_channel() won't refill
+ * the queue. Refill it here. */
+ efx_fast_push_rx_descriptors(rx_queue);
+ } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
+ efx_farch_handle_drain_event(channel);
+ } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
+ efx_farch_handle_drain_event(channel);
+ } else {
+ netif_dbg(efx, hw, efx->net_dev, "channel %d received "
+ "generated event "EFX_QWORD_FMT"\n",
+ channel->channel, EFX_QWORD_VAL(*event));
+ }
+}
+
+static void
+efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned int ev_sub_code;
+ unsigned int ev_sub_data;
+
+ ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
+ ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ switch (ev_sub_code) {
+ case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
+ netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
+ channel->channel, ev_sub_data);
+ efx_farch_handle_tx_flush_done(efx, event);
+ efx_sriov_tx_flush_done(efx, event);
+ break;
+ case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
+ netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
+ channel->channel, ev_sub_data);
+ efx_farch_handle_rx_flush_done(efx, event);
+ efx_sriov_rx_flush_done(efx, event);
+ break;
+ case FSE_AZ_EVQ_INIT_DONE_EV:
+ netif_dbg(efx, hw, efx->net_dev,
+ "channel %d EVQ %d initialised\n",
+ channel->channel, ev_sub_data);
+ break;
+ case FSE_AZ_SRM_UPD_DONE_EV:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d SRAM update done\n", channel->channel);
+ break;
+ case FSE_AZ_WAKE_UP_EV:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d RXQ %d wakeup event\n",
+ channel->channel, ev_sub_data);
+ break;
+ case FSE_AZ_TIMER_EV:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d RX queue %d timer expired\n",
+ channel->channel, ev_sub_data);
+ break;
+ case FSE_AA_RX_RECOVER_EV:
+ netif_err(efx, rx_err, efx->net_dev,
+ "channel %d seen DRIVER RX_RESET event. "
+ "Resetting.\n", channel->channel);
+ atomic_inc(&efx->rx_reset);
+ efx_schedule_reset(efx,
+ EFX_WORKAROUND_6555(efx) ?
+ RESET_TYPE_RX_RECOVERY :
+ RESET_TYPE_DISABLE);
+ break;
+ case FSE_BZ_RX_DSC_ERROR_EV:
+ if (ev_sub_data < EFX_VI_BASE) {
+ netif_err(efx, rx_err, efx->net_dev,
+ "RX DMA Q %d reports descriptor fetch error."
+ " RX Q %d is disabled.\n", ev_sub_data,
+ ev_sub_data);
+ efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ } else
+ efx_sriov_desc_fetch_err(efx, ev_sub_data);
+ break;
+ case FSE_BZ_TX_DSC_ERROR_EV:
+ if (ev_sub_data < EFX_VI_BASE) {
+ netif_err(efx, tx_err, efx->net_dev,
+ "TX DMA Q %d reports descriptor fetch error."
+ " TX Q %d is disabled.\n", ev_sub_data,
+ ev_sub_data);
+ efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ } else
+ efx_sriov_desc_fetch_err(efx, ev_sub_data);
+ break;
+ default:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d unknown driver event code %d "
+ "data %04x\n", channel->channel, ev_sub_code,
+ ev_sub_data);
+ break;
+ }
+}
+
+int efx_farch_ev_process(struct efx_channel *channel, int budget)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned int read_ptr;
+ efx_qword_t event, *p_event;
+ int ev_code;
+ int tx_packets = 0;
+ int spent = 0;
+
+ read_ptr = channel->eventq_read_ptr;
+
+ for (;;) {
+ p_event = efx_event(channel, read_ptr);
+ event = *p_event;
+
+ if (!efx_event_present(&event))
+ /* End of events */
+ break;
+
+ netif_vdbg(channel->efx, intr, channel->efx->net_dev,
+ "channel %d event is "EFX_QWORD_FMT"\n",
+ channel->channel, EFX_QWORD_VAL(event));
+
+ /* Clear this event by marking it all ones */
+ EFX_SET_QWORD(*p_event);
+
+ ++read_ptr;
+
+ ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
+
+ switch (ev_code) {
+ case FSE_AZ_EV_CODE_RX_EV:
+ efx_farch_handle_rx_event(channel, &event);
+ if (++spent == budget)
+ goto out;
+ break;
+ case FSE_AZ_EV_CODE_TX_EV:
+ tx_packets += efx_farch_handle_tx_event(channel,
+ &event);
+ if (tx_packets > efx->txq_entries) {
+ spent = budget;
+ goto out;
+ }
+ break;
+ case FSE_AZ_EV_CODE_DRV_GEN_EV:
+ efx_farch_handle_generated_event(channel, &event);
+ break;
+ case FSE_AZ_EV_CODE_DRIVER_EV:
+ efx_farch_handle_driver_event(channel, &event);
+ break;
+ case FSE_CZ_EV_CODE_USER_EV:
+ efx_sriov_event(channel, &event);
+ break;
+ case FSE_CZ_EV_CODE_MCDI_EV:
+ efx_mcdi_process_event(channel, &event);
+ break;
+ case FSE_AZ_EV_CODE_GLOBAL_EV:
+ if (efx->type->handle_global_event &&
+ efx->type->handle_global_event(channel, &event))
+ break;
+ /* else fall through */
+ default:
+ netif_err(channel->efx, hw, channel->efx->net_dev,
+ "channel %d unknown event type %d (data "
+ EFX_QWORD_FMT ")\n", channel->channel,
+ ev_code, EFX_QWORD_VAL(event));
+ }
+ }
+
+out:
+ channel->eventq_read_ptr = read_ptr;
+ return spent;
+}
+
+/* Allocate buffer table entries for event queue */
+int efx_farch_ev_probe(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned entries;
+
+ entries = channel->eventq_mask + 1;
+ return efx_alloc_special_buffer(efx, &channel->eventq,
+ entries * sizeof(efx_qword_t));
+}
+
+int efx_farch_ev_init(struct efx_channel *channel)
+{
+ efx_oword_t reg;
+ struct efx_nic *efx = channel->efx;
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "channel %d event queue in special buffers %d-%d\n",
+ channel->channel, channel->eventq.index,
+ channel->eventq.index + channel->eventq.entries - 1);
+
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
+ EFX_POPULATE_OWORD_3(reg,
+ FRF_CZ_TIMER_Q_EN, 1,
+ FRF_CZ_HOST_NOTIFY_MODE, 0,
+ FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
+ efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
+ }
+
+ /* Pin event queue buffer */
+ efx_init_special_buffer(efx, &channel->eventq);
+
+ /* Fill event queue with all ones (i.e. empty events) */
+ memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
+
+ /* Push event queue to card */
+ EFX_POPULATE_OWORD_3(reg,
+ FRF_AZ_EVQ_EN, 1,
+ FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
+ FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
+ efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
+ channel->channel);
+
+ return 0;
+}
+
+void efx_farch_ev_fini(struct efx_channel *channel)
+{
+ efx_oword_t reg;
+ struct efx_nic *efx = channel->efx;
+
+ /* Remove event queue from card */
+ EFX_ZERO_OWORD(reg);
+ efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
+ channel->channel);
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
+ efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
+
+ /* Unpin event queue */
+ efx_fini_special_buffer(efx, &channel->eventq);
+}
+
+/* Free buffers backing event queue */
+void efx_farch_ev_remove(struct efx_channel *channel)
+{
+ efx_free_special_buffer(channel->efx, &channel->eventq);
+}
+
+void efx_farch_ev_test_generate(struct efx_channel *channel)
+{
+ efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
+}
+
+void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue)
+{
+ efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
+ EFX_CHANNEL_MAGIC_FILL(rx_queue));
+}
+
+/**************************************************************************
+ *
+ * Hardware interrupts
+ * The hardware interrupt handler does very little work; all the event
+ * queue processing is carried out by per-channel tasklets.
+ *
+ **************************************************************************/
+
+/* Enable/disable/generate interrupts */
+static inline void efx_farch_interrupts(struct efx_nic *efx,
+ bool enabled, bool force)
+{
+ efx_oword_t int_en_reg_ker;
+
+ EFX_POPULATE_OWORD_3(int_en_reg_ker,
+ FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
+ FRF_AZ_KER_INT_KER, force,
+ FRF_AZ_DRV_INT_EN_KER, enabled);
+ efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
+}
+
+void efx_farch_irq_enable_master(struct efx_nic *efx)
+{
+ EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
+ wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
+
+ efx_farch_interrupts(efx, true, false);
+}
+
+void efx_farch_irq_disable_master(struct efx_nic *efx)
+{
+ /* Disable interrupts */
+ efx_farch_interrupts(efx, false, false);
+}
+
+/* Generate a test interrupt
+ * Interrupt must already have been enabled, otherwise nasty things
+ * may happen.
+ */
+void efx_farch_irq_test_generate(struct efx_nic *efx)
+{
+ efx_farch_interrupts(efx, true, true);
+}
+
+/* Process a fatal interrupt
+ * Disable bus mastering ASAP and schedule a reset
+ */
+irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t *int_ker = efx->irq_status.addr;
+ efx_oword_t fatal_intr;
+ int error, mem_perr;
+
+ efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
+ error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
+
+ netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
+ EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
+ EFX_OWORD_VAL(fatal_intr),
+ error ? "disabling bus mastering" : "no recognised error");
+
+ /* If this is a memory parity error dump which blocks are offending */
+ mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
+ EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
+ if (mem_perr) {
+ efx_oword_t reg;
+ efx_reado(efx, &reg, FR_AZ_MEM_STAT);
+ netif_err(efx, hw, efx->net_dev,
+ "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
+ EFX_OWORD_VAL(reg));
+ }
+
+ /* Disable both devices */
+ pci_clear_master(efx->pci_dev);
+ if (efx_nic_is_dual_func(efx))
+ pci_clear_master(nic_data->pci_dev2);
+ efx_farch_irq_disable_master(efx);
+
+ /* Count errors and reset or disable the NIC accordingly */
+ if (efx->int_error_count == 0 ||
+ time_after(jiffies, efx->int_error_expire)) {
+ efx->int_error_count = 0;
+ efx->int_error_expire =
+ jiffies + EFX_INT_ERROR_EXPIRE * HZ;
+ }
+ if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
+ netif_err(efx, hw, efx->net_dev,
+ "SYSTEM ERROR - reset scheduled\n");
+ efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
+ } else {
+ netif_err(efx, hw, efx->net_dev,
+ "SYSTEM ERROR - max number of errors seen."
+ "NIC will be disabled\n");
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Handle a legacy interrupt
+ * Acknowledges the interrupt and schedules event queue processing.
+ */
+irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
+{
+ struct efx_nic *efx = dev_id;
+ bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+ efx_oword_t *int_ker = efx->irq_status.addr;
+ irqreturn_t result = IRQ_NONE;
+ struct efx_channel *channel;
+ efx_dword_t reg;
+ u32 queues;
+ int syserr;
+
+ /* Read the ISR which also ACKs the interrupts */
+ efx_readd(efx, &reg, FR_BZ_INT_ISR0);
+ queues = EFX_EXTRACT_DWORD(reg, 0, 31);
+
+ /* Legacy interrupts are disabled too late by the EEH kernel
+ * code. Disable them earlier.
+ * If an EEH error occurred, the read will have returned all ones.
+ */
+ if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
+ !efx->eeh_disabled_legacy_irq) {
+ disable_irq_nosync(efx->legacy_irq);
+ efx->eeh_disabled_legacy_irq = true;
+ }
+
+ /* Handle non-event-queue sources */
+ if (queues & (1U << efx->irq_level) && soft_enabled) {
+ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return efx_farch_fatal_interrupt(efx);
+ efx->last_irq_cpu = raw_smp_processor_id();
+ }
+
+ if (queues != 0) {
+ efx->irq_zero_count = 0;
+
+ /* Schedule processing of any interrupting queues */
+ if (likely(soft_enabled)) {
+ efx_for_each_channel(channel, efx) {
+ if (queues & 1)
+ efx_schedule_channel_irq(channel);
+ queues >>= 1;
+ }
+ }
+ result = IRQ_HANDLED;
+
+ } else {
+ efx_qword_t *event;
+
+ /* Legacy ISR read can return zero once (SF bug 15783) */
+
+ /* We can't return IRQ_HANDLED more than once on seeing ISR=0
+ * because this might be a shared interrupt. */
+ if (efx->irq_zero_count++ == 0)
+ result = IRQ_HANDLED;
+
+ /* Ensure we schedule or rearm all event queues */
+ if (likely(soft_enabled)) {
+ efx_for_each_channel(channel, efx) {
+ event = efx_event(channel,
+ channel->eventq_read_ptr);
+ if (efx_event_present(event))
+ efx_schedule_channel_irq(channel);
+ else
+ efx_farch_ev_read_ack(channel);
+ }
+ }
+ }
+
+ if (result == IRQ_HANDLED)
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
+
+ return result;
+}
+
+/* Handle an MSI interrupt
+ *
+ * Handle an MSI hardware interrupt. This routine schedules event
+ * queue processing. No interrupt acknowledgement cycle is necessary.
+ * Also, we never need to check that the interrupt is for us, since
+ * MSI interrupts cannot be shared.
+ */
+irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
+{
+ struct efx_msi_context *context = dev_id;
+ struct efx_nic *efx = context->efx;
+ efx_oword_t *int_ker = efx->irq_status.addr;
+ int syserr;
+
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
+
+ if (unlikely(!ACCESS_ONCE(efx->irq_soft_enabled)))
+ return IRQ_HANDLED;
+
+ /* Handle non-event-queue sources */
+ if (context->index == efx->irq_level) {
+ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return efx_farch_fatal_interrupt(efx);
+ efx->last_irq_cpu = raw_smp_processor_id();
+ }
+
+ /* Schedule processing of the channel */
+ efx_schedule_channel_irq(efx->channel[context->index]);
+
+ return IRQ_HANDLED;
+}
+
+/* Set up the RSS indirection table.
+ * This maps from the hash value of the packet to an RX queue.
+ */
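+/* Illustrative note: the hardware indexes this table with bits of the
+ * packet's RSS hash and delivers the packet to the RX queue stored in
+ * that row (FRF_BZ_IT_QUEUE); exactly which hash bits are used is a
+ * hardware detail not visible here.
+ */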
+void efx_farch_rx_push_indir_table(struct efx_nic *efx)
+{
+ size_t i = 0;
+ efx_dword_t dword;
+
+ if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
+ return;
+
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
+ FR_BZ_RX_INDIRECTION_TBL_ROWS);
+
+ for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
+ EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
+ efx->rx_indir_table[i]);
+ efx_writed(efx, &dword,
+ FR_BZ_RX_INDIRECTION_TBL +
+ FR_BZ_RX_INDIRECTION_TBL_STEP * i);
+ }
+}
+
+/* Looks at available SRAM resources and works out how many queues we
+ * can support, and where things like descriptor caches should live.
+ *
+ * SRAM is split up as follows:
+ * 0 buftbl entries for channels
+ * efx->vf_buftbl_base buftbl entries for SR-IOV
+ * efx->rx_dc_base RX descriptor caches
+ * efx->tx_dc_base TX descriptor caches
+ */
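+/* Worked example (illustrative only; assumes EFX_MAX_DMAQ_SIZE =
+ * EFX_MAX_EVQ_SIZE = 4096 descriptors, 8-byte efx_qword_t entries and
+ * EFX_BUF_SIZE = 4096 bytes): each maximally-sized queue spans
+ * 4096 * 8 / 4096 = 8 buffer table entries, so 4 RX channels, 4 TX
+ * channels of EFX_TXQ_TYPES = 4 queues each, and 4 event queues give
+ * buftbl_min = (4 + 4 * 4 + 4) * 8 = 192 entries.
+ */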
+void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
+{
+ unsigned vi_count, buftbl_min;
+
+ /* Account for the buffer table entries backing the datapath channels
+ * and the descriptor caches for those channels.
+ */
+ buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
+ efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
+ efx->n_channels * EFX_MAX_EVQ_SIZE)
+ * sizeof(efx_qword_t) / EFX_BUF_SIZE);
+ vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+
+#ifdef CONFIG_SFC_SRIOV
+ if (efx_sriov_wanted(efx)) {
+ unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
+
+ efx->vf_buftbl_base = buftbl_min;
+
+ vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
+ vi_count = max(vi_count, EFX_VI_BASE);
+ buftbl_free = (sram_lim_qw - buftbl_min -
+ vi_count * vi_dc_entries);
+
+ entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
+ efx_vf_size(efx));
+ vf_limit = min(buftbl_free / entries_per_vf,
+ (1024U - EFX_VI_BASE) >> efx->vi_scale);
+
+ if (efx->vf_count > vf_limit) {
+ netif_err(efx, probe, efx->net_dev,
+ "Reducing VF count from from %d to %d\n",
+ efx->vf_count, vf_limit);
+ efx->vf_count = vf_limit;
+ }
+ vi_count += efx->vf_count * efx_vf_size(efx);
+ }
+#endif
+
+ efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
+ efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
+}
+
+u32 efx_farch_fpga_ver(struct efx_nic *efx)
+{
+ efx_oword_t altera_build;
+ efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
+ return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
+}
+
+void efx_farch_init_common(struct efx_nic *efx)
+{
+ efx_oword_t temp;
+
+ /* Set positions of descriptor caches in SRAM. */
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
+ efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
+ efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
+
+ /* Set TX descriptor cache size. */
+ BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
+ efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
+
+ /* Set RX descriptor cache size. Set low watermark to size-8, as
+ * this allows most efficient prefetching.
+ */
+ BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
+ efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
+ efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
+
+ /* Program INT_KER address */
+ EFX_POPULATE_OWORD_2(temp,
+ FRF_AZ_NORM_INT_VEC_DIS_KER,
+ EFX_INT_MODE_USE_MSI(efx),
+ FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
+ efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
+
+ if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
+ /* Use an interrupt level unused by event queues */
+ efx->irq_level = 0x1f;
+ else
+ /* Use a valid MSI-X vector */
+ efx->irq_level = 0;
+
+ /* Enable all the genuinely fatal interrupts. (They are still
+ * masked by the overall interrupt mask, controlled by
+ * falcon_interrupts()).
+ *
+ * Note: All other fatal interrupts are enabled
+ */
+ EFX_POPULATE_OWORD_3(temp,
+ FRF_AZ_ILL_ADR_INT_KER_EN, 1,
+ FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
+ FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
+ EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
+ EFX_INVERT_OWORD(temp);
+ efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
+
+ efx_farch_rx_push_indir_table(efx);
+
+ /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
+ * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
+ */
+ efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
+ /* Enable SW_EV to inherit in char driver - assume harmless here */
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
+ /* Prefetch threshold 2 => fetch when descriptor cache half empty */
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
+ /* Disable hardware watchdog which can misfire */
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
+ /* Squash TX of packets of 16 bytes or less */
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
+ efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ EFX_POPULATE_OWORD_4(temp,
+ /* Default values */
+ FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
+ FRF_BZ_TX_PACE_SB_AF, 0xb,
+ FRF_BZ_TX_PACE_FB_BASE, 0,
+ /* Allow large pace values in the
+ * fast bin. */
+ FRF_BZ_TX_PACE_BIN_TH,
+ FFE_BZ_TX_PACE_RESERVED);
+ efx_writeo(efx, &temp, FR_BZ_TX_PACE);
+ }
+}
+
+/**************************************************************************
+ *
+ * Filter tables
+ *
+ **************************************************************************
+ */
+
+/* "Fudge factors" - difference between programmed value and actual depth.
+ * Due to pipelined implementation we need to program H/W with a value that
+ * is larger than the hop limit we want.
+ */
+#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
+#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1
+
+/* Hard maximum search limit. Hardware will time-out beyond 200-something.
+ * We also need to avoid infinite loops in efx_farch_filter_search() when the
+ * table is full.
+ */
+#define EFX_FARCH_FILTER_CTL_SRCH_MAX 200
+
+/* Don't try very hard to find space for performance hints, as this is
+ * counter-productive. */
+#define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5
+
+enum efx_farch_filter_type {
+ EFX_FARCH_FILTER_TCP_FULL = 0,
+ EFX_FARCH_FILTER_TCP_WILD,
+ EFX_FARCH_FILTER_UDP_FULL,
+ EFX_FARCH_FILTER_UDP_WILD,
+ EFX_FARCH_FILTER_MAC_FULL = 4,
+ EFX_FARCH_FILTER_MAC_WILD,
+ EFX_FARCH_FILTER_UC_DEF = 8,
+ EFX_FARCH_FILTER_MC_DEF,
+ EFX_FARCH_FILTER_TYPE_COUNT, /* number of specific types */
+};
+
+enum efx_farch_filter_table_id {
+ EFX_FARCH_FILTER_TABLE_RX_IP = 0,
+ EFX_FARCH_FILTER_TABLE_RX_MAC,
+ EFX_FARCH_FILTER_TABLE_RX_DEF,
+ EFX_FARCH_FILTER_TABLE_TX_MAC,
+ EFX_FARCH_FILTER_TABLE_COUNT,
+};
+
+enum efx_farch_filter_index {
+ EFX_FARCH_FILTER_INDEX_UC_DEF,
+ EFX_FARCH_FILTER_INDEX_MC_DEF,
+ EFX_FARCH_FILTER_SIZE_RX_DEF,
+};
+
+struct efx_farch_filter_spec {
+ u8 type:4;
+ u8 priority:4;
+ u8 flags;
+ u16 dmaq_id;
+ u32 data[3];
+};
+
+struct efx_farch_filter_table {
+ enum efx_farch_filter_table_id id;
+ u32 offset; /* address of table relative to BAR */
+ unsigned size; /* number of entries */
+ unsigned step; /* step between entries */
+ unsigned used; /* number currently used */
+ unsigned long *used_bitmap;
+ struct efx_farch_filter_spec *spec;
+ unsigned search_limit[EFX_FARCH_FILTER_TYPE_COUNT];
+};
+
+struct efx_farch_filter_state {
+ struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT];
+};
+
+static void
+efx_farch_filter_table_clear_entry(struct efx_nic *efx,
+ struct efx_farch_filter_table *table,
+ unsigned int filter_idx);
+
+/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
+ * key derived from the n-tuple. The initial LFSR state is 0xffff. */
+static u16 efx_farch_filter_hash(u32 key)
+{
+ u16 tmp;
+
+ /* First 16 rounds */
+ tmp = 0x1fff ^ key >> 16;
+ tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
+ tmp = tmp ^ tmp >> 9;
+ /* Last 16 rounds */
+ tmp = tmp ^ tmp << 13 ^ key;
+ tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
+ return tmp ^ tmp >> 9;
+}
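+
+/* Usage note (see efx_farch_filter_insert() below): the first slot
+ * probed is efx_farch_filter_hash(key) & (table->size - 1), and
+ * subsequent probes step by efx_farch_filter_increment(key).
+ */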
+
+/* To allow for hash collisions, filter search continues at these
+ * increments from the first possible entry selected by the hash. */
+static u16 efx_farch_filter_increment(u32 key)
+{
+ return key * 2 - 1;
+}
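+
+/* The increment is always odd and the table size is a power of two, so
+ * the probe sequence visits every slot before repeating. Illustrative
+ * example: key = 0x12345678 gives an increment of
+ * (u16)(0x12345678 * 2 - 1) = 0xacef.
+ */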
+
+static enum efx_farch_filter_table_id
+efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec)
+{
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
+ (EFX_FARCH_FILTER_TCP_FULL >> 2));
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
+ (EFX_FARCH_FILTER_TCP_WILD >> 2));
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
+ (EFX_FARCH_FILTER_UDP_FULL >> 2));
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
+ (EFX_FARCH_FILTER_UDP_WILD >> 2));
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
+ (EFX_FARCH_FILTER_MAC_FULL >> 2));
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
+ (EFX_FARCH_FILTER_MAC_WILD >> 2));
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC !=
+ EFX_FARCH_FILTER_TABLE_RX_MAC + 2);
+ return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
+}
+
+static void efx_farch_filter_push_rx_config(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ struct efx_farch_filter_table *table;
+ efx_oword_t filter_ctl;
+
+ efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
+ EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
+ table->search_limit[EFX_FARCH_FILTER_TCP_FULL] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
+ table->search_limit[EFX_FARCH_FILTER_TCP_WILD] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
+ EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
+ table->search_limit[EFX_FARCH_FILTER_UDP_FULL] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
+ table->search_limit[EFX_FARCH_FILTER_UDP_WILD] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
+ if (table->size) {
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
+ table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
+ table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
+ if (table->size) {
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
+ table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
+ !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
+ EFX_FILTER_FLAG_RX_RSS));
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
+ table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
+ !!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
+ EFX_FILTER_FLAG_RX_RSS));
+
+ /* There is a single bit to enable RX scatter for all
+ * unmatched packets. Only set it if scatter is
+ * enabled in both filter specs.
+ */
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
+ !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
+ table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
+ EFX_FILTER_FLAG_RX_SCATTER));
+ } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ /* We don't expose 'default' filters because unmatched
+ * packets always go to the queue number found in the
+ * RSS table. But we still need to set the RX scatter
+ * bit here.
+ */
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
+ efx->rx_scatter);
+ }
+
+ efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
+}
+
+static void efx_farch_filter_push_tx_limits(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ struct efx_farch_filter_table *table;
+ efx_oword_t tx_cfg;
+
+ efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
+ if (table->size) {
+ EFX_SET_OWORD_FIELD(
+ tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
+ table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(
+ tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
+ table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+
+ efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
+}
+
+static int
+efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec,
+ const struct efx_filter_spec *gen_spec)
+{
+ bool is_full = false;
+
+ if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) &&
+ gen_spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT)
+ return -EINVAL;
+
+ spec->priority = gen_spec->priority;
+ spec->flags = gen_spec->flags;
+ spec->dmaq_id = gen_spec->dmaq_id;
+
+ switch (gen_spec->match_flags) {
+ case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT):
+ is_full = true;
+ /* fall through */
+ case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): {
+ __be32 rhost, host1, host2;
+ __be16 rport, port1, port2;
+
+ EFX_BUG_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX));
+
+ if (gen_spec->ether_type != htons(ETH_P_IP))
+ return -EPROTONOSUPPORT;
+ if (gen_spec->loc_port == 0 ||
+ (is_full && gen_spec->rem_port == 0))
+ return -EADDRNOTAVAIL;
+ switch (gen_spec->ip_proto) {
+ case IPPROTO_TCP:
+ spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL :
+ EFX_FARCH_FILTER_TCP_WILD);
+ break;
+ case IPPROTO_UDP:
+ spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL :
+ EFX_FARCH_FILTER_UDP_WILD);
+ break;
+ default:
+ return -EPROTONOSUPPORT;
+ }
+
+ /* Filter is constructed in terms of source and destination,
+ * with the odd wrinkle that the ports are swapped in a UDP
+ * wildcard filter. We need to convert from local and remote
+ * (= zero for wildcard) addresses.
+ */
+ rhost = is_full ? gen_spec->rem_host[0] : 0;
+ rport = is_full ? gen_spec->rem_port : 0;
+ host1 = rhost;
+ host2 = gen_spec->loc_host[0];
+ if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
+ port1 = gen_spec->loc_port;
+ port2 = rport;
+ } else {
+ port1 = rport;
+ port2 = gen_spec->loc_port;
+ }
+ spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
+ spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
+ spec->data[2] = ntohl(host2);
+
+ break;
+ }
+
+ case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID:
+ is_full = true;
+ /* fall through */
+ case EFX_FILTER_MATCH_LOC_MAC:
+ spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL :
+ EFX_FARCH_FILTER_MAC_WILD);
+ spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
+ spec->data[1] = (gen_spec->loc_mac[2] << 24 |
+ gen_spec->loc_mac[3] << 16 |
+ gen_spec->loc_mac[4] << 8 |
+ gen_spec->loc_mac[5]);
+ spec->data[2] = (gen_spec->loc_mac[0] << 8 |
+ gen_spec->loc_mac[1]);
+ break;
+
+ case EFX_FILTER_MATCH_LOC_MAC_IG:
+ spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
+ EFX_FARCH_FILTER_MC_DEF :
+ EFX_FARCH_FILTER_UC_DEF);
+ memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
+ break;
+
+ default:
+ return -EPROTONOSUPPORT;
+ }
+
+ return 0;
+}
+
+static void
+efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
+ const struct efx_farch_filter_spec *spec)
+{
+ bool is_full = false;
+
+ /* *gen_spec should be completely initialised, to be consistent
+ * with efx_filter_init_{rx,tx}() and in case we want to copy
+ * it back to userland.
+ */
+ memset(gen_spec, 0, sizeof(*gen_spec));
+
+ gen_spec->priority = spec->priority;
+ gen_spec->flags = spec->flags;
+ gen_spec->dmaq_id = spec->dmaq_id;
+
+ switch (spec->type) {
+ case EFX_FARCH_FILTER_TCP_FULL:
+ case EFX_FARCH_FILTER_UDP_FULL:
+ is_full = true;
+ /* fall through */
+ case EFX_FARCH_FILTER_TCP_WILD:
+ case EFX_FARCH_FILTER_UDP_WILD: {
+ __be32 host1, host2;
+ __be16 port1, port2;
+
+ gen_spec->match_flags =
+ EFX_FILTER_MATCH_ETHER_TYPE |
+ EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
+ if (is_full)
+ gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST |
+ EFX_FILTER_MATCH_REM_PORT);
+ gen_spec->ether_type = htons(ETH_P_IP);
+ gen_spec->ip_proto =
+ (spec->type == EFX_FARCH_FILTER_TCP_FULL ||
+ spec->type == EFX_FARCH_FILTER_TCP_WILD) ?
+ IPPROTO_TCP : IPPROTO_UDP;
+
+ host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
+ port1 = htons(spec->data[0]);
+ host2 = htonl(spec->data[2]);
+ port2 = htons(spec->data[1] >> 16);
+ if (spec->flags & EFX_FILTER_FLAG_TX) {
+ gen_spec->loc_host[0] = host1;
+ gen_spec->rem_host[0] = host2;
+ } else {
+ gen_spec->loc_host[0] = host2;
+ gen_spec->rem_host[0] = host1;
+ }
+ if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^
+ (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
+ gen_spec->loc_port = port1;
+ gen_spec->rem_port = port2;
+ } else {
+ gen_spec->loc_port = port2;
+ gen_spec->rem_port = port1;
+ }
+
+ break;
+ }
+
+ case EFX_FARCH_FILTER_MAC_FULL:
+ is_full = true;
+ /* fall through */
+ case EFX_FARCH_FILTER_MAC_WILD:
+ gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC;
+ if (is_full)
+ gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+ gen_spec->loc_mac[0] = spec->data[2] >> 8;
+ gen_spec->loc_mac[1] = spec->data[2];
+ gen_spec->loc_mac[2] = spec->data[1] >> 24;
+ gen_spec->loc_mac[3] = spec->data[1] >> 16;
+ gen_spec->loc_mac[4] = spec->data[1] >> 8;
+ gen_spec->loc_mac[5] = spec->data[1];
+ gen_spec->outer_vid = htons(spec->data[0]);
+ break;
+
+ case EFX_FARCH_FILTER_UC_DEF:
+ case EFX_FARCH_FILTER_MC_DEF:
+ gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG;
+ gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF;
+ break;
+
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static void
+efx_farch_filter_init_rx_for_stack(struct efx_nic *efx,
+ struct efx_farch_filter_spec *spec)
+{
+ /* If there's only one channel then disable RSS for non-VF
+ * traffic, thereby allowing VFs to use RSS when the PF can't.
+ */
+ spec->priority = EFX_FILTER_PRI_REQUIRED;
+ spec->flags = (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_STACK |
+ (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) |
+ (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
+ spec->dmaq_id = 0;
+}
+
+/* Build a filter entry and return its n-tuple key. */
+static u32 efx_farch_filter_build(efx_oword_t *filter,
+ struct efx_farch_filter_spec *spec)
+{
+ u32 data3;
+
+ switch (efx_farch_filter_spec_table_id(spec)) {
+ case EFX_FARCH_FILTER_TABLE_RX_IP: {
+ bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL ||
+ spec->type == EFX_FARCH_FILTER_UDP_WILD);
+ EFX_POPULATE_OWORD_7(
+ *filter,
+ FRF_BZ_RSS_EN,
+ !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
+ FRF_BZ_SCATTER_EN,
+ !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
+ FRF_BZ_TCP_UDP, is_udp,
+ FRF_BZ_RXQ_ID, spec->dmaq_id,
+ EFX_DWORD_2, spec->data[2],
+ EFX_DWORD_1, spec->data[1],
+ EFX_DWORD_0, spec->data[0]);
+ data3 = is_udp;
+ break;
+ }
+
+ case EFX_FARCH_FILTER_TABLE_RX_MAC: {
+ bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
+ EFX_POPULATE_OWORD_7(
+ *filter,
+ FRF_CZ_RMFT_RSS_EN,
+ !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
+ FRF_CZ_RMFT_SCATTER_EN,
+ !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
+ FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
+ FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
+ FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
+ FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
+ FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
+ data3 = is_wild;
+ break;
+ }
+
+ case EFX_FARCH_FILTER_TABLE_TX_MAC: {
+ bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
+ EFX_POPULATE_OWORD_5(*filter,
+ FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
+ FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
+ FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
+ FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
+ FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
+ data3 = is_wild | spec->dmaq_id << 1;
+ break;
+ }
+
+ default:
+ BUG();
+ }
+
+ return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
+}
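+
+/* The returned key is fed to efx_farch_filter_hash() and
+ * efx_farch_filter_increment() by efx_farch_filter_insert(); data3
+ * folds in the match bits that live outside spec->data[] (e.g. TCP vs
+ * UDP, full vs wildcard), presumably so the driver's probe sequence
+ * agrees with the hash the hardware computes over the whole entry.
+ */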
+
+static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left,
+ const struct efx_farch_filter_spec *right)
+{
+ if (left->type != right->type ||
+ memcmp(left->data, right->data, sizeof(left->data)))
+ return false;
+
+ if (left->flags & EFX_FILTER_FLAG_TX &&
+ left->dmaq_id != right->dmaq_id)
+ return false;
+
+ return true;
+}
+
+/*
+ * Construct/deconstruct external filter IDs. At least the RX filter
+ * IDs must be ordered by matching priority, for RX NFC semantics.
+ *
+ * Deconstruction needs to be robust against invalid IDs so that
+ * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
+ * accept user-provided IDs.
+ */
+
+#define EFX_FARCH_FILTER_MATCH_PRI_COUNT 5
+
+static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = {
+ [EFX_FARCH_FILTER_TCP_FULL] = 0,
+ [EFX_FARCH_FILTER_UDP_FULL] = 0,
+ [EFX_FARCH_FILTER_TCP_WILD] = 1,
+ [EFX_FARCH_FILTER_UDP_WILD] = 1,
+ [EFX_FARCH_FILTER_MAC_FULL] = 2,
+ [EFX_FARCH_FILTER_MAC_WILD] = 3,
+ [EFX_FARCH_FILTER_UC_DEF] = 4,
+ [EFX_FARCH_FILTER_MC_DEF] = 4,
+};
+
+static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = {
+ EFX_FARCH_FILTER_TABLE_RX_IP, /* RX match pri 0 */
+ EFX_FARCH_FILTER_TABLE_RX_IP,
+ EFX_FARCH_FILTER_TABLE_RX_MAC,
+ EFX_FARCH_FILTER_TABLE_RX_MAC,
+ EFX_FARCH_FILTER_TABLE_RX_DEF, /* RX match pri 4 */
+ EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 0 */
+ EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 1 */
+};
+
+#define EFX_FARCH_FILTER_INDEX_WIDTH 13
+#define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1)
+
+static inline u32
+efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec,
+ unsigned int index)
+{
+ unsigned int range;
+
+ range = efx_farch_filter_type_match_pri[spec->type];
+ if (!(spec->flags & EFX_FILTER_FLAG_RX))
+ range += EFX_FARCH_FILTER_MATCH_PRI_COUNT;
+
+ return range << EFX_FARCH_FILTER_INDEX_WIDTH | index;
+}
+
+static inline enum efx_farch_filter_table_id
+efx_farch_filter_id_table_id(u32 id)
+{
+ unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH;
+
+ if (range < ARRAY_SIZE(efx_farch_filter_range_table))
+ return efx_farch_filter_range_table[range];
+ else
+ return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */
+}
+
+static inline unsigned int efx_farch_filter_id_index(u32 id)
+{
+ return id & EFX_FARCH_FILTER_INDEX_MASK;
+}
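+
+/* Illustrative example: an RX TCP wildcard filter (match pri 1) at
+ * table index 0x123 gets id = 1 << 13 | 0x123 = 0x2123; deconstruction
+ * recovers EFX_FARCH_FILTER_TABLE_RX_IP from id >> 13 and index 0x123
+ * from id & EFX_FARCH_FILTER_INDEX_MASK.
+ */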
+
+u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1;
+ enum efx_farch_filter_table_id table_id;
+
+ do {
+ table_id = efx_farch_filter_range_table[range];
+ if (state->table[table_id].size != 0)
+ return range << EFX_FARCH_FILTER_INDEX_WIDTH |
+ state->table[table_id].size;
+ } while (range--);
+
+ return 0;
+}
+
+s32 efx_farch_filter_insert(struct efx_nic *efx,
+ struct efx_filter_spec *gen_spec,
+ bool replace_equal)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ struct efx_farch_filter_table *table;
+ struct efx_farch_filter_spec spec;
+ efx_oword_t filter;
+ int rep_index, ins_index;
+ unsigned int depth = 0;
+ int rc;
+
+ rc = efx_farch_filter_from_gen_spec(&spec, gen_spec);
+ if (rc)
+ return rc;
+
+ table = &state->table[efx_farch_filter_spec_table_id(&spec)];
+ if (table->size == 0)
+ return -EINVAL;
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "%s: type %d search_limit=%d", __func__, spec.type,
+ table->search_limit[spec.type]);
+
+ if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
+ /* One filter spec per type */
+ BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0);
+ BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF !=
+ EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF);
+ rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF;
+ ins_index = rep_index;
+
+ spin_lock_bh(&efx->filter_lock);
+ } else {
+ /* Search concurrently for
+ * (1) a filter to be replaced (rep_index): any filter
+ * with the same match values, up to the current
+ * search depth for this type, and
+ * (2) the insertion point (ins_index): (1) or any
+ * free slot before it or up to the maximum search
+ * depth for this priority
+ * We fail if we cannot find (2).
+ *
+ * We can stop once either
+ * (a) we find (1), in which case we have definitely
+ * found (2) as well; or
+ * (b) we have searched exhaustively for (1), and have
+ * either found (2) or searched exhaustively for it
+ */
+ u32 key = efx_farch_filter_build(&filter, &spec);
+ unsigned int hash = efx_farch_filter_hash(key);
+ unsigned int incr = efx_farch_filter_increment(key);
+ unsigned int max_rep_depth = table->search_limit[spec.type];
+ unsigned int max_ins_depth =
+ spec.priority <= EFX_FILTER_PRI_HINT ?
+ EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX :
+ EFX_FARCH_FILTER_CTL_SRCH_MAX;
+ unsigned int i = hash & (table->size - 1);
+
+ ins_index = -1;
+ depth = 1;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (;;) {
+ if (!test_bit(i, table->used_bitmap)) {
+ if (ins_index < 0)
+ ins_index = i;
+ } else if (efx_farch_filter_equal(&spec,
+ &table->spec[i])) {
+ /* Case (a) */
+ if (ins_index < 0)
+ ins_index = i;
+ rep_index = i;
+ break;
+ }
+
+ if (depth >= max_rep_depth &&
+ (ins_index >= 0 || depth >= max_ins_depth)) {
+ /* Case (b) */
+ if (ins_index < 0) {
+ rc = -EBUSY;
+ goto out;
+ }
+ rep_index = -1;
+ break;
+ }
+
+ i = (i + incr) & (table->size - 1);
+ ++depth;
+ }
+ }
+
+ /* If we found a filter to be replaced, check whether we
+ * should do so
+ */
+ if (rep_index >= 0) {
+ struct efx_farch_filter_spec *saved_spec =
+ &table->spec[rep_index];
+
+ if (spec.priority == saved_spec->priority && !replace_equal) {
+ rc = -EEXIST;
+ goto out;
+ }
+ if (spec.priority < saved_spec->priority &&
+ !(saved_spec->priority == EFX_FILTER_PRI_REQUIRED &&
+ saved_spec->flags & EFX_FILTER_FLAG_RX_STACK)) {
+ rc = -EPERM;
+ goto out;
+ }
+ if (spec.flags & EFX_FILTER_FLAG_RX_STACK) {
+ /* Just make sure it won't be removed */
+ saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
+ rc = 0;
+ goto out;
+ }
+ /* Retain the RX_STACK flag */
+ spec.flags |= saved_spec->flags & EFX_FILTER_FLAG_RX_STACK;
+ }
+
+ /* Insert the filter */
+ if (ins_index != rep_index) {
+ __set_bit(ins_index, table->used_bitmap);
+ ++table->used;
+ }
+ table->spec[ins_index] = spec;
+
+ if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
+ efx_farch_filter_push_rx_config(efx);
+ } else {
+ if (table->search_limit[spec.type] < depth) {
+ table->search_limit[spec.type] = depth;
+ if (spec.flags & EFX_FILTER_FLAG_TX)
+ efx_farch_filter_push_tx_limits(efx);
+ else
+ efx_farch_filter_push_rx_config(efx);
+ }
+
+ efx_writeo(efx, &filter,
+ table->offset + table->step * ins_index);
+
+ /* If we were able to replace a filter by inserting
+ * at a lower depth, clear the replaced filter
+ */
+ if (ins_index != rep_index && rep_index >= 0)
+ efx_farch_filter_table_clear_entry(efx, table,
+ rep_index);
+ }
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "%s: filter type %d index %d rxq %u set",
+ __func__, spec.type, ins_index, spec.dmaq_id);
+ rc = efx_farch_filter_make_id(&spec, ins_index);
+
+out:
+ spin_unlock_bh(&efx->filter_lock);
+ return rc;
+}
+
+static void
+efx_farch_filter_table_clear_entry(struct efx_nic *efx,
+ struct efx_farch_filter_table *table,
+ unsigned int filter_idx)
+{
+ static efx_oword_t filter; /* zero-initialised (static), written to clear the HW entry */
+
+ EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
+ BUG_ON(table->offset == 0); /* can't clear MAC default filters */
+
+ __clear_bit(filter_idx, table->used_bitmap);
+ --table->used;
+ memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
+
+ efx_writeo(efx, &filter, table->offset + table->step * filter_idx);
+
+ /* If this filter required a greater search depth than
+ * any other, the search limit for its type can now be
+ * decreased. However, it is hard to determine that
+ * unless the table has become completely empty - in
+ * which case, all its search limits can be set to 0.
+ */
+ if (unlikely(table->used == 0)) {
+ memset(table->search_limit, 0, sizeof(table->search_limit));
+ if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC)
+ efx_farch_filter_push_tx_limits(efx);
+ else
+ efx_farch_filter_push_rx_config(efx);
+ }
+}
+
+static int efx_farch_filter_remove(struct efx_nic *efx,
+ struct efx_farch_filter_table *table,
+ unsigned int filter_idx,
+ enum efx_filter_priority priority)
+{
+ struct efx_farch_filter_spec *spec = &table->spec[filter_idx];
+
+ if (!test_bit(filter_idx, table->used_bitmap) ||
+ spec->priority > priority)
+ return -ENOENT;
+
+ if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
+ efx_farch_filter_init_rx_for_stack(efx, spec);
+ efx_farch_filter_push_rx_config(efx);
+ } else {
+ efx_farch_filter_table_clear_entry(efx, table, filter_idx);
+ }
+
+ return 0;
+}
+
+int efx_farch_filter_remove_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ unsigned int filter_idx;
+ struct efx_farch_filter_spec *spec;
+ int rc;
+
+ table_id = efx_farch_filter_id_table_id(filter_id);
+ if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
+ return -ENOENT;
+ table = &state->table[table_id];
+
+ filter_idx = efx_farch_filter_id_index(filter_id);
+ if (filter_idx >= table->size)
+ return -ENOENT;
+ spec = &table->spec[filter_idx];
+
+ spin_lock_bh(&efx->filter_lock);
+ rc = efx_farch_filter_remove(efx, table, filter_idx, priority);
+ spin_unlock_bh(&efx->filter_lock);
+
+ return rc;
+}
+
+int efx_farch_filter_get_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *spec_buf)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ struct efx_farch_filter_spec *spec;
+ unsigned int filter_idx;
+ int rc;
+
+ table_id = efx_farch_filter_id_table_id(filter_id);
+ if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
+ return -ENOENT;
+ table = &state->table[table_id];
+
+ filter_idx = efx_farch_filter_id_index(filter_id);
+ if (filter_idx >= table->size)
+ return -ENOENT;
+ spec = &table->spec[filter_idx];
+
+ spin_lock_bh(&efx->filter_lock);
+
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ spec->priority == priority) {
+ efx_farch_filter_to_gen_spec(spec_buf, spec);
+ rc = 0;
+ } else {
+ rc = -ENOENT;
+ }
+
+ spin_unlock_bh(&efx->filter_lock);
+
+ return rc;
+}
+
+static void
+efx_farch_filter_table_clear(struct efx_nic *efx,
+ enum efx_farch_filter_table_id table_id,
+ enum efx_filter_priority priority)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ struct efx_farch_filter_table *table = &state->table[table_id];
+ unsigned int filter_idx;
+
+ spin_lock_bh(&efx->filter_lock);
+ for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
+ efx_farch_filter_remove(efx, table, filter_idx, priority);
+ spin_unlock_bh(&efx->filter_lock);
+}
+
+void efx_farch_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
+ priority);
+ efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC,
+ priority);
+ efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF,
+ priority);
+}
+
+u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ unsigned int filter_idx;
+ u32 count = 0;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
+ table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
+ table_id++) {
+ table = &state->table[table_id];
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ table->spec[filter_idx].priority == priority)
+ ++count;
+ }
+ }
+
+ spin_unlock_bh(&efx->filter_lock);
+
+ return count;
+}
+
+s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ unsigned int filter_idx;
+ s32 count = 0;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
+ table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
+ table_id++) {
+ table = &state->table[table_id];
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ table->spec[filter_idx].priority == priority) {
+ if (count == size) {
+ count = -EMSGSIZE;
+ goto out;
+ }
+ buf[count++] = efx_farch_filter_make_id(
+ &table->spec[filter_idx], filter_idx);
+ }
+ }
+ }
+out:
+ spin_unlock_bh(&efx->filter_lock);
+
+ return count;
+}
+
+/* Restore filter state after reset */
+void efx_farch_filter_table_restore(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ efx_oword_t filter;
+ unsigned int filter_idx;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
+ table = &state->table[table_id];
+
+ /* Check whether this is a regular register table */
+ if (table->step == 0)
+ continue;
+
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (!test_bit(filter_idx, table->used_bitmap))
+ continue;
+ efx_farch_filter_build(&filter, &table->spec[filter_idx]);
+ efx_writeo(efx, &filter,
+ table->offset + table->step * filter_idx);
+ }
+ }
+
+ efx_farch_filter_push_rx_config(efx);
+ efx_farch_filter_push_tx_limits(efx);
+
+ spin_unlock_bh(&efx->filter_lock);
+}
+
+void efx_farch_filter_table_remove(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+
+ for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
+ kfree(state->table[table_id].used_bitmap);
+ vfree(state->table[table_id].spec);
+ }
+ kfree(state);
+}
+
+int efx_farch_filter_table_probe(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state;
+ struct efx_farch_filter_table *table;
+ unsigned table_id;
+
+ state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+ efx->filter_state = state;
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
+ table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
+ table->offset = FR_BZ_RX_FILTER_TBL0;
+ table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
+ table->step = FR_BZ_RX_FILTER_TBL0_STEP;
+ }
+
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
+ table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
+ table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
+ table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
+ table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
+ table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
+ table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
+ table->id = EFX_FARCH_FILTER_TABLE_TX_MAC;
+ table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
+ table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
+ table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
+ }
+
+ for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
+ table = &state->table[table_id];
+ if (table->size == 0)
+ continue;
+ table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!table->used_bitmap)
+ goto fail;
+ table->spec = vzalloc(table->size * sizeof(*table->spec));
+ if (!table->spec)
+ goto fail;
+ }
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
+ if (table->size) {
+ /* RX default filters must always exist */
+ struct efx_farch_filter_spec *spec;
+ unsigned i;
+
+ for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) {
+ spec = &table->spec[i];
+ spec->type = EFX_FARCH_FILTER_UC_DEF + i;
+ efx_farch_filter_init_rx_for_stack(efx, spec);
+ __set_bit(i, table->used_bitmap);
+ }
+ }
+
+ efx_farch_filter_push_rx_config(efx);
+
+ return 0;
+
+fail:
+ efx_farch_filter_table_remove(efx);
+ return -ENOMEM;
+}
+
+/* Update scatter enable flags for filters pointing to our own RX queues */
+void efx_farch_filter_update_rx_scatter(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ efx_oword_t filter;
+ unsigned int filter_idx;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
+ table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
+ table_id++) {
+ table = &state->table[table_id];
+
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (!test_bit(filter_idx, table->used_bitmap) ||
+ table->spec[filter_idx].dmaq_id >=
+ efx->n_rx_channels)
+ continue;
+
+ if (efx->rx_scatter)
+ table->spec[filter_idx].flags |=
+ EFX_FILTER_FLAG_RX_SCATTER;
+ else
+ table->spec[filter_idx].flags &=
+ ~EFX_FILTER_FLAG_RX_SCATTER;
+
+ if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF)
+ /* Pushed by efx_farch_filter_push_rx_config() */
+ continue;
+
+ efx_farch_filter_build(&filter, &table->spec[filter_idx]);
+ efx_writeo(efx, &filter,
+ table->offset + table->step * filter_idx);
+ }
+ }
+
+ efx_farch_filter_push_rx_config(efx);
+
+ spin_unlock_bh(&efx->filter_lock);
+}
+
+#ifdef CONFIG_RFS_ACCEL
+
+s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
+ struct efx_filter_spec *gen_spec)
+{
+ return efx_farch_filter_insert(efx, gen_spec, true);
+}
+
+bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
+ unsigned int index)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ struct efx_farch_filter_table *table =
+ &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
+
+ if (test_bit(index, table->used_bitmap) &&
+ table->spec[index].priority == EFX_FILTER_PRI_HINT &&
+ rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
+ flow_id, index)) {
+ efx_farch_filter_table_clear_entry(efx, table, index);
+ return true;
+ }
+
+ return false;
+}
+
+#endif /* CONFIG_RFS_ACCEL */
+
+void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
+{
+ struct net_device *net_dev = efx->net_dev;
+ struct netdev_hw_addr *ha;
+ union efx_multicast_hash *mc_hash = &efx->multicast_hash;
+ u32 crc;
+ int bit;
+
+ netif_addr_lock_bh(net_dev);
+
+ efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
+
+ /* Build multicast hash table */
+ if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
+ memset(mc_hash, 0xff, sizeof(*mc_hash));
+ } else {
+ memset(mc_hash, 0x00, sizeof(*mc_hash));
+ netdev_for_each_mc_addr(ha, net_dev) {
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
+ bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
+ __set_bit_le(bit, mc_hash);
+ }
+
+ /* Broadcast packets go through the multicast hash filter.
+ * ether_crc_le() of the broadcast address is 0xbe2612ff
+ * so we always add bit 0xff to the mask.
+ */
+ __set_bit_le(0xff, mc_hash);
+ }
+
+ netif_addr_unlock_bh(net_dev);
+}
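
The multicast hash logic just above is small enough to verify by hand. Below is a minimal userspace sketch of the same computation: it reimplements the kernel's ether_crc_le() (bit-reflected CRC-32, polynomial 0xedb88320, initial value ~0, no final inversion) and assumes a 256-entry hash table, so the bit number is the low 8 bits of the CRC. The helper and names are illustrative, not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* Bit-reflected CRC-32 over len bytes, init ~0, no final XOR --
 * equivalent to the kernel's ether_crc_le(). */
static uint32_t ether_crc_le(int len, const uint8_t *data)
{
	uint32_t crc = 0xffffffff;

	while (len--) {
		crc ^= *data++;
		for (int bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ (0xedb88320 & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	static const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	uint32_t crc = ether_crc_le(6, bcast);

	/* Matches the comment in the code above: the CRC of the broadcast
	 * address is 0xbe2612ff, so the hash bit (low 8 bits) is 0xff. */
	printf("crc=0x%08x bit=0x%02x\n", crc, crc & 0xffu);
	return 0;
}
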
diff --git a/drivers/net/ethernet/sfc/regs.h b/drivers/net/ethernet/sfc/farch_regs.h
index ade4c4dc56ca..7019a712e799 100644
--- a/drivers/net/ethernet/sfc/regs.h
+++ b/drivers/net/ethernet/sfc/farch_regs.h
@@ -1,15 +1,15 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
-#ifndef EFX_REGS_H
-#define EFX_REGS_H
+#ifndef EFX_FARCH_REGS_H
+#define EFX_FARCH_REGS_H
/*
* Falcon hardware architecture definitions have a name prefix following
@@ -2925,264 +2925,8 @@
#define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0
#define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32
-/**************************************************************************
- *
- * Falcon MAC stats
- *
- **************************************************************************
- *
- */
-
-#define GRxGoodOct_offset 0x0
-#define GRxGoodOct_WIDTH 48
-#define GRxBadOct_offset 0x8
-#define GRxBadOct_WIDTH 48
-#define GRxMissPkt_offset 0x10
-#define GRxMissPkt_WIDTH 32
-#define GRxFalseCRS_offset 0x14
-#define GRxFalseCRS_WIDTH 32
-#define GRxPausePkt_offset 0x18
-#define GRxPausePkt_WIDTH 32
-#define GRxBadPkt_offset 0x1C
-#define GRxBadPkt_WIDTH 32
-#define GRxUcastPkt_offset 0x20
-#define GRxUcastPkt_WIDTH 32
-#define GRxMcastPkt_offset 0x24
-#define GRxMcastPkt_WIDTH 32
-#define GRxBcastPkt_offset 0x28
-#define GRxBcastPkt_WIDTH 32
-#define GRxGoodLt64Pkt_offset 0x2C
-#define GRxGoodLt64Pkt_WIDTH 32
-#define GRxBadLt64Pkt_offset 0x30
-#define GRxBadLt64Pkt_WIDTH 32
-#define GRx64Pkt_offset 0x34
-#define GRx64Pkt_WIDTH 32
-#define GRx65to127Pkt_offset 0x38
-#define GRx65to127Pkt_WIDTH 32
-#define GRx128to255Pkt_offset 0x3C
-#define GRx128to255Pkt_WIDTH 32
-#define GRx256to511Pkt_offset 0x40
-#define GRx256to511Pkt_WIDTH 32
-#define GRx512to1023Pkt_offset 0x44
-#define GRx512to1023Pkt_WIDTH 32
-#define GRx1024to15xxPkt_offset 0x48
-#define GRx1024to15xxPkt_WIDTH 32
-#define GRx15xxtoJumboPkt_offset 0x4C
-#define GRx15xxtoJumboPkt_WIDTH 32
-#define GRxGtJumboPkt_offset 0x50
-#define GRxGtJumboPkt_WIDTH 32
-#define GRxFcsErr64to15xxPkt_offset 0x54
-#define GRxFcsErr64to15xxPkt_WIDTH 32
-#define GRxFcsErr15xxtoJumboPkt_offset 0x58
-#define GRxFcsErr15xxtoJumboPkt_WIDTH 32
-#define GRxFcsErrGtJumboPkt_offset 0x5C
-#define GRxFcsErrGtJumboPkt_WIDTH 32
-#define GTxGoodBadOct_offset 0x80
-#define GTxGoodBadOct_WIDTH 48
-#define GTxGoodOct_offset 0x88
-#define GTxGoodOct_WIDTH 48
-#define GTxSglColPkt_offset 0x90
-#define GTxSglColPkt_WIDTH 32
-#define GTxMultColPkt_offset 0x94
-#define GTxMultColPkt_WIDTH 32
-#define GTxExColPkt_offset 0x98
-#define GTxExColPkt_WIDTH 32
-#define GTxDefPkt_offset 0x9C
-#define GTxDefPkt_WIDTH 32
-#define GTxLateCol_offset 0xA0
-#define GTxLateCol_WIDTH 32
-#define GTxExDefPkt_offset 0xA4
-#define GTxExDefPkt_WIDTH 32
-#define GTxPausePkt_offset 0xA8
-#define GTxPausePkt_WIDTH 32
-#define GTxBadPkt_offset 0xAC
-#define GTxBadPkt_WIDTH 32
-#define GTxUcastPkt_offset 0xB0
-#define GTxUcastPkt_WIDTH 32
-#define GTxMcastPkt_offset 0xB4
-#define GTxMcastPkt_WIDTH 32
-#define GTxBcastPkt_offset 0xB8
-#define GTxBcastPkt_WIDTH 32
-#define GTxLt64Pkt_offset 0xBC
-#define GTxLt64Pkt_WIDTH 32
-#define GTx64Pkt_offset 0xC0
-#define GTx64Pkt_WIDTH 32
-#define GTx65to127Pkt_offset 0xC4
-#define GTx65to127Pkt_WIDTH 32
-#define GTx128to255Pkt_offset 0xC8
-#define GTx128to255Pkt_WIDTH 32
-#define GTx256to511Pkt_offset 0xCC
-#define GTx256to511Pkt_WIDTH 32
-#define GTx512to1023Pkt_offset 0xD0
-#define GTx512to1023Pkt_WIDTH 32
-#define GTx1024to15xxPkt_offset 0xD4
-#define GTx1024to15xxPkt_WIDTH 32
-#define GTx15xxtoJumboPkt_offset 0xD8
-#define GTx15xxtoJumboPkt_WIDTH 32
-#define GTxGtJumboPkt_offset 0xDC
-#define GTxGtJumboPkt_WIDTH 32
-#define GTxNonTcpUdpPkt_offset 0xE0
-#define GTxNonTcpUdpPkt_WIDTH 16
-#define GTxMacSrcErrPkt_offset 0xE4
-#define GTxMacSrcErrPkt_WIDTH 16
-#define GTxIpSrcErrPkt_offset 0xE8
-#define GTxIpSrcErrPkt_WIDTH 16
-#define GDmaDone_offset 0xEC
-#define GDmaDone_WIDTH 32
-
-#define XgRxOctets_offset 0x0
-#define XgRxOctets_WIDTH 48
-#define XgRxOctetsOK_offset 0x8
-#define XgRxOctetsOK_WIDTH 48
-#define XgRxPkts_offset 0x10
-#define XgRxPkts_WIDTH 32
-#define XgRxPktsOK_offset 0x14
-#define XgRxPktsOK_WIDTH 32
-#define XgRxBroadcastPkts_offset 0x18
-#define XgRxBroadcastPkts_WIDTH 32
-#define XgRxMulticastPkts_offset 0x1C
-#define XgRxMulticastPkts_WIDTH 32
-#define XgRxUnicastPkts_offset 0x20
-#define XgRxUnicastPkts_WIDTH 32
-#define XgRxUndersizePkts_offset 0x24
-#define XgRxUndersizePkts_WIDTH 32
-#define XgRxOversizePkts_offset 0x28
-#define XgRxOversizePkts_WIDTH 32
-#define XgRxJabberPkts_offset 0x2C
-#define XgRxJabberPkts_WIDTH 32
-#define XgRxUndersizeFCSerrorPkts_offset 0x30
-#define XgRxUndersizeFCSerrorPkts_WIDTH 32
-#define XgRxDropEvents_offset 0x34
-#define XgRxDropEvents_WIDTH 32
-#define XgRxFCSerrorPkts_offset 0x38
-#define XgRxFCSerrorPkts_WIDTH 32
-#define XgRxAlignError_offset 0x3C
-#define XgRxAlignError_WIDTH 32
-#define XgRxSymbolError_offset 0x40
-#define XgRxSymbolError_WIDTH 32
-#define XgRxInternalMACError_offset 0x44
-#define XgRxInternalMACError_WIDTH 32
-#define XgRxControlPkts_offset 0x48
-#define XgRxControlPkts_WIDTH 32
-#define XgRxPausePkts_offset 0x4C
-#define XgRxPausePkts_WIDTH 32
-#define XgRxPkts64Octets_offset 0x50
-#define XgRxPkts64Octets_WIDTH 32
-#define XgRxPkts65to127Octets_offset 0x54
-#define XgRxPkts65to127Octets_WIDTH 32
-#define XgRxPkts128to255Octets_offset 0x58
-#define XgRxPkts128to255Octets_WIDTH 32
-#define XgRxPkts256to511Octets_offset 0x5C
-#define XgRxPkts256to511Octets_WIDTH 32
-#define XgRxPkts512to1023Octets_offset 0x60
-#define XgRxPkts512to1023Octets_WIDTH 32
-#define XgRxPkts1024to15xxOctets_offset 0x64
-#define XgRxPkts1024to15xxOctets_WIDTH 32
-#define XgRxPkts15xxtoMaxOctets_offset 0x68
-#define XgRxPkts15xxtoMaxOctets_WIDTH 32
-#define XgRxLengthError_offset 0x6C
-#define XgRxLengthError_WIDTH 32
-#define XgTxPkts_offset 0x80
-#define XgTxPkts_WIDTH 32
-#define XgTxOctets_offset 0x88
-#define XgTxOctets_WIDTH 48
-#define XgTxMulticastPkts_offset 0x90
-#define XgTxMulticastPkts_WIDTH 32
-#define XgTxBroadcastPkts_offset 0x94
-#define XgTxBroadcastPkts_WIDTH 32
-#define XgTxUnicastPkts_offset 0x98
-#define XgTxUnicastPkts_WIDTH 32
-#define XgTxControlPkts_offset 0x9C
-#define XgTxControlPkts_WIDTH 32
-#define XgTxPausePkts_offset 0xA0
-#define XgTxPausePkts_WIDTH 32
-#define XgTxPkts64Octets_offset 0xA4
-#define XgTxPkts64Octets_WIDTH 32
-#define XgTxPkts65to127Octets_offset 0xA8
-#define XgTxPkts65to127Octets_WIDTH 32
-#define XgTxPkts128to255Octets_offset 0xAC
-#define XgTxPkts128to255Octets_WIDTH 32
-#define XgTxPkts256to511Octets_offset 0xB0
-#define XgTxPkts256to511Octets_WIDTH 32
-#define XgTxPkts512to1023Octets_offset 0xB4
-#define XgTxPkts512to1023Octets_WIDTH 32
-#define XgTxPkts1024to15xxOctets_offset 0xB8
-#define XgTxPkts1024to15xxOctets_WIDTH 32
-#define XgTxPkts1519toMaxOctets_offset 0xBC
-#define XgTxPkts1519toMaxOctets_WIDTH 32
-#define XgTxUndersizePkts_offset 0xC0
-#define XgTxUndersizePkts_WIDTH 32
-#define XgTxOversizePkts_offset 0xC4
-#define XgTxOversizePkts_WIDTH 32
-#define XgTxNonTcpUdpPkt_offset 0xC8
-#define XgTxNonTcpUdpPkt_WIDTH 16
-#define XgTxMacSrcErrPkt_offset 0xCC
-#define XgTxMacSrcErrPkt_WIDTH 16
-#define XgTxIpSrcErrPkt_offset 0xD0
-#define XgTxIpSrcErrPkt_WIDTH 16
-#define XgDmaDone_offset 0xD4
-#define XgDmaDone_WIDTH 32
-
-#define FALCON_STATS_NOT_DONE 0x00000000
-#define FALCON_STATS_DONE 0xffffffff
-
-/**************************************************************************
- *
- * Falcon non-volatile configuration
- *
- **************************************************************************
- */
+/* RX packet prefix */
+#define FS_BZ_RX_PREFIX_HASH_OFST 12
+#define FS_BZ_RX_PREFIX_SIZE 16
-/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
-struct falcon_nvconfig_board_v2 {
- __le16 nports;
- u8 port0_phy_addr;
- u8 port0_phy_type;
- u8 port1_phy_addr;
- u8 port1_phy_type;
- __le16 asic_sub_revision;
- __le16 board_revision;
-} __packed;
-
-/* Board configuration v3 extra information */
-struct falcon_nvconfig_board_v3 {
- __le32 spi_device_type[2];
-} __packed;
-
-/* Bit numbers for spi_device_type */
-#define SPI_DEV_TYPE_SIZE_LBN 0
-#define SPI_DEV_TYPE_SIZE_WIDTH 5
-#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
-#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
-#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
-#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
-#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
-#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
-#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
-#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
-#define SPI_DEV_TYPE_FIELD(type, field) \
- (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
-
-#define FALCON_NVCONFIG_OFFSET 0x300
-
-#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
-struct falcon_nvconfig {
- efx_oword_t ee_vpd_cfg_reg; /* 0x300 */
- u8 mac_address[2][8]; /* 0x310 */
- efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */
- efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */
- efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */
- efx_oword_t hw_init_reg; /* 0x350 */
- efx_oword_t nic_stat_reg; /* 0x360 */
- efx_oword_t glb_ctl_reg; /* 0x370 */
- efx_oword_t srm_cfg_reg; /* 0x380 */
- efx_oword_t spare_reg; /* 0x390 */
- __le16 board_magic_num; /* 0x3A0 */
- __le16 board_struct_ver;
- __le16 board_checksum;
- struct falcon_nvconfig_board_v2 board_v2;
- efx_oword_t ee_base_page_reg; /* 0x3B0 */
- struct falcon_nvconfig_board_v3 board_v3; /* 0x3C0 */
-} __packed;
-
-#endif /* EFX_REGS_H */
+#endif /* EFX_FARCH_REGS_H */
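
The two FS_BZ_RX_PREFIX_* values that replace the removed MAC-stats and nvconfig blocks describe the 16-byte prefix that B0-and-later hardware places in front of each received packet, with a 32-bit RX hash at byte offset 12. Below is a sketch of pulling that hash out, assuming (as the driver's own accessor does) that the field is little-endian, reading byte-wise to sidestep alignment traps; rx_prefix_hash() is a made-up name for illustration.

#include <stdint.h>

#define FS_BZ_RX_PREFIX_HASH_OFST 12
#define FS_BZ_RX_PREFIX_SIZE      16

/* Read the 32-bit little-endian RX hash out of the hardware prefix.
 * The packet data itself follows FS_BZ_RX_PREFIX_SIZE bytes in when
 * hash insertion is enabled. */
static uint32_t rx_prefix_hash(const uint8_t *prefix)
{
	const uint8_t *p = prefix + FS_BZ_RX_PREFIX_HASH_OFST;

	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}
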
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
deleted file mode 100644
index 30d744235d27..000000000000
--- a/drivers/net/ethernet/sfc/filter.c
+++ /dev/null
@@ -1,1274 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2005-2010 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#include <linux/in.h>
-#include <net/ip.h>
-#include "efx.h"
-#include "filter.h"
-#include "io.h"
-#include "nic.h"
-#include "regs.h"
-
-/* "Fudge factors" - difference between programmed value and actual depth.
- * Due to pipelined implementation we need to program H/W with a value that
- * is larger than the hop limit we want.
- */
-#define FILTER_CTL_SRCH_FUDGE_WILD 3
-#define FILTER_CTL_SRCH_FUDGE_FULL 1
-
-/* Hard maximum hop limit. Hardware will time-out beyond 200-something.
- * We also need to avoid infinite loops in efx_filter_search() when the
- * table is full.
- */
-#define FILTER_CTL_SRCH_MAX 200
-
-/* Don't try very hard to find space for performance hints, as this is
- * counter-productive. */
-#define FILTER_CTL_SRCH_HINT_MAX 5
-
-enum efx_filter_table_id {
- EFX_FILTER_TABLE_RX_IP = 0,
- EFX_FILTER_TABLE_RX_MAC,
- EFX_FILTER_TABLE_RX_DEF,
- EFX_FILTER_TABLE_TX_MAC,
- EFX_FILTER_TABLE_COUNT,
-};
-
-enum efx_filter_index {
- EFX_FILTER_INDEX_UC_DEF,
- EFX_FILTER_INDEX_MC_DEF,
- EFX_FILTER_SIZE_RX_DEF,
-};
-
-struct efx_filter_table {
- enum efx_filter_table_id id;
- u32 offset; /* address of table relative to BAR */
- unsigned size; /* number of entries */
- unsigned step; /* step between entries */
- unsigned used; /* number currently used */
- unsigned long *used_bitmap;
- struct efx_filter_spec *spec;
- unsigned search_depth[EFX_FILTER_TYPE_COUNT];
-};
-
-struct efx_filter_state {
- spinlock_t lock;
- struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
-#ifdef CONFIG_RFS_ACCEL
- u32 *rps_flow_id;
- unsigned rps_expire_index;
-#endif
-};
-
-static void efx_filter_table_clear_entry(struct efx_nic *efx,
- struct efx_filter_table *table,
- unsigned int filter_idx);
-
-/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
- * key derived from the n-tuple. The initial LFSR state is 0xffff. */
-static u16 efx_filter_hash(u32 key)
-{
- u16 tmp;
-
- /* First 16 rounds */
- tmp = 0x1fff ^ key >> 16;
- tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
- tmp = tmp ^ tmp >> 9;
- /* Last 16 rounds */
- tmp = tmp ^ tmp << 13 ^ key;
- tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
- return tmp ^ tmp >> 9;
-}
-
-/* To allow for hash collisions, filter search continues at these
- * increments from the first possible entry selected by the hash. */
-static u16 efx_filter_increment(u32 key)
-{
- return key * 2 - 1;
-}
-
-static enum efx_filter_table_id
-efx_filter_spec_table_id(const struct efx_filter_spec *spec)
-{
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_FULL >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_WILD >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_FULL >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_WILD >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_FULL >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_WILD >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_TX_MAC != EFX_FILTER_TABLE_RX_MAC + 2);
- EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC);
- return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
-}
-
-static struct efx_filter_table *
-efx_filter_spec_table(struct efx_filter_state *state,
- const struct efx_filter_spec *spec)
-{
- if (spec->type == EFX_FILTER_UNSPEC)
- return NULL;
- else
- return &state->table[efx_filter_spec_table_id(spec)];
-}
-
-static void efx_filter_table_reset_search_depth(struct efx_filter_table *table)
-{
- memset(table->search_depth, 0, sizeof(table->search_depth));
-}
-
-static void efx_filter_push_rx_config(struct efx_nic *efx)
-{
- struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_table *table;
- efx_oword_t filter_ctl;
-
- efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
-
- table = &state->table[EFX_FILTER_TABLE_RX_IP];
- EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
- table->search_depth[EFX_FILTER_TCP_FULL] +
- FILTER_CTL_SRCH_FUDGE_FULL);
- EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
- table->search_depth[EFX_FILTER_TCP_WILD] +
- FILTER_CTL_SRCH_FUDGE_WILD);
- EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
- table->search_depth[EFX_FILTER_UDP_FULL] +
- FILTER_CTL_SRCH_FUDGE_FULL);
- EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
- table->search_depth[EFX_FILTER_UDP_WILD] +
- FILTER_CTL_SRCH_FUDGE_WILD);
-
- table = &state->table[EFX_FILTER_TABLE_RX_MAC];
- if (table->size) {
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
- table->search_depth[EFX_FILTER_MAC_FULL] +
- FILTER_CTL_SRCH_FUDGE_FULL);
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
- table->search_depth[EFX_FILTER_MAC_WILD] +
- FILTER_CTL_SRCH_FUDGE_WILD);
- }
-
- table = &state->table[EFX_FILTER_TABLE_RX_DEF];
- if (table->size) {
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
- table->spec[EFX_FILTER_INDEX_UC_DEF].dmaq_id);
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
- !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
- EFX_FILTER_FLAG_RX_RSS));
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
- table->spec[EFX_FILTER_INDEX_MC_DEF].dmaq_id);
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
- !!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
- EFX_FILTER_FLAG_RX_RSS));
-
- /* There is a single bit to enable RX scatter for all
- * unmatched packets. Only set it if scatter is
- * enabled in both filter specs.
- */
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
- !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
- table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
- EFX_FILTER_FLAG_RX_SCATTER));
- } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- /* We don't expose 'default' filters because unmatched
- * packets always go to the queue number found in the
- * RSS table. But we still need to set the RX scatter
- * bit here.
- */
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
- efx->rx_scatter);
- }
-
- efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
-}
-
-static void efx_filter_push_tx_limits(struct efx_nic *efx)
-{
- struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_table *table;
- efx_oword_t tx_cfg;
-
- efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
-
- table = &state->table[EFX_FILTER_TABLE_TX_MAC];
- if (table->size) {
- EFX_SET_OWORD_FIELD(
- tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
- table->search_depth[EFX_FILTER_MAC_FULL] +
- FILTER_CTL_SRCH_FUDGE_FULL);
- EFX_SET_OWORD_FIELD(
- tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
- table->search_depth[EFX_FILTER_MAC_WILD] +
- FILTER_CTL_SRCH_FUDGE_WILD);
- }
-
- efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
-}
-
-static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec,
- __be32 host1, __be16 port1,
- __be32 host2, __be16 port2)
-{
- spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
- spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
- spec->data[2] = ntohl(host2);
-}
-
-static inline void __efx_filter_get_ipv4(const struct efx_filter_spec *spec,
- __be32 *host1, __be16 *port1,
- __be32 *host2, __be16 *port2)
-{
- *host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
- *port1 = htons(spec->data[0]);
- *host2 = htonl(spec->data[2]);
- *port2 = htons(spec->data[1] >> 16);
-}
-
-/**
- * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
- * @spec: Specification to initialise
- * @proto: Transport layer protocol number
- * @host: Local host address (network byte order)
- * @port: Local port (network byte order)
- */
-int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
- __be32 host, __be16 port)
-{
- __be32 host1;
- __be16 port1;
-
- EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
-
- /* This cannot currently be combined with other filtering */
- if (spec->type != EFX_FILTER_UNSPEC)
- return -EPROTONOSUPPORT;
-
- if (port == 0)
- return -EINVAL;
-
- switch (proto) {
- case IPPROTO_TCP:
- spec->type = EFX_FILTER_TCP_WILD;
- break;
- case IPPROTO_UDP:
- spec->type = EFX_FILTER_UDP_WILD;
- break;
- default:
- return -EPROTONOSUPPORT;
- }
-
- /* Filter is constructed in terms of source and destination,
- * with the odd wrinkle that the ports are swapped in a UDP
- * wildcard filter. We need to convert from local and remote
- * (= zero for wildcard) addresses.
- */
- host1 = 0;
- if (proto != IPPROTO_UDP) {
- port1 = 0;
- } else {
- port1 = port;
- port = 0;
- }
-
- __efx_filter_set_ipv4(spec, host1, port1, host, port);
- return 0;
-}
-
-int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
- u8 *proto, __be32 *host, __be16 *port)
-{
- __be32 host1;
- __be16 port1;
-
- switch (spec->type) {
- case EFX_FILTER_TCP_WILD:
- *proto = IPPROTO_TCP;
- __efx_filter_get_ipv4(spec, &host1, &port1, host, port);
- return 0;
- case EFX_FILTER_UDP_WILD:
- *proto = IPPROTO_UDP;
- __efx_filter_get_ipv4(spec, &host1, port, host, &port1);
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-/**
- * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
- * @spec: Specification to initialise
- * @proto: Transport layer protocol number
- * @host: Local host address (network byte order)
- * @port: Local port (network byte order)
- * @rhost: Remote host address (network byte order)
- * @rport: Remote port (network byte order)
- */
-int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
- __be32 host, __be16 port,
- __be32 rhost, __be16 rport)
-{
- EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
-
- /* This cannot currently be combined with other filtering */
- if (spec->type != EFX_FILTER_UNSPEC)
- return -EPROTONOSUPPORT;
-
- if (port == 0 || rport == 0)
- return -EINVAL;
-
- switch (proto) {
- case IPPROTO_TCP:
- spec->type = EFX_FILTER_TCP_FULL;
- break;
- case IPPROTO_UDP:
- spec->type = EFX_FILTER_UDP_FULL;
- break;
- default:
- return -EPROTONOSUPPORT;
- }
-
- __efx_filter_set_ipv4(spec, rhost, rport, host, port);
- return 0;
-}
-
-int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
- u8 *proto, __be32 *host, __be16 *port,
- __be32 *rhost, __be16 *rport)
-{
- switch (spec->type) {
- case EFX_FILTER_TCP_FULL:
- *proto = IPPROTO_TCP;
- break;
- case EFX_FILTER_UDP_FULL:
- *proto = IPPROTO_UDP;
- break;
- default:
- return -EINVAL;
- }
-
- __efx_filter_get_ipv4(spec, rhost, rport, host, port);
- return 0;
-}
-
-/**
- * efx_filter_set_eth_local - specify local Ethernet address and optional VID
- * @spec: Specification to initialise
- * @vid: VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
- * @addr: Local Ethernet MAC address
- */
-int efx_filter_set_eth_local(struct efx_filter_spec *spec,
- u16 vid, const u8 *addr)
-{
- EFX_BUG_ON_PARANOID(!(spec->flags &
- (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
-
- /* This cannot currently be combined with other filtering */
- if (spec->type != EFX_FILTER_UNSPEC)
- return -EPROTONOSUPPORT;
-
- if (vid == EFX_FILTER_VID_UNSPEC) {
- spec->type = EFX_FILTER_MAC_WILD;
- spec->data[0] = 0;
- } else {
- spec->type = EFX_FILTER_MAC_FULL;
- spec->data[0] = vid;
- }
-
- spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
- spec->data[2] = addr[0] << 8 | addr[1];
- return 0;
-}
-
-/**
- * efx_filter_set_uc_def - specify matching otherwise-unmatched unicast
- * @spec: Specification to initialise
- */
-int efx_filter_set_uc_def(struct efx_filter_spec *spec)
-{
- EFX_BUG_ON_PARANOID(!(spec->flags &
- (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
-
- if (spec->type != EFX_FILTER_UNSPEC)
- return -EINVAL;
-
- spec->type = EFX_FILTER_UC_DEF;
- memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
- return 0;
-}
-
-/**
- * efx_filter_set_mc_def - specify matching otherwise-unmatched multicast
- * @spec: Specification to initialise
- */
-int efx_filter_set_mc_def(struct efx_filter_spec *spec)
-{
- EFX_BUG_ON_PARANOID(!(spec->flags &
- (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
-
- if (spec->type != EFX_FILTER_UNSPEC)
- return -EINVAL;
-
- spec->type = EFX_FILTER_MC_DEF;
- memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
- return 0;
-}
-
-static void efx_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx)
-{
- struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_DEF];
- struct efx_filter_spec *spec = &table->spec[filter_idx];
- enum efx_filter_flags flags = 0;
-
- /* If there's only one channel then disable RSS for non VF
- * traffic, thereby allowing VFs to use RSS when the PF can't.
- */
- if (efx->n_rx_channels > 1)
- flags |= EFX_FILTER_FLAG_RX_RSS;
-
- if (efx->rx_scatter)
- flags |= EFX_FILTER_FLAG_RX_SCATTER;
-
- efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL, flags, 0);
- spec->type = EFX_FILTER_UC_DEF + filter_idx;
- table->used_bitmap[0] |= 1 << filter_idx;
-}
-
-int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
- u16 *vid, u8 *addr)
-{
- switch (spec->type) {
- case EFX_FILTER_MAC_WILD:
- *vid = EFX_FILTER_VID_UNSPEC;
- break;
- case EFX_FILTER_MAC_FULL:
- *vid = spec->data[0];
- break;
- default:
- return -EINVAL;
- }
-
- addr[0] = spec->data[2] >> 8;
- addr[1] = spec->data[2];
- addr[2] = spec->data[1] >> 24;
- addr[3] = spec->data[1] >> 16;
- addr[4] = spec->data[1] >> 8;
- addr[5] = spec->data[1];
- return 0;
-}
-
-/* Build a filter entry and return its n-tuple key. */
-static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
-{
- u32 data3;
-
- switch (efx_filter_spec_table_id(spec)) {
- case EFX_FILTER_TABLE_RX_IP: {
- bool is_udp = (spec->type == EFX_FILTER_UDP_FULL ||
- spec->type == EFX_FILTER_UDP_WILD);
- EFX_POPULATE_OWORD_7(
- *filter,
- FRF_BZ_RSS_EN,
- !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
- FRF_BZ_SCATTER_EN,
- !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
- FRF_BZ_TCP_UDP, is_udp,
- FRF_BZ_RXQ_ID, spec->dmaq_id,
- EFX_DWORD_2, spec->data[2],
- EFX_DWORD_1, spec->data[1],
- EFX_DWORD_0, spec->data[0]);
- data3 = is_udp;
- break;
- }
-
- case EFX_FILTER_TABLE_RX_MAC: {
- bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
- EFX_POPULATE_OWORD_7(
- *filter,
- FRF_CZ_RMFT_RSS_EN,
- !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
- FRF_CZ_RMFT_SCATTER_EN,
- !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
- FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
- FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
- FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
- FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
- FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
- data3 = is_wild;
- break;
- }
-
- case EFX_FILTER_TABLE_TX_MAC: {
- bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
- EFX_POPULATE_OWORD_5(*filter,
- FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
- FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
- FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
- FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
- FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
- data3 = is_wild | spec->dmaq_id << 1;
- break;
- }
-
- default:
- BUG();
- }
-
- return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
-}
-
-static bool efx_filter_equal(const struct efx_filter_spec *left,
- const struct efx_filter_spec *right)
-{
- if (left->type != right->type ||
- memcmp(left->data, right->data, sizeof(left->data)))
- return false;
-
- if (left->flags & EFX_FILTER_FLAG_TX &&
- left->dmaq_id != right->dmaq_id)
- return false;
-
- return true;
-}
-
-/*
- * Construct/deconstruct external filter IDs. At least the RX filter
- * IDs must be ordered by matching priority, for RX NFC semantics.
- *
- * Deconstruction needs to be robust against invalid IDs so that
- * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
- * accept user-provided IDs.
- */
-
-#define EFX_FILTER_MATCH_PRI_COUNT 5
-
-static const u8 efx_filter_type_match_pri[EFX_FILTER_TYPE_COUNT] = {
- [EFX_FILTER_TCP_FULL] = 0,
- [EFX_FILTER_UDP_FULL] = 0,
- [EFX_FILTER_TCP_WILD] = 1,
- [EFX_FILTER_UDP_WILD] = 1,
- [EFX_FILTER_MAC_FULL] = 2,
- [EFX_FILTER_MAC_WILD] = 3,
- [EFX_FILTER_UC_DEF] = 4,
- [EFX_FILTER_MC_DEF] = 4,
-};
-
-static const enum efx_filter_table_id efx_filter_range_table[] = {
- EFX_FILTER_TABLE_RX_IP, /* RX match pri 0 */
- EFX_FILTER_TABLE_RX_IP,
- EFX_FILTER_TABLE_RX_MAC,
- EFX_FILTER_TABLE_RX_MAC,
- EFX_FILTER_TABLE_RX_DEF, /* RX match pri 4 */
- EFX_FILTER_TABLE_COUNT, /* TX match pri 0; invalid */
- EFX_FILTER_TABLE_COUNT, /* invalid */
- EFX_FILTER_TABLE_TX_MAC,
- EFX_FILTER_TABLE_TX_MAC, /* TX match pri 3 */
-};
-
-#define EFX_FILTER_INDEX_WIDTH 13
-#define EFX_FILTER_INDEX_MASK ((1 << EFX_FILTER_INDEX_WIDTH) - 1)
-
-static inline u32
-efx_filter_make_id(const struct efx_filter_spec *spec, unsigned int index)
-{
- unsigned int range;
-
- range = efx_filter_type_match_pri[spec->type];
- if (!(spec->flags & EFX_FILTER_FLAG_RX))
- range += EFX_FILTER_MATCH_PRI_COUNT;
-
- return range << EFX_FILTER_INDEX_WIDTH | index;
-}
-
-static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id)
-{
- unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
-
- if (range < ARRAY_SIZE(efx_filter_range_table))
- return efx_filter_range_table[range];
- else
- return EFX_FILTER_TABLE_COUNT; /* invalid */
-}
-
-static inline unsigned int efx_filter_id_index(u32 id)
-{
- return id & EFX_FILTER_INDEX_MASK;
-}
-
-static inline u8 efx_filter_id_flags(u32 id)
-{
- unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
-
- if (range < EFX_FILTER_MATCH_PRI_COUNT)
- return EFX_FILTER_FLAG_RX;
- else
- return EFX_FILTER_FLAG_TX;
-}
-
-u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
-{
- struct efx_filter_state *state = efx->filter_state;
- unsigned int range = EFX_FILTER_MATCH_PRI_COUNT - 1;
- enum efx_filter_table_id table_id;
-
- do {
- table_id = efx_filter_range_table[range];
- if (state->table[table_id].size != 0)
- return range << EFX_FILTER_INDEX_WIDTH |
- state->table[table_id].size;
- } while (range--);
-
- return 0;
-}
-
-/**
- * efx_filter_insert_filter - add or replace a filter
- * @efx: NIC in which to insert the filter
- * @spec: Specification for the filter
- * @replace_equal: Flag for whether the specified filter may replace an
- * existing filter with equal priority
- *
- * On success, return the filter ID.
- * On failure, return a negative error code.
- *
- * If an existing filter has equal match values to the new filter
- * spec, then the new filter might replace it, depending on the
- * relative priorities. If the existing filter has lower priority, or
- * if @replace_equal is set and it has equal priority, then it is
- * replaced. Otherwise the function fails, returning -%EPERM if
- * the existing filter has higher priority or -%EEXIST if it has
- * equal priority.
- */
-s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
- bool replace_equal)
-{
- struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_table *table = efx_filter_spec_table(state, spec);
- efx_oword_t filter;
- int rep_index, ins_index;
- unsigned int depth = 0;
- int rc;
-
- if (!table || table->size == 0)
- return -EINVAL;
-
- netif_vdbg(efx, hw, efx->net_dev,
- "%s: type %d search_depth=%d", __func__, spec->type,
- table->search_depth[spec->type]);
-
- if (table->id == EFX_FILTER_TABLE_RX_DEF) {
- /* One filter spec per type */
- BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
- BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
- EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
- rep_index = spec->type - EFX_FILTER_UC_DEF;
- ins_index = rep_index;
-
- spin_lock_bh(&state->lock);
- } else {
- /* Search concurrently for
- * (1) a filter to be replaced (rep_index): any filter
- * with the same match values, up to the current
- * search depth for this type, and
- * (2) the insertion point (ins_index): (1) or any
- * free slot before it or up to the maximum search
- * depth for this priority
- * We fail if we cannot find (2).
- *
- * We can stop once either
- * (a) we find (1), in which case we have definitely
- * found (2) as well; or
- * (b) we have searched exhaustively for (1), and have
- * either found (2) or searched exhaustively for it
- */
- u32 key = efx_filter_build(&filter, spec);
- unsigned int hash = efx_filter_hash(key);
- unsigned int incr = efx_filter_increment(key);
- unsigned int max_rep_depth = table->search_depth[spec->type];
- unsigned int max_ins_depth =
- spec->priority <= EFX_FILTER_PRI_HINT ?
- FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX;
- unsigned int i = hash & (table->size - 1);
-
- ins_index = -1;
- depth = 1;
-
- spin_lock_bh(&state->lock);
-
- for (;;) {
- if (!test_bit(i, table->used_bitmap)) {
- if (ins_index < 0)
- ins_index = i;
- } else if (efx_filter_equal(spec, &table->spec[i])) {
- /* Case (a) */
- if (ins_index < 0)
- ins_index = i;
- rep_index = i;
- break;
- }
-
- if (depth >= max_rep_depth &&
- (ins_index >= 0 || depth >= max_ins_depth)) {
- /* Case (b) */
- if (ins_index < 0) {
- rc = -EBUSY;
- goto out;
- }
- rep_index = -1;
- break;
- }
-
- i = (i + incr) & (table->size - 1);
- ++depth;
- }
- }
-
- /* If we found a filter to be replaced, check whether we
- * should do so
- */
- if (rep_index >= 0) {
- struct efx_filter_spec *saved_spec = &table->spec[rep_index];
-
- if (spec->priority == saved_spec->priority && !replace_equal) {
- rc = -EEXIST;
- goto out;
- }
- if (spec->priority < saved_spec->priority) {
- rc = -EPERM;
- goto out;
- }
- }
-
- /* Insert the filter */
- if (ins_index != rep_index) {
- __set_bit(ins_index, table->used_bitmap);
- ++table->used;
- }
- table->spec[ins_index] = *spec;
-
- if (table->id == EFX_FILTER_TABLE_RX_DEF) {
- efx_filter_push_rx_config(efx);
- } else {
- if (table->search_depth[spec->type] < depth) {
- table->search_depth[spec->type] = depth;
- if (spec->flags & EFX_FILTER_FLAG_TX)
- efx_filter_push_tx_limits(efx);
- else
- efx_filter_push_rx_config(efx);
- }
-
- efx_writeo(efx, &filter,
- table->offset + table->step * ins_index);
-
- /* If we were able to replace a filter by inserting
- * at a lower depth, clear the replaced filter
- */
- if (ins_index != rep_index && rep_index >= 0)
- efx_filter_table_clear_entry(efx, table, rep_index);
- }
-
- netif_vdbg(efx, hw, efx->net_dev,
- "%s: filter type %d index %d rxq %u set",
- __func__, spec->type, ins_index, spec->dmaq_id);
- rc = efx_filter_make_id(spec, ins_index);
-
-out:
- spin_unlock_bh(&state->lock);
- return rc;
-}
-
-static void efx_filter_table_clear_entry(struct efx_nic *efx,
- struct efx_filter_table *table,
- unsigned int filter_idx)
-{
- static efx_oword_t filter;
-
- if (table->id == EFX_FILTER_TABLE_RX_DEF) {
- /* RX default filters must always exist */
- efx_filter_reset_rx_def(efx, filter_idx);
- efx_filter_push_rx_config(efx);
- } else if (test_bit(filter_idx, table->used_bitmap)) {
- __clear_bit(filter_idx, table->used_bitmap);
- --table->used;
- memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
-
- efx_writeo(efx, &filter,
- table->offset + table->step * filter_idx);
- }
-}
-
-/**
- * efx_filter_remove_id_safe - remove a filter by ID, carefully
- * @efx: NIC from which to remove the filter
- * @priority: Priority of filter, as passed to @efx_filter_insert_filter
- * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
- *
- * This function will range-check @filter_id, so it is safe to call
- * with a value passed from userland.
- */
-int efx_filter_remove_id_safe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id)
-{
- struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id;
- struct efx_filter_table *table;
- unsigned int filter_idx;
- struct efx_filter_spec *spec;
- u8 filter_flags;
- int rc;
-
- table_id = efx_filter_id_table_id(filter_id);
- if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
- return -ENOENT;
- table = &state->table[table_id];
-
- filter_idx = efx_filter_id_index(filter_id);
- if (filter_idx >= table->size)
- return -ENOENT;
- spec = &table->spec[filter_idx];
-
- filter_flags = efx_filter_id_flags(filter_id);
-
- spin_lock_bh(&state->lock);
-
- if (test_bit(filter_idx, table->used_bitmap) &&
- spec->priority == priority) {
- efx_filter_table_clear_entry(efx, table, filter_idx);
- if (table->used == 0)
- efx_filter_table_reset_search_depth(table);
- rc = 0;
- } else {
- rc = -ENOENT;
- }
-
- spin_unlock_bh(&state->lock);
-
- return rc;
-}
-
-/**
- * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
- * @efx: NIC from which to remove the filter
- * @priority: Priority of filter, as passed to @efx_filter_insert_filter
- * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
- * @spec: Buffer in which to store filter specification
- *
- * This function will range-check @filter_id, so it is safe to call
- * with a value passed from userland.
- */
-int efx_filter_get_filter_safe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id, struct efx_filter_spec *spec_buf)
-{
- struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id;
- struct efx_filter_table *table;
- struct efx_filter_spec *spec;
- unsigned int filter_idx;
- u8 filter_flags;
- int rc;
-
- table_id = efx_filter_id_table_id(filter_id);
- if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
- return -ENOENT;
- table = &state->table[table_id];
-
- filter_idx = efx_filter_id_index(filter_id);
- if (filter_idx >= table->size)
- return -ENOENT;
- spec = &table->spec[filter_idx];
-
- filter_flags = efx_filter_id_flags(filter_id);
-
- spin_lock_bh(&state->lock);
-
- if (test_bit(filter_idx, table->used_bitmap) &&
- spec->priority == priority) {
- *spec_buf = *spec;
- rc = 0;
- } else {
- rc = -ENOENT;
- }
-
- spin_unlock_bh(&state->lock);
-
- return rc;
-}
-
-static void efx_filter_table_clear(struct efx_nic *efx,
- enum efx_filter_table_id table_id,
- enum efx_filter_priority priority)
-{
- struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_table *table = &state->table[table_id];
- unsigned int filter_idx;
-
- spin_lock_bh(&state->lock);
-
- for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
- if (table->spec[filter_idx].priority <= priority)
- efx_filter_table_clear_entry(efx, table, filter_idx);
- if (table->used == 0)
- efx_filter_table_reset_search_depth(table);
-
- spin_unlock_bh(&state->lock);
-}
-
-/**
- * efx_filter_clear_rx - remove RX filters by priority
- * @efx: NIC from which to remove the filters
- * @priority: Maximum priority to remove
- */
-void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority)
-{
- efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP, priority);
- efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority);
-}
-
-u32 efx_filter_count_rx_used(struct efx_nic *efx,
- enum efx_filter_priority priority)
-{
- struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id;
- struct efx_filter_table *table;
- unsigned int filter_idx;
- u32 count = 0;
-
- spin_lock_bh(&state->lock);
-
- for (table_id = EFX_FILTER_TABLE_RX_IP;
- table_id <= EFX_FILTER_TABLE_RX_DEF;
- table_id++) {
- table = &state->table[table_id];
- for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
- if (test_bit(filter_idx, table->used_bitmap) &&
- table->spec[filter_idx].priority == priority)
- ++count;
- }
- }
-
- spin_unlock_bh(&state->lock);
-
- return count;
-}
-
-s32 efx_filter_get_rx_ids(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 *buf, u32 size)
-{
- struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id;
- struct efx_filter_table *table;
- unsigned int filter_idx;
- s32 count = 0;
-
- spin_lock_bh(&state->lock);
-
- for (table_id = EFX_FILTER_TABLE_RX_IP;
- table_id <= EFX_FILTER_TABLE_RX_DEF;
- table_id++) {
- table = &state->table[table_id];
- for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
- if (test_bit(filter_idx, table->used_bitmap) &&
- table->spec[filter_idx].priority == priority) {
- if (count == size) {
- count = -EMSGSIZE;
- goto out;
- }
- buf[count++] = efx_filter_make_id(
- &table->spec[filter_idx], filter_idx);
- }
- }
- }
-out:
- spin_unlock_bh(&state->lock);
-
- return count;
-}
-
-/* Restore filter stater after reset */
-void efx_restore_filters(struct efx_nic *efx)
-{
- struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id;
- struct efx_filter_table *table;
- efx_oword_t filter;
- unsigned int filter_idx;
-
- spin_lock_bh(&state->lock);
-
- for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
- table = &state->table[table_id];
-
- /* Check whether this is a regular register table */
- if (table->step == 0)
- continue;
-
- for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
- if (!test_bit(filter_idx, table->used_bitmap))
- continue;
- efx_filter_build(&filter, &table->spec[filter_idx]);
- efx_writeo(efx, &filter,
- table->offset + table->step * filter_idx);
- }
- }
-
- efx_filter_push_rx_config(efx);
- efx_filter_push_tx_limits(efx);
-
- spin_unlock_bh(&state->lock);
-}
-
-int efx_probe_filters(struct efx_nic *efx)
-{
- struct efx_filter_state *state;
- struct efx_filter_table *table;
- unsigned table_id;
-
- state = kzalloc(sizeof(*efx->filter_state), GFP_KERNEL);
- if (!state)
- return -ENOMEM;
- efx->filter_state = state;
-
- spin_lock_init(&state->lock);
-
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
-#ifdef CONFIG_RFS_ACCEL
- state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS,
- sizeof(*state->rps_flow_id),
- GFP_KERNEL);
- if (!state->rps_flow_id)
- goto fail;
-#endif
- table = &state->table[EFX_FILTER_TABLE_RX_IP];
- table->id = EFX_FILTER_TABLE_RX_IP;
- table->offset = FR_BZ_RX_FILTER_TBL0;
- table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
- table->step = FR_BZ_RX_FILTER_TBL0_STEP;
- }
-
- if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
- table = &state->table[EFX_FILTER_TABLE_RX_MAC];
- table->id = EFX_FILTER_TABLE_RX_MAC;
- table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
- table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
- table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
-
- table = &state->table[EFX_FILTER_TABLE_RX_DEF];
- table->id = EFX_FILTER_TABLE_RX_DEF;
- table->size = EFX_FILTER_SIZE_RX_DEF;
-
- table = &state->table[EFX_FILTER_TABLE_TX_MAC];
- table->id = EFX_FILTER_TABLE_TX_MAC;
- table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
- table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
- table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
- }
-
- for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
- table = &state->table[table_id];
- if (table->size == 0)
- continue;
- table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
- sizeof(unsigned long),
- GFP_KERNEL);
- if (!table->used_bitmap)
- goto fail;
- table->spec = vzalloc(table->size * sizeof(*table->spec));
- if (!table->spec)
- goto fail;
- }
-
- if (state->table[EFX_FILTER_TABLE_RX_DEF].size) {
- /* RX default filters must always exist */
- unsigned i;
- for (i = 0; i < EFX_FILTER_SIZE_RX_DEF; i++)
- efx_filter_reset_rx_def(efx, i);
- }
-
- efx_filter_push_rx_config(efx);
-
- return 0;
-
-fail:
- efx_remove_filters(efx);
- return -ENOMEM;
-}
-
-void efx_remove_filters(struct efx_nic *efx)
-{
- struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id;
-
- for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
- kfree(state->table[table_id].used_bitmap);
- vfree(state->table[table_id].spec);
- }
-#ifdef CONFIG_RFS_ACCEL
- kfree(state->rps_flow_id);
-#endif
- kfree(state);
-}
-
-/* Update scatter enable flags for filters pointing to our own RX queues */
-void efx_filter_update_rx_scatter(struct efx_nic *efx)
-{
- struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id;
- struct efx_filter_table *table;
- efx_oword_t filter;
- unsigned int filter_idx;
-
- spin_lock_bh(&state->lock);
-
- for (table_id = EFX_FILTER_TABLE_RX_IP;
- table_id <= EFX_FILTER_TABLE_RX_DEF;
- table_id++) {
- table = &state->table[table_id];
-
- for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
- if (!test_bit(filter_idx, table->used_bitmap) ||
- table->spec[filter_idx].dmaq_id >=
- efx->n_rx_channels)
- continue;
-
- if (efx->rx_scatter)
- table->spec[filter_idx].flags |=
- EFX_FILTER_FLAG_RX_SCATTER;
- else
- table->spec[filter_idx].flags &=
- ~EFX_FILTER_FLAG_RX_SCATTER;
-
- if (table_id == EFX_FILTER_TABLE_RX_DEF)
- /* Pushed by efx_filter_push_rx_config() */
- continue;
-
- efx_filter_build(&filter, &table->spec[filter_idx]);
- efx_writeo(efx, &filter,
- table->offset + table->step * filter_idx);
- }
- }
-
- efx_filter_push_rx_config(efx);
-
- spin_unlock_bh(&state->lock);
-}
-
-#ifdef CONFIG_RFS_ACCEL
-
-int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
- u16 rxq_index, u32 flow_id)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_channel *channel;
- struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_spec spec;
- const struct iphdr *ip;
- const __be16 *ports;
- int nhoff;
- int rc;
-
- nhoff = skb_network_offset(skb);
-
- if (skb->protocol == htons(ETH_P_8021Q)) {
- EFX_BUG_ON_PARANOID(skb_headlen(skb) <
- nhoff + sizeof(struct vlan_hdr));
- if (((const struct vlan_hdr *)skb->data + nhoff)->
- h_vlan_encapsulated_proto != htons(ETH_P_IP))
- return -EPROTONOSUPPORT;
-
- /* This is IP over 802.1q VLAN. We can't filter on the
- * IP 5-tuple and the vlan together, so just strip the
- * vlan header and filter on the IP part.
- */
- nhoff += sizeof(struct vlan_hdr);
- } else if (skb->protocol != htons(ETH_P_IP)) {
- return -EPROTONOSUPPORT;
- }
-
- /* RFS must validate the IP header length before calling us */
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
- ip = (const struct iphdr *)(skb->data + nhoff);
- if (ip_is_fragment(ip))
- return -EPROTONOSUPPORT;
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
- ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
-
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
- efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
- rxq_index);
- rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
- ip->daddr, ports[1], ip->saddr, ports[0]);
- if (rc)
- return rc;
-
- rc = efx_filter_insert_filter(efx, &spec, true);
- if (rc < 0)
- return rc;
-
- /* Remember this so we can check whether to expire the filter later */
- state->rps_flow_id[rc] = flow_id;
- channel = efx_get_channel(efx, skb_get_rx_queue(skb));
- ++channel->rfs_filters_added;
-
- netif_info(efx, rx_status, efx->net_dev,
- "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
- (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
- &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
- rxq_index, flow_id, rc);
-
- return rc;
-}
-
-bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota)
-{
- struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_IP];
- unsigned mask = table->size - 1;
- unsigned index;
- unsigned stop;
-
- if (!spin_trylock_bh(&state->lock))
- return false;
-
- index = state->rps_expire_index;
- stop = (index + quota) & mask;
-
- while (index != stop) {
- if (test_bit(index, table->used_bitmap) &&
- table->spec[index].priority == EFX_FILTER_PRI_HINT &&
- rps_may_expire_flow(efx->net_dev,
- table->spec[index].dmaq_id,
- state->rps_flow_id[index], index)) {
- netif_info(efx, rx_status, efx->net_dev,
- "expiring filter %d [flow %u]\n",
- index, state->rps_flow_id[index]);
- efx_filter_table_clear_entry(efx, table, index);
- }
- index = (index + 1) & mask;
- }
-
- state->rps_expire_index = stop;
- if (table->used == 0)
- efx_filter_table_reset_search_depth(table);
-
- spin_unlock_bh(&state->lock);
- return true;
-}
-
-#endif /* CONFIG_RFS_ACCEL */
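
One invariant worth noting across this rework: the external filter IDs handed to userland for RX NFC encode a match-priority range in the high bits and a table index in the low EFX_FILTER_INDEX_WIDTH bits, as the removed code above documents, and the replacement efx_farch_filter_make_id() seen earlier appears to keep the same 13-bit-index layout. A trivial decode sketch using the removed code's constants; the function name is illustrative:

#include <stdint.h>

#define EFX_FILTER_INDEX_WIDTH 13
#define EFX_FILTER_INDEX_MASK  ((1 << EFX_FILTER_INDEX_WIDTH) - 1)

/* Split an external filter ID into (range, index); ranges order
 * filters by match priority for RX NFC semantics. */
static inline void filter_id_decode(uint32_t id,
				    unsigned int *range,
				    unsigned int *index)
{
	*range = id >> EFX_FILTER_INDEX_WIDTH;
	*index = id & EFX_FILTER_INDEX_MASK;
}
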
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 5cb54723b824..63c77a557178 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2005-2010 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -11,32 +11,49 @@
#define EFX_FILTER_H
#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <asm/byteorder.h>
/**
- * enum efx_filter_type - type of hardware filter
- * @EFX_FILTER_TCP_FULL: Matching TCP/IPv4 4-tuple
- * @EFX_FILTER_TCP_WILD: Matching TCP/IPv4 destination (host, port)
- * @EFX_FILTER_UDP_FULL: Matching UDP/IPv4 4-tuple
- * @EFX_FILTER_UDP_WILD: Matching UDP/IPv4 destination (host, port)
- * @EFX_FILTER_MAC_FULL: Matching Ethernet destination MAC address, VID
- * @EFX_FILTER_MAC_WILD: Matching Ethernet destination MAC address
- * @EFX_FILTER_UC_DEF: Matching all otherwise unmatched unicast
- * @EFX_FILTER_MC_DEF: Matching all otherwise unmatched multicast
- * @EFX_FILTER_UNSPEC: Match type is unspecified
+ * enum efx_filter_match_flags - Flags for hardware filter match type
+ * @EFX_FILTER_MATCH_REM_HOST: Match by remote IP host address
+ * @EFX_FILTER_MATCH_LOC_HOST: Match by local IP host address
+ * @EFX_FILTER_MATCH_REM_MAC: Match by remote MAC address
+ * @EFX_FILTER_MATCH_REM_PORT: Match by remote TCP/UDP port
+ * @EFX_FILTER_MATCH_LOC_MAC: Match by local MAC address
+ * @EFX_FILTER_MATCH_LOC_PORT: Match by local TCP/UDP port
+ * @EFX_FILTER_MATCH_ETHER_TYPE: Match by Ether-type
+ * @EFX_FILTER_MATCH_INNER_VID: Match by inner VLAN ID
+ * @EFX_FILTER_MATCH_OUTER_VID: Match by outer VLAN ID
+ * @EFX_FILTER_MATCH_IP_PROTO: Match by IP transport protocol
+ * @EFX_FILTER_MATCH_LOC_MAC_IG: Match by local MAC address I/G bit.
+ * Used for RX default unicast and multicast/broadcast filters.
*
- * Falcon NICs only support the TCP/IPv4 and UDP/IPv4 filter types.
+ * Only some combinations are supported, depending on NIC type:
+ *
+ * - Falcon supports RX filters matching by {TCP,UDP}/IPv4 4-tuple or
+ * local 2-tuple (only implemented for Falcon B0)
+ *
+ * - Siena supports RX and TX filters matching by {TCP,UDP}/IPv4 4-tuple
+ * or local 2-tuple, or local MAC with or without outer VID, and RX
+ * default filters
+ *
+ * - Huntington supports filter matching controlled by firmware, potentially
+ * using {TCP,UDP}/IPv{4,6} 4-tuple or local 2-tuple, local MAC or I/G bit,
+ * with or without outer and inner VID
*/
-enum efx_filter_type {
- EFX_FILTER_TCP_FULL = 0,
- EFX_FILTER_TCP_WILD,
- EFX_FILTER_UDP_FULL,
- EFX_FILTER_UDP_WILD,
- EFX_FILTER_MAC_FULL = 4,
- EFX_FILTER_MAC_WILD,
- EFX_FILTER_UC_DEF = 8,
- EFX_FILTER_MC_DEF,
- EFX_FILTER_TYPE_COUNT, /* number of specific types */
- EFX_FILTER_UNSPEC = 0xf,
+enum efx_filter_match_flags {
+ EFX_FILTER_MATCH_REM_HOST = 0x0001,
+ EFX_FILTER_MATCH_LOC_HOST = 0x0002,
+ EFX_FILTER_MATCH_REM_MAC = 0x0004,
+ EFX_FILTER_MATCH_REM_PORT = 0x0008,
+ EFX_FILTER_MATCH_LOC_MAC = 0x0010,
+ EFX_FILTER_MATCH_LOC_PORT = 0x0020,
+ EFX_FILTER_MATCH_ETHER_TYPE = 0x0040,
+ EFX_FILTER_MATCH_INNER_VID = 0x0080,
+ EFX_FILTER_MATCH_OUTER_VID = 0x0100,
+ EFX_FILTER_MATCH_IP_PROTO = 0x0200,
+ EFX_FILTER_MATCH_LOC_MAC_IG = 0x0400,
};
/**
@@ -61,37 +78,75 @@ enum efx_filter_priority {
* according to the indirection table.
* @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
* queue.
+ * @EFX_FILTER_FLAG_RX_STACK: Indicates a filter inserted for the
+ * network stack. The filter must have a priority of
+ * %EFX_FILTER_PRI_REQUIRED. It can be steered by a replacement
+ * request with priority %EFX_FILTER_PRI_MANUAL, and a removal
+ * request with priority %EFX_FILTER_PRI_MANUAL will reset the
+ * steering (but not remove the filter).
* @EFX_FILTER_FLAG_RX: Filter is for RX
* @EFX_FILTER_FLAG_TX: Filter is for TX
*/
enum efx_filter_flags {
EFX_FILTER_FLAG_RX_RSS = 0x01,
EFX_FILTER_FLAG_RX_SCATTER = 0x02,
+ EFX_FILTER_FLAG_RX_STACK = 0x04,
EFX_FILTER_FLAG_RX = 0x08,
EFX_FILTER_FLAG_TX = 0x10,
};
/**
* struct efx_filter_spec - specification for a hardware filter
- * @type: Type of match to be performed, from &enum efx_filter_type
+ * @match_flags: Match type flags, from &enum efx_filter_match_flags
* @priority: Priority of the filter, from &enum efx_filter_priority
* @flags: Miscellaneous flags, from &enum efx_filter_flags
- * @dmaq_id: Source/target queue index
- * @data: Match data (type-dependent)
+ * @rss_context: RSS context to use, if %EFX_FILTER_FLAG_RX_RSS is set
+ * @dmaq_id: Source/target queue index, or %EFX_FILTER_RX_DMAQ_ID_DROP for
+ * an RX drop filter
+ * @outer_vid: Outer VLAN ID to match, if %EFX_FILTER_MATCH_OUTER_VID is set
+ * @inner_vid: Inner VLAN ID to match, if %EFX_FILTER_MATCH_INNER_VID is set
+ * @loc_mac: Local MAC address to match, if %EFX_FILTER_MATCH_LOC_MAC or
+ * %EFX_FILTER_MATCH_LOC_MAC_IG is set
+ * @rem_mac: Remote MAC address to match, if %EFX_FILTER_MATCH_REM_MAC is set
+ * @ether_type: Ether-type to match, if %EFX_FILTER_MATCH_ETHER_TYPE is set
+ * @ip_proto: IP transport protocol to match, if %EFX_FILTER_MATCH_IP_PROTO
+ * is set
+ * @loc_host: Local IP host to match, if %EFX_FILTER_MATCH_LOC_HOST is set
+ * @rem_host: Remote IP host to match, if %EFX_FILTER_MATCH_REM_HOST is set
+ * @loc_port: Local TCP/UDP port to match, if %EFX_FILTER_MATCH_LOC_PORT is set
+ * @rem_port: Remote TCP/UDP port to match, if %EFX_FILTER_MATCH_REM_PORT is set
*
- * Use the efx_filter_set_*() functions to initialise the @type and
- * @data fields.
+ * The efx_filter_init_rx() or efx_filter_init_tx() function *must* be
+ * used to initialise the structure. The efx_filter_set_*() functions
+ * may then be used to set @rss_context, @match_flags and related
+ * fields.
*
* The @priority field is used by software to determine whether a new
* filter may replace an old one. The hardware priority of a filter
- * depends on the filter type.
+ * depends on which fields are matched.
*/
struct efx_filter_spec {
- u8 type:4;
- u8 priority:4;
- u8 flags;
- u16 dmaq_id;
- u32 data[3];
+ u32 match_flags:12;
+ u32 priority:2;
+ u32 flags:6;
+ u32 dmaq_id:12;
+ u32 rss_context;
+ __be16 outer_vid __aligned(4); /* allow jhash2() of match values */
+ __be16 inner_vid;
+ u8 loc_mac[ETH_ALEN];
+ u8 rem_mac[ETH_ALEN];
+ __be16 ether_type;
+ u8 ip_proto;
+ __be32 loc_host[4];
+ __be32 rem_host[4];
+ __be16 loc_port;
+ __be16 rem_port;
+ /* total 64 bytes */
+};
+
+enum {
+ EFX_FILTER_RSS_CONTEXT_DEFAULT = 0xffffffff,
+ EFX_FILTER_RX_DMAQ_ID_DROP = 0xfff
};
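+
+/* Editor's note, illustrative only: the __aligned(4) annotation on
+ * @outer_vid means the match values form an array of 32-bit words, so
+ * a filter table can hash a spec with jhash2() (from <linux/jhash.h>)
+ * along these lines:
+ *
+ *	static u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
+ *	{
+ *		return jhash2((const u32 *)&spec->outer_vid,
+ *			      (sizeof(*spec) -
+ *			       offsetof(struct efx_filter_spec, outer_vid)) / 4,
+ *			      0);
+ *	}
+ */
+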
static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
@@ -99,39 +154,116 @@ static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
enum efx_filter_flags flags,
unsigned rxq_id)
{
- spec->type = EFX_FILTER_UNSPEC;
+ memset(spec, 0, sizeof(*spec));
spec->priority = priority;
spec->flags = EFX_FILTER_FLAG_RX | flags;
+ spec->rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
spec->dmaq_id = rxq_id;
}
static inline void efx_filter_init_tx(struct efx_filter_spec *spec,
unsigned txq_id)
{
- spec->type = EFX_FILTER_UNSPEC;
+ memset(spec, 0, sizeof(*spec));
spec->priority = EFX_FILTER_PRI_REQUIRED;
spec->flags = EFX_FILTER_FLAG_TX;
spec->dmaq_id = txq_id;
}
-extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
- __be32 host, __be16 port);
-extern int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
- u8 *proto, __be32 *host, __be16 *port);
-extern int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
- __be32 host, __be16 port,
- __be32 rhost, __be16 rport);
-extern int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
- u8 *proto, __be32 *host, __be16 *port,
- __be32 *rhost, __be16 *rport);
-extern int efx_filter_set_eth_local(struct efx_filter_spec *spec,
- u16 vid, const u8 *addr);
-extern int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
- u16 *vid, u8 *addr);
-extern int efx_filter_set_uc_def(struct efx_filter_spec *spec);
-extern int efx_filter_set_mc_def(struct efx_filter_spec *spec);
+/**
+ * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
+ * @spec: Specification to initialise
+ * @proto: Transport layer protocol number
+ * @host: Local host address (network byte order)
+ * @port: Local port (network byte order)
+ */
+static inline int
+efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
+ __be32 host, __be16 port)
+{
+ spec->match_flags |=
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
+ spec->ether_type = htons(ETH_P_IP);
+ spec->ip_proto = proto;
+ spec->loc_host[0] = host;
+ spec->loc_port = port;
+ return 0;
+}
+
+/**
+ * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
+ * @spec: Specification to initialise
+ * @proto: Transport layer protocol number
+ * @lhost: Local host address (network byte order)
+ * @lport: Local port (network byte order)
+ * @rhost: Remote host address (network byte order)
+ * @rport: Remote port (network byte order)
+ */
+static inline int
+efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
+ __be32 lhost, __be16 lport,
+ __be32 rhost, __be16 rport)
+{
+ spec->match_flags |=
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
+ spec->ether_type = htons(ETH_P_IP);
+ spec->ip_proto = proto;
+ spec->loc_host[0] = lhost;
+ spec->loc_port = lport;
+ spec->rem_host[0] = rhost;
+ spec->rem_port = rport;
+ return 0;
+}
+
enum {
EFX_FILTER_VID_UNSPEC = 0xffff,
};
+/**
+ * efx_filter_set_eth_local - specify local Ethernet address and/or VID
+ * @spec: Specification to initialise
+ * @vid: Outer VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
+ * @addr: Local Ethernet MAC address, or %NULL
+ */
+static inline int efx_filter_set_eth_local(struct efx_filter_spec *spec,
+ u16 vid, const u8 *addr)
+{
+ if (vid == EFX_FILTER_VID_UNSPEC && addr == NULL)
+ return -EINVAL;
+
+ if (vid != EFX_FILTER_VID_UNSPEC) {
+ spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+ spec->outer_vid = htons(vid);
+ }
+ if (addr != NULL) {
+ spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC;
+ memcpy(spec->loc_mac, addr, ETH_ALEN);
+ }
+ return 0;
+}
+
+/**
+ * efx_filter_set_uc_def - specify matching otherwise-unmatched unicast
+ * @spec: Specification to initialise
+ */
+static inline int efx_filter_set_uc_def(struct efx_filter_spec *spec)
+{
+ spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
+ return 0;
+}
+
+/**
+ * efx_filter_set_mc_def - specify matching otherwise-unmatched multicast
+ * @spec: Specification to initialise
+ */
+static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec)
+{
+ spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
+ spec->loc_mac[0] = 1;
+ return 0;
+}
+
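+/* Illustrative usage (editor's sketch, not part of this patch): build
+ * an RSS-spread RX filter matching local TCP port 80 on address
+ * ip_addr (a hypothetical __be32), for a hypothetical queue rxq_id.
+ * The spec would then be handed to the driver's filter-insertion path.
+ *
+ *	struct efx_filter_spec spec;
+ *
+ *	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
+ *			   EFX_FILTER_FLAG_RX_RSS, rxq_id);
+ *	efx_filter_set_ipv4_local(&spec, IPPROTO_TCP, ip_addr, htons(80));
+ */
+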
#endif /* EFX_FILTER_H */
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 96759aee1c6c..96ce507d8602 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -20,7 +20,7 @@
*
**************************************************************************
*
- * Notes on locking strategy:
+ * Notes on locking strategy for the Falcon architecture:
*
* Many CSRs are very wide and cannot be read or written atomically.
* Writes from the host are buffered by the Bus Interface Unit (BIU)
@@ -54,6 +54,12 @@
* register while the collector already holds values for some other
* register, the write is discarded and the collector maintains its
* current state.
+ *
+ * The EF10 architecture exposes very few registers to the host and
+ * most of them are only 32 bits wide. The only exceptions are the MC
+ * doorbell register pair, which has its own latching, and
+ * TX_DESC_UPD, which works in a similar way to the Falcon
+ * architecture.
*/
#if BITS_PER_LONG == 64
@@ -83,7 +89,7 @@ static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
}
/* Write a normal 128-bit CSR, locking as appropriate. */
-static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
+static inline void efx_writeo(struct efx_nic *efx, const efx_oword_t *value,
unsigned int reg)
{
unsigned long flags __attribute__ ((unused));
@@ -108,7 +114,7 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
/* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */
static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
- efx_qword_t *value, unsigned int index)
+ const efx_qword_t *value, unsigned int index)
{
unsigned int addr = index * sizeof(*value);
unsigned long flags __attribute__ ((unused));
@@ -129,7 +135,7 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
}
/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */
-static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
+static inline void efx_writed(struct efx_nic *efx, const efx_dword_t *value,
unsigned int reg)
{
netif_vdbg(efx, hw, efx->net_dev,
@@ -190,8 +196,9 @@ static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
}
/* Write a 128-bit CSR forming part of a table */
-static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value,
- unsigned int reg, unsigned int index)
+static inline void
+efx_writeo_table(struct efx_nic *efx, const efx_oword_t *value,
+ unsigned int reg, unsigned int index)
{
efx_writeo(efx, value, reg + index * sizeof(efx_oword_t));
}
@@ -203,12 +210,12 @@ static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
}
-/* Page-mapped register block size */
-#define EFX_PAGE_BLOCK_SIZE 0x2000
+/* Page size used as step between per-VI registers */
+#define EFX_VI_PAGE_SIZE 0x2000
-/* Calculate offset to page-mapped register block */
+/* Calculate offset to page-mapped register */
#define EFX_PAGED_REG(page, reg) \
- ((page) * EFX_PAGE_BLOCK_SIZE + (reg))
+ ((page) * EFX_VI_PAGE_SIZE + (reg))
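+
+/* Worked example (illustrative): with EFX_VI_PAGE_SIZE = 0x2000, the
+ * TX_DESC_UPD register at offset 0xa10 for VI/page 3 is addressed as
+ * EFX_PAGED_REG(3, 0xa10) = 3 * 0x2000 + 0xa10 = 0x6a10.
+ */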
/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
@@ -236,19 +243,24 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \
page)
-/* Write a page-mapped 32-bit CSR (EVQ_RPTR or the high bits of
- * RX_DESC_UPD or TX_DESC_UPD)
+/* Write a page-mapped 32-bit CSR (EVQ_RPTR, EVQ_TMR (EF10), or the
+ * high bits of RX_DESC_UPD or TX_DESC_UPD)
*/
-static inline void _efx_writed_page(struct efx_nic *efx, efx_dword_t *value,
- unsigned int reg, unsigned int page)
+static inline void
+_efx_writed_page(struct efx_nic *efx, const efx_dword_t *value,
+ unsigned int reg, unsigned int page)
{
efx_writed(efx, value, EFX_PAGED_REG(page, reg));
}
#define efx_writed_page(efx, value, reg, page) \
_efx_writed_page(efx, value, \
reg + \
- BUILD_BUG_ON_ZERO((reg) != 0x400 && (reg) != 0x83c \
- && (reg) != 0xa1c), \
+ BUILD_BUG_ON_ZERO((reg) != 0x400 && \
+ (reg) != 0x420 && \
+ (reg) != 0x830 && \
+ (reg) != 0x83c && \
+ (reg) != 0xa18 && \
+ (reg) != 0xa1c), \
page)
/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug
@@ -256,7 +268,7 @@ static inline void _efx_writed_page(struct efx_nic *efx, efx_dword_t *value,
* collector register.
*/
static inline void _efx_writed_page_locked(struct efx_nic *efx,
- efx_dword_t *value,
+ const efx_dword_t *value,
unsigned int reg,
unsigned int page)
{
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 97dd8f18c001..128d7cdf9eb2 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2008-2011 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2008-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -8,10 +8,11 @@
*/
#include <linux/delay.h>
+#include <asm/cmpxchg.h>
#include "net_driver.h"
#include "nic.h"
#include "io.h"
-#include "regs.h"
+#include "farch_regs.h"
#include "mcdi_pcol.h"
#include "phy.h"
@@ -24,112 +25,235 @@
#define MCDI_RPC_TIMEOUT (10 * HZ)
-#define MCDI_PDU(efx) \
- (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST)
-#define MCDI_DOORBELL(efx) \
- (efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST)
-#define MCDI_STATUS(efx) \
- (efx_port_num(efx) ? MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST)
-
/* A reboot/assertion causes the MCDI status word to be set after the
* command word is set or a REBOOT event is sent. If we notice a reboot
- * via these mechanisms then wait 10ms for the status word to be set. */
+ * via these mechanisms then wait 20ms for the status word to be set.
+ */
#define MCDI_STATUS_DELAY_US 100
-#define MCDI_STATUS_DELAY_COUNT 100
+#define MCDI_STATUS_DELAY_COUNT 200
#define MCDI_STATUS_SLEEP_MS \
(MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
#define SEQ_MASK \
EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
+struct efx_mcdi_async_param {
+ struct list_head list;
+ unsigned int cmd;
+ size_t inlen;
+ size_t outlen;
+ efx_mcdi_async_completer *complete;
+ unsigned long cookie;
+ /* followed by request/response buffer */
+};
+
+static void efx_mcdi_timeout_async(unsigned long context);
+static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
+ bool *was_attached_out);
+
static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
{
- struct siena_nic_data *nic_data;
- EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
- nic_data = efx->nic_data;
- return &nic_data->mcdi;
+ EFX_BUG_ON_PARANOID(!efx->mcdi);
+ return &efx->mcdi->iface;
}
-void efx_mcdi_init(struct efx_nic *efx)
+int efx_mcdi_init(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi;
+ bool already_attached;
+ int rc;
- if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
- return;
+ efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
+ if (!efx->mcdi)
+ return -ENOMEM;
mcdi = efx_mcdi(efx);
+ mcdi->efx = efx;
init_waitqueue_head(&mcdi->wq);
spin_lock_init(&mcdi->iface_lock);
- atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
+ mcdi->state = MCDI_STATE_QUIESCENT;
mcdi->mode = MCDI_MODE_POLL;
+ spin_lock_init(&mcdi->async_lock);
+ INIT_LIST_HEAD(&mcdi->async_list);
+ setup_timer(&mcdi->async_timer, efx_mcdi_timeout_async,
+ (unsigned long)mcdi);
(void) efx_mcdi_poll_reboot(efx);
+ mcdi->new_epoch = true;
+
+ /* Recover from a failed assertion before probing */
+ rc = efx_mcdi_handle_assertion(efx);
+ if (rc)
+ return rc;
+
+ /* Let the MC (and BMC, if this is a LOM) know that the driver
+ * is loaded. We should do this before we reset the NIC.
+ */
+ rc = efx_mcdi_drv_attach(efx, true, &already_attached);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Unable to register driver with MCPU\n");
+ return rc;
+ }
+ if (already_attached)
+ /* Not a fatal error */
+ netif_err(efx, probe, efx->net_dev,
+ "Host already registered with MCPU\n");
+
+ return 0;
}
-static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
- const u8 *inbuf, size_t inlen)
+void efx_mcdi_fini(struct efx_nic *efx)
+{
+ if (!efx->mcdi)
+ return;
+
+ BUG_ON(efx->mcdi->iface.state != MCDI_STATE_QUIESCENT);
+
+ /* Relinquish the device (back to the BMC, if this is a LOM) */
+ efx_mcdi_drv_attach(efx, false, NULL);
+
+ kfree(efx->mcdi);
+}
+
+static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
- unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
- unsigned int i;
- efx_dword_t hdr;
+ efx_dword_t hdr[2];
+ size_t hdr_len;
u32 xflags, seqno;
- BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
- BUG_ON(inlen & 3 || inlen >= MC_SMEM_PDU_LEN);
+ BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT);
+
+ /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
+ spin_lock_bh(&mcdi->iface_lock);
+ ++mcdi->seqno;
+ spin_unlock_bh(&mcdi->iface_lock);
seqno = mcdi->seqno & SEQ_MASK;
xflags = 0;
if (mcdi->mode == MCDI_MODE_EVENTS)
xflags |= MCDI_HEADER_XFLAGS_EVREQ;
- EFX_POPULATE_DWORD_6(hdr,
- MCDI_HEADER_RESPONSE, 0,
- MCDI_HEADER_RESYNC, 1,
- MCDI_HEADER_CODE, cmd,
- MCDI_HEADER_DATALEN, inlen,
- MCDI_HEADER_SEQ, seqno,
- MCDI_HEADER_XFLAGS, xflags);
-
- efx_writed(efx, &hdr, pdu);
+ if (efx->type->mcdi_max_ver == 1) {
+ /* MCDI v1 */
+ EFX_POPULATE_DWORD_7(hdr[0],
+ MCDI_HEADER_RESPONSE, 0,
+ MCDI_HEADER_RESYNC, 1,
+ MCDI_HEADER_CODE, cmd,
+ MCDI_HEADER_DATALEN, inlen,
+ MCDI_HEADER_SEQ, seqno,
+ MCDI_HEADER_XFLAGS, xflags,
+ MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
+ hdr_len = 4;
+ } else {
+ /* MCDI v2 */
+ BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
+ EFX_POPULATE_DWORD_7(hdr[0],
+ MCDI_HEADER_RESPONSE, 0,
+ MCDI_HEADER_RESYNC, 1,
+ MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
+ MCDI_HEADER_DATALEN, 0,
+ MCDI_HEADER_SEQ, seqno,
+ MCDI_HEADER_XFLAGS, xflags,
+ MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
+ EFX_POPULATE_DWORD_2(hdr[1],
+ MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
+ MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
+ hdr_len = 8;
+ }
- for (i = 0; i < inlen; i += 4)
- _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
+ efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);
- /* Ensure the payload is written out before the header */
- wmb();
+ mcdi->new_epoch = false;
+}
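+
+/* Editor's note: the two encodings built above differ only in where
+ * the command and length live.  MCDI v1 packs both into the single
+ * header dword (CODE=cmd, DATALEN=inlen), which is why commands above
+ * MC_CMD_CMD_SPACE_ESCAPE_7 and payloads above MCDI_CTL_SDU_LEN_MAX_V1
+ * need v2.  MCDI v2 sets CODE=MC_CMD_V2_EXTN and DATALEN=0 in the
+ * first dword and carries the real command and length in the second
+ * (EXTENDED_CMD=cmd, ACTUAL_LEN=inlen), up to MCDI_CTL_SDU_LEN_MAX_V2.
+ */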
- /* ring the doorbell with a distinctive value */
- _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
+static int efx_mcdi_errno(unsigned int mcdi_err)
+{
+ switch (mcdi_err) {
+ case 0:
+ return 0;
+#define TRANSLATE_ERROR(name) \
+ case MC_CMD_ERR_ ## name: \
+ return -name;
+ TRANSLATE_ERROR(EPERM);
+ TRANSLATE_ERROR(ENOENT);
+ TRANSLATE_ERROR(EINTR);
+ TRANSLATE_ERROR(EAGAIN);
+ TRANSLATE_ERROR(EACCES);
+ TRANSLATE_ERROR(EBUSY);
+ TRANSLATE_ERROR(EINVAL);
+ TRANSLATE_ERROR(EDEADLK);
+ TRANSLATE_ERROR(ENOSYS);
+ TRANSLATE_ERROR(ETIME);
+ TRANSLATE_ERROR(EALREADY);
+ TRANSLATE_ERROR(ENOSPC);
+#undef TRANSLATE_ERROR
+ case MC_CMD_ERR_ALLOC_FAIL:
+ return -ENOBUFS;
+ case MC_CMD_ERR_MAC_EXIST:
+ return -EADDRINUSE;
+ default:
+ return -EPROTO;
+ }
}
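+
+/* e.g. MC_CMD_ERR_ENOENT becomes -ENOENT, MC_CMD_ERR_MAC_EXIST becomes
+ * -EADDRINUSE, and any MC error code not listed above is reported to
+ * the caller as -EPROTO.
+ */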
-static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
+static void efx_mcdi_read_response_header(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
- int i;
+ unsigned int respseq, respcmd, error;
+ efx_dword_t hdr;
+
+ efx->type->mcdi_read_response(efx, &hdr, 0, 4);
+ respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
+ respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
+ error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);
- BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
- BUG_ON(outlen & 3 || outlen >= MC_SMEM_PDU_LEN);
+ if (respcmd != MC_CMD_V2_EXTN) {
+ mcdi->resp_hdr_len = 4;
+ mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
+ } else {
+ efx->type->mcdi_read_response(efx, &hdr, 4, 4);
+ mcdi->resp_hdr_len = 8;
+ mcdi->resp_data_len =
+ EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
+ }
- for (i = 0; i < outlen; i += 4)
- *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
+ if (error && mcdi->resp_data_len == 0) {
+ netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
+ mcdi->resprc = -EIO;
+ } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
+ netif_err(efx, hw, efx->net_dev,
+ "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
+ respseq, mcdi->seqno);
+ mcdi->resprc = -EIO;
+ } else if (error) {
+ efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
+ mcdi->resprc =
+ efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0));
+ } else {
+ mcdi->resprc = 0;
+ }
}
static int efx_mcdi_poll(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
unsigned long time, finish;
- unsigned int respseq, respcmd, error;
- unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
- unsigned int rc, spins;
- efx_dword_t reg;
+ unsigned int spins;
+ int rc;
	/* Check for a reboot atomically with respect to reading the response */
- rc = -efx_mcdi_poll_reboot(efx);
- if (rc)
- goto out;
+ rc = efx_mcdi_poll_reboot(efx);
+ if (rc) {
+ spin_lock_bh(&mcdi->iface_lock);
+ mcdi->resprc = rc;
+ mcdi->resp_hdr_len = 0;
+ mcdi->resp_data_len = 0;
+ spin_unlock_bh(&mcdi->iface_lock);
+ return 0;
+ }
	/* Poll for completion. Poll quickly (once per us) for the 1st jiffy,
* because generally mcdi responses are fast. After that, back off
@@ -149,59 +273,16 @@ static int efx_mcdi_poll(struct efx_nic *efx)
time = jiffies;
rmb();
- efx_readd(efx, &reg, pdu);
-
- /* All 1's indicates that shared memory is in reset (and is
- * not a valid header). Wait for it to come out reset before
- * completing the command */
- if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) != 0xffffffff &&
- EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE))
+ if (efx->type->mcdi_poll_response(efx))
break;
if (time_after(time, finish))
return -ETIMEDOUT;
}
- mcdi->resplen = EFX_DWORD_FIELD(reg, MCDI_HEADER_DATALEN);
- respseq = EFX_DWORD_FIELD(reg, MCDI_HEADER_SEQ);
- respcmd = EFX_DWORD_FIELD(reg, MCDI_HEADER_CODE);
- error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR);
-
- if (error && mcdi->resplen == 0) {
- netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
- rc = EIO;
- } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
- netif_err(efx, hw, efx->net_dev,
- "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
- respseq, mcdi->seqno);
- rc = EIO;
- } else if (error) {
- efx_readd(efx, &reg, pdu + 4);
- switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
-#define TRANSLATE_ERROR(name) \
- case MC_CMD_ERR_ ## name: \
- rc = name; \
- break
- TRANSLATE_ERROR(ENOENT);
- TRANSLATE_ERROR(EINTR);
- TRANSLATE_ERROR(EACCES);
- TRANSLATE_ERROR(EBUSY);
- TRANSLATE_ERROR(EINVAL);
- TRANSLATE_ERROR(EDEADLK);
- TRANSLATE_ERROR(ENOSYS);
- TRANSLATE_ERROR(ETIME);
-#undef TRANSLATE_ERROR
- default:
- rc = EIO;
- break;
- }
- } else
- rc = 0;
-
-out:
- mcdi->resprc = rc;
- if (rc)
- mcdi->resplen = 0;
+ spin_lock_bh(&mcdi->iface_lock);
+ efx_mcdi_read_response_header(efx);
+ spin_unlock_bh(&mcdi->iface_lock);
/* Return rc=0 like wait_event_timeout() */
return 0;
@@ -212,52 +293,36 @@ out:
*/
int efx_mcdi_poll_reboot(struct efx_nic *efx)
{
- unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx);
- efx_dword_t reg;
- uint32_t value;
-
- if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
- return false;
-
- efx_readd(efx, &reg, addr);
- value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
-
- if (value == 0)
+ if (!efx->mcdi)
return 0;
- /* MAC statistics have been cleared on the NIC; clear our copy
- * so that efx_update_diff_stat() can continue to work.
- */
- memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
-
- EFX_ZERO_DWORD(reg);
- efx_writed(efx, &reg, addr);
+ return efx->type->mcdi_poll_reboot(efx);
+}
- if (value == MC_STATUS_DWORD_ASSERT)
- return -EINTR;
- else
- return -EIO;
+static bool efx_mcdi_acquire_async(struct efx_mcdi_iface *mcdi)
+{
+ return cmpxchg(&mcdi->state,
+ MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_ASYNC) ==
+ MCDI_STATE_QUIESCENT;
}
-static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi)
+static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi)
{
/* Wait until the interface becomes QUIESCENT and we win the race
- * to mark it RUNNING. */
+ * to mark it RUNNING_SYNC.
+ */
wait_event(mcdi->wq,
- atomic_cmpxchg(&mcdi->state,
- MCDI_STATE_QUIESCENT,
- MCDI_STATE_RUNNING)
- == MCDI_STATE_QUIESCENT);
+ cmpxchg(&mcdi->state,
+ MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) ==
+ MCDI_STATE_QUIESCENT);
}
static int efx_mcdi_await_completion(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- if (wait_event_timeout(
- mcdi->wq,
- atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
- MCDI_RPC_TIMEOUT) == 0)
+ if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED,
+ MCDI_RPC_TIMEOUT) == 0)
return -ETIMEDOUT;
/* Check if efx_mcdi_set_mode() switched us back to polled completions.
@@ -274,17 +339,14 @@ static int efx_mcdi_await_completion(struct efx_nic *efx)
return 0;
}
-static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
+/* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the
+ * requester. Return whether this was done. Does not take any locks.
+ */
+static bool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi)
{
- /* If the interface is RUNNING, then move to COMPLETED and wake any
- * waiters. If the interface isn't in RUNNING then we've received a
- * duplicate completion after we've already transitioned back to
- * QUIESCENT. [A subsequent invocation would increment seqno, so would
- * have failed the seqno check].
- */
- if (atomic_cmpxchg(&mcdi->state,
- MCDI_STATE_RUNNING,
- MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) {
+ if (cmpxchg(&mcdi->state,
+ MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) ==
+ MCDI_STATE_RUNNING_SYNC) {
wake_up(&mcdi->wq);
return true;
}
@@ -294,12 +356,93 @@ static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
{
- atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
+ if (mcdi->mode == MCDI_MODE_EVENTS) {
+ struct efx_mcdi_async_param *async;
+ struct efx_nic *efx = mcdi->efx;
+
+ /* Process the asynchronous request queue */
+ spin_lock_bh(&mcdi->async_lock);
+ async = list_first_entry_or_null(
+ &mcdi->async_list, struct efx_mcdi_async_param, list);
+ if (async) {
+ mcdi->state = MCDI_STATE_RUNNING_ASYNC;
+ efx_mcdi_send_request(efx, async->cmd,
+ (const efx_dword_t *)(async + 1),
+ async->inlen);
+ mod_timer(&mcdi->async_timer,
+ jiffies + MCDI_RPC_TIMEOUT);
+ }
+ spin_unlock_bh(&mcdi->async_lock);
+
+ if (async)
+ return;
+ }
+
+ mcdi->state = MCDI_STATE_QUIESCENT;
wake_up(&mcdi->wq);
}
+/* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the
+ * asynchronous completion function, and release the interface.
+ * Return whether this was done. Must be called in bh-disabled
+ * context. Will take iface_lock and async_lock.
+ */
+static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
+{
+ struct efx_nic *efx = mcdi->efx;
+ struct efx_mcdi_async_param *async;
+ size_t hdr_len, data_len;
+ efx_dword_t *outbuf;
+ int rc;
+
+ if (cmpxchg(&mcdi->state,
+ MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) !=
+ MCDI_STATE_RUNNING_ASYNC)
+ return false;
+
+ spin_lock(&mcdi->iface_lock);
+ if (timeout) {
+ /* Ensure that if the completion event arrives later,
+ * the seqno check in efx_mcdi_ev_cpl() will fail
+ */
+ ++mcdi->seqno;
+ ++mcdi->credits;
+ rc = -ETIMEDOUT;
+ hdr_len = 0;
+ data_len = 0;
+ } else {
+ rc = mcdi->resprc;
+ hdr_len = mcdi->resp_hdr_len;
+ data_len = mcdi->resp_data_len;
+ }
+ spin_unlock(&mcdi->iface_lock);
+
+ /* Stop the timer. In case the timer function is running, we
+ * must wait for it to return so that there is no possibility
+ * of it aborting the next request.
+ */
+ if (!timeout)
+ del_timer_sync(&mcdi->async_timer);
+
+ spin_lock(&mcdi->async_lock);
+ async = list_first_entry(&mcdi->async_list,
+ struct efx_mcdi_async_param, list);
+ list_del(&async->list);
+ spin_unlock(&mcdi->async_lock);
+
+ outbuf = (efx_dword_t *)(async + 1);
+ efx->type->mcdi_read_response(efx, outbuf, hdr_len,
+ min(async->outlen, data_len));
+ async->complete(efx, async->cookie, rc, outbuf, data_len);
+ kfree(async);
+
+ efx_mcdi_release(mcdi);
+
+ return true;
+}
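+
+/* Editor's sketch of the state machine implemented by the acquire,
+ * complete and release helpers above:
+ *
+ *	QUIESCENT --acquire_sync-->  RUNNING_SYNC  --complete_sync-->  COMPLETED
+ *	QUIESCENT --acquire_async--> RUNNING_ASYNC --complete_async--> COMPLETED
+ *	COMPLETED --release--> QUIESCENT, or straight to RUNNING_ASYNC
+ *		when efx_mcdi_release() starts another queued async request
+ */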
+
static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
- unsigned int datalen, unsigned int errno)
+ unsigned int datalen, unsigned int mcdi_err)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
bool wake = false;
@@ -315,52 +458,161 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
"MC response mismatch tx seq 0x%x rx "
"seq 0x%x\n", seqno, mcdi->seqno);
} else {
- mcdi->resprc = errno;
- mcdi->resplen = datalen;
+ if (efx->type->mcdi_max_ver >= 2) {
+ /* MCDI v2 responses don't fit in an event */
+ efx_mcdi_read_response_header(efx);
+ } else {
+ mcdi->resprc = efx_mcdi_errno(mcdi_err);
+ mcdi->resp_hdr_len = 4;
+ mcdi->resp_data_len = datalen;
+ }
wake = true;
}
spin_unlock(&mcdi->iface_lock);
- if (wake)
- efx_mcdi_complete(mcdi);
+ if (wake) {
+ if (!efx_mcdi_complete_async(mcdi, false))
+ (void) efx_mcdi_complete_sync(mcdi);
+
+ /* If the interface isn't RUNNING_ASYNC or
+ * RUNNING_SYNC then we've received a duplicate
+ * completion after we've already transitioned back to
+ * QUIESCENT. [A subsequent invocation would increment
+ * seqno, so would have failed the seqno check].
+ */
+ }
+}
+
+static void efx_mcdi_timeout_async(unsigned long context)
+{
+ struct efx_mcdi_iface *mcdi = (struct efx_mcdi_iface *)context;
+
+ efx_mcdi_complete_async(mcdi, true);
+}
+
+static int
+efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
+{
+ if (efx->type->mcdi_max_ver < 0 ||
+ (efx->type->mcdi_max_ver < 2 &&
+ cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
+ return -EINVAL;
+
+ if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
+ (efx->type->mcdi_max_ver < 2 &&
+ inlen > MCDI_CTL_SDU_LEN_MAX_V1))
+ return -EMSGSIZE;
+
+ return 0;
}
int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
- const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual)
{
- efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
+ int rc;
+
+ rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
+ if (rc)
+ return rc;
return efx_mcdi_rpc_finish(efx, cmd, inlen,
outbuf, outlen, outlen_actual);
}
-void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, const u8 *inbuf,
- size_t inlen)
+int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ int rc;
- BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
+ rc = efx_mcdi_check_supported(efx, cmd, inlen);
+ if (rc)
+ return rc;
- efx_mcdi_acquire(mcdi);
+ efx_mcdi_acquire_sync(mcdi);
+ efx_mcdi_send_request(efx, cmd, inbuf, inlen);
+ return 0;
+}
- /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
- spin_lock_bh(&mcdi->iface_lock);
- ++mcdi->seqno;
- spin_unlock_bh(&mcdi->iface_lock);
+/**
+ * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
+ * @efx: NIC through which to issue the command
+ * @cmd: Command type number
+ * @inbuf: Command parameters
+ * @inlen: Length of command parameters, in bytes
+ * @outlen: Length to allocate for response buffer, in bytes
+ * @complete: Function to be called on completion or cancellation.
+ * @cookie: Arbitrary value to be passed to @complete.
+ *
+ * This function does not sleep and therefore may be called in atomic
+ * context. It will fail if event queues are disabled or if MCDI
+ * event completions have been disabled due to an error.
+ *
+ * If it succeeds, the @complete function will be called exactly once
+ * in atomic context, when one of the following occurs:
+ * (a) the completion event is received (in NAPI context)
+ * (b) event queues are disabled (in the process that disables them)
+ * (c) the request times out (in timer context)
+ */
+int
+efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen, size_t outlen,
+ efx_mcdi_async_completer *complete, unsigned long cookie)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ struct efx_mcdi_async_param *async;
+ int rc;
+
+ rc = efx_mcdi_check_supported(efx, cmd, inlen);
+ if (rc)
+ return rc;
+
+ async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4),
+ GFP_ATOMIC);
+ if (!async)
+ return -ENOMEM;
+
+ async->cmd = cmd;
+ async->inlen = inlen;
+ async->outlen = outlen;
+ async->complete = complete;
+ async->cookie = cookie;
+ memcpy(async + 1, inbuf, inlen);
+
+ spin_lock_bh(&mcdi->async_lock);
+
+ if (mcdi->mode == MCDI_MODE_EVENTS) {
+ list_add_tail(&async->list, &mcdi->async_list);
+
+ /* If this is at the front of the queue, try to start it
+ * immediately
+ */
+ if (mcdi->async_list.next == &async->list &&
+ efx_mcdi_acquire_async(mcdi)) {
+ efx_mcdi_send_request(efx, cmd, inbuf, inlen);
+ mod_timer(&mcdi->async_timer,
+ jiffies + MCDI_RPC_TIMEOUT);
+ }
+ } else {
+ kfree(async);
+ rc = -ENETDOWN;
+ }
+
+ spin_unlock_bh(&mcdi->async_lock);
- efx_mcdi_copyin(efx, cmd, inbuf, inlen);
+ return rc;
}
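+
+/* Illustrative caller (editor's sketch, not from this patch; names are
+ * hypothetical).  The completer signature matches the invocation in
+ * efx_mcdi_complete_async() above:
+ *
+ *	static void example_complete(struct efx_nic *efx,
+ *				     unsigned long cookie, int rc,
+ *				     efx_dword_t *outbuf,
+ *				     size_t outlen_actual)
+ *	{
+ *		if (rc)
+ *			netif_err(efx, hw, efx->net_dev,
+ *				  "async MCDI failed rc=%d\n", rc);
+ *	}
+ *
+ *	rc = efx_mcdi_rpc_async(efx, MC_CMD_GET_VERSION, NULL, 0,
+ *				MC_CMD_GET_VERSION_OUT_LEN,
+ *				example_complete, 0);
+ */
+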
int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
- u8 *outbuf, size_t outlen, size_t *outlen_actual)
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
int rc;
- BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
-
if (mcdi->mode == MCDI_MODE_POLL)
rc = efx_mcdi_poll(efx);
else
@@ -380,22 +632,25 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
"MC command 0x%x inlen %d mode %d timed out\n",
cmd, (int)inlen, mcdi->mode);
} else {
- size_t resplen;
+ size_t hdr_len, data_len;
/* At the very least we need a memory barrier here to ensure
* we pick up changes from efx_mcdi_ev_cpl(). Protect against
* a spurious efx_mcdi_ev_cpl() running concurrently by
* acquiring the iface_lock. */
spin_lock_bh(&mcdi->iface_lock);
- rc = -mcdi->resprc;
- resplen = mcdi->resplen;
+ rc = mcdi->resprc;
+ hdr_len = mcdi->resp_hdr_len;
+ data_len = mcdi->resp_data_len;
spin_unlock_bh(&mcdi->iface_lock);
+ BUG_ON(rc > 0);
+
if (rc == 0) {
- efx_mcdi_copyout(efx, outbuf,
- min(outlen, mcdi->resplen + 3) & ~0x3);
+ efx->type->mcdi_read_response(efx, outbuf, hdr_len,
+ min(outlen, data_len));
if (outlen_actual != NULL)
- *outlen_actual = resplen;
+ *outlen_actual = data_len;
} else if (cmd == MC_CMD_REBOOT && rc == -EIO)
; /* Don't reset if MC_CMD_REBOOT returns EIO */
else if (rc == -EIO || rc == -EINTR) {
@@ -410,6 +665,7 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
if (rc == -EIO || rc == -EINTR) {
msleep(MCDI_STATUS_SLEEP_MS);
efx_mcdi_poll_reboot(efx);
+ mcdi->new_epoch = true;
}
}
@@ -417,11 +673,15 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
return rc;
}
+/* Switch to polled MCDI completions. This can be called in various
+ * error conditions with various locks held, so it must be lockless.
+ * Caller is responsible for flushing asynchronous requests later.
+ */
void efx_mcdi_mode_poll(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi;
- if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
+ if (!efx->mcdi)
return;
mcdi = efx_mcdi(efx);
@@ -434,18 +694,57 @@ void efx_mcdi_mode_poll(struct efx_nic *efx)
* efx_mcdi_await_completion() will then call efx_mcdi_poll().
*
* We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
- * which efx_mcdi_complete() provides for us.
+ * which efx_mcdi_complete_sync() provides for us.
*/
mcdi->mode = MCDI_MODE_POLL;
- efx_mcdi_complete(mcdi);
+ efx_mcdi_complete_sync(mcdi);
+}
+
+/* Flush any running or queued asynchronous requests, after event processing
+ * is stopped
+ */
+void efx_mcdi_flush_async(struct efx_nic *efx)
+{
+ struct efx_mcdi_async_param *async, *next;
+ struct efx_mcdi_iface *mcdi;
+
+ if (!efx->mcdi)
+ return;
+
+ mcdi = efx_mcdi(efx);
+
+ /* We must be in polling mode so no more requests can be queued */
+ BUG_ON(mcdi->mode != MCDI_MODE_POLL);
+
+ del_timer_sync(&mcdi->async_timer);
+
+ /* If a request is still running, make sure we give the MC
+ * time to complete it so that the response won't overwrite our
+ * next request.
+ */
+ if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) {
+ efx_mcdi_poll(efx);
+ mcdi->state = MCDI_STATE_QUIESCENT;
+ }
+
+ /* Nothing else will access the async list now, so it is safe
+ * to walk it without holding async_lock. If we hold it while
+ * calling a completer then lockdep may warn that we have
+ * acquired locks in the wrong order.
+ */
+ list_for_each_entry_safe(async, next, &mcdi->async_list, list) {
+ async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
+ list_del(&async->list);
+ kfree(async);
+ }
}
void efx_mcdi_mode_event(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi;
- if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
+ if (!efx->mcdi)
return;
mcdi = efx_mcdi(efx);
@@ -460,7 +759,7 @@ void efx_mcdi_mode_event(struct efx_nic *efx)
* write memory barrier ensure that efx_mcdi_rpc() sees it, which
* efx_mcdi_acquire() provides.
*/
- efx_mcdi_acquire(mcdi);
+ efx_mcdi_acquire_sync(mcdi);
mcdi->mode = MCDI_MODE_EVENTS;
efx_mcdi_release(mcdi);
}
@@ -477,19 +776,25 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
* are sent to the same queue, we can't be racing with
* efx_mcdi_ev_cpl()]
*
- * There's a race here with efx_mcdi_rpc(), because we might receive
- * a REBOOT event *before* the request has been copied out. In polled
- * mode (during startup) this is irrelevant, because efx_mcdi_complete()
- * is ignored. In event mode, this condition is just an edge-case of
- * receiving a REBOOT event after posting the MCDI request. Did the mc
- * reboot before or after the copyout? The best we can do always is
- * just return failure.
+ * If there is an outstanding asynchronous request, we can't
+	 * complete it now (efx_mcdi_complete_async() would deadlock). The
+ * reset process will take care of this.
+ *
+ * There's a race here with efx_mcdi_send_request(), because
+ * we might receive a REBOOT event *before* the request has
+ * been copied out. In polled mode (during startup) this is
+ * irrelevant, because efx_mcdi_complete_sync() is ignored. In
+ * event mode, this condition is just an edge-case of
+ * receiving a REBOOT event after posting the MCDI
+	 * request. Did the MC reboot before or after the copyout? The
+	 * best we can always do is just return failure.
*/
spin_lock(&mcdi->iface_lock);
- if (efx_mcdi_complete(mcdi)) {
+ if (efx_mcdi_complete_sync(mcdi)) {
if (mcdi->mode == MCDI_MODE_EVENTS) {
mcdi->resprc = rc;
- mcdi->resplen = 0;
+ mcdi->resp_hdr_len = 0;
+ mcdi->resp_data_len = 0;
++mcdi->credits;
}
} else {
@@ -504,41 +809,12 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
break;
udelay(MCDI_STATUS_DELAY_US);
}
+ mcdi->new_epoch = true;
}
spin_unlock(&mcdi->iface_lock);
}
-static unsigned int efx_mcdi_event_link_speed[] = {
- [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
- [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
- [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
-};
-
-
-static void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
-{
- u32 flags, fcntl, speed, lpa;
-
- speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
- EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
- speed = efx_mcdi_event_link_speed[speed];
-
- flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
- fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
- lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);
-
- /* efx->link_state is only modified by efx_mcdi_phy_get_link(),
- * which is only run after flushing the event queues. Therefore, it
- * is safe to modify the link state outside of the mac_lock here.
- */
- efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);
-
- efx_mcdi_phy_check_fcntl(efx, lpa);
-
- efx_link_status_changed(efx);
-}
-
/* Called from falcon_process_eventq for MCDI events */
void efx_mcdi_process_event(struct efx_channel *channel,
efx_qword_t *event)
@@ -551,7 +827,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
case MCDI_EVENT_CODE_BADSSERT:
netif_err(efx, hw, efx->net_dev,
"MC watchdog or assertion failure at 0x%x\n", data);
- efx_mcdi_ev_death(efx, EINTR);
+ efx_mcdi_ev_death(efx, -EINTR);
break;
case MCDI_EVENT_CODE_PMNOTICE:
@@ -576,8 +852,9 @@ void efx_mcdi_process_event(struct efx_channel *channel,
"MC Scheduler error address=0x%x\n", data);
break;
case MCDI_EVENT_CODE_REBOOT:
+ case MCDI_EVENT_CODE_MC_REBOOT:
netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
- efx_mcdi_ev_death(efx, EIO);
+ efx_mcdi_ev_death(efx, -EIO);
break;
case MCDI_EVENT_CODE_MAC_STATS_DMA:
		/* MAC stats are gathered lazily. We can ignore this. */
@@ -590,7 +867,27 @@ void efx_mcdi_process_event(struct efx_channel *channel,
case MCDI_EVENT_CODE_PTP_PPS:
efx_ptp_event(efx, event);
break;
-
+ case MCDI_EVENT_CODE_TX_FLUSH:
+ case MCDI_EVENT_CODE_RX_FLUSH:
+ /* Two flush events will be sent: one to the same event
+ * queue as completions, and one to event queue 0.
+ * In the latter case the {RX,TX}_FLUSH_TO_DRIVER
+ * flag will be set, and we should ignore the event
+ * because we want to wait for all completions.
+ */
+ BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN !=
+ MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN);
+ if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER))
+ efx_ef10_handle_drain_event(efx);
+ break;
+ case MCDI_EVENT_CODE_TX_ERR:
+ case MCDI_EVENT_CODE_RX_ERR:
+ netif_err(efx, hw, efx->net_dev,
+ "%s DMA error (event: "EFX_QWORD_FMT")\n",
+ code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
+ EFX_QWORD_VAL(*event));
+ efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ break;
default:
netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
code);
@@ -606,27 +903,55 @@ void efx_mcdi_process_event(struct efx_channel *channel,
void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
{
- u8 outbuf[ALIGN(MC_CMD_GET_VERSION_OUT_LEN, 4)];
+ MCDI_DECLARE_BUF(outbuf,
+ max(MC_CMD_GET_VERSION_OUT_LEN,
+ MC_CMD_GET_CAPABILITIES_OUT_LEN));
size_t outlength;
const __le16 *ver_words;
+ size_t offset;
int rc;
BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);
-
rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
outbuf, sizeof(outbuf), &outlength);
if (rc)
goto fail;
-
if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
rc = -EIO;
goto fail;
}
ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
- snprintf(buf, len, "%u.%u.%u.%u",
- le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
- le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
+ offset = snprintf(buf, len, "%u.%u.%u.%u",
+ le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
+ le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
+
+ /* EF10 may have multiple datapath firmware variants within a
+ * single version. Report which variants are running.
+ */
+ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
+ BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlength);
+ if (rc || outlength < MC_CMD_GET_CAPABILITIES_OUT_LEN)
+ offset += snprintf(
+ buf + offset, len - offset, " rx? tx?");
+ else
+ offset += snprintf(
+ buf + offset, len - offset, " rx%x tx%x",
+ MCDI_WORD(outbuf,
+ GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID),
+ MCDI_WORD(outbuf,
+ GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID));
+
+ /* It's theoretically possible for the string to exceed 31
+ * characters, though in practice the first three version
+ * components are short enough that this doesn't happen.
+ */
+ if (WARN_ON(offset >= len))
+ buf[0] = 0;
+ }
+
return;
fail:
@@ -634,17 +959,18 @@ fail:
buf[0] = 0;
}
-int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
- bool *was_attached)
+static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
+ bool *was_attached)
{
- u8 inbuf[MC_CMD_DRV_ATTACH_IN_LEN];
- u8 outbuf[MC_CMD_DRV_ATTACH_OUT_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN);
size_t outlen;
int rc;
MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
driver_operating ? 1 : 0);
MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
+ MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);
rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
@@ -667,8 +993,8 @@ fail:
int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
u16 *fw_subtype_list, u32 *capabilities)
{
- uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMAX];
- size_t outlen, offset, i;
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
+ size_t outlen, i;
int port_num = efx_port_num(efx);
int rc;
@@ -684,22 +1010,21 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
goto fail;
}
- offset = (port_num)
- ? MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST
- : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST;
if (mac_address)
- memcpy(mac_address, outbuf + offset, ETH_ALEN);
+ memcpy(mac_address,
+ port_num ?
+ MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
+ MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0),
+ ETH_ALEN);
if (fw_subtype_list) {
- /* Byte-swap and truncate or zero-pad as necessary */
- offset = MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST;
for (i = 0;
- i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM;
- i++) {
- fw_subtype_list[i] =
- (offset + 2 <= outlen) ?
- le16_to_cpup((__le16 *)(outbuf + offset)) : 0;
- offset += 2;
- }
+ i < MCDI_VAR_ARRAY_LEN(outlen,
+ GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
+ i++)
+ fw_subtype_list[i] = MCDI_ARRAY_WORD(
+ outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
+ for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
+ fw_subtype_list[i] = 0;
}
if (capabilities) {
if (port_num)
@@ -721,7 +1046,7 @@ fail:
int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
{
- u8 inbuf[MC_CMD_LOG_CTRL_IN_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
u32 dest = 0;
int rc;
@@ -749,7 +1074,7 @@ fail:
int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
{
- u8 outbuf[MC_CMD_NVRAM_TYPES_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
size_t outlen;
int rc;
@@ -777,8 +1102,8 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
size_t *size_out, size_t *erase_size_out,
bool *protected_out)
{
- u8 inbuf[MC_CMD_NVRAM_INFO_IN_LEN];
- u8 outbuf[MC_CMD_NVRAM_INFO_OUT_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
size_t outlen;
int rc;
@@ -804,127 +1129,10 @@ fail:
return rc;
}
-int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
-{
- u8 inbuf[MC_CMD_NVRAM_UPDATE_START_IN_LEN];
- int rc;
-
- MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
-
- BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
-
-int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
- loff_t offset, u8 *buffer, size_t length)
-{
- u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN];
- u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
- MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
- MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- goto fail;
-
- memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
-
-int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
- loff_t offset, const u8 *buffer, size_t length)
-{
- u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
- int rc;
-
- MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
- MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
- MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
- memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
-
- BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
- ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
- NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
-
-int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
- loff_t offset, size_t length)
-{
- u8 inbuf[MC_CMD_NVRAM_ERASE_IN_LEN];
- int rc;
-
- MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
- MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
- MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
-
- BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
-
-int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
-{
- u8 inbuf[MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN];
- int rc;
-
- MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
-
- BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
-
static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
{
- u8 inbuf[MC_CMD_NVRAM_TEST_IN_LEN];
- u8 outbuf[MC_CMD_NVRAM_TEST_OUT_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
int rc;
MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);
@@ -976,9 +1184,9 @@ fail1:
static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
- u8 inbuf[MC_CMD_GET_ASSERTS_IN_LEN];
- u8 outbuf[MC_CMD_GET_ASSERTS_OUT_LEN];
- unsigned int flags, index, ofst;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
+ unsigned int flags, index;
const char *reason;
size_t outlen;
int retry;
@@ -1020,19 +1228,20 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));
/* Print out the registers */
- ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
- for (index = 1; index < 32; index++) {
- netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", index,
- MCDI_DWORD2(outbuf, ofst));
- ofst += sizeof(efx_dword_t);
- }
+ for (index = 0;
+ index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
+ index++)
+ netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
+ 1 + index,
+ MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
+ index));
return 0;
}
static void efx_mcdi_exit_assertion(struct efx_nic *efx)
{
- u8 inbuf[MC_CMD_REBOOT_IN_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
/* If the MC is running debug firmware, it might now be
* waiting for a debugger to attach, but we just want it to
@@ -1062,7 +1271,7 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx)
void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
- u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
int rc;
BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
@@ -1080,7 +1289,7 @@ void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
__func__, rc);
}
-int efx_mcdi_reset_port(struct efx_nic *efx)
+static int efx_mcdi_reset_port(struct efx_nic *efx)
{
int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);
if (rc)
@@ -1089,9 +1298,9 @@ int efx_mcdi_reset_port(struct efx_nic *efx)
return rc;
}
-int efx_mcdi_reset_mc(struct efx_nic *efx)
+static int efx_mcdi_reset_mc(struct efx_nic *efx)
{
- u8 inbuf[MC_CMD_REBOOT_IN_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
int rc;
BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
@@ -1107,11 +1316,31 @@ int efx_mcdi_reset_mc(struct efx_nic *efx)
return rc;
}
+enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason)
+{
+ return RESET_TYPE_RECOVER_OR_ALL;
+}
+
+int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
+{
+ int rc;
+
+ /* Recover from a failed assertion pre-reset */
+ rc = efx_mcdi_handle_assertion(efx);
+ if (rc)
+ return rc;
+
+ if (method == RESET_TYPE_WORLD)
+ return efx_mcdi_reset_mc(efx);
+ else
+ return efx_mcdi_reset_port(efx);
+}
+
static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
const u8 *mac, int *id_out)
{
- u8 inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN];
- u8 outbuf[MC_CMD_WOL_FILTER_SET_OUT_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
size_t outlen;
int rc;
@@ -1151,7 +1380,7 @@ efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
{
- u8 outbuf[MC_CMD_WOL_FILTER_GET_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
size_t outlen;
int rc;
@@ -1178,7 +1407,7 @@ fail:
int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
{
- u8 inbuf[MC_CMD_WOL_FILTER_REMOVE_IN_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
int rc;
MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);
@@ -1199,34 +1428,31 @@ int efx_mcdi_flush_rxqs(struct efx_nic *efx)
{
struct efx_channel *channel;
struct efx_rx_queue *rx_queue;
- __le32 *qid;
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
int rc, count;
BUILD_BUG_ON(EFX_MAX_CHANNELS >
MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
- qid = kmalloc(EFX_MAX_CHANNELS * sizeof(*qid), GFP_KERNEL);
- if (qid == NULL)
- return -ENOMEM;
-
count = 0;
efx_for_each_channel(channel, efx) {
efx_for_each_channel_rx_queue(rx_queue, channel) {
if (rx_queue->flush_pending) {
rx_queue->flush_pending = false;
atomic_dec(&efx->rxq_flush_pending);
- qid[count++] = cpu_to_le32(
- efx_rx_queue_index(rx_queue));
+ MCDI_SET_ARRAY_DWORD(
+ inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
+ count, efx_rx_queue_index(rx_queue));
+ count++;
}
}
}
- rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)qid,
- count * sizeof(*qid), NULL, 0, NULL);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
+ MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
WARN_ON(rc < 0);
- kfree(qid);
-
return rc;
}
@@ -1245,3 +1471,247 @@ fail:
return rc;
}
+int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);
+
+ BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0);
+ MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type);
+ MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled);
+ return efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+#ifdef CONFIG_SFC_MTD
+
+#define EFX_MCDI_NVRAM_LEN_MAX 128
+
+static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
+ loff_t offset, u8 *buffer, size_t length)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf,
+ MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
+ MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
+ MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
+ loff_t offset, const u8 *buffer, size_t length)
+{
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
+ MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
+ MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
+ memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
+ ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
+ loff_t offset, size_t length)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
+ MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
+ MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, u8 *buffer)
+{
+ struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ loff_t offset = start;
+ loff_t end = min_t(loff_t, start + len, mtd->size);
+ size_t chunk;
+ int rc = 0;
+
+ while (offset < end) {
+ chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
+ rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset,
+ buffer, chunk);
+ if (rc)
+ goto out;
+ offset += chunk;
+ buffer += chunk;
+ }
+out:
+ *retlen = offset - start;
+ return rc;
+}
+
+int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
+{
+ struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
+ loff_t end = min_t(loff_t, start + len, mtd->size);
+ size_t chunk = part->common.mtd.erasesize;
+ int rc = 0;
+
+ if (!part->updating) {
+ rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
+ if (rc)
+ goto out;
+ part->updating = true;
+ }
+
+	/* The MCDI interface can in fact erase multiple blocks at once,
+	 * but erasing may be slow, so we make multiple calls here to avoid
+	 * tripping the MCDI RPC timeout. */
+ while (offset < end) {
+ rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset,
+ chunk);
+ if (rc)
+ goto out;
+ offset += chunk;
+ }
+out:
+ return rc;
+}
+
+int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, const u8 *buffer)
+{
+ struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ loff_t offset = start;
+ loff_t end = min_t(loff_t, start + len, mtd->size);
+ size_t chunk;
+ int rc = 0;
+
+ if (!part->updating) {
+ rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
+ if (rc)
+ goto out;
+ part->updating = true;
+ }
+
+ while (offset < end) {
+ chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
+ rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset,
+ buffer, chunk);
+ if (rc)
+ goto out;
+ offset += chunk;
+ buffer += chunk;
+ }
+out:
+ *retlen = offset - start;
+ return rc;
+}
+
+int efx_mcdi_mtd_sync(struct mtd_info *mtd)
+{
+ struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ int rc = 0;
+
+ if (part->updating) {
+ part->updating = false;
+ rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
+ }
+
+ return rc;
+}
+
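A minimal usage sketch, not part of the patch, of the update lifecycle these MTD ops implement: the first erase or write issues NVRAM_UPDATE_START, data then moves in EFX_MCDI_NVRAM_LEN_MAX-sized chunks, and efx_mcdi_mtd_sync() commits the update with NVRAM_UPDATE_FINISH. The helper name, the rewrite-from-offset-0 pattern and the omitted locking are illustrative assumptions.

/* Hypothetical caller; assumes len is a multiple of mtd->erasesize
 * and fits within the partition.
 */
static int example_nvram_rewrite(struct mtd_info *mtd,
				 const u8 *image, size_t len)
{
	size_t retlen;
	int rc;

	rc = efx_mcdi_mtd_erase(mtd, 0, len);	/* implies UPDATE_START */
	if (rc)
		return rc;
	rc = efx_mcdi_mtd_write(mtd, 0, len, &retlen, image);
	if (rc)
		return rc;
	return efx_mcdi_mtd_sync(mtd);		/* UPDATE_FINISH */
}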
+void efx_mcdi_mtd_rename(struct efx_mtd_partition *part)
+{
+ struct efx_mcdi_mtd_partition *mcdi_part =
+ container_of(part, struct efx_mcdi_mtd_partition, common);
+ struct efx_nic *efx = part->mtd.priv;
+
+ snprintf(part->name, sizeof(part->name), "%s %s:%02x",
+ efx->name, part->type_name, mcdi_part->fw_subtype);
+}
+
+#endif /* CONFIG_SFC_MTD */
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 3ba2e5b5a9cc..c34d0d4e10ee 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2008-2010 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2008-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -11,18 +11,20 @@
#define EFX_MCDI_H
/**
- * enum efx_mcdi_state
+ * enum efx_mcdi_state - MCDI request handling state
* @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the
- * mcdi_lock then they are able to move to MCDI_STATE_RUNNING
- * @MCDI_STATE_RUNNING: There is an MCDI request pending. Only the thread that
- * moved into this state is allowed to move out of it.
+ mcdi @iface_lock then they are able to move to %MCDI_STATE_RUNNING_SYNC
+ * @MCDI_STATE_RUNNING_SYNC: There is a synchronous MCDI request pending.
+ * Only the thread that moved into this state is allowed to move out of it.
+ * @MCDI_STATE_RUNNING_ASYNC: There is an asynchronous MCDI request pending.
* @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread
* has not yet consumed the result. For all other threads, equivalent to
- * MCDI_STATE_RUNNING.
+ %MCDI_STATE_RUNNING_SYNC.
*/
enum efx_mcdi_state {
MCDI_STATE_QUIESCENT,
- MCDI_STATE_RUNNING,
+ MCDI_STATE_RUNNING_SYNC,
+ MCDI_STATE_RUNNING_ASYNC,
MCDI_STATE_COMPLETED,
};
@@ -32,28 +34,39 @@ enum efx_mcdi_mode {
};
/**
- * struct efx_mcdi_iface
- * @state: Interface state. Waited for by mcdi_wq.
- * @wq: Wait queue for threads waiting for state != STATE_RUNNING
- * @iface_lock: Protects @credits, @seqno, @resprc, @resplen
+ * struct efx_mcdi_iface - MCDI protocol context
+ * @efx: The associated NIC.
+ * @state: Request handling state. Waited for by @wq.
* @mode: Poll for mcdi completion, or wait for an mcdi_event.
- * Serialised by @lock
+ * @wq: Wait queue for threads waiting for @state != %MCDI_STATE_RUNNING_SYNC
+ * @new_epoch: Indicates start of day or start of MC reboot recovery
+ * @iface_lock: Serialises access to @seqno, @credits and response metadata
* @seqno: The next sequence number to use for mcdi requests.
- * Serialised by @lock
* @credits: Number of spurious MCDI completion events allowed before we
- * trigger a fatal error. Protected by @lock
- * @resprc: Returned MCDI completion
- * @resplen: Returned payload length
+ * trigger a fatal error
+ * @resprc: Response error/success code (Linux numbering)
+ * @resp_hdr_len: Response header length
+ * @resp_data_len: Response data (SDU or error) length
+ * @async_lock: Serialises access to @async_list while event processing is
+ * enabled
+ * @async_list: Queue of asynchronous requests
+ * @async_timer: Timer for asynchronous request timeout
*/
struct efx_mcdi_iface {
- atomic_t state;
+ struct efx_nic *efx;
+ enum efx_mcdi_state state;
+ enum efx_mcdi_mode mode;
wait_queue_head_t wq;
spinlock_t iface_lock;
- enum efx_mcdi_mode mode;
+ bool new_epoch;
unsigned int credits;
unsigned int seqno;
- unsigned int resprc;
- size_t resplen;
+ int resprc;
+ size_t resp_hdr_len;
+ size_t resp_data_len;
+ spinlock_t async_lock;
+ struct list_head async_list;
+ struct timer_list async_timer;
};
struct efx_mcdi_mon {
@@ -65,65 +78,204 @@ struct efx_mcdi_mon {
unsigned int n_attrs;
};
-extern void efx_mcdi_init(struct efx_nic *efx);
+struct efx_mcdi_mtd_partition {
+ struct efx_mtd_partition common;
+ bool updating;
+ u16 nvram_type;
+ u16 fw_subtype;
+};
+
+#define to_efx_mcdi_mtd_partition(mtd) \
+ container_of(mtd, struct efx_mcdi_mtd_partition, common.mtd)
+
+/**
+ * struct efx_mcdi_data - extra state for NICs that implement MCDI
+ * @iface: Interface/protocol state
+ * @hwmon: Hardware monitor state
+ */
+struct efx_mcdi_data {
+ struct efx_mcdi_iface iface;
+#ifdef CONFIG_SFC_MCDI_MON
+ struct efx_mcdi_mon hwmon;
+#endif
+};
+
+#ifdef CONFIG_SFC_MCDI_MON
+static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
+{
+ EFX_BUG_ON_PARANOID(!efx->mcdi);
+ return &efx->mcdi->hwmon;
+}
+#endif
+
+extern int efx_mcdi_init(struct efx_nic *efx);
+extern void efx_mcdi_fini(struct efx_nic *efx);
-extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const u8 *inbuf,
- size_t inlen, u8 *outbuf, size_t outlen,
+extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual);
-extern void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
- const u8 *inbuf, size_t inlen);
+extern int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen);
extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
- u8 *outbuf, size_t outlen,
+ efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual);
+typedef void efx_mcdi_async_completer(struct efx_nic *efx,
+ unsigned long cookie, int rc,
+ efx_dword_t *outbuf,
+ size_t outlen_actual);
+extern int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ size_t outlen,
+ efx_mcdi_async_completer *complete,
+ unsigned long cookie);
+
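A hedged sketch of the new asynchronous path: the caller queues a request with efx_mcdi_rpc_async() and is called back through an efx_mcdi_async_completer once the MC responds (or @async_timer expires). The command choice, completer body and cookie value here are illustrative assumptions, not part of the patch.

static void example_complete(struct efx_nic *efx, unsigned long cookie,
			     int rc, efx_dword_t *outbuf,
			     size_t outlen_actual)
{
	if (rc)
		netif_err(efx, hw, efx->net_dev,
			  "async MCDI failed rc=%d cookie=%lu\n", rc, cookie);
}

static int example_async_call(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_FUNC_IN_LEN);

	MCDI_SET_DWORD(inbuf, SET_FUNC_IN_FUNC, 0);
	/* Returns once queued; example_complete() runs on completion */
	return efx_mcdi_rpc_async(efx, MC_CMD_SET_FUNC, inbuf, sizeof(inbuf),
				  0, example_complete, 0);
}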
extern int efx_mcdi_poll_reboot(struct efx_nic *efx);
extern void efx_mcdi_mode_poll(struct efx_nic *efx);
extern void efx_mcdi_mode_event(struct efx_nic *efx);
+extern void efx_mcdi_flush_async(struct efx_nic *efx);
extern void efx_mcdi_process_event(struct efx_channel *channel,
efx_qword_t *event);
extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
-#define MCDI_PTR2(_buf, _ofst) \
- (((u8 *)_buf) + _ofst)
-#define MCDI_SET_DWORD2(_buf, _ofst, _value) \
- EFX_POPULATE_DWORD_1(*((efx_dword_t *)MCDI_PTR2(_buf, _ofst)), \
- EFX_DWORD_0, _value)
-#define MCDI_DWORD2(_buf, _ofst) \
- EFX_DWORD_FIELD(*((efx_dword_t *)MCDI_PTR2(_buf, _ofst)), \
- EFX_DWORD_0)
-#define MCDI_QWORD2(_buf, _ofst) \
- EFX_QWORD_FIELD64(*((efx_qword_t *)MCDI_PTR2(_buf, _ofst)), \
- EFX_QWORD_0)
-
-#define MCDI_PTR(_buf, _ofst) \
- MCDI_PTR2(_buf, MC_CMD_ ## _ofst ## _OFST)
-#define MCDI_ARRAY_PTR(_buf, _field, _type, _index) \
- MCDI_PTR2(_buf, \
- MC_CMD_ ## _field ## _OFST + \
- (_index) * MC_CMD_ ## _type ## _TYPEDEF_LEN)
-#define MCDI_SET_DWORD(_buf, _ofst, _value) \
- MCDI_SET_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST, _value)
-#define MCDI_DWORD(_buf, _ofst) \
- MCDI_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST)
-#define MCDI_QWORD(_buf, _ofst) \
- MCDI_QWORD2(_buf, MC_CMD_ ## _ofst ## _OFST)
+/* We expect that 16- and 32-bit fields in MCDI requests and responses
+ * are appropriately aligned, but 64-bit fields are only
+ * 32-bit-aligned. Also, on Siena we must copy to the MC shared
+ * memory strictly 32 bits at a time, so add any necessary padding.
+ */
+#define MCDI_DECLARE_BUF(_name, _len) \
+ efx_dword_t _name[DIV_ROUND_UP(_len, 4)]
+#define _MCDI_PTR(_buf, _offset) \
+ ((u8 *)(_buf) + (_offset))
+#define MCDI_PTR(_buf, _field) \
+ _MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST)
+#define _MCDI_CHECK_ALIGN(_ofst, _align) \
+ ((_ofst) + BUILD_BUG_ON_ZERO((_ofst) & (_align - 1)))
+#define _MCDI_DWORD(_buf, _field) \
+ ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
+
+#define MCDI_WORD(_buf, _field) \
+ ((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
+ le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
+#define MCDI_SET_DWORD(_buf, _field, _value) \
+ EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value)
+#define MCDI_DWORD(_buf, _field) \
+ EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0)
+#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
+ EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1)
+#define MCDI_POPULATE_DWORD_2(_buf, _field, _name1, _value1, \
+ _name2, _value2) \
+ EFX_POPULATE_DWORD_2(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2)
+#define MCDI_POPULATE_DWORD_3(_buf, _field, _name1, _value1, \
+ _name2, _value2, _name3, _value3) \
+ EFX_POPULATE_DWORD_3(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2, \
+ MC_CMD_ ## _name3, _value3)
+#define MCDI_POPULATE_DWORD_4(_buf, _field, _name1, _value1, \
+ _name2, _value2, _name3, _value3, \
+ _name4, _value4) \
+ EFX_POPULATE_DWORD_4(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2, \
+ MC_CMD_ ## _name3, _value3, \
+ MC_CMD_ ## _name4, _value4)
+#define MCDI_POPULATE_DWORD_5(_buf, _field, _name1, _value1, \
+ _name2, _value2, _name3, _value3, \
+ _name4, _value4, _name5, _value5) \
+ EFX_POPULATE_DWORD_5(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2, \
+ MC_CMD_ ## _name3, _value3, \
+ MC_CMD_ ## _name4, _value4, \
+ MC_CMD_ ## _name5, _value5)
+#define MCDI_POPULATE_DWORD_6(_buf, _field, _name1, _value1, \
+ _name2, _value2, _name3, _value3, \
+ _name4, _value4, _name5, _value5, \
+ _name6, _value6) \
+ EFX_POPULATE_DWORD_6(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2, \
+ MC_CMD_ ## _name3, _value3, \
+ MC_CMD_ ## _name4, _value4, \
+ MC_CMD_ ## _name5, _value5, \
+ MC_CMD_ ## _name6, _value6)
+#define MCDI_POPULATE_DWORD_7(_buf, _field, _name1, _value1, \
+ _name2, _value2, _name3, _value3, \
+ _name4, _value4, _name5, _value5, \
+ _name6, _value6, _name7, _value7) \
+ EFX_POPULATE_DWORD_7(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2, \
+ MC_CMD_ ## _name3, _value3, \
+ MC_CMD_ ## _name4, _value4, \
+ MC_CMD_ ## _name5, _value5, \
+ MC_CMD_ ## _name6, _value6, \
+ MC_CMD_ ## _name7, _value7)
+#define MCDI_SET_QWORD(_buf, _field, _value) \
+ do { \
+ EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \
+ EFX_DWORD_0, (u32)(_value)); \
+ EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \
+ EFX_DWORD_0, (u64)(_value) >> 32); \
+ } while (0)
+#define MCDI_QWORD(_buf, _field) \
+ (EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], EFX_DWORD_0) | \
+ (u64)EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], EFX_DWORD_0) << 32)
+#define MCDI_FIELD(_ptr, _type, _field) \
+ EFX_EXTRACT_DWORD( \
+ *(efx_dword_t *) \
+ _MCDI_PTR(_ptr, MC_CMD_ ## _type ## _ ## _field ## _OFST & ~3),\
+ MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f, \
+ (MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f) + \
+ MC_CMD_ ## _type ## _ ## _field ## _WIDTH - 1)
+
+#define _MCDI_ARRAY_PTR(_buf, _field, _index, _align) \
+ (_MCDI_PTR(_buf, _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, _align))\
+ + (_index) * _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _LEN, _align))
+#define MCDI_DECLARE_STRUCT_PTR(_name) \
+ efx_dword_t *_name
+#define MCDI_ARRAY_STRUCT_PTR(_buf, _field, _index) \
+ ((efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
+#define MCDI_VAR_ARRAY_LEN(_len, _field) \
+ min_t(size_t, MC_CMD_ ## _field ## _MAXNUM, \
+ ((_len) - MC_CMD_ ## _field ## _OFST) / MC_CMD_ ## _field ## _LEN)
+#define MCDI_ARRAY_WORD(_buf, _field, _index) \
+ (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
+ le16_to_cpu(*(__force const __le16 *) \
+ _MCDI_ARRAY_PTR(_buf, _field, _index, 2)))
+#define _MCDI_ARRAY_DWORD(_buf, _field, _index) \
+ (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 4) + \
+ (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
+#define MCDI_SET_ARRAY_DWORD(_buf, _field, _index, _value) \
+ EFX_SET_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), \
+ EFX_DWORD_0, _value)
+#define MCDI_ARRAY_DWORD(_buf, _field, _index) \
+ EFX_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), EFX_DWORD_0)
+#define _MCDI_ARRAY_QWORD(_buf, _field, _index) \
+ (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 8) + \
+ (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
+#define MCDI_SET_ARRAY_QWORD(_buf, _field, _index, _value) \
+ do { \
+ EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[0],\
+ EFX_DWORD_0, (u32)(_value)); \
+ EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[1],\
+ EFX_DWORD_0, (u64)(_value) >> 32); \
+ } while (0)
+#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \
+ MCDI_FIELD(MCDI_ARRAY_STRUCT_PTR(_buf, _field1, _index), \
+ _type ## _TYPEDEF, _field2)
#define MCDI_EVENT_FIELD(_ev, _field) \
EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
-#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \
- EFX_EXTRACT_DWORD( \
- *((efx_dword_t *) \
- (MCDI_ARRAY_PTR(_buf, _field1, _type, _index) + \
- (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _OFST & ~3))), \
- MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _LBN & 0x1f, \
- (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _LBN & 0x1f) + \
- MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _WIDTH - 1)
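A minimal sketch of the request/response pattern the reworked macros support, using MC_CMD_GET_ASSERTS from later in this patch; the helper name and error handling are illustrative. MCDI_DECLARE_BUF sizes the buffer in whole dwords, MCDI_SET_DWORD fills a request field by name, and MCDI_DWORD extracts a response field.

static int example_get_asserts_flags(struct efx_nic *efx, u32 *flags)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 0);	/* don't clear */
	rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	/* Only GLOBAL_FLAGS is guaranteed to be present */
	if (outlen < 4)
		return -EIO;

	*flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
	return 0;
}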
extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
-extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
- bool *was_attached_out);
extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
u16 *fw_subtype_list, u32 *capabilities);
extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart,
@@ -132,34 +284,29 @@ extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
extern int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
size_t *size_out, size_t *erase_size_out,
bool *protected_out);
-extern int efx_mcdi_nvram_update_start(struct efx_nic *efx,
- unsigned int type);
-extern int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
- loff_t offset, u8 *buffer, size_t length);
-extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
- loff_t offset, const u8 *buffer,
- size_t length);
-#define EFX_MCDI_NVRAM_LEN_MAX 128
-extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
- loff_t offset, size_t length);
-extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx,
- unsigned int type);
extern int efx_mcdi_nvram_test_all(struct efx_nic *efx);
extern int efx_mcdi_handle_assertion(struct efx_nic *efx);
extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
-extern int efx_mcdi_reset_port(struct efx_nic *efx);
-extern int efx_mcdi_reset_mc(struct efx_nic *efx);
extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,
const u8 *mac, int *id_out);
extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
extern int efx_mcdi_flush_rxqs(struct efx_nic *efx);
+extern int efx_mcdi_port_probe(struct efx_nic *efx);
+extern void efx_mcdi_port_remove(struct efx_nic *efx);
+extern int efx_mcdi_port_reconfigure(struct efx_nic *efx);
+extern int efx_mcdi_port_get_number(struct efx_nic *efx);
+extern u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
+extern void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
extern int efx_mcdi_set_mac(struct efx_nic *efx);
-extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
- u32 dma_len, int enable, int clear);
-extern int efx_mcdi_mac_reconfigure(struct efx_nic *efx);
+#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
+extern void efx_mcdi_mac_start_stats(struct efx_nic *efx);
+extern void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
extern bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
+extern enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
+extern int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
+extern int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
#ifdef CONFIG_SFC_MCDI_MON
extern int efx_mcdi_mon_probe(struct efx_nic *efx);
@@ -169,4 +316,14 @@ static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {}
#endif
+#ifdef CONFIG_SFC_MTD
+extern int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, u8 *buffer);
+extern int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
+extern int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, const u8 *buffer);
+extern int efx_mcdi_mtd_sync(struct mtd_info *mtd);
+extern void efx_mcdi_mtd_rename(struct efx_mtd_partition *part);
+#endif
+
#endif /* EFX_MCDI_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_mac.c b/drivers/net/ethernet/sfc/mcdi_mac.c
deleted file mode 100644
index 1003f309cba7..000000000000
--- a/drivers/net/ethernet/sfc/mcdi_mac.c
+++ /dev/null
@@ -1,130 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2009-2010 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#include "net_driver.h"
-#include "efx.h"
-#include "mcdi.h"
-#include "mcdi_pcol.h"
-
-int efx_mcdi_set_mac(struct efx_nic *efx)
-{
- u32 reject, fcntl;
- u8 cmdbytes[MC_CMD_SET_MAC_IN_LEN];
-
- memcpy(cmdbytes + MC_CMD_SET_MAC_IN_ADDR_OFST,
- efx->net_dev->dev_addr, ETH_ALEN);
-
- MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
- EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
- MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
-
- /* The MCDI command provides for controlling accept/reject
- * of broadcast packets too, but the driver doesn't currently
- * expose this. */
- reject = (efx->promiscuous) ? 0 :
- (1 << MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN);
- MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_REJECT, reject);
-
- switch (efx->wanted_fc) {
- case EFX_FC_RX | EFX_FC_TX:
- fcntl = MC_CMD_FCNTL_BIDIR;
- break;
- case EFX_FC_RX:
- fcntl = MC_CMD_FCNTL_RESPOND;
- break;
- default:
- fcntl = MC_CMD_FCNTL_OFF;
- break;
- }
- if (efx->wanted_fc & EFX_FC_AUTO)
- fcntl = MC_CMD_FCNTL_AUTO;
- if (efx->fc_disable)
- fcntl = MC_CMD_FCNTL_OFF;
-
- MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
-
- return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes),
- NULL, 0, NULL);
-}
-
-bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
-{
- u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
- size_t outlength;
- int rc;
-
- BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
- outbuf, sizeof(outbuf), &outlength);
- if (rc) {
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
- __func__, rc);
- return true;
- }
-
- return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
-}
-
-int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
- u32 dma_len, int enable, int clear)
-{
- u8 inbuf[MC_CMD_MAC_STATS_IN_LEN];
- int rc;
- efx_dword_t *cmd_ptr;
- int period = enable ? 1000 : 0;
- u32 addr_hi;
- u32 addr_lo;
-
- BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
-
- addr_lo = ((u64)dma_addr) >> 0;
- addr_hi = ((u64)dma_addr) >> 32;
-
- MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_LO, addr_lo);
- MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_HI, addr_hi);
- cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD);
- EFX_POPULATE_DWORD_7(*cmd_ptr,
- MC_CMD_MAC_STATS_IN_DMA, !!enable,
- MC_CMD_MAC_STATS_IN_CLEAR, clear,
- MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE, 1,
- MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE, !!enable,
- MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR, 0,
- MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT, 1,
- MC_CMD_MAC_STATS_IN_PERIOD_MS, period);
- MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n",
- __func__, enable ? "enable" : "disable", rc);
- return rc;
-}
-
-int efx_mcdi_mac_reconfigure(struct efx_nic *efx)
-{
- int rc;
-
- WARN_ON(!mutex_is_locked(&efx->mac_lock));
-
- rc = efx_mcdi_set_mac(efx);
- if (rc != 0)
- return rc;
-
- return efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
- efx->multicast_hash.byte,
- sizeof(efx->multicast_hash),
- NULL, 0, NULL);
-}
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c
index 1d552f0664d7..4cc5d95b2a5a 100644
--- a/drivers/net/ethernet/sfc/mcdi_mon.c
+++ b/drivers/net/ethernet/sfc/mcdi_mon.c
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2011 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2011-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -21,31 +21,62 @@ enum efx_hwmon_type {
EFX_HWMON_UNKNOWN,
EFX_HWMON_TEMP, /* temperature */
EFX_HWMON_COOL, /* cooling device, probably a heatsink */
- EFX_HWMON_IN /* input voltage */
+ EFX_HWMON_IN, /* voltage */
+ EFX_HWMON_CURR, /* current */
+ EFX_HWMON_POWER, /* power */
};
static const struct {
const char *label;
enum efx_hwmon_type hwmon_type;
int port;
-} efx_mcdi_sensor_type[MC_CMD_SENSOR_ENTRY_MAXNUM] = {
-#define SENSOR(name, label, hwmon_type, port) \
- [MC_CMD_SENSOR_##name] = { label, hwmon_type, port }
- SENSOR(CONTROLLER_TEMP, "Controller temp.", EFX_HWMON_TEMP, -1),
- SENSOR(PHY_COMMON_TEMP, "PHY temp.", EFX_HWMON_TEMP, -1),
- SENSOR(CONTROLLER_COOLING, "Controller cooling", EFX_HWMON_COOL, -1),
- SENSOR(PHY0_TEMP, "PHY temp.", EFX_HWMON_TEMP, 0),
- SENSOR(PHY0_COOLING, "PHY cooling", EFX_HWMON_COOL, 0),
- SENSOR(PHY1_TEMP, "PHY temp.", EFX_HWMON_TEMP, 1),
- SENSOR(PHY1_COOLING, "PHY cooling", EFX_HWMON_COOL, 1),
- SENSOR(IN_1V0, "1.0V supply", EFX_HWMON_IN, -1),
- SENSOR(IN_1V2, "1.2V supply", EFX_HWMON_IN, -1),
- SENSOR(IN_1V8, "1.8V supply", EFX_HWMON_IN, -1),
- SENSOR(IN_2V5, "2.5V supply", EFX_HWMON_IN, -1),
- SENSOR(IN_3V3, "3.3V supply", EFX_HWMON_IN, -1),
- SENSOR(IN_12V0, "12.0V supply", EFX_HWMON_IN, -1),
- SENSOR(IN_1V2A, "1.2V analogue supply", EFX_HWMON_IN, -1),
- SENSOR(IN_VREF, "ref. voltage", EFX_HWMON_IN, -1),
+} efx_mcdi_sensor_type[] = {
+#define SENSOR(name, label, hwmon_type, port) \
+ [MC_CMD_SENSOR_##name] = { label, EFX_HWMON_ ## hwmon_type, port }
+ SENSOR(CONTROLLER_TEMP, "Controller ext. temp.", TEMP, -1),
+ SENSOR(PHY_COMMON_TEMP, "PHY temp.", TEMP, -1),
+ SENSOR(CONTROLLER_COOLING, "Controller cooling", COOL, -1),
+ SENSOR(PHY0_TEMP, "PHY temp.", TEMP, 0),
+ SENSOR(PHY0_COOLING, "PHY cooling", COOL, 0),
+ SENSOR(PHY1_TEMP, "PHY temp.", TEMP, 1),
+ SENSOR(PHY1_COOLING, "PHY cooling", COOL, 1),
+ SENSOR(IN_1V0, "1.0V supply", IN, -1),
+ SENSOR(IN_1V2, "1.2V supply", IN, -1),
+ SENSOR(IN_1V8, "1.8V supply", IN, -1),
+ SENSOR(IN_2V5, "2.5V supply", IN, -1),
+ SENSOR(IN_3V3, "3.3V supply", IN, -1),
+ SENSOR(IN_12V0, "12.0V supply", IN, -1),
+ SENSOR(IN_1V2A, "1.2V analogue supply", IN, -1),
+ SENSOR(IN_VREF, "ref. voltage", IN, -1),
+ SENSOR(OUT_VAOE, "AOE power supply", IN, -1),
+ SENSOR(AOE_TEMP, "AOE temp.", TEMP, -1),
+ SENSOR(PSU_AOE_TEMP, "AOE PSU temp.", TEMP, -1),
+ SENSOR(PSU_TEMP, "Controller PSU temp.", TEMP, -1),
+ SENSOR(FAN_0, NULL, COOL, -1),
+ SENSOR(FAN_1, NULL, COOL, -1),
+ SENSOR(FAN_2, NULL, COOL, -1),
+ SENSOR(FAN_3, NULL, COOL, -1),
+ SENSOR(FAN_4, NULL, COOL, -1),
+ SENSOR(IN_VAOE, "AOE input supply", IN, -1),
+ SENSOR(OUT_IAOE, "AOE output current", CURR, -1),
+ SENSOR(IN_IAOE, "AOE input current", CURR, -1),
+ SENSOR(NIC_POWER, "Board power use", POWER, -1),
+ SENSOR(IN_0V9, "0.9V supply", IN, -1),
+ SENSOR(IN_I0V9, "0.9V input current", CURR, -1),
+ SENSOR(IN_I1V2, "1.2V input current", CURR, -1),
+ SENSOR(IN_0V9_ADC, "0.9V supply (at ADC)", IN, -1),
+ SENSOR(CONTROLLER_2_TEMP, "Controller ext. temp. 2", TEMP, -1),
+ SENSOR(VREG_INTERNAL_TEMP, "Voltage regulator temp.", TEMP, -1),
+ SENSOR(VREG_0V9_TEMP, "0.9V regulator temp.", TEMP, -1),
+ SENSOR(VREG_1V2_TEMP, "1.2V regulator temp.", TEMP, -1),
+ SENSOR(CONTROLLER_VPTAT, "Controller int. temp. raw", IN, -1),
+ SENSOR(CONTROLLER_INTERNAL_TEMP, "Controller int. temp.", TEMP, -1),
+ SENSOR(CONTROLLER_VPTAT_EXTADC,
+ "Controller int. temp. raw (at ADC)", IN, -1),
+ SENSOR(CONTROLLER_INTERNAL_TEMP_EXTADC,
+ "Controller int. temp. (via ADC)", TEMP, -1),
+ SENSOR(AMBIENT_TEMP, "Ambient temp.", TEMP, -1),
+ SENSOR(AIRFLOW, "Air flow raw", IN, -1),
#undef SENSOR
};
@@ -54,6 +85,7 @@ static const char *const sensor_status_names[] = {
[MC_CMD_SENSOR_STATE_WARNING] = "Warning",
[MC_CMD_SENSOR_STATE_FATAL] = "Fatal",
[MC_CMD_SENSOR_STATE_BROKEN] = "Device failure",
+ [MC_CMD_SENSOR_STATE_NO_READING] = "No reading",
};
void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
@@ -85,6 +117,7 @@ struct efx_mcdi_mon_attribute {
struct device_attribute dev_attr;
unsigned int index;
unsigned int type;
+ enum efx_hwmon_type hwmon_type;
unsigned int limit_value;
char name[12];
};
@@ -92,13 +125,12 @@ struct efx_mcdi_mon_attribute {
static int efx_mcdi_mon_update(struct efx_nic *efx)
{
struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
- u8 inbuf[MC_CMD_READ_SENSORS_IN_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_SENSORS_EXT_IN_LEN);
int rc;
- MCDI_SET_DWORD(inbuf, READ_SENSORS_IN_DMA_ADDR_LO,
- hwmon->dma_buf.dma_addr & 0xffffffff);
- MCDI_SET_DWORD(inbuf, READ_SENSORS_IN_DMA_ADDR_HI,
- (u64)hwmon->dma_buf.dma_addr >> 32);
+ MCDI_SET_QWORD(inbuf, READ_SENSORS_EXT_IN_DMA_ADDR,
+ hwmon->dma_buf.dma_addr);
+ MCDI_SET_DWORD(inbuf, READ_SENSORS_EXT_IN_LENGTH, hwmon->dma_buf.len);
rc = efx_mcdi_rpc(efx, MC_CMD_READ_SENSORS,
inbuf, sizeof(inbuf), NULL, 0, NULL);
@@ -146,18 +178,32 @@ static ssize_t efx_mcdi_mon_show_value(struct device *dev,
struct efx_mcdi_mon_attribute *mon_attr =
container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
efx_dword_t entry;
- unsigned int value;
+ unsigned int value, state;
int rc;
rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry);
if (rc)
return rc;
+ state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
+ if (state == MC_CMD_SENSOR_STATE_NO_READING)
+ return -EBUSY;
+
value = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE);
- /* Convert temperature from degrees to milli-degrees Celsius */
- if (efx_mcdi_sensor_type[mon_attr->type].hwmon_type == EFX_HWMON_TEMP)
+ switch (mon_attr->hwmon_type) {
+ case EFX_HWMON_TEMP:
+ /* Convert temperature from degrees to milli-degrees Celsius */
value *= 1000;
+ break;
+ case EFX_HWMON_POWER:
+ /* Convert power from watts to microwatts */
+ value *= 1000000;
+ break;
+ default:
+ /* No conversion needed */
+ break;
+ }
return sprintf(buf, "%u\n", value);
}
@@ -172,9 +218,19 @@ static ssize_t efx_mcdi_mon_show_limit(struct device *dev,
value = mon_attr->limit_value;
- /* Convert temperature from degrees to milli-degrees Celsius */
- if (efx_mcdi_sensor_type[mon_attr->type].hwmon_type == EFX_HWMON_TEMP)
+ switch (mon_attr->hwmon_type) {
+ case EFX_HWMON_TEMP:
+ /* Convert temperature from degrees to milli-degrees Celsius */
value *= 1000;
+ break;
+ case EFX_HWMON_POWER:
+ /* Convert power from watts to microwatts */
+ value *= 1000000;
+ break;
+ default:
+ /* No conversion needed */
+ break;
+ }
return sprintf(buf, "%u\n", value);
}
@@ -221,6 +277,10 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
strlcpy(attr->name, name, sizeof(attr->name));
attr->index = index;
attr->type = type;
+ if (type < ARRAY_SIZE(efx_mcdi_sensor_type))
+ attr->hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
+ else
+ attr->hwmon_type = EFX_HWMON_UNKNOWN;
attr->limit_value = limit_value;
sysfs_attr_init(&attr->dev_attr.attr);
attr->dev_attr.attr.name = attr->name;
@@ -234,36 +294,43 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
int efx_mcdi_mon_probe(struct efx_nic *efx)
{
+ unsigned int n_temp = 0, n_cool = 0, n_in = 0, n_curr = 0, n_power = 0;
struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
- unsigned int n_attrs, n_temp = 0, n_cool = 0, n_in = 0;
- u8 outbuf[MC_CMD_SENSOR_INFO_OUT_LENMAX];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SENSOR_INFO_EXT_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_SENSOR_INFO_OUT_LENMAX);
+ unsigned int n_pages, n_sensors, n_attrs, page;
size_t outlen;
char name[12];
u32 mask;
- int rc, i, type;
+ int rc, i, j, type;
- BUILD_BUG_ON(MC_CMD_SENSOR_INFO_IN_LEN != 0);
+ /* Find out how many sensors are present */
+ n_sensors = 0;
+ page = 0;
+ do {
+ MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, page);
- rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, NULL, 0,
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- return rc;
- if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN)
- return -EIO;
-
- /* Find out which sensors are present. Don't create a device
- * if there are none.
- */
- mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK);
- if (mask == 0)
+ rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN)
+ return -EIO;
+
+ mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK);
+ n_sensors += hweight32(mask & ~(1 << MC_CMD_SENSOR_PAGE0_NEXT));
+ ++page;
+ } while (mask & (1 << MC_CMD_SENSOR_PAGE0_NEXT));
+ n_pages = page;
+
+	/* Don't create a device if there are no sensors */
+ if (n_sensors == 0)
return 0;
- /* Check again for short response */
- if (outlen < MC_CMD_SENSOR_INFO_OUT_LEN(hweight32(mask)))
- return -EIO;
-
- rc = efx_nic_alloc_buffer(efx, &hwmon->dma_buf,
- 4 * MC_CMD_SENSOR_ENTRY_MAXNUM);
+ rc = efx_nic_alloc_buffer(
+ efx, &hwmon->dma_buf,
+ n_sensors * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN,
+ GFP_KERNEL);
if (rc)
return rc;
@@ -274,7 +341,7 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
* attributes for this set of sensors: name of the driver plus
* value, min, max, crit, alarm and label for each sensor.
*/
- n_attrs = 1 + 6 * hweight32(mask);
+ n_attrs = 1 + 6 * n_sensors;
hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL);
if (!hwmon->attrs) {
rc = -ENOMEM;
@@ -291,26 +358,63 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
if (rc)
goto fail;
- for (i = 0, type = -1; ; i++) {
+ for (i = 0, j = -1, type = -1; ; i++) {
+ enum efx_hwmon_type hwmon_type;
const char *hwmon_prefix;
unsigned hwmon_index;
u16 min1, max1, min2, max2;
/* Find next sensor type or exit if there is none */
- type++;
- while (!(mask & (1 << type))) {
+ do {
type++;
- if (type == 32)
- return 0;
- }
- /* Skip sensors specific to a different port */
- if (efx_mcdi_sensor_type[type].hwmon_type != EFX_HWMON_UNKNOWN &&
- efx_mcdi_sensor_type[type].port >= 0 &&
- efx_mcdi_sensor_type[type].port != efx_port_num(efx))
- continue;
+ if ((type % 32) == 0) {
+ page = type / 32;
+ j = -1;
+ if (page == n_pages)
+ return 0;
+
+ MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE,
+ page);
+ rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO,
+ inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf),
+ &outlen);
+ if (rc)
+ goto fail;
+ if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ mask = (MCDI_DWORD(outbuf,
+ SENSOR_INFO_OUT_MASK) &
+ ~(1 << MC_CMD_SENSOR_PAGE0_NEXT));
- switch (efx_mcdi_sensor_type[type].hwmon_type) {
+ /* Check again for short response */
+ if (outlen <
+ MC_CMD_SENSOR_INFO_OUT_LEN(hweight32(mask))) {
+ rc = -EIO;
+ goto fail;
+ }
+ }
+ } while (!(mask & (1 << type % 32)));
+ j++;
+
+ if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) {
+ hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
+
+ /* Skip sensors specific to a different port */
+ if (hwmon_type != EFX_HWMON_UNKNOWN &&
+ efx_mcdi_sensor_type[type].port >= 0 &&
+ efx_mcdi_sensor_type[type].port !=
+ efx_port_num(efx))
+ continue;
+ } else {
+ hwmon_type = EFX_HWMON_UNKNOWN;
+ }
+
+ switch (hwmon_type) {
case EFX_HWMON_TEMP:
hwmon_prefix = "temp";
hwmon_index = ++n_temp; /* 1-based */
@@ -327,16 +431,24 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
hwmon_prefix = "in";
hwmon_index = n_in++; /* 0-based */
break;
+ case EFX_HWMON_CURR:
+ hwmon_prefix = "curr";
+ hwmon_index = ++n_curr; /* 1-based */
+ break;
+ case EFX_HWMON_POWER:
+ hwmon_prefix = "power";
+ hwmon_index = ++n_power; /* 1-based */
+ break;
}
min1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
- SENSOR_INFO_ENTRY, i, MIN1);
+ SENSOR_INFO_ENTRY, j, MIN1);
max1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
- SENSOR_INFO_ENTRY, i, MAX1);
+ SENSOR_INFO_ENTRY, j, MAX1);
min2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
- SENSOR_INFO_ENTRY, i, MIN2);
+ SENSOR_INFO_ENTRY, j, MIN2);
max2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
- SENSOR_INFO_ENTRY, i, MAX2);
+ SENSOR_INFO_ENTRY, j, MAX2);
if (min1 != max1) {
snprintf(name, sizeof(name), "%s%u_input",
@@ -346,13 +458,15 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
if (rc)
goto fail;
- snprintf(name, sizeof(name), "%s%u_min",
- hwmon_prefix, hwmon_index);
- rc = efx_mcdi_mon_add_attr(
- efx, name, efx_mcdi_mon_show_limit,
- i, type, min1);
- if (rc)
- goto fail;
+ if (hwmon_type != EFX_HWMON_POWER) {
+ snprintf(name, sizeof(name), "%s%u_min",
+ hwmon_prefix, hwmon_index);
+ rc = efx_mcdi_mon_add_attr(
+ efx, name, efx_mcdi_mon_show_limit,
+ i, type, min1);
+ if (rc)
+ goto fail;
+ }
snprintf(name, sizeof(name), "%s%u_max",
hwmon_prefix, hwmon_index);
@@ -383,7 +497,8 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
if (rc)
goto fail;
- if (efx_mcdi_sensor_type[type].label) {
+ if (type < ARRAY_SIZE(efx_mcdi_sensor_type) &&
+ efx_mcdi_sensor_type[type].label) {
snprintf(name, sizeof(name), "%s%u_label",
hwmon_prefix, hwmon_index);
rc = efx_mcdi_mon_add_attr(
@@ -400,8 +515,7 @@ fail:
void efx_mcdi_mon_remove(struct efx_nic *efx)
{
- struct siena_nic_data *nic_data = efx->nic_data;
- struct efx_mcdi_mon *hwmon = &nic_data->hwmon;
+ struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
unsigned int i;
for (i = 0; i < hwmon->n_attrs; i++)
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index c5c9747861ba..b5cf62492f8e 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2009-2011 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2009-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -21,6 +21,13 @@
#define MC_FW_STATE_BOOTING (4)
/* The Scheduler has started. */
#define MC_FW_STATE_SCHED (8)
+/* If this is set in MC_RESET_STATE_REG then it should be
+ * possible to jump into IMEM without loading code from flash.
+ * Unlike a warm boot, assume DMEM has been reloaded, so that
+ * the MC persistent data must be reinitialised. */
+#define MC_FW_TEPID_BOOT_OK (16)
+/* BIST state has been initialized */
+#define MC_FW_BIST_INIT_OK (128)
/* Siena MC shared memory offsets */
/* The 'doorbell' addresses are hard-wired to alert the MC when written */
@@ -39,18 +46,21 @@
#define MC_STATUS_DWORD_REBOOT (0xb007b007)
#define MC_STATUS_DWORD_ASSERT (0xdeaddead)
+/* Check whether an mcfw version (in host order) belongs to a bootloader */
+#define MC_FW_VERSION_IS_BOOTLOADER(_v) (((_v) >> 16) == 0xb007)
+
/* The current version of the MCDI protocol.
*
* Note that the ROM burnt into the card only talks V0, so at the very
* least every driver must support version 0 and MCDI_PCOL_VERSION
*/
-#define MCDI_PCOL_VERSION 1
+#define MCDI_PCOL_VERSION 2
/* Unused commands: 0x23, 0x27, 0x30, 0x31 */
/* MCDI version 1
*
- * Each MCDI request starts with an MCDI_HEADER, which is a 32byte
+ * Each MCDI request starts with an MCDI_HEADER, which is a 32bit
* structure, filled in by the client.
*
* 0 7 8 16 20 22 23 24 31
@@ -87,9 +97,11 @@
#define MCDI_HEADER_DATALEN_LBN 8
#define MCDI_HEADER_DATALEN_WIDTH 8
#define MCDI_HEADER_SEQ_LBN 16
-#define MCDI_HEADER_RSVD_LBN 20
-#define MCDI_HEADER_RSVD_WIDTH 2
#define MCDI_HEADER_SEQ_WIDTH 4
+#define MCDI_HEADER_RSVD_LBN 20
+#define MCDI_HEADER_RSVD_WIDTH 1
+#define MCDI_HEADER_NOT_EPOCH_LBN 21
+#define MCDI_HEADER_NOT_EPOCH_WIDTH 1
#define MCDI_HEADER_ERROR_LBN 22
#define MCDI_HEADER_ERROR_WIDTH 1
#define MCDI_HEADER_RESPONSE_LBN 23
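As a worked illustration of the layout above (a sketch, not driver code): a v1 request header packs each field at its LBN position. MCDI_HEADER_CODE is defined just above this hunk, so its bit-0 position here is an assumption.

static u32 example_mcdi_v1_header(unsigned int cmd, size_t datalen,
				  unsigned int seq)
{
	u32 hdr = 0;

	hdr |= cmd;				/* CODE, assumed LBN 0 */
	hdr |= (u32)datalen << MCDI_HEADER_DATALEN_LBN;
	hdr |= seq << MCDI_HEADER_SEQ_LBN;
	/* ERROR and RESPONSE are set by the MC in its reply */
	return hdr;
}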
@@ -100,7 +112,11 @@
#define MCDI_HEADER_XFLAGS_EVREQ 0x01
/* Maximum number of payload bytes */
-#define MCDI_CTL_SDU_LEN_MAX 0xfc
+#define MCDI_CTL_SDU_LEN_MAX_V1 0xfc
+#define MCDI_CTL_SDU_LEN_MAX_V2 0x400
+
+#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V2
+
/* The MC can generate events for two reasons:
* - To complete a shared memory request if XFLAGS_EVREQ was set
@@ -145,22 +161,69 @@
#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
+/* Operation not permitted. */
+#define MC_CMD_ERR_EPERM 1
/* Non-existent command target */
#define MC_CMD_ERR_ENOENT 2
/* assert() has killed the MC */
#define MC_CMD_ERR_EINTR 4
+/* I/O failure */
+#define MC_CMD_ERR_EIO 5
+/* Try again */
+#define MC_CMD_ERR_EAGAIN 11
+/* Out of memory */
+#define MC_CMD_ERR_ENOMEM 12
/* Caller does not hold required locks */
#define MC_CMD_ERR_EACCES 13
/* Resource is currently unavailable (e.g. lock contention) */
#define MC_CMD_ERR_EBUSY 16
+/* No such device */
+#define MC_CMD_ERR_ENODEV 19
/* Invalid argument to target */
#define MC_CMD_ERR_EINVAL 22
+/* Out of range */
+#define MC_CMD_ERR_ERANGE 34
/* Non-recursive resource is already acquired */
#define MC_CMD_ERR_EDEADLK 35
/* Operation not implemented */
#define MC_CMD_ERR_ENOSYS 38
/* Operation timed out */
#define MC_CMD_ERR_ETIME 62
+/* Link has been severed */
+#define MC_CMD_ERR_ENOLINK 67
+/* Protocol error */
+#define MC_CMD_ERR_EPROTO 71
+/* Operation not supported */
+#define MC_CMD_ERR_ENOTSUP 95
+/* Address not available */
+#define MC_CMD_ERR_EADDRNOTAVAIL 99
+/* Not connected */
+#define MC_CMD_ERR_ENOTCONN 107
+/* Operation already in progress */
+#define MC_CMD_ERR_EALREADY 114
+
+/* Resource allocation failed. */
+#define MC_CMD_ERR_ALLOC_FAIL 0x1000
+/* V-adaptor not found. */
+#define MC_CMD_ERR_NO_VADAPTOR 0x1001
+/* EVB port not found. */
+#define MC_CMD_ERR_NO_EVB_PORT 0x1002
+/* V-switch not found. */
+#define MC_CMD_ERR_NO_VSWITCH 0x1003
+/* Too many VLAN tags. */
+#define MC_CMD_ERR_VLAN_LIMIT 0x1004
+/* Bad PCI function number. */
+#define MC_CMD_ERR_BAD_PCI_FUNC 0x1005
+/* Invalid VLAN mode. */
+#define MC_CMD_ERR_BAD_VLAN_MODE 0x1006
+/* Invalid v-switch type. */
+#define MC_CMD_ERR_BAD_VSWITCH_TYPE 0x1007
+/* Invalid v-port type. */
+#define MC_CMD_ERR_BAD_VPORT_TYPE 0x1008
+/* MAC address exists. */
+#define MC_CMD_ERR_MAC_EXIST 0x1009
+/* Slave core not present */
+#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
#define MC_CMD_ERR_CODE_OFST 0
@@ -178,9 +241,11 @@
/* Vectors in the boot ROM */
/* Point to the copycode entry point. */
-#define MC_BOOTROM_COPYCODE_VEC (0x7f4)
+#define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4)
+#define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4)
/* Points to the recovery mode entry point. */
-#define MC_BOOTROM_NOFLASH_VEC (0x7f8)
+#define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4)
+#define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4)
/* The command set exported by the boot ROM (MCDI v0) */
#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \
@@ -209,16 +274,29 @@
(n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+/* Version 2 adds an optional argument to error returns: the errno value
+ * may be followed by the (0-based) number of the first argument that
+ * could not be processed.
+ */
+#define MC_CMD_ERR_ARG_OFST 4
+
+/* No space */
+#define MC_CMD_ERR_ENOSPC 28
+
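A hedged sketch of what the v2 error layout above means for a consumer: dword 0 (MC_CMD_ERR_CODE_OFST) carries the errno value and, when the response is long enough, dword 1 (MC_CMD_ERR_ARG_OFST) names the first offending argument. The helper is illustrative, not part of the patch.

static void example_decode_mcdi_error(const efx_dword_t *errbuf,
				      size_t errlen)
{
	unsigned int err, arg;

	err = EFX_DWORD_FIELD(errbuf[MC_CMD_ERR_CODE_OFST / 4], EFX_DWORD_0);
	if (errlen >= MC_CMD_ERR_ARG_OFST + 4) {
		arg = EFX_DWORD_FIELD(errbuf[MC_CMD_ERR_ARG_OFST / 4],
				      EFX_DWORD_0);
		pr_debug("MC error %u at argument %u\n", err, arg);
	} else {
		pr_debug("MC error %u\n", err);
	}
}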
/* MCDI_EVENT structuredef */
#define MCDI_EVENT_LEN 8
#define MCDI_EVENT_CONT_LBN 32
#define MCDI_EVENT_CONT_WIDTH 1
#define MCDI_EVENT_LEVEL_LBN 33
#define MCDI_EVENT_LEVEL_WIDTH 3
-#define MCDI_EVENT_LEVEL_INFO 0x0 /* enum */
-#define MCDI_EVENT_LEVEL_WARN 0x1 /* enum */
-#define MCDI_EVENT_LEVEL_ERR 0x2 /* enum */
-#define MCDI_EVENT_LEVEL_FATAL 0x3 /* enum */
+/* enum: Info. */
+#define MCDI_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define MCDI_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define MCDI_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define MCDI_EVENT_LEVEL_FATAL 0x3
#define MCDI_EVENT_DATA_OFST 0
#define MCDI_EVENT_CMDDONE_SEQ_LBN 0
#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
@@ -230,9 +308,14 @@
#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
-#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1 /* enum */
-#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2 /* enum */
-#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3 /* enum */
+/* enum: 100Mb/s */
+#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1
+/* enum: 1Gb/s */
+#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2
+/* enum: 10Gb/s */
+#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3
+/* enum: 40Gb/s */
+#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4
#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
@@ -247,26 +330,80 @@
#define MCDI_EVENT_FWALERT_DATA_WIDTH 24
#define MCDI_EVENT_FWALERT_REASON_LBN 0
#define MCDI_EVENT_FWALERT_REASON_WIDTH 8
-#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1 /* enum */
+/* enum: SRAM Access. */
+#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1
#define MCDI_EVENT_FLR_VF_LBN 0
#define MCDI_EVENT_FLR_VF_WIDTH 8
#define MCDI_EVENT_TX_ERR_TXQ_LBN 0
#define MCDI_EVENT_TX_ERR_TXQ_WIDTH 12
#define MCDI_EVENT_TX_ERR_TYPE_LBN 12
#define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4
-#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1 /* enum */
-#define MCDI_EVENT_TX_ERR_NO_EOP 0x2 /* enum */
-#define MCDI_EVENT_TX_ERR_2BIG 0x3 /* enum */
+/* enum: Descriptor loader reported failure */
+#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1
+/* enum: Descriptor ring empty and no EOP seen for packet */
+#define MCDI_EVENT_TX_ERR_NO_EOP 0x2
+/* enum: Overlength packet */
+#define MCDI_EVENT_TX_ERR_2BIG 0x3
+/* enum: Malformed option descriptor */
+#define MCDI_EVENT_TX_BAD_OPTDESC 0x5
+/* enum: Option descriptor part way through a packet */
+#define MCDI_EVENT_TX_OPT_IN_PKT 0x8
+/* enum: DMA or PIO data access error */
+#define MCDI_EVENT_TX_ERR_BAD_DMA_OR_PIO 0x9
#define MCDI_EVENT_TX_ERR_INFO_LBN 16
#define MCDI_EVENT_TX_ERR_INFO_WIDTH 16
+#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN 12
+#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_WIDTH 1
#define MCDI_EVENT_TX_FLUSH_TXQ_LBN 0
#define MCDI_EVENT_TX_FLUSH_TXQ_WIDTH 12
#define MCDI_EVENT_PTP_ERR_TYPE_LBN 0
#define MCDI_EVENT_PTP_ERR_TYPE_WIDTH 8
-#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1 /* enum */
-#define MCDI_EVENT_PTP_ERR_FILTER 0x2 /* enum */
-#define MCDI_EVENT_PTP_ERR_FIFO 0x3 /* enum */
-#define MCDI_EVENT_PTP_ERR_QUEUE 0x4 /* enum */
+/* enum: PLL lost lock */
+#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1
+/* enum: Filter overflow (PDMA) */
+#define MCDI_EVENT_PTP_ERR_FILTER 0x2
+/* enum: FIFO overflow (FPGA) */
+#define MCDI_EVENT_PTP_ERR_FIFO 0x3
+/* enum: Merge queue overflow */
+#define MCDI_EVENT_PTP_ERR_QUEUE 0x4
+#define MCDI_EVENT_AOE_ERR_TYPE_LBN 0
+#define MCDI_EVENT_AOE_ERR_TYPE_WIDTH 8
+/* enum: AOE failed to load - no valid image? */
+#define MCDI_EVENT_AOE_NO_LOAD 0x1
+/* enum: AOE FC reported an exception */
+#define MCDI_EVENT_AOE_FC_ASSERT 0x2
+/* enum: AOE FC watchdogged */
+#define MCDI_EVENT_AOE_FC_WATCHDOG 0x3
+/* enum: AOE FC failed to start */
+#define MCDI_EVENT_AOE_FC_NO_START 0x4
+/* enum: Generic AOE fault - likely to have been reported via other means too
+ * but intended for use by aoex driver.
+ */
+#define MCDI_EVENT_AOE_FAULT 0x5
+/* enum: Results of reprogramming the CPLD (status in AOE_ERR_DATA) */
+#define MCDI_EVENT_AOE_CPLD_REPROGRAMMED 0x6
+/* enum: AOE loaded successfully */
+#define MCDI_EVENT_AOE_LOAD 0x7
+/* enum: AOE DMA operation completed (LSB of HOST_HANDLE in AOE_ERR_DATA) */
+#define MCDI_EVENT_AOE_DMA 0x8
+/* enum: AOE byteblaster connected/disconnected (Connection status in
+ * AOE_ERR_DATA)
+ */
+#define MCDI_EVENT_AOE_BYTEBLASTER 0x9
+#define MCDI_EVENT_AOE_ERR_DATA_LBN 8
+#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_RX_ERR_RXQ_LBN 0
+#define MCDI_EVENT_RX_ERR_RXQ_WIDTH 12
+#define MCDI_EVENT_RX_ERR_TYPE_LBN 12
+#define MCDI_EVENT_RX_ERR_TYPE_WIDTH 4
+#define MCDI_EVENT_RX_ERR_INFO_LBN 16
+#define MCDI_EVENT_RX_ERR_INFO_WIDTH 16
+#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN 12
+#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_WIDTH 1
+#define MCDI_EVENT_RX_FLUSH_RXQ_LBN 0
+#define MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12
+#define MCDI_EVENT_MC_REBOOT_COUNT_LBN 0
+#define MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16
#define MCDI_EVENT_DATA_LBN 0
#define MCDI_EVENT_DATA_WIDTH 32
#define MCDI_EVENT_SRC_LBN 36
@@ -275,21 +412,60 @@
#define MCDI_EVENT_EV_CODE_WIDTH 4
#define MCDI_EVENT_CODE_LBN 44
#define MCDI_EVENT_CODE_WIDTH 8
-#define MCDI_EVENT_CODE_BADSSERT 0x1 /* enum */
-#define MCDI_EVENT_CODE_PMNOTICE 0x2 /* enum */
-#define MCDI_EVENT_CODE_CMDDONE 0x3 /* enum */
-#define MCDI_EVENT_CODE_LINKCHANGE 0x4 /* enum */
-#define MCDI_EVENT_CODE_SENSOREVT 0x5 /* enum */
-#define MCDI_EVENT_CODE_SCHEDERR 0x6 /* enum */
-#define MCDI_EVENT_CODE_REBOOT 0x7 /* enum */
-#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8 /* enum */
-#define MCDI_EVENT_CODE_FWALERT 0x9 /* enum */
-#define MCDI_EVENT_CODE_FLR 0xa /* enum */
-#define MCDI_EVENT_CODE_TX_ERR 0xb /* enum */
-#define MCDI_EVENT_CODE_TX_FLUSH 0xc /* enum */
-#define MCDI_EVENT_CODE_PTP_RX 0xd /* enum */
-#define MCDI_EVENT_CODE_PTP_FAULT 0xe /* enum */
-#define MCDI_EVENT_CODE_PTP_PPS 0xf /* enum */
+/* enum: Bad assert. */
+#define MCDI_EVENT_CODE_BADSSERT 0x1
+/* enum: PM Notice. */
+#define MCDI_EVENT_CODE_PMNOTICE 0x2
+/* enum: Command done. */
+#define MCDI_EVENT_CODE_CMDDONE 0x3
+/* enum: Link change. */
+#define MCDI_EVENT_CODE_LINKCHANGE 0x4
+/* enum: Sensor Event. */
+#define MCDI_EVENT_CODE_SENSOREVT 0x5
+/* enum: Schedule error. */
+#define MCDI_EVENT_CODE_SCHEDERR 0x6
+/* enum: Reboot. */
+#define MCDI_EVENT_CODE_REBOOT 0x7
+/* enum: Mac stats DMA. */
+#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8
+/* enum: Firmware alert. */
+#define MCDI_EVENT_CODE_FWALERT 0x9
+/* enum: Function level reset. */
+#define MCDI_EVENT_CODE_FLR 0xa
+/* enum: Transmit error */
+#define MCDI_EVENT_CODE_TX_ERR 0xb
+/* enum: Tx flush has completed */
+#define MCDI_EVENT_CODE_TX_FLUSH 0xc
+/* enum: PTP packet received timestamp */
+#define MCDI_EVENT_CODE_PTP_RX 0xd
+/* enum: PTP NIC failure */
+#define MCDI_EVENT_CODE_PTP_FAULT 0xe
+/* enum: PTP PPS event */
+#define MCDI_EVENT_CODE_PTP_PPS 0xf
+/* enum: Rx flush has completed */
+#define MCDI_EVENT_CODE_RX_FLUSH 0x10
+/* enum: Receive error */
+#define MCDI_EVENT_CODE_RX_ERR 0x11
+/* enum: AOE fault */
+#define MCDI_EVENT_CODE_AOE 0x12
+/* enum: Network port calibration failed (VCAL). */
+#define MCDI_EVENT_CODE_VCAL_FAIL 0x13
+/* enum: HW PPS event */
+#define MCDI_EVENT_CODE_HW_PPS 0x14
+/* enum: The MC has rebooted (huntington and later, siena uses CODE_REBOOT and
+ * a different format)
+ */
+#define MCDI_EVENT_CODE_MC_REBOOT 0x15
+/* enum: the MC has detected a parity error */
+#define MCDI_EVENT_CODE_PAR_ERR 0x16
+/* enum: the MC has detected a correctable error */
+#define MCDI_EVENT_CODE_ECC_CORR_ERR 0x17
+/* enum: the MC has detected an uncorrectable error */
+#define MCDI_EVENT_CODE_ECC_FATAL_ERR 0x18
+/* enum: Artificial event generated by host and posted via MC for test
+ * purposes.
+ */
+#define MCDI_EVENT_CODE_TESTGEN 0xfa
#define MCDI_EVENT_CMDDONE_DATA_OFST 0
#define MCDI_EVENT_CMDDONE_DATA_LBN 0
#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32
@@ -305,15 +481,114 @@
#define MCDI_EVENT_TX_ERR_DATA_OFST 0
#define MCDI_EVENT_TX_ERR_DATA_LBN 0
#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32
+/* Seconds field of timestamp */
#define MCDI_EVENT_PTP_SECONDS_OFST 0
#define MCDI_EVENT_PTP_SECONDS_LBN 0
#define MCDI_EVENT_PTP_SECONDS_WIDTH 32
+/* Nanoseconds field of timestamp */
#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0
#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0
#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
+/* Lowest four bytes of sourceUUID from PTP packet */
#define MCDI_EVENT_PTP_UUID_OFST 0
#define MCDI_EVENT_PTP_UUID_LBN 0
#define MCDI_EVENT_PTP_UUID_WIDTH 32
+#define MCDI_EVENT_RX_ERR_DATA_OFST 0
+#define MCDI_EVENT_RX_ERR_DATA_LBN 0
+#define MCDI_EVENT_RX_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_PAR_ERR_DATA_OFST 0
+#define MCDI_EVENT_PAR_ERR_DATA_LBN 0
+#define MCDI_EVENT_PAR_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32
+
+/* FCDI_EVENT structuredef */
+#define FCDI_EVENT_LEN 8
+#define FCDI_EVENT_CONT_LBN 32
+#define FCDI_EVENT_CONT_WIDTH 1
+#define FCDI_EVENT_LEVEL_LBN 33
+#define FCDI_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define FCDI_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define FCDI_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define FCDI_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define FCDI_EVENT_LEVEL_FATAL 0x3
+#define FCDI_EVENT_DATA_OFST 0
+#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0
+#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
+#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */
+#define FCDI_EVENT_LINK_UP 0x1 /* enum */
+#define FCDI_EVENT_DATA_LBN 0
+#define FCDI_EVENT_DATA_WIDTH 32
+#define FCDI_EVENT_SRC_LBN 36
+#define FCDI_EVENT_SRC_WIDTH 8
+#define FCDI_EVENT_EV_CODE_LBN 60
+#define FCDI_EVENT_EV_CODE_WIDTH 4
+#define FCDI_EVENT_CODE_LBN 44
+#define FCDI_EVENT_CODE_WIDTH 8
+/* enum: The FC was rebooted. */
+#define FCDI_EVENT_CODE_REBOOT 0x1
+/* enum: Bad assert. */
+#define FCDI_EVENT_CODE_ASSERT 0x2
+/* enum: DDR3 test result. */
+#define FCDI_EVENT_CODE_DDR_TEST_RESULT 0x3
+/* enum: Link status. */
+#define FCDI_EVENT_CODE_LINK_STATE 0x4
+/* enum: A timed read is ready to be serviced. */
+#define FCDI_EVENT_CODE_TIMED_READ 0x5
+/* enum: One or more PPS IN events */
+#define FCDI_EVENT_CODE_PPS_IN 0x6
+/* enum: One or more PPS OUT events */
+#define FCDI_EVENT_CODE_PPS_OUT 0x7
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
+#define FCDI_EVENT_ASSERT_TYPE_LBN 36
+#define FCDI_EVENT_ASSERT_TYPE_WIDTH 8
+#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
+#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
+#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
+#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
+#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
+#define FCDI_EVENT_PPS_COUNT_OFST 0
+#define FCDI_EVENT_PPS_COUNT_LBN 0
+#define FCDI_EVENT_PPS_COUNT_WIDTH 32
+
+/* FCDI_EXTENDED_EVENT structuredef */
+#define FCDI_EXTENDED_EVENT_LENMIN 16
+#define FCDI_EXTENDED_EVENT_LENMAX 248
+#define FCDI_EXTENDED_EVENT_LEN(num) (8+8*(num))
+/* Number of timestamps following */
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
+/* Seconds field of a timestamp record */
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
+/* Nanoseconds field of a timestamp record */
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
+/* Timestamp records comprising the event */
+#define FCDI_EXTENDED_EVENT_PPS_TIME_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIME_LEN 8
+#define FCDI_EXTENDED_EVENT_PPS_TIME_LO_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIME_HI_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_TIME_MINNUM 1
+#define FCDI_EXTENDED_EVENT_PPS_TIME_MAXNUM 30
+#define FCDI_EXTENDED_EVENT_PPS_TIME_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_TIME_WIDTH 64
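A sketch (illustrative only) of walking the variable-length structure defined above: a PPS_COUNT at offset 0 followed by 8-byte seconds/nanoseconds records starting at PPS_TIME_OFST, with FCDI_EXTENDED_EVENT_LEN() bounding the walk.

static void example_walk_pps_times(const u8 *event, size_t len)
{
	u32 count = le32_to_cpu(*(const __le32 *)
				(event + FCDI_EXTENDED_EVENT_PPS_COUNT_OFST));
	u32 i;

	for (i = 0; i < count && FCDI_EXTENDED_EVENT_LEN(i + 1) <= len; i++) {
		const u8 *rec = event + FCDI_EXTENDED_EVENT_PPS_TIME_OFST +
				i * FCDI_EXTENDED_EVENT_PPS_TIME_LEN;

		pr_debug("PPS[%u] = %u.%09u\n", i,
			 le32_to_cpu(*(const __le32 *)rec),
			 le32_to_cpu(*(const __le32 *)(rec + 4)));
	}
}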
/***********************************/
@@ -365,11 +640,27 @@
/* MC_CMD_COPYCODE_IN msgrequest */
#define MC_CMD_COPYCODE_IN_LEN 16
+/* Source address */
#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+/* enum: Entering the main image via a copy of a single word from and to this
+ * address indicates that it should not attempt to start the datapath CPUs.
+ * This is useful for certain soft rebooting scenarios. (Huntington only)
+ */
+#define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0
+/* enum: Entering the main image via a copy of a single word from and to this
+ * address indicates that it should not attempt to parse any configuration from
+ * flash. (In addition, the datapath CPUs will not be started, as for
+ * MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR above.) This is useful for
+ * certain soft rebooting scenarios. (Huntington only)
+ */
+#define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc
+/* Destination address */
#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
+/* Address of where to jump after copy. */
#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
-#define MC_CMD_COPYCODE_JUMP_NONE 0x1 /* enum */
+/* enum: Control should return to the caller rather than jumping */
+#define MC_CMD_COPYCODE_JUMP_NONE 0x1
/* MC_CMD_COPYCODE_OUT msgresponse */
#define MC_CMD_COPYCODE_OUT_LEN 0
@@ -377,11 +668,13 @@
/***********************************/
/* MC_CMD_SET_FUNC
+ * Select function for function-specific commands.
*/
#define MC_CMD_SET_FUNC 0x4
/* MC_CMD_SET_FUNC_IN msgrequest */
#define MC_CMD_SET_FUNC_IN_LEN 4
+/* Set function */
#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
/* MC_CMD_SET_FUNC_OUT msgresponse */
@@ -390,6 +683,7 @@
/***********************************/
/* MC_CMD_GET_BOOT_STATUS
+ * Get the instruction address from which the MC booted.
*/
#define MC_CMD_GET_BOOT_STATUS 0x5
@@ -398,7 +692,10 @@
/* MC_CMD_GET_BOOT_STATUS_OUT msgresponse */
#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
+/* ?? */
#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
+/* enum: indicates that the MC wasn't flash booted */
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1
@@ -410,25 +707,38 @@
/***********************************/
/* MC_CMD_GET_ASSERTS
- * Get and clear any assertion status.
+ * Get (and optionally clear) the current assertion status. Only
+ * OUT.GLOBAL_FLAGS is guaranteed to exist in the completion payload. The other
+ * fields will only be present if OUT.GLOBAL_FLAGS != NO_FAILS.
*/
#define MC_CMD_GET_ASSERTS 0x6
/* MC_CMD_GET_ASSERTS_IN msgrequest */
#define MC_CMD_GET_ASSERTS_IN_LEN 4
+/* Set to clear assertion */
#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
/* MC_CMD_GET_ASSERTS_OUT msgresponse */
#define MC_CMD_GET_ASSERTS_OUT_LEN 140
+/* Assertion status flag. */
#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
-#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 /* enum */
-#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 /* enum */
-#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 /* enum */
-#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 /* enum */
+/* enum: No assertions have failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1
+/* enum: A system-level assertion has failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2
+/* enum: A thread-level assertion has failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3
+/* enum: The system was reset by the watchdog. */
+#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4
+/* enum: An illegal address trap stopped the system (huntington and later) */
+#define MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5
+/* Failing PC value */
#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
+/* Saved GP regs */
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31
+/* Failing thread address */
#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
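
/*
 * Editor's sketch (not part of the patch): mapping the GLOBAL_FLAGS value to
 * a human-readable reason.  Only GLOBAL_FLAGS is guaranteed to be present;
 * the remaining fields are valid only when it is not NO_FAILS.
 */
static const char *mc_assert_reason(uint32_t global_flags)
{
	switch (global_flags) {
	case MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS:
		return "no assertion failures";
	case MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL:
		return "system-level assertion failure";
	case MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL:
		return "thread-level assertion failure";
	case MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED:
		return "watchdog reset";
	case MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP:
		return "illegal address trap";
	default:
		return "unknown failure code";
	}
}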
@@ -441,9 +751,12 @@
/* MC_CMD_LOG_CTRL_IN msgrequest */
#define MC_CMD_LOG_CTRL_IN_LEN 8
+/* Log destination */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
-#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1 /* enum */
-#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2 /* enum */
+/* enum: UART. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
+/* enum: Event queue. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
/* MC_CMD_LOG_CTRL_OUT msgresponse */
@@ -459,11 +772,20 @@
/* MC_CMD_GET_VERSION_IN msgrequest */
#define MC_CMD_GET_VERSION_IN_LEN 0
-/* MC_CMD_GET_VERSION_V0_OUT msgresponse */
+/* MC_CMD_GET_VERSION_EXT_IN msgrequest: Asks for the extended version */
+#define MC_CMD_GET_VERSION_EXT_IN_LEN 4
+/* placeholder, set to 0 */
+#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0
+
+/* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */
#define MC_CMD_GET_VERSION_V0_OUT_LEN 4
#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
-#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff /* enum */
-#define MC_CMD_GET_VERSION_OUT_FIRMWARE_BOOTROM 0xb0070000 /* enum */
+/* enum: Reserved version number to indicate "any" version. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff
+/* enum: Bootrom version value for Siena. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000
+/* enum: Bootrom version value for Huntington. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001
/* MC_CMD_GET_VERSION_OUT msgresponse */
#define MC_CMD_GET_VERSION_OUT_LEN 32
@@ -471,6 +793,7 @@
/* Enum values, see field(s): */
/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
#define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4
+/* 128bit mask of functions supported by the current firmware */
#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8
#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16
#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24
@@ -478,46 +801,22 @@
#define MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST 24
#define MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST 28
-
-/***********************************/
-/* MC_CMD_GET_FPGAREG
- * Read multiple bytes from PTP FPGA.
- */
-#define MC_CMD_GET_FPGAREG 0x9
-
-/* MC_CMD_GET_FPGAREG_IN msgrequest */
-#define MC_CMD_GET_FPGAREG_IN_LEN 8
-#define MC_CMD_GET_FPGAREG_IN_ADDR_OFST 0
-#define MC_CMD_GET_FPGAREG_IN_NUMBYTES_OFST 4
-
-/* MC_CMD_GET_FPGAREG_OUT msgresponse */
-#define MC_CMD_GET_FPGAREG_OUT_LENMIN 1
-#define MC_CMD_GET_FPGAREG_OUT_LENMAX 252
-#define MC_CMD_GET_FPGAREG_OUT_LEN(num) (0+1*(num))
-#define MC_CMD_GET_FPGAREG_OUT_BUFFER_OFST 0
-#define MC_CMD_GET_FPGAREG_OUT_BUFFER_LEN 1
-#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MINNUM 1
-#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MAXNUM 252
-
-
-/***********************************/
-/* MC_CMD_PUT_FPGAREG
- * Write multiple bytes to PTP FPGA.
- */
-#define MC_CMD_PUT_FPGAREG 0xa
-
-/* MC_CMD_PUT_FPGAREG_IN msgrequest */
-#define MC_CMD_PUT_FPGAREG_IN_LENMIN 5
-#define MC_CMD_PUT_FPGAREG_IN_LENMAX 252
-#define MC_CMD_PUT_FPGAREG_IN_LEN(num) (4+1*(num))
-#define MC_CMD_PUT_FPGAREG_IN_ADDR_OFST 0
-#define MC_CMD_PUT_FPGAREG_IN_BUFFER_OFST 4
-#define MC_CMD_PUT_FPGAREG_IN_BUFFER_LEN 1
-#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MINNUM 1
-#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MAXNUM 248
-
-/* MC_CMD_PUT_FPGAREG_OUT msgresponse */
-#define MC_CMD_PUT_FPGAREG_OUT_LEN 0
+/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
+#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4
+/* 128bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_OFST 28
+/* extra info */
+#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_OFST 32
+#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_LEN 16
/***********************************/
@@ -528,32 +827,74 @@
/* MC_CMD_PTP_IN msgrequest */
#define MC_CMD_PTP_IN_LEN 1
+/* PTP operation code */
#define MC_CMD_PTP_IN_OP_OFST 0
#define MC_CMD_PTP_IN_OP_LEN 1
-#define MC_CMD_PTP_OP_ENABLE 0x1 /* enum */
-#define MC_CMD_PTP_OP_DISABLE 0x2 /* enum */
-#define MC_CMD_PTP_OP_TRANSMIT 0x3 /* enum */
-#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4 /* enum */
-#define MC_CMD_PTP_OP_STATUS 0x5 /* enum */
-#define MC_CMD_PTP_OP_ADJUST 0x6 /* enum */
-#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7 /* enum */
-#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8 /* enum */
-#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9 /* enum */
-#define MC_CMD_PTP_OP_RESET_STATS 0xa /* enum */
-#define MC_CMD_PTP_OP_DEBUG 0xb /* enum */
-#define MC_CMD_PTP_OP_MAX 0xc /* enum */
+/* enum: Enable PTP packet timestamping operation. */
+#define MC_CMD_PTP_OP_ENABLE 0x1
+/* enum: Disable PTP packet timestamping operation. */
+#define MC_CMD_PTP_OP_DISABLE 0x2
+/* enum: Send a PTP packet. */
+#define MC_CMD_PTP_OP_TRANSMIT 0x3
+/* enum: Read the current NIC time. */
+#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4
+/* enum: Get the current PTP status. */
+#define MC_CMD_PTP_OP_STATUS 0x5
+/* enum: Adjust the PTP NIC's time. */
+#define MC_CMD_PTP_OP_ADJUST 0x6
+/* enum: Synchronize host and NIC time. */
+#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7
+/* enum: Basic manufacturing tests. */
+#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8
+/* enum: Packet based manufacturing tests. */
+#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9
+/* enum: Reset some of the PTP related statistics */
+#define MC_CMD_PTP_OP_RESET_STATS 0xa
+/* enum: Debug operations to MC. */
+#define MC_CMD_PTP_OP_DEBUG 0xb
+/* enum: Read an FPGA register */
+#define MC_CMD_PTP_OP_FPGAREAD 0xc
+/* enum: Write an FPGA register */
+#define MC_CMD_PTP_OP_FPGAWRITE 0xd
+/* enum: Apply an offset to the NIC clock */
+#define MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe
+/* enum: Change the frequency adjustment applied to the NIC clock */
+#define MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf
+/* enum: Set the MC packet filter VLAN tags for received PTP packets */
+#define MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10
+/* enum: Set the MC packet filter UUID for received PTP packets */
+#define MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11
+/* enum: Set the MC packet filter Domain for received PTP packets */
+#define MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12
+/* enum: Set the clock source */
+#define MC_CMD_PTP_OP_SET_CLK_SRC 0x13
+/* enum: Reset value of Timer Reg. */
+#define MC_CMD_PTP_OP_RST_CLK 0x14
+/* enum: Enable the forwarding of PPS events to the host */
+#define MC_CMD_PTP_OP_PPS_ENABLE 0x15
+/* enum: Values above this are reserved for future use. */
+#define MC_CMD_PTP_OP_MAX 0x16
/* MC_CMD_PTP_IN_ENABLE msgrequest */
#define MC_CMD_PTP_IN_ENABLE_LEN 16
#define MC_CMD_PTP_IN_CMD_OFST 0
#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4
+/* Event queue for PTP events */
#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
+/* PTP timestamping mode */
#define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12
-#define MC_CMD_PTP_MODE_V1 0x0 /* enum */
-#define MC_CMD_PTP_MODE_V1_VLAN 0x1 /* enum */
-#define MC_CMD_PTP_MODE_V2 0x2 /* enum */
-#define MC_CMD_PTP_MODE_V2_VLAN 0x3 /* enum */
-#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4 /* enum */
+/* enum: PTP, version 1 */
+#define MC_CMD_PTP_MODE_V1 0x0
+/* enum: PTP, version 1, with VLAN headers - deprecated */
+#define MC_CMD_PTP_MODE_V1_VLAN 0x1
+/* enum: PTP, version 2 */
+#define MC_CMD_PTP_MODE_V2 0x2
+/* enum: PTP, version 2, with VLAN headers - deprecated */
+#define MC_CMD_PTP_MODE_V2_VLAN 0x3
+/* enum: PTP, version 2, with improved UUID filtering */
+#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4
+/* enum: FCoE (seconds and microseconds) */
+#define MC_CMD_PTP_MODE_FCOE 0x5
/* MC_CMD_PTP_IN_DISABLE msgrequest */
#define MC_CMD_PTP_IN_DISABLE_LEN 8
@@ -566,7 +907,9 @@
#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
/* MC_CMD_PTP_IN_CMD_OFST 0 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Transmit packet length */
#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8
+/* Transmit packet data */
#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1
@@ -586,19 +929,27 @@
#define MC_CMD_PTP_IN_ADJUST_LEN 24
/* MC_CMD_PTP_IN_CMD_OFST 0 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Frequency adjustment 40 bit fixed point ns */
#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_OFST 8
#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
-#define MC_CMD_PTP_IN_ADJUST_BITS 0x28 /* enum */
+/* enum: Number of fractional bits in frequency adjustment */
+#define MC_CMD_PTP_IN_ADJUST_BITS 0x28
+/* Time adjustment in seconds */
#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
+/* Time adjustment in nanoseconds */
#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
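
/*
 * Editor's sketch (not part of the patch): converting a parts-per-billion
 * correction into the ADJUST.FREQ fixed-point value.  This assumes FREQ is a
 * fractional-frequency ratio with MC_CMD_PTP_IN_ADJUST_BITS (40) fractional
 * bits, i.e. value = ppb * 2^40 / 10^9; the field comment above does not
 * spell this out, so treat it as an interpretation.
 */
static int64_t ptp_ppb_to_freq(int32_t ppb)
{
	/* Shift the magnitude so we never left-shift a negative value;
	 * safe from overflow for |ppb| below roughly 8 million.
	 */
	int64_t mag = (ppb < 0 ? -(int64_t)ppb : (int64_t)ppb)
		      << MC_CMD_PTP_IN_ADJUST_BITS;
	int64_t val = mag / 1000000000LL;

	return ppb < 0 ? -val : val;
}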
/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
/* MC_CMD_PTP_IN_CMD_OFST 0 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Number of time readings to capture */
#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8
+/* Host address in which to write "synchronization started" indication (64
+ * bits)
+ */
#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_OFST 12
#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LEN 8
#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_OFST 12
@@ -613,86 +964,240 @@
#define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Enable or disable packet testing */
#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8
/* MC_CMD_PTP_IN_RESET_STATS msgrequest */
#define MC_CMD_PTP_IN_RESET_STATS_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* Reset PTP statistics */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
/* MC_CMD_PTP_IN_DEBUG msgrequest */
#define MC_CMD_PTP_IN_DEBUG_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Debug operations */
#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8
+/* MC_CMD_PTP_IN_FPGAREAD msgrequest */
+#define MC_CMD_PTP_IN_FPGAREAD_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+#define MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12
+
+/* MC_CMD_PTP_IN_FPGAWRITE msgrequest */
+#define MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13
+#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252
+#define MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num))
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM 240
+
+/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
+
+/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+
+/* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Number of VLAN tags, 0 if not VLAN */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8
+/* Set of VLAN tags to filter against */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_NUM 3
+
+/* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* 1 to enable UUID filtering, 0 to disable */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8
+/* UUID to filter against */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_OFST 16
+
+/* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* 1 to enable Domain filtering, 0 to disable */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8
+/* Domain number to filter against */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12
+
+/* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */
+#define MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Set the clock source. */
+#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8
+/* enum: Internal. */
+#define MC_CMD_PTP_CLK_SRC_INTERNAL 0x0
+/* enum: External. */
+#define MC_CMD_PTP_CLK_SRC_EXTERNAL 0x1
+
+/* MC_CMD_PTP_IN_RST_CLK msgrequest */
+#define MC_CMD_PTP_IN_RST_CLK_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* Reset value of Timer Reg. */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */
+#define MC_CMD_PTP_IN_PPS_ENABLE_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* Enable or disable */
+#define MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4
+/* enum: Enable */
+#define MC_CMD_PTP_ENABLE_PPS 0x0
+/* enum: Disable */
+#define MC_CMD_PTP_DISABLE_PPS 0x1
+/* Queue ID to send events back */
+#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
+
/* MC_CMD_PTP_OUT msgresponse */
#define MC_CMD_PTP_OUT_LEN 0
/* MC_CMD_PTP_OUT_TRANSMIT msgresponse */
#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8
+/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
+/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
/* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
+/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
+/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
/* MC_CMD_PTP_OUT_STATUS msgresponse */
#define MC_CMD_PTP_OUT_STATUS_LEN 64
+/* Frequency of NIC's hardware clock */
#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0
+/* Number of packets transmitted and timestamped */
#define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4
+/* Number of packets received and timestamped */
#define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8
+/* Number of packets timestamped by the FPGA */
#define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12
+/* Number of packets filter matched */
#define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16
+/* Number of packets not filter matched */
#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20
+/* Number of PPS overflows (noise on input?) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
+/* Number of PPS bad periods */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
+/* Minimum period of PPS pulse */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
+/* Maximum period of PPS pulse */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
+/* Last period of PPS pulse */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
+/* Mean period of PPS pulse */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
+/* Minimum offset of PPS pulse (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
+/* Maximum offset of PPS pulse (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
+/* Last offset of PPS pulse (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
+/* Mean offset of PPS pulse (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20
#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX 240
#define MC_CMD_PTP_OUT_SYNCHRONIZE_LEN(num) (0+20*(num))
+/* A set of host and NIC times */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_OFST 0
#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN 20
#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MINNUM 1
#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12
+/* Host time immediately before NIC's hardware clock read */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
+/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
+/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
+/* Host time immediately after NIC's hardware clock read */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
+/* Number of nanoseconds waited after reading NIC's hardware clock */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16
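
/*
 * Editor's sketch (not part of the patch): picking the best sample from a
 * SYNCHRONIZE response.  A common strategy (an assumption here, not a
 * protocol requirement) is to keep the record with the narrowest
 * HOSTSTART..HOSTEND window, since that bounds the NIC clock read most
 * tightly.  Records are 20 bytes each and the offsets above are relative to
 * the start of a record (TIMESET_OFST is 0).
 */
#include <stdint.h>
#include <stddef.h>

static uint32_t mcdi_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static int ptp_best_timeset(const uint8_t *out, size_t outlen)
{
	unsigned int num = outlen / MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN;
	unsigned int i;
	uint32_t best_window = UINT32_MAX;
	int best = -1;

	for (i = 0; i < num; i++) {
		const uint8_t *ts =
			out + i * MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN;
		uint32_t window =
			mcdi_le32(ts + MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST) -
			mcdi_le32(ts + MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST);

		if (window < best_window) {
			best_window = window;
			best = (int)i;
		}
	}
	return best;	/* index of best record, or -1 if none */
}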
/* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8
+/* Results of testing */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0
-#define MC_CMD_PTP_MANF_SUCCESS 0x0 /* enum */
-#define MC_CMD_PTP_MANF_FPGA_LOAD 0x1 /* enum */
-#define MC_CMD_PTP_MANF_FPGA_VERSION 0x2 /* enum */
-#define MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3 /* enum */
-#define MC_CMD_PTP_MANF_OSCILLATOR 0x4 /* enum */
-#define MC_CMD_PTP_MANF_TIMESTAMPS 0x5 /* enum */
-#define MC_CMD_PTP_MANF_PACKET_COUNT 0x6 /* enum */
-#define MC_CMD_PTP_MANF_FILTER_COUNT 0x7 /* enum */
-#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8 /* enum */
-#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9 /* enum */
+/* enum: Successful test */
+#define MC_CMD_PTP_MANF_SUCCESS 0x0
+/* enum: FPGA load failed */
+#define MC_CMD_PTP_MANF_FPGA_LOAD 0x1
+/* enum: FPGA version invalid */
+#define MC_CMD_PTP_MANF_FPGA_VERSION 0x2
+/* enum: FPGA registers incorrect */
+#define MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3
+/* enum: Oscillator possibly not working? */
+#define MC_CMD_PTP_MANF_OSCILLATOR 0x4
+/* enum: Timestamps not increasing */
+#define MC_CMD_PTP_MANF_TIMESTAMPS 0x5
+/* enum: Mismatched packet count */
+#define MC_CMD_PTP_MANF_PACKET_COUNT 0x6
+/* enum: Mismatched packet count (Siena filter and FPGA) */
+#define MC_CMD_PTP_MANF_FILTER_COUNT 0x7
+/* enum: Not enough packets to perform timestamp check */
+#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8
+/* enum: Timestamp trigger GPIO not working */
+#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9
+/* Presence of external oscillator */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
/* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12
+/* Results of testing */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0
+/* Number of packets received by FPGA */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4
+/* Number of packets received by Siena filters */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8
+/* MC_CMD_PTP_OUT_FPGAREAD msgresponse */
+#define MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1
+#define MC_CMD_PTP_OUT_FPGAREAD_LENMAX 252
+#define MC_CMD_PTP_OUT_FPGAREAD_LEN(num) (0+1*(num))
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_OFST 0
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_LEN 1
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MINNUM 1
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM 252
+
/***********************************/
/* MC_CMD_CSR_READ32
@@ -702,6 +1207,7 @@
/* MC_CMD_CSR_READ32_IN msgrequest */
#define MC_CMD_CSR_READ32_IN_LEN 12
+/* Address */
#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
@@ -710,6 +1216,7 @@
#define MC_CMD_CSR_READ32_OUT_LENMIN 4
#define MC_CMD_CSR_READ32_OUT_LENMAX 252
#define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num))
+/* The last dword is the status, not a value read */
#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
#define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4
#define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1
@@ -726,6 +1233,7 @@
#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
#define MC_CMD_CSR_WRITE32_IN_LENMAX 252
#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
+/* Address */
#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
@@ -739,6 +1247,48 @@
/***********************************/
+/* MC_CMD_HP
+ * These commands are used for HP related features. They are grouped under one
+ * MCDI command to avoid creating too many MCDI commands.
+ */
+#define MC_CMD_HP 0x54
+
+/* MC_CMD_HP_IN msgrequest */
+#define MC_CMD_HP_IN_LEN 16
+/* HP OCSD sub-command. When address is not NULL, request activation of OCSD at
+ * the specified address with the specified interval. When address is NULL,
+ * INTERVAL is interpreted as a command: 0: stop OCSD / 1: report OCSD current
+ * state / 2: (debug) show temperature reported by one of the supported
+ * sensors.
+ */
+#define MC_CMD_HP_IN_SUBCMD_OFST 0
+/* enum: OCSD (Option Card Sensor Data) sub-command. */
+#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0
+/* enum: Last known valid HP sub-command. */
+#define MC_CMD_HP_IN_LAST_SUBCMD 0x0
+/* The address to the array of sensor fields. (Or NULL to use a sub-command.)
+ */
+#define MC_CMD_HP_IN_OCSD_ADDR_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_LEN 8
+#define MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8
+/* The requested update interval, in seconds. (Or the sub-command if ADDR is
+ * NULL.)
+ */
+#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
+
+/* MC_CMD_HP_OUT msgresponse */
+#define MC_CMD_HP_OUT_LEN 4
+#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
+/* enum: OCSD stopped for this card. */
+#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1
+/* enum: OCSD was successfully started with the address provided. */
+#define MC_CMD_HP_OUT_OCSD_STARTED 0x2
+/* enum: OCSD was already started for this card. */
+#define MC_CMD_HP_OUT_OCSD_ALREADY_STARTED 0x3
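
/*
 * Editor's sketch (not part of the patch): packing an OCSD request.  A
 * non-zero ADDR starts reporting at the given interval; a zero ADDR makes
 * INTERVAL carry a command word instead (0 stop, 1 report state, 2 debug).
 * mcdi_put_le32() is a hypothetical little-endian store helper.
 */
static void mcdi_put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (uint8_t)v;
	p[1] = (uint8_t)(v >> 8);
	p[2] = (uint8_t)(v >> 16);
	p[3] = (uint8_t)(v >> 24);
}

static void hp_ocsd_request(uint8_t *buf, uint64_t addr,
			    uint32_t interval_or_cmd)
{
	mcdi_put_le32(buf + MC_CMD_HP_IN_SUBCMD_OFST, MC_CMD_HP_IN_OCSD_SUBCMD);
	/* 64-bit address split across the LO/HI dwords */
	mcdi_put_le32(buf + MC_CMD_HP_IN_OCSD_ADDR_LO_OFST, (uint32_t)addr);
	mcdi_put_le32(buf + MC_CMD_HP_IN_OCSD_ADDR_HI_OFST,
		      (uint32_t)(addr >> 32));
	mcdi_put_le32(buf + MC_CMD_HP_IN_OCSD_INTERVAL_OFST, interval_or_cmd);
}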
+
+
+/***********************************/
/* MC_CMD_STACKINFO
* Get stack information.
*/
@@ -751,6 +1301,7 @@
#define MC_CMD_STACKINFO_OUT_LENMIN 12
#define MC_CMD_STACKINFO_OUT_LENMAX 252
#define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num))
+/* (thread ptr, stack size, free space) for each thread in system */
#define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0
#define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12
#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1
@@ -765,19 +1316,35 @@
/* MC_CMD_MDIO_READ_IN msgrequest */
#define MC_CMD_MDIO_READ_IN_LEN 16
+/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
+ * external devices.
+ */
#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
-#define MC_CMD_MDIO_BUS_INTERNAL 0x0 /* enum */
-#define MC_CMD_MDIO_BUS_EXTERNAL 0x1 /* enum */
+/* enum: Internal. */
+#define MC_CMD_MDIO_BUS_INTERNAL 0x0
+/* enum: External. */
+#define MC_CMD_MDIO_BUS_EXTERNAL 0x1
+/* Port address */
#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
+/* Device Address or clause 22. */
#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
-#define MC_CMD_MDIO_CLAUSE22 0x20 /* enum */
+/* enum: By default all MCDI MDIO operations use clause45 mode. To use
+ * clause22, set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
+#define MC_CMD_MDIO_CLAUSE22 0x20
+/* Address */
#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
/* MC_CMD_MDIO_READ_OUT msgresponse */
#define MC_CMD_MDIO_READ_OUT_LEN 8
+/* Value */
#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
+/* Status; the MDIO commands return the raw status bits from the MDIO block. A
+ * "good" transaction should have the DONE bit set and all other bits clear.
+ */
#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
-#define MC_CMD_MDIO_STATUS_GOOD 0x8 /* enum */
+/* enum: Good. */
+#define MC_CMD_MDIO_STATUS_GOOD 0x8
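
/*
 * Editor's sketch (not part of the patch): building an MDIO read and checking
 * its status.  Passing MC_CMD_MDIO_CLAUSE22 as DEVAD selects clause 22
 * instead of the default clause 45.  Reuses the mcdi_put_le32()/mcdi_le32()
 * helpers from the sketches above.
 */
static void mdio_read_request(uint8_t *buf, uint32_t bus, uint32_t prtad,
			      uint32_t devad, uint32_t addr)
{
	mcdi_put_le32(buf + MC_CMD_MDIO_READ_IN_BUS_OFST, bus);
	mcdi_put_le32(buf + MC_CMD_MDIO_READ_IN_PRTAD_OFST, prtad);
	mcdi_put_le32(buf + MC_CMD_MDIO_READ_IN_DEVAD_OFST, devad);
	mcdi_put_le32(buf + MC_CMD_MDIO_READ_IN_ADDR_OFST, addr);
}

static int mdio_read_ok(const uint8_t *out)
{
	/* A good transaction has only the DONE bit set */
	return mcdi_le32(out + MC_CMD_MDIO_READ_OUT_STATUS_OFST) ==
	       MC_CMD_MDIO_STATUS_GOOD;
}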
/***********************************/
@@ -788,18 +1355,34 @@
/* MC_CMD_MDIO_WRITE_IN msgrequest */
#define MC_CMD_MDIO_WRITE_IN_LEN 20
+/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
+ * external devices.
+ */
#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
+/* enum: Internal. */
/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */
+/* enum: External. */
/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
+/* Port address */
#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
+/* Device Address or clause 22. */
#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
+/* enum: By default all MCDI MDIO operations use clause45 mode. To use
+ * clause22, set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
/* MC_CMD_MDIO_CLAUSE22 0x20 */
+/* Address */
#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
+/* Value */
#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
/* MC_CMD_MDIO_WRITE_OUT msgresponse */
#define MC_CMD_MDIO_WRITE_OUT_LEN 4
+/* Status; the MDIO commands return the raw status bits from the MDIO block. A
+ * "good" transaction should have the DONE bit set and all other bits clear.
+ */
#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
+/* enum: Good. */
/* MC_CMD_MDIO_STATUS_GOOD 0x8 */
@@ -813,6 +1396,9 @@
#define MC_CMD_DBI_WRITE_IN_LENMIN 12
#define MC_CMD_DBI_WRITE_IN_LENMAX 252
#define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num))
+/* Each write op consists of an address (offset 0), byte enable/VF/CS2 (offset
+ * 32) and value (offset 64). See MC_CMD_DBIWROP_TYPEDEF.
+ */
#define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0
#define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12
#define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1
@@ -826,9 +1412,15 @@
#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
-#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_OFST 4
-#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_LBN 32
-#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_WIDTH 32
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
+#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
+#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
+#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define MC_CMD_DBIWROP_TYPEDEF_CS2_LBN 14
+#define MC_CMD_DBIWROP_TYPEDEF_CS2_WIDTH 1
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32
#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
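
/*
 * Editor's sketch (not part of the patch): packing the PARMS dword of a
 * write op.  This reads the VF_NUM/VF_ACTIVE/CS2 LBNs as bit positions
 * within the 32-bit PARMS word at byte offset 4, which is the usual MCDI
 * convention for sub-fields but is an assumption here.
 */
static uint32_t dbiwrop_parms(uint16_t vf_num, int vf_active, int cs2)
{
	return (uint32_t)vf_num << MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN |
	       (vf_active ? 1u : 0u) << MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN |
	       (cs2 ? 1u : 0u) << MC_CMD_DBIWROP_TYPEDEF_CS2_LBN;
}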
@@ -836,69 +1428,111 @@
/***********************************/
/* MC_CMD_PORT_READ32
- * Read a 32-bit register from the indirect port register map.
+ * Read a 32-bit register from the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
*/
#define MC_CMD_PORT_READ32 0x14
/* MC_CMD_PORT_READ32_IN msgrequest */
#define MC_CMD_PORT_READ32_IN_LEN 4
+/* Address */
#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
/* MC_CMD_PORT_READ32_OUT msgresponse */
#define MC_CMD_PORT_READ32_OUT_LEN 8
+/* Value */
#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
+/* Status */
#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
/***********************************/
/* MC_CMD_PORT_WRITE32
- * Write a 32-bit register to the indirect port register map.
+ * Write a 32-bit register to the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
*/
#define MC_CMD_PORT_WRITE32 0x15
/* MC_CMD_PORT_WRITE32_IN msgrequest */
#define MC_CMD_PORT_WRITE32_IN_LEN 8
+/* Address */
#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
+/* Value */
#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
/* MC_CMD_PORT_WRITE32_OUT msgresponse */
#define MC_CMD_PORT_WRITE32_OUT_LEN 4
+/* Status */
#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
/***********************************/
/* MC_CMD_PORT_READ128
- * Read a 128-bit register from the indirect port register map.
+ * Read a 128-bit register from the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
*/
#define MC_CMD_PORT_READ128 0x16
/* MC_CMD_PORT_READ128_IN msgrequest */
#define MC_CMD_PORT_READ128_IN_LEN 4
+/* Address */
#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
/* MC_CMD_PORT_READ128_OUT msgresponse */
#define MC_CMD_PORT_READ128_OUT_LEN 20
+/* Value */
#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0
#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
+/* Status */
#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
/***********************************/
/* MC_CMD_PORT_WRITE128
- * Write a 128-bit register to the indirect port register map.
+ * Write a 128-bit register to the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
*/
#define MC_CMD_PORT_WRITE128 0x17
/* MC_CMD_PORT_WRITE128_IN msgrequest */
#define MC_CMD_PORT_WRITE128_IN_LEN 20
+/* Address */
#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
+/* Value */
#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
/* MC_CMD_PORT_WRITE128_OUT msgresponse */
#define MC_CMD_PORT_WRITE128_OUT_LEN 4
+/* Status */
#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
+/* MC_CMD_CAPABILITIES structuredef */
+#define MC_CMD_CAPABILITIES_LEN 4
+/* Small buf table. */
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 1
+/* Turbo mode (for Maranello). */
+#define MC_CMD_CAPABILITIES_TURBO_LBN 1
+#define MC_CMD_CAPABILITIES_TURBO_WIDTH 1
+/* Turbo mode active (for Maranello). */
+#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 2
+#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 1
+/* PTP offload. */
+#define MC_CMD_CAPABILITIES_PTP_LBN 3
+#define MC_CMD_CAPABILITIES_PTP_WIDTH 1
+/* AOE mode. */
+#define MC_CMD_CAPABILITIES_AOE_LBN 4
+#define MC_CMD_CAPABILITIES_AOE_WIDTH 1
+/* AOE mode active. */
+#define MC_CMD_CAPABILITIES_AOE_ACTIVE_LBN 5
+#define MC_CMD_CAPABILITIES_AOE_ACTIVE_WIDTH 1
+/* FC mode active. */
+#define MC_CMD_CAPABILITIES_FC_ACTIVE_LBN 6
+#define MC_CMD_CAPABILITIES_FC_ACTIVE_WIDTH 1
+#define MC_CMD_CAPABILITIES_RESERVED_LBN 7
+#define MC_CMD_CAPABILITIES_RESERVED_WIDTH 25
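
/*
 * Editor's sketch (not part of the patch): extracting one of the capability
 * bitfields above from a 32-bit capabilities word (as returned per port by
 * MC_CMD_GET_BOARD_CFG).  Valid for field widths below 32.
 */
static unsigned int mc_cap_field(uint32_t caps, unsigned int lbn,
				 unsigned int width)
{
	return (caps >> lbn) & ((1u << width) - 1);
}

/* Usage, e.g. PTP offload support:
 *	mc_cap_field(caps, MC_CMD_CAPABILITIES_PTP_LBN,
 *		     MC_CMD_CAPABILITIES_PTP_WIDTH)
 */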
+
/***********************************/
/* MC_CMD_GET_BOARD_CFG
@@ -916,18 +1550,10 @@
#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
+/* See MC_CMD_CAPABILITIES */
#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
-#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0x0 /* enum */
-#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 0x1 /* enum */
-#define MC_CMD_CAPABILITIES_TURBO_LBN 0x1 /* enum */
-#define MC_CMD_CAPABILITIES_TURBO_WIDTH 0x1 /* enum */
-#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 0x2 /* enum */
-#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 0x1 /* enum */
-#define MC_CMD_CAPABILITIES_PTP_LBN 0x3 /* enum */
-#define MC_CMD_CAPABILITIES_PTP_WIDTH 0x1 /* enum */
+/* See MC_CMD_CAPABILITIES */
#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
-/* Enum values, see field(s): */
-/* CAPABILITIES_PORT0 */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
@@ -936,6 +1562,11 @@
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
+/* This field contains a 16-bit value for each of the types of NVRAM area. The
+ * values are defined in the firmware/mc/platform/.c file for a specific board
+ * type, but otherwise have no meaning to the MC; they are used by the driver
+ * to manage selection of appropriate firmware updates.
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2
#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM 12
@@ -944,7 +1575,7 @@
/***********************************/
/* MC_CMD_DBI_READX
- * Read DBI register(s).
+ * Read DBI register(s) -- extended functionality
*/
#define MC_CMD_DBI_READX 0x19
@@ -952,6 +1583,7 @@
#define MC_CMD_DBI_READX_IN_LENMIN 8
#define MC_CMD_DBI_READX_IN_LENMAX 248
#define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num))
+/* Each read op consists of an address (offset 0) and VF/CS2 parameters
+ * (offset 32). See MC_CMD_DBIRDOP_TYPEDEF.
+ */
#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0
#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8
#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0
@@ -963,11 +1595,27 @@
#define MC_CMD_DBI_READX_OUT_LENMIN 4
#define MC_CMD_DBI_READX_OUT_LENMAX 252
#define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num))
+/* Value */
#define MC_CMD_DBI_READX_OUT_VALUE_OFST 0
#define MC_CMD_DBI_READX_OUT_VALUE_LEN 4
#define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1
#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63
+/* MC_CMD_DBIRDOP_TYPEDEF structuredef */
+#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_LBN 14
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_WIDTH 1
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LBN 32
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_WIDTH 32
+
/***********************************/
/* MC_CMD_SET_RAND_SEED
@@ -977,6 +1625,7 @@
/* MC_CMD_SET_RAND_SEED_IN msgrequest */
#define MC_CMD_SET_RAND_SEED_IN_LEN 16
+/* Seed value. */
#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
#define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16
@@ -986,7 +1635,7 @@
/***********************************/
/* MC_CMD_LTSSM_HIST
- * Retrieve the history of the PCIE LTSSM.
+ * Retrieve the history of the LTSSM, if the build supports it.
*/
#define MC_CMD_LTSSM_HIST 0x1b
@@ -997,6 +1646,7 @@
#define MC_CMD_LTSSM_HIST_OUT_LENMIN 0
#define MC_CMD_LTSSM_HIST_OUT_LENMAX 252
#define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num))
+/* variable number of LTSSM values, as bytes. The history is read-to-clear. */
#define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0
#define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4
#define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0
@@ -1005,41 +1655,47 @@
/***********************************/
/* MC_CMD_DRV_ATTACH
- * Inform MCPU that this port is managed on the host.
+ * Inform MCPU that this port is managed on the host (i.e. driver active). For
+ * Huntington, also request the preferred datapath firmware to use if possible
+ * (it may not be possible for this request to be fulfilled; the driver must
+ * issue a subsequent MC_CMD_GET_CAPABILITIES command to determine which
+ * features are actually available). The FIRMWARE_ID field is ignored by older
+ * platforms.
*/
#define MC_CMD_DRV_ATTACH 0x1c
/* MC_CMD_DRV_ATTACH_IN msgrequest */
-#define MC_CMD_DRV_ATTACH_IN_LEN 8
+#define MC_CMD_DRV_ATTACH_IN_LEN 12
+/* new state (0=detached, 1=attached) to set if UPDATE=1 */
#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
+/* 1 to set new state, or 0 to just report the existing state */
#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
+/* preferred datapath firmware (for Huntington; ignored for Siena) */
+#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8
+/* enum: Prefer to use full featured firmware */
+#define MC_CMD_FW_FULL_FEATURED 0x0
+/* enum: Prefer to use firmware with fewer features but lower latency */
+#define MC_CMD_FW_LOW_LATENCY 0x1
/* MC_CMD_DRV_ATTACH_OUT msgresponse */
#define MC_CMD_DRV_ATTACH_OUT_LEN 4
+/* previous or existing state (0=detached, 1=attached) */
#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
-
-/***********************************/
-/* MC_CMD_NCSI_PROD
- * Trigger an NC-SI event.
+/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8
+/* previous or existing state (0=detached, 1=attached) */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0
+/* Flags associated with this function */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
+/* enum: Labels the lowest-numbered function visible to the OS */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0
+/* enum: The function can control the link state of the physical port it is
+ * bound to.
*/
-#define MC_CMD_NCSI_PROD 0x1d
-
-/* MC_CMD_NCSI_PROD_IN msgrequest */
-#define MC_CMD_NCSI_PROD_IN_LEN 4
-#define MC_CMD_NCSI_PROD_IN_EVENTS_OFST 0
-#define MC_CMD_NCSI_PROD_LINKCHANGE 0x0 /* enum */
-#define MC_CMD_NCSI_PROD_RESET 0x1 /* enum */
-#define MC_CMD_NCSI_PROD_DRVATTACH 0x2 /* enum */
-#define MC_CMD_NCSI_PROD_IN_LINKCHANGE_LBN 0
-#define MC_CMD_NCSI_PROD_IN_LINKCHANGE_WIDTH 1
-#define MC_CMD_NCSI_PROD_IN_RESET_LBN 1
-#define MC_CMD_NCSI_PROD_IN_RESET_WIDTH 1
-#define MC_CMD_NCSI_PROD_IN_DRVATTACH_LBN 2
-#define MC_CMD_NCSI_PROD_IN_DRVATTACH_WIDTH 1
-
-/* MC_CMD_NCSI_PROD_OUT msgresponse */
-#define MC_CMD_NCSI_PROD_OUT_LEN 0
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL 0x1
+/* enum: The function can perform privileged operations */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED 0x2
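
/*
 * Editor's sketch (not part of the patch): packing the extended attach
 * request.  FIRMWARE_ID is a preference only; older platforms ignore it and
 * the driver should confirm the result with MC_CMD_GET_CAPABILITIES.  Reuses
 * the mcdi_put_le32() helper from the sketches above.
 */
static void drv_attach_request(uint8_t *buf, int attach, int update,
			       uint32_t firmware_id)
{
	mcdi_put_le32(buf + MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST,
		      attach ? 1 : 0);
	mcdi_put_le32(buf + MC_CMD_DRV_ATTACH_IN_UPDATE_OFST, update ? 1 : 0);
	/* e.g. MC_CMD_FW_LOW_LATENCY; ignored by Siena */
	mcdi_put_le32(buf + MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST,
		      firmware_id);
}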
/***********************************/
@@ -1050,6 +1706,7 @@
/* MC_CMD_SHMUART_IN msgrequest */
#define MC_CMD_SHMUART_IN_LEN 4
+/* ??? */
#define MC_CMD_SHMUART_IN_FLAG_OFST 0
/* MC_CMD_SHMUART_OUT msgresponse */
@@ -1057,13 +1714,33 @@
/***********************************/
+/* MC_CMD_PORT_RESET
+ * Generic per-port reset. There is no equivalent for per-board reset. Locks
+ * required: None; Return code: 0, ETIME. NOTE: This command is deprecated -
+ * use MC_CMD_ENTITY_RESET instead.
+ */
+#define MC_CMD_PORT_RESET 0x20
+
+/* MC_CMD_PORT_RESET_IN msgrequest */
+#define MC_CMD_PORT_RESET_IN_LEN 0
+
+/* MC_CMD_PORT_RESET_OUT msgresponse */
+#define MC_CMD_PORT_RESET_OUT_LEN 0
+
+
+/***********************************/
/* MC_CMD_ENTITY_RESET
- * Generic per-port reset.
+ * Generic per-resource reset. There is no equivalent for per-board reset.
+ * Locks required: None; Return code: 0, ETIME. NOTE: This command is an
+ * extended version of the deprecated MC_CMD_PORT_RESET with added fields.
*/
#define MC_CMD_ENTITY_RESET 0x20
/* MC_CMD_ENTITY_RESET_IN msgrequest */
#define MC_CMD_ENTITY_RESET_IN_LEN 4
+/* Optional flags field. Omitting this will perform a "legacy" reset action
+ * (TBD).
+ */
#define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0
#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0
#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1
@@ -1080,7 +1757,9 @@
/* MC_CMD_PCIE_CREDITS_IN msgrequest */
#define MC_CMD_PCIE_CREDITS_IN_LEN 8
+/* poll period. 0 is disabled */
#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
+/* wipe statistics */
#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
/* MC_CMD_PCIE_CREDITS_OUT msgresponse */
@@ -1141,7 +1820,7 @@
/***********************************/
/* MC_CMD_PUTS
- * puts(3) implementation over MCDI
+ * Copy the given ASCII string out onto UART and/or out of the network port.
*/
#define MC_CMD_PUTS 0x23
@@ -1167,7 +1846,8 @@
/***********************************/
/* MC_CMD_GET_PHY_CFG
- * Report PHY configuration.
+ * Report PHY configuration. This guarantees to succeed even if the PHY is in a
+ * 'zombie' state. Locks required: None
*/
#define MC_CMD_GET_PHY_CFG 0x24
@@ -1176,6 +1856,7 @@
/* MC_CMD_GET_PHY_CFG_OUT msgresponse */
#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
+/* flags */
#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
@@ -1191,7 +1872,9 @@
#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_WIDTH 1
#define MC_CMD_GET_PHY_CFG_OUT_BIST_LBN 6
#define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
+/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
+/* Bitmask of supported capabilities */
#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
#define MC_CMD_PHY_CAP_10HDX_LBN 1
#define MC_CMD_PHY_CAP_10HDX_WIDTH 1
@@ -1213,20 +1896,36 @@
#define MC_CMD_PHY_CAP_ASYM_WIDTH 1
#define MC_CMD_PHY_CAP_AN_LBN 10
#define MC_CMD_PHY_CAP_AN_WIDTH 1
+#define MC_CMD_PHY_CAP_40000FDX_LBN 11
+#define MC_CMD_PHY_CAP_40000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_DDM_LBN 12
+#define MC_CMD_PHY_CAP_DDM_WIDTH 1
+/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
+/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
+/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
+/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
+/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
-#define MC_CMD_MEDIA_XAUI 0x1 /* enum */
-#define MC_CMD_MEDIA_CX4 0x2 /* enum */
-#define MC_CMD_MEDIA_KX4 0x3 /* enum */
-#define MC_CMD_MEDIA_XFP 0x4 /* enum */
-#define MC_CMD_MEDIA_SFP_PLUS 0x5 /* enum */
-#define MC_CMD_MEDIA_BASE_T 0x6 /* enum */
+/* enum: Xaui. */
+#define MC_CMD_MEDIA_XAUI 0x1
+/* enum: CX4. */
+#define MC_CMD_MEDIA_CX4 0x2
+/* enum: KX4. */
+#define MC_CMD_MEDIA_KX4 0x3
+/* enum: XFP Far. */
+#define MC_CMD_MEDIA_XFP 0x4
+/* enum: SFP+. */
+#define MC_CMD_MEDIA_SFP_PLUS 0x5
+/* enum: 10GBaseT. */
+#define MC_CMD_MEDIA_BASE_T 0x6
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
-#define MC_CMD_MMD_CLAUSE22 0x0 /* enum */
+/* enum: Native clause 22 */
+#define MC_CMD_MMD_CLAUSE22 0x0
#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
#define MC_CMD_MMD_CLAUSE45_WIS 0x2 /* enum */
#define MC_CMD_MMD_CLAUSE45_PCS 0x3 /* enum */
@@ -1234,7 +1933,8 @@
#define MC_CMD_MMD_CLAUSE45_DTEXS 0x5 /* enum */
#define MC_CMD_MMD_CLAUSE45_TC 0x6 /* enum */
#define MC_CMD_MMD_CLAUSE45_AN 0x7 /* enum */
-#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d /* enum */
+/* enum: Clause22 proxied over clause45 by PHY. */
+#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d
#define MC_CMD_MMD_CLAUSE45_VEND1 0x1e /* enum */
#define MC_CMD_MMD_CLAUSE45_VEND2 0x1f /* enum */
#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
@@ -1243,18 +1943,31 @@
/***********************************/
/* MC_CMD_START_BIST
- * Start a BIST test on the PHY.
+ * Start a BIST test on the PHY. Locks required: PHY_LOCK if doing a PHY BIST
+ * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held)
*/
#define MC_CMD_START_BIST 0x25
/* MC_CMD_START_BIST_IN msgrequest */
#define MC_CMD_START_BIST_IN_LEN 4
+/* Type of test. */
#define MC_CMD_START_BIST_IN_TYPE_OFST 0
-#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1 /* enum */
-#define MC_CMD_PHY_BIST_CABLE_LONG 0x2 /* enum */
-#define MC_CMD_BPX_SERDES_BIST 0x3 /* enum */
-#define MC_CMD_MC_LOOPBACK_BIST 0x4 /* enum */
-#define MC_CMD_PHY_BIST 0x5 /* enum */
+/* enum: Run the PHY's short cable BIST. */
+#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1
+/* enum: Run the PHY's long cable BIST. */
+#define MC_CMD_PHY_BIST_CABLE_LONG 0x2
+/* enum: Run BIST on the currently selected BPX Serdes (XAUI or XFI) . */
+#define MC_CMD_BPX_SERDES_BIST 0x3
+/* enum: Run the MC loopback tests. */
+#define MC_CMD_MC_LOOPBACK_BIST 0x4
+/* enum: Run the PHY's standard BIST. */
+#define MC_CMD_PHY_BIST 0x5
+/* enum: Run MC RAM test. */
+#define MC_CMD_MC_MEM_BIST 0x6
+/* enum: Run Port RAM test. */
+#define MC_CMD_PORT_MEM_BIST 0x7
+/* enum: Run register test. */
+#define MC_CMD_REG_BIST 0x8
/* MC_CMD_START_BIST_OUT msgresponse */
#define MC_CMD_START_BIST_OUT_LEN 0
@@ -1262,7 +1975,12 @@
/***********************************/
/* MC_CMD_POLL_BIST
- * Poll for BIST completion.
+ * Poll for BIST completion. Returns a single status code, and optionally some
+ * PHY-specific BIST output. The driver should only consume the BIST output
+ * after validating OUTLEN and MC_CMD_GET_PHY_CFG.TYPE. If a driver can't
+ * successfully parse the BIST output, it should still respect the pass/fail in
+ * OUT.RESULT. Locks required: PHY_LOCK if doing a PHY BIST. Return code: 0,
+ * EACCES (if PHY_LOCK is not held).
*/
#define MC_CMD_POLL_BIST 0x26
@@ -1271,15 +1989,21 @@
/* MC_CMD_POLL_BIST_OUT msgresponse */
#define MC_CMD_POLL_BIST_OUT_LEN 8
+/* result */
#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
-#define MC_CMD_POLL_BIST_RUNNING 0x1 /* enum */
-#define MC_CMD_POLL_BIST_PASSED 0x2 /* enum */
-#define MC_CMD_POLL_BIST_FAILED 0x3 /* enum */
-#define MC_CMD_POLL_BIST_TIMEOUT 0x4 /* enum */
+/* enum: Running. */
+#define MC_CMD_POLL_BIST_RUNNING 0x1
+/* enum: Passed. */
+#define MC_CMD_POLL_BIST_PASSED 0x2
+/* enum: Failed. */
+#define MC_CMD_POLL_BIST_FAILED 0x3
+/* enum: Timed-out. */
+#define MC_CMD_POLL_BIST_TIMEOUT 0x4
#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
/* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */
#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
+/* result */
/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
/* Enum values, see field(s): */
/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
@@ -1287,42 +2011,116 @@
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+/* Status of each channel A */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
-#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1 /* enum */
-#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2 /* enum */
-#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3 /* enum */
-#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4 /* enum */
-#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9 /* enum */
+/* enum: Ok. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1
+/* enum: Open. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2
+/* enum: Intra-pair short. */
+#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3
+/* enum: Inter-pair short. */
+#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4
+/* enum: Busy. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9
+/* Status of each channel B */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
/* Enum values, see field(s): */
/* CABLE_STATUS_A */
+/* Status of each channel C */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
/* Enum values, see field(s): */
/* CABLE_STATUS_A */
+/* Status of each channel D */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
/* Enum values, see field(s): */
/* CABLE_STATUS_A */
/* MC_CMD_POLL_BIST_OUT_MRSFP msgresponse */
#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
+/* result */
/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
/* Enum values, see field(s): */
/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
-#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8 /* enum */
+/* enum: Complete. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0
+/* enum: Bus switch off I2C write. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1
+/* enum: Bus switch off I2C no access IO exp. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2
+/* enum: Bus switch off I2C no access module. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3
+/* enum: IO exp I2C configure. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4
+/* enum: Bus switch I2C no cross talk. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5
+/* enum: Module presence. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6
+/* enum: Module ID I2C access. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7
+/* enum: Module ID sane value. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8
+
+/* MC_CMD_POLL_BIST_OUT_MEM msgresponse */
+#define MC_CMD_POLL_BIST_OUT_MEM_LEN 36
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4
+/* enum: Test has completed. */
+#define MC_CMD_POLL_BIST_MEM_COMPLETE 0x0
+/* enum: RAM test - walk ones. */
+#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ONES 0x1
+/* enum: RAM test - walk zeros. */
+#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ZEROS 0x2
+/* enum: RAM test - walking inversions zeros/ones. */
+#define MC_CMD_POLL_BIST_MEM_MEM_INV_ZERO_ONE 0x3
+/* enum: RAM test - walking inversions checkerboard. */
+#define MC_CMD_POLL_BIST_MEM_MEM_INV_CHKBOARD 0x4
+/* enum: Register test - set / clear individual bits. */
+#define MC_CMD_POLL_BIST_MEM_REG 0x5
+/* enum: ECC error detected. */
+#define MC_CMD_POLL_BIST_MEM_ECC 0x6
+/* Failure address, only valid if result is POLL_BIST_FAILED */
+#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8
+/* Bus or address space to which the failure address corresponds */
+#define MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12
+/* enum: MC MIPS bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0
+/* enum: CSR IREG bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_CSR 0x1
+/* enum: RX DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX 0x2
+/* enum: TX0 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX0 0x3
+/* enum: TX1 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX1 0x4
+/* enum: RX DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX 0x5
+/* enum: TX DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_TX 0x6
+/* Pattern written to RAM / register */
+#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16
+/* Actual value read from RAM / register */
+#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20
+/* ECC error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24
+/* ECC parity error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28
+/* ECC fatal error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32
/***********************************/
/* MC_CMD_FLUSH_RX_QUEUES
- * Flush receive queue(s).
+ * Flush receive queue(s). If SRIOV is enabled (via MC_CMD_SRIOV), then RXQ
+ * flushes should be initiated via this MCDI operation, rather than via
+ * directly writing FLUSH_CMD.
+ *
+ * The flush is completed (either done/fail) asynchronously (after this command
+ * returns). The driver must still wait for flush done/failure events as usual.
*/
#define MC_CMD_FLUSH_RX_QUEUES 0x27
@@ -1341,7 +2139,7 @@
/***********************************/
/* MC_CMD_GET_LOOPBACK_MODES
- * Get port's loopback modes.
+ * Returns a bitmask of loopback modes available at each speed.
*/
#define MC_CMD_GET_LOOPBACK_MODES 0x28
@@ -1349,61 +2147,116 @@
#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
/* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */
-#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 40
+/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_OFST 0
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4
-#define MC_CMD_LOOPBACK_NONE 0x0 /* enum */
-#define MC_CMD_LOOPBACK_DATA 0x1 /* enum */
-#define MC_CMD_LOOPBACK_GMAC 0x2 /* enum */
-#define MC_CMD_LOOPBACK_XGMII 0x3 /* enum */
-#define MC_CMD_LOOPBACK_XGXS 0x4 /* enum */
-#define MC_CMD_LOOPBACK_XAUI 0x5 /* enum */
-#define MC_CMD_LOOPBACK_GMII 0x6 /* enum */
-#define MC_CMD_LOOPBACK_SGMII 0x7 /* enum */
-#define MC_CMD_LOOPBACK_XGBR 0x8 /* enum */
-#define MC_CMD_LOOPBACK_XFI 0x9 /* enum */
-#define MC_CMD_LOOPBACK_XAUI_FAR 0xa /* enum */
-#define MC_CMD_LOOPBACK_GMII_FAR 0xb /* enum */
-#define MC_CMD_LOOPBACK_SGMII_FAR 0xc /* enum */
-#define MC_CMD_LOOPBACK_XFI_FAR 0xd /* enum */
-#define MC_CMD_LOOPBACK_GPHY 0xe /* enum */
-#define MC_CMD_LOOPBACK_PHYXS 0xf /* enum */
-#define MC_CMD_LOOPBACK_PCS 0x10 /* enum */
-#define MC_CMD_LOOPBACK_PMAPMD 0x11 /* enum */
-#define MC_CMD_LOOPBACK_XPORT 0x12 /* enum */
-#define MC_CMD_LOOPBACK_XGMII_WS 0x13 /* enum */
-#define MC_CMD_LOOPBACK_XAUI_WS 0x14 /* enum */
-#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 /* enum */
-#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 /* enum */
-#define MC_CMD_LOOPBACK_GMII_WS 0x17 /* enum */
-#define MC_CMD_LOOPBACK_XFI_WS 0x18 /* enum */
-#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 /* enum */
-#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a /* enum */
+/* enum: None. */
+#define MC_CMD_LOOPBACK_NONE 0x0
+/* enum: Data. */
+#define MC_CMD_LOOPBACK_DATA 0x1
+/* enum: GMAC. */
+#define MC_CMD_LOOPBACK_GMAC 0x2
+/* enum: XGMII. */
+#define MC_CMD_LOOPBACK_XGMII 0x3
+/* enum: XGXS. */
+#define MC_CMD_LOOPBACK_XGXS 0x4
+/* enum: XAUI. */
+#define MC_CMD_LOOPBACK_XAUI 0x5
+/* enum: GMII. */
+#define MC_CMD_LOOPBACK_GMII 0x6
+/* enum: SGMII. */
+#define MC_CMD_LOOPBACK_SGMII 0x7
+/* enum: XGBR. */
+#define MC_CMD_LOOPBACK_XGBR 0x8
+/* enum: XFI. */
+#define MC_CMD_LOOPBACK_XFI 0x9
+/* enum: XAUI Far. */
+#define MC_CMD_LOOPBACK_XAUI_FAR 0xa
+/* enum: GMII Far. */
+#define MC_CMD_LOOPBACK_GMII_FAR 0xb
+/* enum: SGMII Far. */
+#define MC_CMD_LOOPBACK_SGMII_FAR 0xc
+/* enum: XFI Far. */
+#define MC_CMD_LOOPBACK_XFI_FAR 0xd
+/* enum: GPhy. */
+#define MC_CMD_LOOPBACK_GPHY 0xe
+/* enum: PhyXS. */
+#define MC_CMD_LOOPBACK_PHYXS 0xf
+/* enum: PCS. */
+#define MC_CMD_LOOPBACK_PCS 0x10
+/* enum: PMA-PMD. */
+#define MC_CMD_LOOPBACK_PMAPMD 0x11
+/* enum: Cross-Port. */
+#define MC_CMD_LOOPBACK_XPORT 0x12
+/* enum: XGMII-Wireside. */
+#define MC_CMD_LOOPBACK_XGMII_WS 0x13
+/* enum: XAUI Wireside. */
+#define MC_CMD_LOOPBACK_XAUI_WS 0x14
+/* enum: XAUI Wireside Far. */
+#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15
+/* enum: XAUI Wireside Near. */
+#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16
+/* enum: GMII Wireside. */
+#define MC_CMD_LOOPBACK_GMII_WS 0x17
+/* enum: XFI Wireside. */
+#define MC_CMD_LOOPBACK_XFI_WS 0x18
+/* enum: XFI Wireside Far. */
+#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19
+/* enum: PhyXS Wireside. */
+#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a
+/* enum: PMA lanes MAC-Serdes. */
+#define MC_CMD_LOOPBACK_PMA_INT 0x1b
+/* enum: KR Serdes Parallel (Encoder). */
+#define MC_CMD_LOOPBACK_SD_NEAR 0x1c
+/* enum: KR Serdes Serial. */
+#define MC_CMD_LOOPBACK_SD_FAR 0x1d
+/* enum: PMA lanes MAC-Serdes Wireside. */
+#define MC_CMD_LOOPBACK_PMA_INT_WS 0x1e
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+#define MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+#define MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21
+/* enum: KR Serdes Serial Wireside. */
+#define MC_CMD_LOOPBACK_SD_FES_WS 0x22
+/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_OFST 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_OFST 12
/* Enum values, see field(s): */
/* 100M */
+/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST 16
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST 16
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST 20
/* Enum values, see field(s): */
/* 100M */
+/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST 24
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_OFST 24
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_OFST 28
/* Enum values, see field(s): */
/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_OFST 36
+/* Enum values, see field(s): */
+/* 100M */
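
Each of the 1G/10G/SUGGESTED/40G fields above is a 64-bit mask indexed by
the MC_CMD_LOOPBACK_* enum. A minimal sketch of testing one mode, assuming
the field's _LO/_HI dwords have already been pulled out of the response:

#include <stdbool.h>
#include <stdint.h>

/* True if loopback mode `mode` (an MC_CMD_LOOPBACK_* value) is set in a
 * capability mask assembled from a field's _LO/_HI dwords. */
static bool loopback_supported(uint32_t lo, uint32_t hi, unsigned int mode)
{
        uint64_t mask = ((uint64_t)hi << 32) | lo;

        return mode < 64 && ((mask >> mode) & 1);
}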
/***********************************/
/* MC_CMD_GET_LINK
- * Read the unified MAC/PHY link state.
+ * Read the unified MAC/PHY link state. Locks required: None. Return code:
+ * 0, ETIME.
*/
#define MC_CMD_GET_LINK 0x29
@@ -1412,9 +2265,15 @@
/* MC_CMD_GET_LINK_OUT msgresponse */
#define MC_CMD_GET_LINK_OUT_LEN 28
+/* near-side advertised capabilities */
#define MC_CMD_GET_LINK_OUT_CAP_OFST 0
+/* link-partner advertised capabilities */
#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
+/* Autonegotiated speed in mbit/s. The link may still be down even if this
+ * reads non-zero.
+ */
#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
+/* Current loopback setting. */
#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
/* Enum values, see field(s): */
/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
@@ -1427,10 +2286,14 @@
#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1
#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3
#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1
+/* This returns the negotiated flow control value. */
#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
-#define MC_CMD_FCNTL_OFF 0x0 /* enum */
-#define MC_CMD_FCNTL_RESPOND 0x1 /* enum */
-#define MC_CMD_FCNTL_BIDIR 0x2 /* enum */
+/* enum: Flow control is off. */
+#define MC_CMD_FCNTL_OFF 0x0
+/* enum: Respond to flow control. */
+#define MC_CMD_FCNTL_RESPOND 0x1
+/* enum: Respond to and issue flow control. */
+#define MC_CMD_FCNTL_BIDIR 0x2
#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
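
For illustration, a hedged sketch of decoding two fields of the response
above; mcdi_dword() is a hypothetical helper, and the little-endian dword
encoding of MCDI fields is assumed:

#include <stdint.h>

/* Read the little-endian dword at byte offset `ofst` (an *_OFST macro). */
static uint32_t mcdi_dword(const uint8_t *buf, unsigned int ofst)
{
        return (uint32_t)buf[ofst] |
               ((uint32_t)buf[ofst + 1] << 8) |
               ((uint32_t)buf[ofst + 2] << 16) |
               ((uint32_t)buf[ofst + 3] << 24);
}

static void decode_get_link(const uint8_t *resp /* MC_CMD_GET_LINK_OUT_LEN bytes */,
                            uint32_t *speed_mbps, uint32_t *fcntl)
{
        *speed_mbps = mcdi_dword(resp, 8); /* ..._LINK_SPEED_OFST */
        *fcntl = mcdi_dword(resp, 20);     /* ..._FCNTL_OFST, an MC_CMD_FCNTL_* value */
}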
@@ -1444,13 +2307,16 @@
/***********************************/
/* MC_CMD_SET_LINK
- * Write the unified MAC/PHY link configuration.
+ * Write the unified MAC/PHY link configuration. Locks required: None. Return
+ * code: 0, EINVAL, ETIME
*/
#define MC_CMD_SET_LINK 0x2a
/* MC_CMD_SET_LINK_IN msgrequest */
#define MC_CMD_SET_LINK_IN_LEN 16
+/* ??? */
#define MC_CMD_SET_LINK_IN_CAP_OFST 0
+/* Flags */
#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4
#define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0
#define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1
@@ -1458,9 +2324,13 @@
#define MC_CMD_SET_LINK_IN_POWEROFF_WIDTH 1
#define MC_CMD_SET_LINK_IN_TXDIS_LBN 2
#define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1
+/* Loopback mode. */
#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
/* Enum values, see field(s): */
/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+/* A loopback speed of "0" is supported, and means (choose any available
+ * speed).
+ */
#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
/* MC_CMD_SET_LINK_OUT msgresponse */
@@ -1469,12 +2339,13 @@
/***********************************/
/* MC_CMD_SET_ID_LED
- * Set indentification LED state.
+ * Set identification LED state. Locks required: None. Return code: 0, EINVAL
*/
#define MC_CMD_SET_ID_LED 0x2b
/* MC_CMD_SET_ID_LED_IN msgrequest */
#define MC_CMD_SET_ID_LED_IN_LEN 4
+/* Set LED state. */
#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0
#define MC_CMD_LED_OFF 0x0 /* enum */
#define MC_CMD_LED_ON 0x1 /* enum */
@@ -1486,12 +2357,15 @@
/***********************************/
/* MC_CMD_SET_MAC
- * Set MAC configuration.
+ * Set MAC configuration. Locks required: None. Return code: 0, EINVAL
*/
#define MC_CMD_SET_MAC 0x2c
/* MC_CMD_SET_MAC_IN msgrequest */
#define MC_CMD_SET_MAC_IN_LEN 24
+/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
+ * EtherII, VLAN, bug16011 padding).
+ */
#define MC_CMD_SET_MAC_IN_MTU_OFST 0
#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4
#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
@@ -1504,10 +2378,14 @@
#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
+/* enum: Flow control is off. */
/* MC_CMD_FCNTL_OFF 0x0 */
+/* enum: Respond to flow control. */
/* MC_CMD_FCNTL_RESPOND 0x1 */
+/* enum: Respond to and issue flow control. */
/* MC_CMD_FCNTL_BIDIR 0x2 */
-#define MC_CMD_FCNTL_AUTO 0x3 /* enum */
+/* enum: Auto neg flow control. */
+#define MC_CMD_FCNTL_AUTO 0x3
/* MC_CMD_SET_MAC_OUT msgresponse */
#define MC_CMD_SET_MAC_OUT_LEN 0
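
As a sketch of how the *_OFST macros above are used when building the
request; mcdi_put_dword() is a hypothetical helper, and the zeroed fields
(DRAIN, the REJECT flags) are illustrative defaults, not recommended values:

#include <stdint.h>
#include <string.h>

/* Write a little-endian dword at byte offset `ofst` (an *_OFST macro). */
static void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t v)
{
        buf[ofst] = v & 0xff;
        buf[ofst + 1] = (v >> 8) & 0xff;
        buf[ofst + 2] = (v >> 16) & 0xff;
        buf[ofst + 3] = (v >> 24) & 0xff;
}

static void build_set_mac(uint8_t *req /* MC_CMD_SET_MAC_IN_LEN == 24 bytes */,
                          uint32_t mtu, const uint8_t mac[6])
{
        memset(req, 0, 24);           /* DRAIN, flags, etc. left at zero */
        mcdi_put_dword(req, 0, mtu);  /* ..._IN_MTU_OFST */
        memcpy(req + 8, mac, 6);      /* ..._IN_ADDR_OFST */
        mcdi_put_dword(req, 20, 0x3); /* ..._IN_FCNTL_OFST = MC_CMD_FCNTL_AUTO */
}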
@@ -1515,12 +2393,18 @@
/***********************************/
/* MC_CMD_PHY_STATS
- * Get generic PHY statistics.
+ * Get generic PHY statistics. This call returns the statistics for a generic
+ * PHY in a sparse array (indexed by the enumeration). Each value is
+ * represented by a 32bit number. If the DMA_ADDR is 0, then no DMA is
+ * performed, and the statistics may be read from the message response. If
+ * DMA_ADDR != 0, then the statistics are DMAed to that (page-aligned)
+ * location. Locks required: None.
+ * Returns: 0, ETIME
*/
#define MC_CMD_PHY_STATS 0x2d
/* MC_CMD_PHY_STATS_IN msgrequest */
#define MC_CMD_PHY_STATS_IN_LEN 8
+/* ??? */
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_OFST 0
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LEN 8
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
@@ -1534,40 +2418,71 @@
#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_OFST 0
#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4
#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS
-#define MC_CMD_OUI 0x0 /* enum */
-#define MC_CMD_PMA_PMD_LINK_UP 0x1 /* enum */
-#define MC_CMD_PMA_PMD_RX_FAULT 0x2 /* enum */
-#define MC_CMD_PMA_PMD_TX_FAULT 0x3 /* enum */
-#define MC_CMD_PMA_PMD_SIGNAL 0x4 /* enum */
-#define MC_CMD_PMA_PMD_SNR_A 0x5 /* enum */
-#define MC_CMD_PMA_PMD_SNR_B 0x6 /* enum */
-#define MC_CMD_PMA_PMD_SNR_C 0x7 /* enum */
-#define MC_CMD_PMA_PMD_SNR_D 0x8 /* enum */
-#define MC_CMD_PCS_LINK_UP 0x9 /* enum */
-#define MC_CMD_PCS_RX_FAULT 0xa /* enum */
-#define MC_CMD_PCS_TX_FAULT 0xb /* enum */
-#define MC_CMD_PCS_BER 0xc /* enum */
-#define MC_CMD_PCS_BLOCK_ERRORS 0xd /* enum */
-#define MC_CMD_PHYXS_LINK_UP 0xe /* enum */
-#define MC_CMD_PHYXS_RX_FAULT 0xf /* enum */
-#define MC_CMD_PHYXS_TX_FAULT 0x10 /* enum */
-#define MC_CMD_PHYXS_ALIGN 0x11 /* enum */
-#define MC_CMD_PHYXS_SYNC 0x12 /* enum */
-#define MC_CMD_AN_LINK_UP 0x13 /* enum */
-#define MC_CMD_AN_COMPLETE 0x14 /* enum */
-#define MC_CMD_AN_10GBT_STATUS 0x15 /* enum */
-#define MC_CMD_CL22_LINK_UP 0x16 /* enum */
-#define MC_CMD_PHY_NSTATS 0x17 /* enum */
+/* enum: OUI. */
+#define MC_CMD_OUI 0x0
+/* enum: PMA-PMD Link Up. */
+#define MC_CMD_PMA_PMD_LINK_UP 0x1
+/* enum: PMA-PMD RX Fault. */
+#define MC_CMD_PMA_PMD_RX_FAULT 0x2
+/* enum: PMA-PMD TX Fault. */
+#define MC_CMD_PMA_PMD_TX_FAULT 0x3
+/* enum: PMA-PMD Signal */
+#define MC_CMD_PMA_PMD_SIGNAL 0x4
+/* enum: PMA-PMD SNR A. */
+#define MC_CMD_PMA_PMD_SNR_A 0x5
+/* enum: PMA-PMD SNR B. */
+#define MC_CMD_PMA_PMD_SNR_B 0x6
+/* enum: PMA-PMD SNR C. */
+#define MC_CMD_PMA_PMD_SNR_C 0x7
+/* enum: PMA-PMD SNR D. */
+#define MC_CMD_PMA_PMD_SNR_D 0x8
+/* enum: PCS Link Up. */
+#define MC_CMD_PCS_LINK_UP 0x9
+/* enum: PCS RX Fault. */
+#define MC_CMD_PCS_RX_FAULT 0xa
+/* enum: PCS TX Fault. */
+#define MC_CMD_PCS_TX_FAULT 0xb
+/* enum: PCS BER. */
+#define MC_CMD_PCS_BER 0xc
+/* enum: PCS Block Errors. */
+#define MC_CMD_PCS_BLOCK_ERRORS 0xd
+/* enum: PhyXS Link Up. */
+#define MC_CMD_PHYXS_LINK_UP 0xe
+/* enum: PhyXS RX Fault. */
+#define MC_CMD_PHYXS_RX_FAULT 0xf
+/* enum: PhyXS TX Fault. */
+#define MC_CMD_PHYXS_TX_FAULT 0x10
+/* enum: PhyXS Align. */
+#define MC_CMD_PHYXS_ALIGN 0x11
+/* enum: PhyXS Sync. */
+#define MC_CMD_PHYXS_SYNC 0x12
+/* enum: AN link-up. */
+#define MC_CMD_AN_LINK_UP 0x13
+/* enum: AN Complete. */
+#define MC_CMD_AN_COMPLETE 0x14
+/* enum: AN 10GBaseT Status. */
+#define MC_CMD_AN_10GBT_STATUS 0x15
+/* enum: Clause 22 Link-Up. */
+#define MC_CMD_CL22_LINK_UP 0x16
+/* enum: (Last entry) */
+#define MC_CMD_PHY_NSTATS 0x17
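
When DMA_ADDR is 0 the statistics arrive inline in the response; a sketch
of indexing them, assuming a little-endian host so the payload can be
viewed directly as a 32-bit array:

#include <stdint.h>

/* `stats` points at the NO_DMA_STATISTICS array in the response; `which`
 * is one of the enum values above, e.g. MC_CMD_PMA_PMD_LINK_UP. */
static uint32_t phy_stat(const uint32_t *stats, unsigned int which)
{
        return which < 0x17 ? stats[which] : 0; /* 0x17 == MC_CMD_PHY_NSTATS */
}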
/***********************************/
/* MC_CMD_MAC_STATS
- * Get generic MAC statistics.
+ * Get generic MAC statistics. This call returns unified statistics maintained
+ * by the MC as it switches between the GMAC and XMAC. The MC will write out
+ * all supported stats. The driver should zero initialise the buffer to
+ * guarantee consistent results. If the DMA_ADDR is 0, then no DMA is
+ * performed, and the statistics may be read from the message response. If
+ * DMA_ADDR != 0, then the statistics are DMAed to that (page-aligned)
+ * location.
+ * Locks required: None. Returns: 0, ETIME
*/
#define MC_CMD_MAC_STATS 0x2e
/* MC_CMD_MAC_STATS_IN msgrequest */
#define MC_CMD_MAC_STATS_IN_LEN 16
+/* ??? */
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
@@ -1684,6 +2599,7 @@
/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
+/* this is only used for the first record */
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
@@ -1713,7 +2629,23 @@
/***********************************/
/* MC_CMD_MEMCPY
- * Perform memory copy operation.
+ * DMA write data into (Rid,Addr), either by DMA reading (Rid,Addr), or by data
+ * embedded directly in the command.
+ *
+ * A common pattern is for a client to use generation counts to signal a DMA
+ * update of a data structure. To facilitate this, this MCDI operation can
+ * contain multiple requests which are executed in strict order. Requests take
+ * the form of duplicating the entire MCDI request continuously (including the
+ * NUM_RECORDS field, which is ignored in all but the first structure).
+ *
+ * The source data can either come from a DMA from the host, or it can be
+ * embedded within the request directly, thereby eliminating a DMA read. To
+ * indicate this, the client sets FROM_RID=%RID_INLINE, ADDR_HI=0, and
+ * ADDR_LO=offset, and inserts the data at %offset from the start of the
+ * payload. It's the caller's responsibility to ensure that the embedded data
+ * doesn't overlap the records.
+ *
+ * Returns: 0, EINVAL (invalid RID)
*/
#define MC_CMD_MEMCPY 0x31
@@ -1721,6 +2653,7 @@
#define MC_CMD_MEMCPY_IN_LENMIN 32
#define MC_CMD_MEMCPY_IN_LENMAX 224
#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num))
+/* see MC_CMD_MEMCPY_RECORD_TYPEDEF */
#define MC_CMD_MEMCPY_IN_RECORD_OFST 0
#define MC_CMD_MEMCPY_IN_RECORD_LEN 32
#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1
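
A small sketch of the inline-data convention from the command comment
above: records are MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN (32) bytes each, so
data placed immediately after the record array cannot overlap the records:

/* Byte offset (for ADDR_LO with FROM_RID == RID_INLINE) of inline data
 * placed just past `num_records` records. */
static unsigned int memcpy_inline_offset(unsigned int num_records)
{
        return 32 * num_records;
}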
@@ -1741,14 +2674,22 @@
#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */
#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */
+/* A type value of 1 is unused. */
#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
-#define MC_CMD_WOL_TYPE_MAGIC 0x0 /* enum */
-#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2 /* enum */
-#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3 /* enum */
-#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4 /* enum */
-#define MC_CMD_WOL_TYPE_BITMAP 0x5 /* enum */
-#define MC_CMD_WOL_TYPE_LINK 0x6 /* enum */
-#define MC_CMD_WOL_TYPE_MAX 0x7 /* enum */
+/* enum: Magic */
+#define MC_CMD_WOL_TYPE_MAGIC 0x0
+/* enum: MS Windows Magic */
+#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2
+/* enum: IPv4 Syn */
+#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3
+/* enum: IPv6 Syn */
+#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4
+/* enum: Bitmap */
+#define MC_CMD_WOL_TYPE_BITMAP 0x5
+/* enum: Link */
+#define MC_CMD_WOL_TYPE_LINK 0x6
+/* enum: (Above this for future use) */
+#define MC_CMD_WOL_TYPE_MAX 0x7
#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8
#define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4
#define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46
@@ -1818,7 +2759,7 @@
/***********************************/
/* MC_CMD_WOL_FILTER_REMOVE
- * Remove a WoL filter.
+ * Remove a WoL filter. Locks required: None. Returns: 0, EINVAL, ENOSYS
*/
#define MC_CMD_WOL_FILTER_REMOVE 0x33
@@ -1832,7 +2773,8 @@
/***********************************/
/* MC_CMD_WOL_FILTER_RESET
- * Reset (i.e. remove all) WoL filters.
+ * Reset (i.e. remove all) WoL filters. Locks required: None. Returns: 0,
+ * ENOSYS
*/
#define MC_CMD_WOL_FILTER_RESET 0x34
@@ -1848,7 +2790,7 @@
/***********************************/
/* MC_CMD_SET_MCAST_HASH
- * Set the MCASH hash value.
+ * Set the MCAST hash value without otherwise reconfiguring the MAC
*/
#define MC_CMD_SET_MCAST_HASH 0x35
@@ -1865,7 +2807,8 @@
/***********************************/
/* MC_CMD_NVRAM_TYPES
- * Get virtual NVRAM partitions information.
+ * Return bitfield indicating available types of virtual NVRAM partitions.
+ * Locks required: none. Returns: 0
*/
#define MC_CMD_NVRAM_TYPES 0x36
@@ -1874,26 +2817,54 @@
/* MC_CMD_NVRAM_TYPES_OUT msgresponse */
#define MC_CMD_NVRAM_TYPES_OUT_LEN 4
+/* Bit mask of supported types. */
#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
-#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0 /* enum */
-#define MC_CMD_NVRAM_TYPE_MC_FW 0x1 /* enum */
-#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2 /* enum */
-#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3 /* enum */
-#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4 /* enum */
-#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5 /* enum */
-#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6 /* enum */
-#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7 /* enum */
-#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8 /* enum */
-#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9 /* enum */
-#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa /* enum */
-#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb /* enum */
-#define MC_CMD_NVRAM_TYPE_LOG 0xc /* enum */
-#define MC_CMD_NVRAM_TYPE_FPGA 0xd /* enum */
+/* enum: Disabled callisto. */
+#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0
+/* enum: MC firmware. */
+#define MC_CMD_NVRAM_TYPE_MC_FW 0x1
+/* enum: MC backup firmware. */
+#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2
+/* enum: Static configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3
+/* enum: Static configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4
+/* enum: Dynamic configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5
+/* enum: Dynamic configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6
+/* enum: Expansion Rom. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7
+/* enum: Expansion Rom Configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8
+/* enum: Expansion Rom Configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9
+/* enum: Phy Configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa
+/* enum: Phy Configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb
+/* enum: Log. */
+#define MC_CMD_NVRAM_TYPE_LOG 0xc
+/* enum: FPGA image. */
+#define MC_CMD_NVRAM_TYPE_FPGA 0xd
+/* enum: FPGA backup image */
+#define MC_CMD_NVRAM_TYPE_FPGA_BACKUP 0xe
+/* enum: FC firmware. */
+#define MC_CMD_NVRAM_TYPE_FC_FW 0xf
+/* enum: FC backup firmware. */
+#define MC_CMD_NVRAM_TYPE_FC_FW_BACKUP 0x10
+/* enum: CPLD image. */
+#define MC_CMD_NVRAM_TYPE_CPLD 0x11
+/* enum: Licensing information. */
+#define MC_CMD_NVRAM_TYPE_LICENSE 0x12
+/* enum: FC Log. */
+#define MC_CMD_NVRAM_TYPE_FC_LOG 0x13
/***********************************/
/* MC_CMD_NVRAM_INFO
- * Read info about a virtual NVRAM partition.
+ * Read info about a virtual NVRAM partition. Locks required: none. Returns: 0,
+ * EINVAL (bad type).
*/
#define MC_CMD_NVRAM_INFO 0x37
@@ -1913,13 +2884,19 @@
#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0
#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1
+#define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7
+#define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1
#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
/***********************************/
/* MC_CMD_NVRAM_UPDATE_START
- * Start a group of update operations on a virtual NVRAM partition.
+ * Start a group of update operations on a virtual NVRAM partition. Locks
+ * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type), EACCES (if
+ * PHY_LOCK required and not held).
*/
#define MC_CMD_NVRAM_UPDATE_START 0x38
@@ -1935,7 +2912,9 @@
/***********************************/
/* MC_CMD_NVRAM_READ
- * Read data from a virtual NVRAM partition.
+ * Read data from a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
*/
#define MC_CMD_NVRAM_READ 0x39
@@ -1945,6 +2924,7 @@
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
+/* amount to read in bytes */
#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
/* MC_CMD_NVRAM_READ_OUT msgresponse */
@@ -1959,7 +2939,9 @@
/***********************************/
/* MC_CMD_NVRAM_WRITE
- * Write data to a virtual NVRAM partition.
+ * Write data to a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
*/
#define MC_CMD_NVRAM_WRITE 0x3a
@@ -1983,7 +2965,9 @@
/***********************************/
/* MC_CMD_NVRAM_ERASE
- * Erase sector(s) from a virtual NVRAM partition.
+ * Erase sector(s) from a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
*/
#define MC_CMD_NVRAM_ERASE 0x3b
@@ -2001,7 +2985,9 @@
/***********************************/
/* MC_CMD_NVRAM_UPDATE_FINISH
- * Finish a group of update operations on a virtual NVRAM partition.
+ * Finish a group of update operations on a virtual NVRAM partition. Locks
+ * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad
+ * type/offset/length), EACCES (if PHY_LOCK required and not held)
*/
#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
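
A hedged sketch of the update sequence implied by the commands above;
mcdi_rpc() and encode_nvram_type() are hypothetical stand-ins for the
driver's transport and request encoders, and the real UPDATE_FINISH
request may carry more than the TYPE dword:

#include <stddef.h>

int mcdi_rpc(unsigned int cmd, const void *in, size_t inlen);
size_t encode_nvram_type(void *buf, unsigned int type); /* TYPE dword at OFST 0 */

/* Writes and erases must be bracketed by UPDATE_START/UPDATE_FINISH. */
static int nvram_rewrite(unsigned int type)
{
        unsigned char req[16];
        size_t n = encode_nvram_type(req, type);
        int rc;

        rc = mcdi_rpc(0x38 /* MC_CMD_NVRAM_UPDATE_START */, req, n);
        if (rc)
                return rc;
        /* ... MC_CMD_NVRAM_ERASE and MC_CMD_NVRAM_WRITE chunks here ... */
        return mcdi_rpc(0x3c /* MC_CMD_NVRAM_UPDATE_FINISH */, req, n);
}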
@@ -2019,6 +3005,20 @@
/***********************************/
/* MC_CMD_REBOOT
* Reboot the MC.
+ *
+ * The AFTER_ASSERTION flag is intended to be used when the driver notices an
+ * assertion failure (at which point it is expected to perform a complete tear
+ * down and reinitialise), to allow both ports to reset the MC once in an
+ * atomic fashion.
+ *
+ * Production mc firmwares are generally compiled with REBOOT_ON_ASSERT=1,
+ * which means that they will automatically reboot out of the assertion
+ * handler, so this is in practice an optional operation. It is still
+ * recommended that drivers execute this to support custom firmwares with
+ * REBOOT_ON_ASSERT=0.
+ *
+ * Locks required: NONE. Returns: Nothing. You get back a response with
+ * ERR=1, DATALEN=0.
*/
#define MC_CMD_REBOOT 0x3d
@@ -2033,7 +3033,9 @@
/***********************************/
/* MC_CMD_SCHEDINFO
- * Request scheduler info.
+ * Request scheduler info. Locks required: NONE. Returns: An array of
+ * (timeslice, maximum overrun), one for each thread, in ascending order of
+ * thread address.
*/
#define MC_CMD_SCHEDINFO 0x3e
@@ -2052,14 +3054,24 @@
/***********************************/
/* MC_CMD_REBOOT_MODE
+ * Set the mode for the next MC reboot. Locks required: NONE. Sets the reboot
+ * mode to the specified value. Returns the old mode.
*/
#define MC_CMD_REBOOT_MODE 0x3f
/* MC_CMD_REBOOT_MODE_IN msgrequest */
#define MC_CMD_REBOOT_MODE_IN_LEN 4
#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
-#define MC_CMD_REBOOT_MODE_NORMAL 0x0 /* enum */
-#define MC_CMD_REBOOT_MODE_SNAPPER 0x3 /* enum */
+/* enum: Normal. */
+#define MC_CMD_REBOOT_MODE_NORMAL 0x0
+/* enum: Power-on Reset. */
+#define MC_CMD_REBOOT_MODE_POR 0x2
+/* enum: Snapper. */
+#define MC_CMD_REBOOT_MODE_SNAPPER 0x3
+/* enum: snapper fake POR */
+#define MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4
+#define MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7
+#define MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1
/* MC_CMD_REBOOT_MODE_OUT msgresponse */
#define MC_CMD_REBOOT_MODE_OUT_LEN 4
@@ -2069,32 +3081,145 @@
/***********************************/
/* MC_CMD_SENSOR_INFO
* Returns information about every available sensor.
+ *
+ * Each sensor has a single (16bit) value, and a corresponding state. The
+ * mapping between value and state is nominally determined by the MC, but may
+ * be implemented using up to 2 ranges per sensor.
+ *
+ * This call returns a mask (32bit) of the sensors that are supported by this
+ * platform, then an array of sensor information structures, in order of sensor
+ * type (but without gaps for unimplemented sensors). Each structure defines
+ * the ranges for the corresponding sensor. An unused range is indicated by
+ * equal limit values. If one range is used, a value outside that range results
+ * in STATE_FATAL. If two ranges are used, a value outside the second range
+ * results in STATE_FATAL while a value outside the first and inside the second
+ * range results in STATE_WARNING.
+ *
+ * Sensor masks and sensor information arrays are organised into pages. For
+ * backward compatibility, older host software can only use sensors in page 0.
+ * Bit 31 in the sensor mask was previously unused, and is now reserved for use
+ * as the next page flag.
+ *
+ * If the request does not contain a PAGE value then firmware will only return
+ * page 0 of sensor information, with bit 31 in the sensor mask cleared.
+ *
+ * If the request contains a PAGE value then firmware responds with the sensor
+ * mask and sensor information array for that page of sensors. In this case bit
+ * 31 in the mask is set if another page exists.
+ *
+ * Locks required: None. Returns: 0
*/
#define MC_CMD_SENSOR_INFO 0x41
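
A sketch of the two-range semantics described above, with plain ints for
the MIN1/MAX1/MIN2/MAX2 limits; the 0/1/2 return values mirror
MC_CMD_SENSOR_STATE_OK/WARNING/FATAL:

/* An unused range is indicated by equal limits. */
static int classify_sensor(int v, int min1, int max1, int min2, int max2)
{
        int in1 = (v >= min1 && v <= max1);

        if (min2 == max2)           /* only one range in use */
                return in1 ? 0 : 2; /* outside it: FATAL */
        if (v < min2 || v > max2)
                return 2;           /* outside second range: FATAL */
        return in1 ? 0 : 1;         /* inside 2nd, outside 1st: WARNING */
}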
/* MC_CMD_SENSOR_INFO_IN msgrequest */
#define MC_CMD_SENSOR_INFO_IN_LEN 0
+/* MC_CMD_SENSOR_INFO_EXT_IN msgrequest */
+#define MC_CMD_SENSOR_INFO_EXT_IN_LEN 4
+/* Which page of sensors to report.
+ *
+ * Page 0 contains sensors 0 to 30 (sensor 31 is the next page bit).
+ *
+ * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit), etc.
+ */
+#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
+
/* MC_CMD_SENSOR_INFO_OUT msgresponse */
#define MC_CMD_SENSOR_INFO_OUT_LENMIN 12
#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252
#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
-#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0 /* enum */
-#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1 /* enum */
-#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2 /* enum */
-#define MC_CMD_SENSOR_PHY0_TEMP 0x3 /* enum */
-#define MC_CMD_SENSOR_PHY0_COOLING 0x4 /* enum */
-#define MC_CMD_SENSOR_PHY1_TEMP 0x5 /* enum */
-#define MC_CMD_SENSOR_PHY1_COOLING 0x6 /* enum */
-#define MC_CMD_SENSOR_IN_1V0 0x7 /* enum */
-#define MC_CMD_SENSOR_IN_1V2 0x8 /* enum */
-#define MC_CMD_SENSOR_IN_1V8 0x9 /* enum */
-#define MC_CMD_SENSOR_IN_2V5 0xa /* enum */
-#define MC_CMD_SENSOR_IN_3V3 0xb /* enum */
-#define MC_CMD_SENSOR_IN_12V0 0xc /* enum */
-#define MC_CMD_SENSOR_IN_1V2A 0xd /* enum */
-#define MC_CMD_SENSOR_IN_VREF 0xe /* enum */
+/* enum: Controller temperature: degC */
+#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0
+/* enum: Phy common temperature: degC */
+#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1
+/* enum: Controller cooling: bool */
+#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2
+/* enum: Phy 0 temperature: degC */
+#define MC_CMD_SENSOR_PHY0_TEMP 0x3
+/* enum: Phy 0 cooling: bool */
+#define MC_CMD_SENSOR_PHY0_COOLING 0x4
+/* enum: Phy 1 temperature: degC */
+#define MC_CMD_SENSOR_PHY1_TEMP 0x5
+/* enum: Phy 1 cooling: bool */
+#define MC_CMD_SENSOR_PHY1_COOLING 0x6
+/* enum: 1.0v power: mV */
+#define MC_CMD_SENSOR_IN_1V0 0x7
+/* enum: 1.2v power: mV */
+#define MC_CMD_SENSOR_IN_1V2 0x8
+/* enum: 1.8v power: mV */
+#define MC_CMD_SENSOR_IN_1V8 0x9
+/* enum: 2.5v power: mV */
+#define MC_CMD_SENSOR_IN_2V5 0xa
+/* enum: 3.3v power: mV */
+#define MC_CMD_SENSOR_IN_3V3 0xb
+/* enum: 12v power: mV */
+#define MC_CMD_SENSOR_IN_12V0 0xc
+/* enum: 1.2v analogue power: mV */
+#define MC_CMD_SENSOR_IN_1V2A 0xd
+/* enum: reference voltage: mV */
+#define MC_CMD_SENSOR_IN_VREF 0xe
+/* enum: AOE FPGA power: mV */
+#define MC_CMD_SENSOR_OUT_VAOE 0xf
+/* enum: AOE FPGA temperature: degC */
+#define MC_CMD_SENSOR_AOE_TEMP 0x10
+/* enum: AOE FPGA PSU temperature: degC */
+#define MC_CMD_SENSOR_PSU_AOE_TEMP 0x11
+/* enum: AOE PSU temperature: degC */
+#define MC_CMD_SENSOR_PSU_TEMP 0x12
+/* enum: Fan 0 speed: RPM */
+#define MC_CMD_SENSOR_FAN_0 0x13
+/* enum: Fan 1 speed: RPM */
+#define MC_CMD_SENSOR_FAN_1 0x14
+/* enum: Fan 2 speed: RPM */
+#define MC_CMD_SENSOR_FAN_2 0x15
+/* enum: Fan 3 speed: RPM */
+#define MC_CMD_SENSOR_FAN_3 0x16
+/* enum: Fan 4 speed: RPM */
+#define MC_CMD_SENSOR_FAN_4 0x17
+/* enum: AOE FPGA input power: mV */
+#define MC_CMD_SENSOR_IN_VAOE 0x18
+/* enum: AOE FPGA current: mA */
+#define MC_CMD_SENSOR_OUT_IAOE 0x19
+/* enum: AOE FPGA input current: mA */
+#define MC_CMD_SENSOR_IN_IAOE 0x1a
+/* enum: NIC power consumption: W */
+#define MC_CMD_SENSOR_NIC_POWER 0x1b
+/* enum: 0.9v power voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9 0x1c
+/* enum: 0.9v power current: mA */
+#define MC_CMD_SENSOR_IN_I0V9 0x1d
+/* enum: 1.2v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V2 0x1e
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE0_NEXT 0x1f
+/* enum: 0.9v power voltage (at ADC): mV */
+#define MC_CMD_SENSOR_IN_0V9_ADC 0x20
+/* enum: Controller temperature 2: degC */
+#define MC_CMD_SENSOR_CONTROLLER_2_TEMP 0x21
+/* enum: Voltage regulator internal temperature: degC */
+#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22
+/* enum: 0.9V voltage regulator temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_TEMP 0x23
+/* enum: 1.2V voltage regulator temperature: degC */
+#define MC_CMD_SENSOR_VREG_1V2_TEMP 0x24
+/* enum: controller internal temperature sensor voltage (internal ADC): mV */
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25
+/* enum: controller internal temperature (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26
+/* enum: controller internal temperature sensor voltage (external ADC): mV */
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27
+/* enum: controller internal temperature (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28
+/* enum: ambient temperature: degC */
+#define MC_CMD_SENSOR_AMBIENT_TEMP 0x29
+/* enum: air flow: bool */
+#define MC_CMD_SENSOR_AIRFLOW 0x2a
+/* enum: voltage between VDD08D and VSS08D at CSR: mV */
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b
+/* enum: voltage between VDD08D and VSS08D at CSR (external ADC): mV */
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
#define MC_CMD_SENSOR_ENTRY_OFST 4
#define MC_CMD_SENSOR_ENTRY_LEN 8
#define MC_CMD_SENSOR_ENTRY_LO_OFST 4
@@ -2102,6 +3227,23 @@
#define MC_CMD_SENSOR_ENTRY_MINNUM 1
#define MC_CMD_SENSOR_ENTRY_MAXNUM 31
+/* MC_CMD_SENSOR_INFO_EXT_OUT msgresponse */
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 12
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
+#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO_OUT */
+#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31
+#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_WIDTH 1
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
+/* MC_CMD_SENSOR_ENTRY_OFST 4 */
+/* MC_CMD_SENSOR_ENTRY_LEN 8 */
+/* MC_CMD_SENSOR_ENTRY_LO_OFST 4 */
+/* MC_CMD_SENSOR_ENTRY_HI_OFST 8 */
+/* MC_CMD_SENSOR_ENTRY_MINNUM 1 */
+/* MC_CMD_SENSOR_ENTRY_MAXNUM 31 */
+
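A hedged sketch of walking the sensor pages as described above;
read_sensor_page() is a hypothetical wrapper around the EXT_IN/EXT_OUT
exchange that returns the MASK dword for the requested page:

#include <stdint.h>

uint32_t read_sensor_page(unsigned int page);

static unsigned int count_sensor_pages(void)
{
        unsigned int page = 0;

        /* Bit 31 of the mask is the NEXT_PAGE flag (..._NEXT_PAGE_LBN 31). */
        while (read_sensor_page(page) & (1u << 31))
                page++;
        return page + 1;
}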
/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */
#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN 8
#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_OFST 0
@@ -2124,39 +3266,80 @@
/***********************************/
/* MC_CMD_READ_SENSORS
- * Returns the current reading from each sensor.
+ * Returns the current reading from each sensor. DMAs an array of sensor
+ * readings, in order of sensor type (but without gaps for unimplemented
+ * sensors), into host memory. Each array element is a
+ * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF dword.
+ *
+ * If the request does not contain the LENGTH field then only sensors 0 to 30
+ * are reported, to avoid DMA buffer overflow in older host software. If the
+ * sensor readings require more space than the LENGTH allows, then EINVAL is
+ * returned.
+ *
+ * The MC will send a SENSOREVT event every time any sensor changes state. The
+ * driver is responsible for ensuring that it doesn't miss any events. The
+ * board will function normally if all sensors are in STATE_OK or
+ * STATE_WARNING. Otherwise the board should not be expected to function.
*/
#define MC_CMD_READ_SENSORS 0x42
/* MC_CMD_READ_SENSORS_IN msgrequest */
#define MC_CMD_READ_SENSORS_IN_LEN 8
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
+/* MC_CMD_READ_SENSORS_EXT_IN msgrequest */
+#define MC_CMD_READ_SENSORS_EXT_IN_LEN 12
+/* DMA address of host buffer for sensor readings */
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4
+/* Size in bytes of host buffer. */
+#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8
+
/* MC_CMD_READ_SENSORS_OUT msgresponse */
#define MC_CMD_READ_SENSORS_OUT_LEN 0
+/* MC_CMD_READ_SENSORS_EXT_OUT msgresponse */
+#define MC_CMD_READ_SENSORS_EXT_OUT_LEN 0
+
/* MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF structuredef */
-#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 3
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 4
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_OFST 0
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LEN 2
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN 0
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_WIDTH 16
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1
-#define MC_CMD_SENSOR_STATE_OK 0x0 /* enum */
-#define MC_CMD_SENSOR_STATE_WARNING 0x1 /* enum */
-#define MC_CMD_SENSOR_STATE_FATAL 0x2 /* enum */
-#define MC_CMD_SENSOR_STATE_BROKEN 0x3 /* enum */
+/* enum: Ok. */
+#define MC_CMD_SENSOR_STATE_OK 0x0
+/* enum: Breached warning threshold. */
+#define MC_CMD_SENSOR_STATE_WARNING 0x1
+/* enum: Breached fatal threshold. */
+#define MC_CMD_SENSOR_STATE_FATAL 0x2
+/* enum: Fault with sensor. */
+#define MC_CMD_SENSOR_STATE_BROKEN 0x3
+/* enum: Sensor is working but does not currently have a reading. */
+#define MC_CMD_SENSOR_STATE_NO_READING 0x4
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LEN 1
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LBN 24
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_WIDTH 8
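
A sketch of unpacking one reading using the LBN/WIDTH pairs above (VALUE
at bit 0, STATE at bit 16, TYPE at bit 24), assuming the dword has already
been read from the DMA buffer in host order:

#include <stdint.h>

struct sensor_reading {
        uint16_t value; /* 16-bit reading */
        uint8_t state;  /* MC_CMD_SENSOR_STATE_* */
        uint8_t type;   /* index into the sensor enum */
};

static struct sensor_reading unpack_sensor(uint32_t dword)
{
        struct sensor_reading r = {
                .value = dword & 0xffff,
                .state = (dword >> 16) & 0xff,
                .type  = (dword >> 24) & 0xff,
        };
        return r;
}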
/***********************************/
/* MC_CMD_GET_PHY_STATE
- * Report current state of PHY.
+ * Report current state of PHY. A 'zombie' PHY is a PHY that has failed to boot
+ * (e.g. due to missing or corrupted firmware). Locks required: None. Return
+ * code: 0
*/
#define MC_CMD_GET_PHY_STATE 0x43
@@ -2166,13 +3349,16 @@
/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
-#define MC_CMD_PHY_STATE_OK 0x1 /* enum */
-#define MC_CMD_PHY_STATE_ZOMBIE 0x2 /* enum */
+/* enum: Ok. */
+#define MC_CMD_PHY_STATE_OK 0x1
+/* enum: Faulty. */
+#define MC_CMD_PHY_STATE_ZOMBIE 0x2
/***********************************/
/* MC_CMD_SETUP_8021QBB
- * 802.1Qbb control.
+ * 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to
+ * disable 802.1Qbb for a given priority.
*/
#define MC_CMD_SETUP_8021QBB 0x44
@@ -2187,7 +3373,7 @@
/***********************************/
/* MC_CMD_WOL_FILTER_GET
- * Retrieve ID of any WoL filters.
+ * Retrieve ID of any WoL filters. Locks required: None. Returns: 0, ENOSYS
*/
#define MC_CMD_WOL_FILTER_GET 0x45
@@ -2201,7 +3387,8 @@
/***********************************/
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD
- * Add a protocol offload to NIC for lights-out state.
+ * Add a protocol offload to NIC for lights-out state. Locks required: None.
+ * Returns: 0, ENOSYS
*/
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
@@ -2241,7 +3428,8 @@
/***********************************/
/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD
- * Remove a protocol offload from NIC for lights-out state.
+ * Remove a protocol offload from NIC for lights-out state. Locks required:
+ * None. Returns: 0, ENOSYS
*/
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
@@ -2256,7 +3444,7 @@
/***********************************/
/* MC_CMD_MAC_RESET_RESTORE
- * Restore MAC after block reset.
+ * Restore MAC after block reset. Locks required: None. Returns: 0.
*/
#define MC_CMD_MAC_RESET_RESTORE 0x48
@@ -2269,6 +3457,9 @@
/***********************************/
/* MC_CMD_TESTASSERT
+ * Deliberately trigger an assert-detonation in the firmware for testing
+ * purposes (i.e. to allow testing that the driver copes gracefully). Locks
+ * required: None. Returns: 0
*/
#define MC_CMD_TESTASSERT 0x49
@@ -2281,14 +3472,23 @@
/***********************************/
/* MC_CMD_WORKAROUND
- * Enable/Disable a given workaround.
+ * Enable/Disable a given workaround. The mcfw will return EINVAL if it doesn't
+ * understand the given workaround number - which should not be treated as a
+ * hard error by client code. This op does not imply any semantics about each
+ * workaround, that's between the driver and the mcfw on a per-workaround
+ * basis. Locks required: None. Returns: 0, EINVAL.
*/
#define MC_CMD_WORKAROUND 0x4a
/* MC_CMD_WORKAROUND_IN msgrequest */
#define MC_CMD_WORKAROUND_IN_LEN 8
#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
-#define MC_CMD_WORKAROUND_BUG17230 0x1 /* enum */
+/* enum: Bug 17230 workaround. */
+#define MC_CMD_WORKAROUND_BUG17230 0x1
+/* enum: Bug 35388 workaround (unsafe EVQ writes). */
+#define MC_CMD_WORKAROUND_BUG35388 0x2
+/* enum: Bug 35017 workaround (A64 tables must be identity map). */
+#define MC_CMD_WORKAROUND_BUG35017 0x3
#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
/* MC_CMD_WORKAROUND_OUT msgresponse */
@@ -2297,7 +3497,12 @@
/***********************************/
/* MC_CMD_GET_PHY_MEDIA_INFO
- * Read media-specific data from PHY.
+ * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for
+ * SFP+ PHYs). The 'media type' can be found via GET_PHY_CFG
+ * (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid 'page number' input values, and the
+ * output data, are interpreted on a per-type basis. For SFP+: PAGE=0 or 1
+ * returns a 128-byte block read from module I2C address 0xA0 offset 0 or 0x80.
+ * Anything else: currently undefined. Locks required: None. Return code: 0.
*/
#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
@@ -2309,6 +3514,7 @@
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
+/* in bytes */
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
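
For SFP+ modules, a sketch of the PAGE-to-module-offset mapping described
in the command comment above (each page is a 128-byte block at I2C address
0xA0):

/* Valid for PAGE == 0 or 1 only; other values are currently undefined. */
static unsigned int sfp_module_offset(unsigned int page)
{
        return page * 0x80;
}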
@@ -2318,7 +3524,8 @@
/***********************************/
/* MC_CMD_NVRAM_TEST
- * Test a particular NVRAM partition.
+ * Test a particular NVRAM partition for valid contents (where "valid" depends
+ * on the type of partition).
*/
#define MC_CMD_NVRAM_TEST 0x4c
@@ -2331,22 +3538,31 @@
/* MC_CMD_NVRAM_TEST_OUT msgresponse */
#define MC_CMD_NVRAM_TEST_OUT_LEN 4
#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
-#define MC_CMD_NVRAM_TEST_PASS 0x0 /* enum */
-#define MC_CMD_NVRAM_TEST_FAIL 0x1 /* enum */
-#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2 /* enum */
+/* enum: Passed. */
+#define MC_CMD_NVRAM_TEST_PASS 0x0
+/* enum: Failed. */
+#define MC_CMD_NVRAM_TEST_FAIL 0x1
+/* enum: Not supported. */
+#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2
/***********************************/
/* MC_CMD_MRSFP_TWEAK
- * Read status and/or set parameters for the 'mrsfp' driver.
+ * Read status and/or set parameters for the 'mrsfp' driver in mr_rusty builds.
+ * I2C I/O expander bits are always read; if equaliser parameters are supplied,
+ * they are configured first. Locks required: None. Return code: 0, EINVAL.
*/
#define MC_CMD_MRSFP_TWEAK 0x4d
/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
+/* 0-6 low->high de-emph. */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
+/* 0-8 low->high ref.V */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
+/* 0-8 low->high boost */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
+/* 0-8 low->high ref.V */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
@@ -2354,16 +3570,23 @@
/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */
#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
+/* input bits */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
+/* output bits */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
+/* direction */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0 /* enum */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1 /* enum */
+/* enum: Out. */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
+/* enum: In. */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1
/***********************************/
/* MC_CMD_SENSOR_SET_LIMS
- * Adjusts the sensor limits.
+ * Adjusts the sensor limits. This is a warranty-voiding operation. Returns:
+ * ENOENT if the sensor specified does not exist, EINVAL if the limits are out
+ * of range.
*/
#define MC_CMD_SENSOR_SET_LIMS 0x4e
@@ -2372,9 +3595,13 @@
#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
/* Enum values, see field(s): */
/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
@@ -2396,9 +3623,3640 @@
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
+
+/***********************************/
+/* MC_CMD_NVRAM_PARTITIONS
+ * Reads the list of available virtual NVRAM partition types. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_PARTITIONS 0x51
+
+/* MC_CMD_NVRAM_PARTITIONS_IN msgrequest */
+#define MC_CMD_NVRAM_PARTITIONS_IN_LEN 0
+
+/* MC_CMD_NVRAM_PARTITIONS_OUT msgresponse */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX 252
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num))
+/* total number of partitions */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0
+/* type ID code for each of NUM_PARTITIONS partitions */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MINNUM 0
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM 62
+
+
+/***********************************/
+/* MC_CMD_NVRAM_METADATA
+ * Reads soft metadata for a virtual NVRAM partition type. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_METADATA 0x52
+
+/* MC_CMD_NVRAM_METADATA_IN msgrequest */
+#define MC_CMD_NVRAM_METADATA_IN_LEN 4
+/* Partition type ID code */
+#define MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0
+
+/* MC_CMD_NVRAM_METADATA_OUT msgresponse */
+#define MC_CMD_NVRAM_METADATA_OUT_LENMIN 20
+#define MC_CMD_NVRAM_METADATA_OUT_LENMAX 252
+#define MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num))
+/* Partition type ID code */
+#define MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_WIDTH 1
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_LBN 2
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1
+/* Subtype ID code for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8
+/* 1st component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2
+/* 2nd component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_OFST 14
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_LEN 2
+/* 3rd component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_OFST 16
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_LEN 2
+/* 4th component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_OFST 18
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_LEN 2
+/* Zero-terminated string describing the content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_OFST 20
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_LEN 1
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MINNUM 0
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM 232
+
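A sketch of formatting the version above, assuming the four 16-bit fields
at offsets 12/14/16/18 have already been decoded little-endian and that
VERSION_VALID was set in FLAGS:

#include <stdint.h>
#include <stdio.h>

static void format_nvram_version(const uint16_t wxyz[4], char *out, size_t len)
{
        snprintf(out, len, "%u.%u.%u.%u", wxyz[0], wxyz[1], wxyz[2], wxyz[3]);
}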
+
+/***********************************/
+/* MC_CMD_GET_MAC_ADDRESSES
+ * Returns the base MAC, count and stride for the requesting function.
+ */
+#define MC_CMD_GET_MAC_ADDRESSES 0x55
+
+/* MC_CMD_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_GET_MAC_ADDRESSES_IN_LEN 0
+
+/* MC_CMD_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_LEN 16
+/* Base MAC address */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_OFST 0
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_LEN 6
+/* Padding */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_OFST 6
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2
+/* Number of allocated MAC addresses */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8
+/* Spacing of allocated MAC addresses */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
+
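A sketch of deriving the Nth address from the BASE/COUNT/STRIDE triple
above, assuming the stride applies to the MAC viewed as a 48-bit
big-endian integer:

#include <stdint.h>

static int nth_mac(const uint8_t base[6], uint32_t count, uint32_t stride,
                   uint32_t n, uint8_t out[6])
{
        uint64_t v = 0;
        int i;

        if (n >= count)
                return -1;
        for (i = 0; i < 6; i++)
                v = (v << 8) | base[i];
        v += (uint64_t)n * stride;
        for (i = 5; i >= 0; i--) {
                out[i] = v & 0xff;
                v >>= 8;
        }
        return 0;
}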
/* MC_CMD_RESOURCE_SPECIFIER enum */
-#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff /* enum */
-#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe /* enum */
+/* enum: Any */
+#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
+/* enum: None */
+#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe
+
+/* EVB_PORT_ID structuredef */
+#define EVB_PORT_ID_LEN 4
+#define EVB_PORT_ID_PORT_ID_OFST 0
+/* enum: An invalid port handle. */
+#define EVB_PORT_ID_NULL 0x0
+/* enum: The port assigned to this function. */
+#define EVB_PORT_ID_ASSIGNED 0x1000000
+/* enum: External network port 0 */
+#define EVB_PORT_ID_MAC0 0x2000000
+/* enum: External network port 1 */
+#define EVB_PORT_ID_MAC1 0x2000001
+/* enum: External network port 2 */
+#define EVB_PORT_ID_MAC2 0x2000002
+/* enum: External network port 3 */
+#define EVB_PORT_ID_MAC3 0x2000003
+#define EVB_PORT_ID_PORT_ID_LBN 0
+#define EVB_PORT_ID_PORT_ID_WIDTH 32
+
+/* EVB_VLAN_TAG structuredef */
+#define EVB_VLAN_TAG_LEN 2
+/* The VLAN tag value */
+#define EVB_VLAN_TAG_VLAN_ID_LBN 0
+#define EVB_VLAN_TAG_VLAN_ID_WIDTH 12
+#define EVB_VLAN_TAG_MODE_LBN 12
+#define EVB_VLAN_TAG_MODE_WIDTH 4
+/* enum: Insert the VLAN. */
+#define EVB_VLAN_TAG_INSERT 0x0
+/* enum: Replace the VLAN if already present. */
+#define EVB_VLAN_TAG_REPLACE 0x1
+
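A sketch of packing a tag from the LBN/WIDTH pairs above (VLAN_ID in bits
0-11, MODE in bits 12-15):

#include <stdint.h>

static uint16_t evb_vlan_tag(uint16_t vlan_id, uint8_t mode)
{
        return (uint16_t)(((mode & 0xf) << 12) | (vlan_id & 0xfff));
}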
+/* BUFTBL_ENTRY structuredef */
+#define BUFTBL_ENTRY_LEN 12
+/* the owner ID */
+#define BUFTBL_ENTRY_OID_OFST 0
+#define BUFTBL_ENTRY_OID_LEN 2
+#define BUFTBL_ENTRY_OID_LBN 0
+#define BUFTBL_ENTRY_OID_WIDTH 16
+/* the page parameter as one of ESE_DZ_SMC_PAGE_SIZE_ */
+#define BUFTBL_ENTRY_PGSZ_OFST 2
+#define BUFTBL_ENTRY_PGSZ_LEN 2
+#define BUFTBL_ENTRY_PGSZ_LBN 16
+#define BUFTBL_ENTRY_PGSZ_WIDTH 16
+/* the raw 64-bit address field from the SMC, not adjusted for page size */
+#define BUFTBL_ENTRY_RAWADDR_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_LEN 8
+#define BUFTBL_ENTRY_RAWADDR_LO_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_HI_OFST 8
+#define BUFTBL_ENTRY_RAWADDR_LBN 32
+#define BUFTBL_ENTRY_RAWADDR_WIDTH 64
+
+/* NVRAM_PARTITION_TYPE structuredef */
+#define NVRAM_PARTITION_TYPE_LEN 2
+#define NVRAM_PARTITION_TYPE_ID_OFST 0
+#define NVRAM_PARTITION_TYPE_ID_LEN 2
+/* enum: Primary MC firmware partition */
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100
+/* enum: Secondary MC firmware partition */
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200
+/* enum: Expansion ROM partition */
+#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300
+/* enum: Static configuration TLV partition */
+#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400
+/* enum: Dynamic configuration TLV partition */
+#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500
+/* enum: Expansion ROM configuration data for port 0 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600
+/* enum: Expansion ROM configuration data for port 1 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601
+/* enum: Expansion ROM configuration data for port 2 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602
+/* enum: Expansion ROM configuration data for port 3 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603
+/* enum: Non-volatile log output partition */
+#define NVRAM_PARTITION_TYPE_LOG 0x700
+/* enum: Device state dump output partition */
+#define NVRAM_PARTITION_TYPE_DUMP 0x800
+/* enum: Application license key storage partition */
+#define NVRAM_PARTITION_TYPE_LICENSE 0x900
+/* enum: Start of range used for PHY partitions (low 8 bits are the PHY ID) */
+#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00
+/* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */
+#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff
+/* enum: Start of reserved value range (firmware may use for any purpose) */
+#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
+/* enum: End of reserved value range (firmware may use for any purpose) */
+#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd
+/* enum: Recovery partition map (provided if real map is missing or corrupt) */
+#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe
+/* enum: Partition map (real map as stored in flash) */
+#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff
+#define NVRAM_PARTITION_TYPE_ID_LBN 0
+#define NVRAM_PARTITION_TYPE_ID_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_READ_REGS
+ * Get a dump of the MCPU registers
+ */
+#define MC_CMD_READ_REGS 0x50
+
+/* MC_CMD_READ_REGS_IN msgrequest */
+#define MC_CMD_READ_REGS_IN_LEN 0
+
+/* MC_CMD_READ_REGS_OUT msgresponse */
+#define MC_CMD_READ_REGS_OUT_LEN 308
+/* Whether the corresponding register entry contains a valid value */
+#define MC_CMD_READ_REGS_OUT_MASK_OFST 0
+#define MC_CMD_READ_REGS_OUT_MASK_LEN 16
+/* Same order as MIPS GDB (r0-r31, sr, lo, hi, bad, cause, 32 x float, fsr,
+ * fir, fp)
+ */
+#define MC_CMD_READ_REGS_OUT_REGS_OFST 16
+#define MC_CMD_READ_REGS_OUT_REGS_LEN 4
+#define MC_CMD_READ_REGS_OUT_REGS_NUM 73
+
+
+/***********************************/
+/* MC_CMD_INIT_EVQ
+ * Set up an event queue according to the supplied parameters. The IN arguments
+ * end with an address for each 4k of host memory required to back the EVQ.
+ */
+#define MC_CMD_INIT_EVQ 0x80
+
+/* MC_CMD_INIT_EVQ_IN msgrequest */
+#define MC_CMD_INIT_EVQ_IN_LENMIN 44
+#define MC_CMD_INIT_EVQ_IN_LENMAX 548
+#define MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4
+/* The initial timer value. The load value is ignored if the timer mode is DIS.
+ */
+#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8
+/* The reload value is ignored in one-shot modes */
+#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12
+/* tbd */
+#define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_LBN 2
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_LBN 3
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_LBN 4
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define MC_CMD_INIT_EVQ_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define MC_CMD_INIT_EVQ_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
+ * purposes.
+ */
+#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24
+/* Event Counter Mode. */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0
+/* enum: Count only RX events */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RX 0x1
+/* enum: Count only TX events */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_TX 0x2
+/* enum: Count both RX and TX events */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold. */
+#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM 64
+
+/* MC_CMD_INIT_EVQ_OUT msgresponse */
+#define MC_CMD_INIT_EVQ_OUT_LEN 4
+/* Only valid if INTRFLAG was true */
+#define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0
+
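A sketch of sizing the request from MC_CMD_INIT_EVQ_IN_LEN(num) and the
one-address-per-4K rule above; the 8-byte event size is an assumption
about the hardware, not something stated here:

static unsigned int init_evq_req_len(unsigned int evq_entries)
{
        unsigned int bytes = evq_entries * 8;       /* assumed event size */
        unsigned int pages = (bytes + 4095) / 4096; /* one DMA_ADDR per 4K */

        return 36 + 8 * pages; /* == MC_CMD_INIT_EVQ_IN_LEN(pages) */
}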
+/* QUEUE_CRC_MODE structuredef */
+#define QUEUE_CRC_MODE_LEN 1
+#define QUEUE_CRC_MODE_MODE_LBN 0
+#define QUEUE_CRC_MODE_MODE_WIDTH 4
+/* enum: No CRC. */
+#define QUEUE_CRC_MODE_NONE 0x0
+/* enum: CRC Fiber channel over ethernet. */
+#define QUEUE_CRC_MODE_FCOE 0x1
+/* enum: CRC (digest) iSCSI header only. */
+#define QUEUE_CRC_MODE_ISCSI_HDR 0x2
+/* enum: CRC (digest) iSCSI header and payload. */
+#define QUEUE_CRC_MODE_ISCSI 0x3
+/* enum: CRC Fiber channel over IP over ethernet. */
+#define QUEUE_CRC_MODE_FCOIPOE 0x4
+/* enum: CRC MPA. */
+#define QUEUE_CRC_MODE_MPA 0x5
+#define QUEUE_CRC_MODE_SPARE_LBN 4
+#define QUEUE_CRC_MODE_SPARE_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_INIT_RXQ
+ * Set up a receive queue according to the supplied parameters. The IN
+ * arguments end with an address for each 4k of host memory required to back
+ * the RXQ.
+ */
+#define MC_CMD_INIT_RXQ 0x81
+
+/* MC_CMD_INIT_RXQ_IN msgrequest */
+#define MC_CMD_INIT_RXQ_IN_LENMIN 36
+#define MC_CMD_INIT_RXQ_IN_LENMAX 252
+#define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
+ */
+#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28
+
+/* MC_CMD_INIT_RXQ_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_INIT_TXQ
+ */
+#define MC_CMD_INIT_TXQ 0x82
+
+/* MC_CMD_INIT_TXQ_IN msgrequest */
+#define MC_CMD_INIT_TXQ_IN_LENMIN 36
+#define MC_CMD_INIT_TXQ_IN_LENMAX 252
+#define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_TXQ_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_LBN 4
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_LBN 8
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28
+
+/* MC_CMD_INIT_TXQ_OUT msgresponse */
+#define MC_CMD_INIT_TXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_EVQ
+ * Teardown an EVQ.
+ *
+ * All DMAQs or EVQs that point to the EVQ to tear down must be torn down first
+ * or the operation will fail with EBUSY
+ */
+#define MC_CMD_FINI_EVQ 0x83
+
+/* MC_CMD_FINI_EVQ_IN msgrequest */
+#define MC_CMD_FINI_EVQ_IN_LEN 4
+/* Instance of EVQ to destroy. Should be the same instance as that previously
+ * passed to INIT_EVQ
+ */
+#define MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_EVQ_OUT msgresponse */
+#define MC_CMD_FINI_EVQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_RXQ
+ * Teardown a RXQ.
+ */
+#define MC_CMD_FINI_RXQ 0x84
+
+/* MC_CMD_FINI_RXQ_IN msgrequest */
+#define MC_CMD_FINI_RXQ_IN_LEN 4
+/* Instance of RXQ to destroy */
+#define MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_RXQ_OUT msgresponse */
+#define MC_CMD_FINI_RXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_TXQ
+ * Teardown a TXQ.
+ */
+#define MC_CMD_FINI_TXQ 0x85
+
+/* MC_CMD_FINI_TXQ_IN msgrequest */
+#define MC_CMD_FINI_TXQ_IN_LEN 4
+/* Instance of TXQ to destroy */
+#define MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_TXQ_OUT msgresponse */
+#define MC_CMD_FINI_TXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DRIVER_EVENT
+ * Generate an event on an EVQ belonging to the function issuing the command.
+ */
+#define MC_CMD_DRIVER_EVENT 0x86
+
+/* MC_CMD_DRIVER_EVENT_IN msgrequest */
+#define MC_CMD_DRIVER_EVENT_IN_LEN 12
+/* Handle of target EVQ */
+#define MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0
+/* Bits 0 - 63 of event */
+#define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_OFST 8
+
+/* MC_CMD_DRIVER_EVENT_OUT msgresponse */
+#define MC_CMD_DRIVER_EVENT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PROXY_CMD
+ * Execute an arbitrary MCDI command on behalf of a different function, subject
+ * to security restrictions. The command to be proxied follows immediately
+ * afterward in the host buffer (or on the UART). This command supersedes
+ * MC_CMD_SET_FUNC, which remains available for Siena but is now deprecated.
+ */
+#define MC_CMD_PROXY_CMD 0x5b
+
+/* MC_CMD_PROXY_CMD_IN msgrequest */
+#define MC_CMD_PROXY_CMD_IN_LEN 4
+/* The handle of the target function. */
+#define MC_CMD_PROXY_CMD_IN_TARGET_OFST 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16
+#define MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16
+#define MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16
+#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */
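+
+/* Illustrative sketch, not part of the original interface: packing the
+ * TARGET handle from its PF/VF halves using the LBN/WIDTH layout above.
+ * Passing VF_NULL as the VF addresses the PF itself.
+ */
+static inline u32 example_proxy_cmd_target(unsigned int pf, unsigned int vf)
+{
+	return (u32)(pf << MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN) |
+	       (u32)(vf << MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN);
+}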
+
+
+/***********************************/
+/* MC_CMD_ALLOC_BUFTBL_CHUNK
+ * Allocate a set of buffer table entries using the specified owner ID. This
+ * operation allocates the required buffer table entries (and fails if it
+ * cannot do so). The buffer table entries will initially be zeroed.
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
+/* Owner ID to use */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
+/* Size of buffer table pages to use, in bytes (note that only a few values are
+ * legal on any specific hardware).
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
+/* Buffer table IDs for use in DMA descriptors. */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
+
+
+/***********************************/
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES
+ * Reprogram a set of buffer table entries in the specified chunk.
+ */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 252
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
+/* ID */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
+/* Num entries */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
+/* Buffer table entry address */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 30
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0
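+
+/* Illustrative sketch, not part of the original interface: filling the
+ * fixed 12-byte prefix of a PROGRAM_BUFTBL_ENTRIES request; the ENTRY
+ * array of 64-bit addresses then follows at offset 12, with between 1
+ * and 30 elements as bounded by ENTRY_MINNUM/ENTRY_MAXNUM.
+ */
+static inline void example_program_buftbl_hdr(u8 *inbuf, u32 handle,
+					      u32 first_id, u32 count)
+{
+	*(__le32 *)(inbuf + MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST) =
+		cpu_to_le32(handle);
+	*(__le32 *)(inbuf + MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST) =
+		cpu_to_le32(first_id);
+	*(__le32 *)(inbuf + MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST) =
+		cpu_to_le32(count);
+}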
+
+
+/***********************************/
+/* MC_CMD_FREE_BUFTBL_CHUNK
+ */
+#define MC_CMD_FREE_BUFTBL_CHUNK 0x89
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FILTER_OP
+ * Multiplexed MCDI call for filter operations
+ */
+#define MC_CMD_FILTER_OP 0x8a
+
+/* MC_CMD_FILTER_OP_IN msgrequest */
+#define MC_CMD_FILTER_OP_IN_LEN 108
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_IN_OP_OFST 0
+/* enum: single-recipient filter insert */
+#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0
+/* enum: single-recipient filter remove */
+#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1
+/* enum: multi-recipient filter subscribe */
+#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2
+/* enum: multi-recipient filter unsubscribe */
+#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3
+/* enum: replace one recipient with another (warning - the filter handle may
+ * change)
+ */
+#define MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_LBN 11
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2
+/* enum: loop back to port 0 TX MAC */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3
+/* enum: loop back to port 1 TX MAC */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
+/* receive mode */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC. Note that these handles should be considered
+ * opaque to the host, although a value of 0xFFFFFFFF is guaranteed never to be
+ * a valid handle.
+ */
+#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68
+/* Firmware defined register 1 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_IN_DST_IP_LEN 16
+
+/* MC_CMD_FILTER_OP_OUT msgresponse */
+#define MC_CMD_FILTER_OP_OUT_LEN 12
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8
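+
+/* Illustrative sketch, not part of the original interface: building a
+ * MATCH_FIELDS word for an insert that matches on destination MAC plus
+ * outer VLAN, using the single-bit LBN positions above.
+ */
+static inline u32 example_filter_op_match_fields(void)
+{
+	return (1u << MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN) |
+	       (1u << MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_LBN);
+}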
+
+
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_INFO
+ * Get information related to the parser-dispatcher subsystem
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO 0xe4
+
+/* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
+/* enum: read the list of supported RX filter matches */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1
+
+/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX 252
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num))
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* number of supported match types */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4
+/* array of supported match types (valid MATCH_FIELDS values for
+ * MC_CMD_FILTER_OP) sorted in decreasing priority order
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_OFST 8
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN 4
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61
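+
+/* Illustrative sketch, not part of the original interface: deriving the
+ * number of SUPPORTED_MATCHES entries from the response length returned
+ * by the MC, which should agree with NUM_SUPPORTED_MATCHES. Assumes
+ * size_t is available, as it is wherever this header is used.
+ */
+static inline unsigned int example_parser_disp_num_matches(size_t outlen)
+{
+	if (outlen < MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN)
+		return 0;
+	return (outlen - MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_OFST) /
+		MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN;
+}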
+
+
+/***********************************/
+/* MC_CMD_PARSER_DISP_RW
+ * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging
+ */
+#define MC_CMD_PARSER_DISP_RW 0xe5
+
+/* MC_CMD_PARSER_DISP_RW_IN msgrequest */
+#define MC_CMD_PARSER_DISP_RW_IN_LEN 32
+/* identifies the target of the operation */
+#define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0
+/* enum: RX dispatcher CPU */
+#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0
+/* enum: TX dispatcher CPU */
+#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1
+/* enum: Lookup engine */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
+/* identifies the type of operation requested */
+#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
+/* enum: read a word of DICPU DMEM or a LUE entry */
+#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0
+/* enum: write a word of DICPU DMEM or a LUE entry */
+#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
+/* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */
+#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
+/* data memory address or LUE index */
+#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
+/* value to write (for DMEM writes) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
+/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
+/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
+/* value to write (for LUE writes) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
+
+/* MC_CMD_PARSER_DISP_RW_OUT msgresponse */
+#define MC_CMD_PARSER_DISP_RW_OUT_LEN 52
+/* value read (for DMEM reads) */
+#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0
+/* value read (for LUE reads) */
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20
+/* up to 8 32-bit words of additional soft state from the LUE manager (the
+ * exact content is firmware-dependent and intended only for debug use)
+ */
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32
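+
+/* Illustrative sketch, not part of the original interface: choosing the
+ * AND mask and XOR value for a DMEM read-modify-write, which the MC
+ * applies as new = (old & mask) ^ value. Clearing a bit in the mask and
+ * setting it in the value forces that bit to 1; clearing it in both
+ * forces 0; setting it in both toggles it.
+ */
+static inline void example_rmw_force_bit_set(unsigned int bit, u32 *mask,
+					     u32 *value)
+{
+	*mask &= ~(1u << bit);	/* discard the old bit value ... */
+	*value |= 1u << bit;	/* ... then XOR in a constant 1 */
+}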
+
+
+/***********************************/
+/* MC_CMD_GET_PF_COUNT
+ * Get number of PFs on the device.
+ */
+#define MC_CMD_GET_PF_COUNT 0xb6
+
+/* MC_CMD_GET_PF_COUNT_IN msgrequest */
+#define MC_CMD_GET_PF_COUNT_IN_LEN 0
+
+/* MC_CMD_GET_PF_COUNT_OUT msgresponse */
+#define MC_CMD_GET_PF_COUNT_OUT_LEN 1
+/* Identifies the number of PFs on the device. */
+#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST 0
+#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_LEN 1
+
+
+/***********************************/
+/* MC_CMD_SET_PF_COUNT
+ * Set number of PFs on the device.
+ */
+#define MC_CMD_SET_PF_COUNT 0xb7
+
+/* MC_CMD_SET_PF_COUNT_IN msgrequest */
+#define MC_CMD_SET_PF_COUNT_IN_LEN 4
+/* New number of PFs on the device. */
+#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0
+
+/* MC_CMD_SET_PF_COUNT_OUT msgresponse */
+#define MC_CMD_SET_PF_COUNT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_ASSIGNMENT
+ * Get port assignment for current PCI function.
+ */
+#define MC_CMD_GET_PORT_ASSIGNMENT 0xb8
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_IN msgrequest */
+#define MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN 0
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_OUT msgresponse */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4
+/* Identifies the port assignment for this function. */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0
+
+
+/***********************************/
+/* MC_CMD_SET_PORT_ASSIGNMENT
+ * Set port assignment for current PCI function.
+ */
+#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
+/* Identifies the port assignment for this function. */
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
+#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ALLOC_VIS
+ * Allocate VIs for current PCI function.
+ */
+#define MC_CMD_ALLOC_VIS 0x8b
+
+/* MC_CMD_ALLOC_VIS_IN msgrequest */
+#define MC_CMD_ALLOC_VIS_IN_LEN 8
+/* The minimum number of VIs that is acceptable */
+#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0
+/* The maximum number of VIs that would be useful */
+#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
+
+/* MC_CMD_ALLOC_VIS_OUT msgresponse */
+#define MC_CMD_ALLOC_VIS_OUT_LEN 8
+/* The number of VIs allocated on this function */
+#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
+
+
+/***********************************/
+/* MC_CMD_FREE_VIS
+ * Free VIs for current PCI function. Any linked PIO buffers will be unlinked,
+ * but not freed.
+ */
+#define MC_CMD_FREE_VIS 0x8c
+
+/* MC_CMD_FREE_VIS_IN msgrequest */
+#define MC_CMD_FREE_VIS_IN_LEN 0
+
+/* MC_CMD_FREE_VIS_OUT msgresponse */
+#define MC_CMD_FREE_VIS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_SRIOV_CFG
+ * Get SRIOV config for this PF.
+ */
+#define MC_CMD_GET_SRIOV_CFG 0xba
+
+/* MC_CMD_GET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_GET_SRIOV_CFG_IN_LEN 0
+
+/* MC_CMD_GET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_GET_SRIOV_CFG_OUT_LEN 20
+/* Number of VFs currently enabled. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0
+/* Max number of VFs before SRIOV stride and offset may need to be changed. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4
+#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12
+/* RID offset of each subsequent VF from the previous. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16
+
+
+/***********************************/
+/* MC_CMD_SET_SRIOV_CFG
+ * Set SRIOV config for this PF.
+ */
+#define MC_CMD_SET_SRIOV_CFG 0xbb
+
+/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
+/* Number of VFs currently enabled. */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
+/* Max number of VFs before SRIOV stride and offset may need to be changed. */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
+#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF, or 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
+/* RID offset of each subsequent VF from the previous, 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
+
+/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VI_ALLOC_INFO
+ * Get information about the number of VIs and the base VI number allocated
+ * to this function.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO 0x8d
+
+/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */
+#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
+
+/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 8
+/* The number of VIs allocated on this function */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+
+
+/***********************************/
+/* MC_CMD_DUMP_VI_STATE
+ * For CmdClient use. Dump pertinent information on a specific absolute VI.
+ */
+#define MC_CMD_DUMP_VI_STATE 0x8e
+
+/* MC_CMD_DUMP_VI_STATE_IN msgrequest */
+#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
+/* The VI number to query. */
+#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
+
+/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
+#define MC_CMD_DUMP_VI_STATE_OUT_LEN 96
+/* The PF part of the function owning this VI. */
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2
+/* The VF part of the function owning this VI. */
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_OFST 2
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_LEN 2
+/* Base of VIs allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_OFST 4
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_LEN 2
+/* Count of VIs allocated to the owner function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_OFST 6
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_LEN 2
+/* Base interrupt vector allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_OFST 8
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_LEN 2
+/* Number of interrupt vectors allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_OFST 10
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_LEN 2
+/* Raw evq ptr table data. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16
+/* Raw evq timer table data. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_WIDTH 8
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_LBN 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_WIDTH 24
+/* RXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68
+/* RXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76
+/* Reserved, currently 0. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8
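+
+/* Illustrative sketch, not part of the original interface: extracting an
+ * LBN/WIDTH bitfield such as VI_TX_META_WAITCOUNT (LBN 32, WIDTH 8) from
+ * a 64-bit META value already assembled from its LO/HI dwords.
+ */
+static inline u64 example_dump_vi_meta_field(u64 meta, unsigned int lbn,
+					     unsigned int width)
+{
+	return (meta >> lbn) & ((1ull << width) - 1);
+}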
+
+
+/***********************************/
+/* MC_CMD_ALLOC_PIOBUF
+ * Allocate a push I/O buffer for later use with a tx queue.
+ */
+#define MC_CMD_ALLOC_PIOBUF 0x8f
+
+/* MC_CMD_ALLOC_PIOBUF_IN msgrequest */
+#define MC_CMD_ALLOC_PIOBUF_IN_LEN 0
+
+/* MC_CMD_ALLOC_PIOBUF_OUT msgresponse */
+#define MC_CMD_ALLOC_PIOBUF_OUT_LEN 4
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_FREE_PIOBUF
+ * Free a push I/O buffer.
+ */
+#define MC_CMD_FREE_PIOBUF 0x90
+
+/* MC_CMD_FREE_PIOBUF_IN msgrequest */
+#define MC_CMD_FREE_PIOBUF_IN_LEN 4
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+
+/* MC_CMD_FREE_PIOBUF_OUT msgresponse */
+#define MC_CMD_FREE_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VI_TLP_PROCESSING
+ * Get TLP steering and ordering information for a VI.
+ */
+#define MC_CMD_GET_VI_TLP_PROCESSING 0xb0
+
+/* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */
+#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
+/* VI number to get information for. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+
+/* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4
+/* Transaction processing steering hint 1 for use with the Rx Queue. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_LEN 1
+/* Transaction processing steering hint 2 for use with the Ev Queue. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_OFST 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_LEN 1
+/* Use Relaxed ordering model for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_LBN 16
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_WIDTH 1
+/* Use ID based ordering for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_LBN 17
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_WIDTH 1
+/* Set no snoop bit for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_LBN 18
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_WIDTH 1
+/* Enable TPH for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0
+
+
+/***********************************/
+/* MC_CMD_SET_VI_TLP_PROCESSING
+ * Set TLP steering and ordering information for a VI.
+ */
+#define MC_CMD_SET_VI_TLP_PROCESSING 0xb1
+
+/* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
+/* VI number to set information for. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+/* Transaction processing steering hint 1 for use with the Rx Queue. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1
+/* Transaction processing steering hint 2 for use with the Ev Queue. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_OFST 5
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_LEN 1
+/* Use Relaxed ordering model for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_LBN 48
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_WIDTH 1
+/* Use ID based ordering for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_LBN 49
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_WIDTH 1
+/* Set the no snoop bit for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_LBN 50
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_WIDTH 1
+/* Enable TPH for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4
+
+/* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */
+#define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS
+ * Get global PCIe steering and transaction processing configuration.
+ */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc
+
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+/* enum: MISC. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0
+/* enum: IDO. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1
+/* enum: RO. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2
+/* enum: TPH Type. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3
+
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
+/* Amalgamated TLP info word. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_WIDTH 31
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_LBN 3
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_LBN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_WIDTH 28
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_LBN 3
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_WIDTH 29
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_LBN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_LBN 6
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_LBN 8
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_LBN 9
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_WIDTH 23
+
+
+/***********************************/
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS
+ * Set global PCIe steering and transaction processing configuration.
+ */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd
+
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
+/* Amalgamated TLP info word. */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_LBN 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_LBN 3
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_LBN 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_LBN 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_LBN 6
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_LBN 8
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_LBN 10
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_WIDTH 22
+
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SATELLITE_DOWNLOAD
+ * Download a new set of images to the satellite CPUs from the host.
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD 0x91
+
+/* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs
+ * are subtle, and so downloads must proceed in a number of phases.
+ *
+ * 1) PHASE_RESET with a target of TARGET_ALL and chunk ID/length of 0.
+ *
+ * 2) PHASE_IMEMS for each of the IMEM targets (target IDs 0-11). Each download
+ * may consist of multiple chunks. The final chunk (with CHUNK_ID_LAST) should
+ * be a checksum (a simple 32-bit sum) of the transferred data. An individual
+ * download may be aborted using CHUNK_ID_ABORT.
+ *
+ * 3) PHASE_VECTORS for each of the vector table targets (target IDs 12-15),
+ * similar to PHASE_IMEMS.
+ *
+ * 4) PHASE_READY with a target of TARGET_ALL and chunk ID/length of 0.
+ *
+ * After any error (a requested abort is not considered to be an error) the
+ * sequence must be restarted from PHASE_RESET.
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMIN 20
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX 252
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(num) (16+4*(num))
+/* Download phase. (Note: the IDLE phase is used internally and is never valid
+ * in a command from the host.)
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */
+/* Target for download. (These match the blob numbers defined in
+ * mc_flash_layout.h.)
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf
+/* enum: Valid in phases 1 (PHASE_RESET) and 4 (PHASE_READY) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff
+/* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8
+/* enum: Last chunk, containing checksum rather than data */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff
+/* enum: Abort download of this item */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe
+/* Length of this chunk in bytes */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12
+/* Data for this chunk */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MINNUM 1
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM 59
+
+/* MC_CMD_SATELLITE_DOWNLOAD_OUT msgresponse */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8
+/* Same as MC_CMD_ERR field, but included as 0 in success cases */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0
+/* Extra status information */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4
+/* enum: Code download OK, completed. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0
+/* enum: Code download aborted as requested. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1
+/* enum: Code download OK so far, send next chunk. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2
+/* enum: Download phases out of sequence */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100
+/* enum: Bad target for this phase */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101
+/* enum: Chunk ID out of sequence */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200
+/* enum: Chunk length zero or too large */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201
+/* enum: Checksum was incorrect */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300
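+
+/* Illustrative sketch, not part of the original interface: the
+ * CHUNK_ID_LAST payload described above is a simple 32-bit sum of the
+ * transferred data (interpreted here as a dword-wise sum; the exact
+ * accumulation granularity is an assumption), with overflow wrapping.
+ */
+static inline u32 example_satellite_checksum(const u32 *words, size_t n_words)
+{
+	u32 sum = 0;
+	size_t i;
+
+	for (i = 0; i < n_words; i++)
+		sum += words[i];
+	return sum;
+}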
+
+
+/***********************************/
+/* MC_CMD_GET_CAPABILITIES
+ * Get device capabilities.
+ *
+ * This is supplementary to the MC_CMD_GET_BOARD_CFG command, and intended to
+ * reference inherent device capabilities as opposed to current NVRAM config.
+ */
+#define MC_CMD_GET_CAPABILITIES 0xbe
+
+/* MC_CMD_GET_CAPABILITIES_IN msgrequest */
+#define MC_CMD_GET_CAPABILITIES_IN_LEN 0
+
+/* MC_CMD_GET_CAPABILITIES_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16
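+
+/* Illustrative sketch, not part of the original interface: testing one
+ * single-bit capability in the decoded FLAGS1 word, here RX_BATCHING
+ * (LBN 25). Assumes the kernel's bool type is in scope.
+ */
+static inline bool example_cap_rx_batching(u32 flags1)
+{
+	return (flags1 >> MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN) & 1;
+}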
+
+
+/***********************************/
+/* MC_CMD_V2_EXTN
+ * Encapsulation for a v2 extended command
+ */
+#define MC_CMD_V2_EXTN 0x7f
+
+/* MC_CMD_V2_EXTN_IN msgrequest */
+#define MC_CMD_V2_EXTN_IN_LEN 4
+/* the extended command number */
+#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN 0
+#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH 15
+#define MC_CMD_V2_EXTN_IN_UNUSED_LBN 15
+#define MC_CMD_V2_EXTN_IN_UNUSED_WIDTH 1
+/* the actual length of the encapsulated command (which is not in the v1
+ * header)
+ */
+#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16
+#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10
+#define MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26
+#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 6
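+
+/* Illustrative sketch, not part of the original interface: composing the
+ * V2_EXTN dword that carries the extended command number (15 bits) and
+ * the encapsulated command's true length (10 bits), which the v1 header
+ * cannot express.
+ */
+static inline u32 example_v2_extn_word(unsigned int ext_cmd,
+				       unsigned int actual_len)
+{
+	return (u32)(ext_cmd << MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN) |
+	       (u32)(actual_len << MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN);
+}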
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_ALLOC
+ * Allocate a pacer bucket (for qau rp or a snapper test)
+ */
+#define MC_CMD_TCM_BUCKET_ALLOC 0xb2
+
+/* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0
+
+/* MC_CMD_TCM_BUCKET_ALLOC_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_FREE
+ * Free a pacer bucket
+ */
+#define MC_CMD_TCM_BUCKET_FREE 0xb3
+
+/* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0
+
+/* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_INIT
+ * Initialise pacer bucket with a given rate
+ */
+#define MC_CMD_TCM_BUCKET_INIT 0xb4
+
+/* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0
+/* the rate in mbps */
+#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
+
+/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TCM_TXQ_INIT
+ * Initialise a txq in the pacer with the given options, or update its options
+ */
+#define MC_CMD_TCM_TXQ_INIT 0xb5
+
+/* MC_CMD_TCM_TXQ_INIT_IN msgrequest */
+#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28
+/* the txq id */
+#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
+/* the static priority associated with the txq */
+#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
+/* bitmask of the priority queues this txq is inserted into */
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
+/* the reaction point (RP) bucket */
+#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
+/* an already reserved bucket (typically set to bucket associated with outer
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16
+/* an already reserved bucket (typically set to bucket associated with inner
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20
+/* the min bucket (typically for ETS/minimum bandwidth) */
+#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
+
+/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
+#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LINK_PIOBUF
+ * Link a push I/O buffer to a TxQ
+ */
+#define MC_CMD_LINK_PIOBUF 0x92
+
+/* MC_CMD_LINK_PIOBUF_IN msgrequest */
+#define MC_CMD_LINK_PIOBUF_IN_LEN 8
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+/* Function Local Instance (VI) number. */
+#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4
+
+/* MC_CMD_LINK_PIOBUF_OUT msgresponse */
+#define MC_CMD_LINK_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_UNLINK_PIOBUF
+ * Unlink a push I/O buffer from a TxQ
+ */
+#define MC_CMD_UNLINK_PIOBUF 0x93
+
+/* MC_CMD_UNLINK_PIOBUF_IN msgrequest */
+#define MC_CMD_UNLINK_PIOBUF_IN_LEN 4
+/* Function Local Instance (VI) number. */
+#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0
+
+/* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */
+#define MC_CMD_UNLINK_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_ALLOC
+ * Allocate and initialise a v-switch.
+ */
+#define MC_CMD_VSWITCH_ALLOC 0x94
+
+/* MC_CMD_VSWITCH_ALLOC_IN msgrequest */
+#define MC_CMD_VSWITCH_ALLOC_IN_LEN 16
+/* The port to connect to the v-switch's upstream port. */
+#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The type of v-switch to create. */
+#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4
+/* enum: VLAN */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1
+/* enum: VEB */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2
+/* enum: VEPA */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3
+/* Flags controlling v-port creation */
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+/* The number of VLAN tags to support. */
+#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+
+/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
+#define MC_CMD_VSWITCH_ALLOC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_FREE
+ * De-allocate a v-switch.
+ */
+#define MC_CMD_VSWITCH_FREE 0x95
+
+/* MC_CMD_VSWITCH_FREE_IN msgrequest */
+#define MC_CMD_VSWITCH_FREE_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VSWITCH_FREE_OUT msgresponse */
+#define MC_CMD_VSWITCH_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_ALLOC
+ * Allocate a v-port.
+ */
+#define MC_CMD_VPORT_ALLOC 0x96
+
+/* MC_CMD_VPORT_ALLOC_IN msgrequest */
+#define MC_CMD_VPORT_ALLOC_IN_LEN 20
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The type of the new v-port. */
+#define MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4
+/* enum: VLAN (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1
+/* enum: VEB (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2
+/* enum: VEPA (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3
+/* enum: A normal v-port receives packets which match a specified MAC and/or
+ * VLAN.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4
+/* enum: An expansion v-port receives packets which don't match any other
+ * v-port.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5
+/* enum: A test v-port receives packets which match any filters installed by
+ * its downstream components.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6
+/* Flags controlling v-port creation */
+#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+/* The number of VLAN tags to insert/remove. */
+#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_WIDTH 16
+
+/* MC_CMD_VPORT_ALLOC_OUT msgresponse */
+#define MC_CMD_VPORT_ALLOC_OUT_LEN 4
+/* The handle of the new v-port */
+#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_FREE
+ * De-allocate a v-port.
+ */
+#define MC_CMD_VPORT_FREE 0x97
+
+/* MC_CMD_VPORT_FREE_IN msgrequest */
+#define MC_CMD_VPORT_FREE_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0
+
+/* MC_CMD_VPORT_FREE_OUT msgresponse */
+#define MC_CMD_VPORT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_ALLOC
+ * Allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_ALLOC 0x98
+
+/* MC_CMD_VADAPTOR_ALLOC_IN msgrequest */
+#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 16
+/* The port to connect to the v-adaptor's upstream port. */
+#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* Flags controlling v-adaptor creation */
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
+/* The number of VLAN tags to strip on receive */
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
+
+/* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_FREE
+ * De-allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_FREE 0x99
+
+/* MC_CMD_VADAPTOR_FREE_IN msgrequest */
+#define MC_CMD_VADAPTOR_FREE_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VADAPTOR_FREE_OUT msgresponse */
+#define MC_CMD_VADAPTOR_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_EVB_PORT_ASSIGN
+ * Assign a port to a PCI function.
+ */
+#define MC_CMD_EVB_PORT_ASSIGN 0x9a
+
+/* MC_CMD_EVB_PORT_ASSIGN_IN msgrequest */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
+/* The port to assign. */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0
+/* The target function to modify. */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_WIDTH 16
+
+/* MC_CMD_EVB_PORT_ASSIGN_OUT msgresponse */
+#define MC_CMD_EVB_PORT_ASSIGN_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RDWR_A64_REGIONS
+ * Assign the 64-bit region addresses.
+ */
+#define MC_CMD_RDWR_A64_REGIONS 0x9b
+
+/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
+#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
+/* Write enable bits 0-3: set a bit to write the corresponding region, clear
+ * it to read.
+ */
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST 16
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_LEN 1
+
+/* MC_CMD_RDWR_A64_REGIONS_OUT msgresponse: This data is always included,
+ * regardless of the state of the write bits in the request.
+ */
+#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_ALLOC
+ * Allocate an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
+/* The handle of the owning upstream port */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
+/* The handle of the new Onload stack */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_FREE
+ * Free an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_FREE 0x9d
+
+/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
+/* The handle of the Onload stack */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
+
+/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_ALLOC
+ * Allocate an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC 0x9e
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
+/* The handle of the owning upstream port */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The type of context to allocate */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4
+/* enum: Allocate a context for exclusive use. The key and indirection table
+ * must be explicitly configured.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0
+/* enum: Allocate a context for shared use; this will spread across a range of
+ * queues, but the key and indirection table are pre-configured and may not be
+ * changed. For this mode, NUM_QUEUES must be 2, 4, 8, 16, 32 or 64.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1
+/* Number of queues spanned by this context, in the range 1-64; valid offsets
+ * in the indirection table will be in the range 0 to NUM_QUEUES-1.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
+/* The handle of the new RSS context */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0
+
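+/* Usage sketch (illustrative only): allocating an exclusive RSS context and
+ * reading back the handle; "port_id" and "num_queues" are placeholders.
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
+ *	size_t outlen;
+ *
+ *	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+ *	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE,
+ *		       MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE);
+ *	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, num_queues);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	if (rc == 0 && outlen >= MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
+ *		ctx = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
+ */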
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_FREE
+ * Free an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_FREE 0x9f
+
+/* MC_CMD_RSS_CONTEXT_FREE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_KEY
+ * Set the Toeplitz hash key for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_KEY 0xa0
+
+/* MC_CMD_RSS_CONTEXT_SET_KEY_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40
+
+/* MC_CMD_RSS_CONTEXT_SET_KEY_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN 0
+
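+/* Usage sketch (illustrative only): copying the 40-byte key into the
+ * request with MCDI_PTR; "ctx" and "key" are placeholders.
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
+ *
+ *	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID, ctx);
+ *	memcpy(MCDI_PTR(inbuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY),
+ *	       key, MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, inbuf,
+ *			  sizeof(inbuf), NULL, 0, NULL);
+ */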
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_KEY
+ * Get the Toeplitz hash key for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_KEY 0xa1
+
+/* MC_CMD_RSS_CONTEXT_GET_KEY_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44
+/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_LEN 40
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_TABLE
+ * Set the indirection table for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE 0xa2
+
+/* MC_CMD_RSS_CONTEXT_SET_TABLE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+/* The 128-byte indirection table (1 byte per entry) */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128
+
+/* MC_CMD_RSS_CONTEXT_SET_TABLE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_TABLE
+ * Get the indirection table for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE 0xa3
+
+/* MC_CMD_RSS_CONTEXT_GET_TABLE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132
+/* The 128-byte indirection table (1 byte per entry) */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN 128
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS
+ * Set various control flags for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS 0xe1
+
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+/* Hash control flags */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_LBN 2
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1
+
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS
+ * Get various control flags for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS 0xe2
+
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
+/* Hash control flags */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN 2
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1
+
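+/* Usage sketch (illustrative only): the LBN/WIDTH pairs above describe bits
+ * within the FLAGS dword, so individual flags can be tested against the
+ * value read with MCDI_DWORD; "ctx" is a placeholder.
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
+ *	size_t outlen;
+ *	u32 flags;
+ *
+ *	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID, ctx);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_FLAGS, inbuf,
+ *			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ *	if (rc == 0 && outlen >= MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN) {
+ *		flags = MCDI_DWORD(outbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS);
+ *		tcpv4_en = flags &
+ *		    (1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN);
+ *	}
+ */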
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_ALLOC
+ * Allocate a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4
+
+/* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
+/* The handle of the owning upstream port */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* Number of queues spanned by this mapping, in the range 1-64; valid fixed
+ * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and
+ * referenced RSS contexts must span no more than this number.
+ */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4
+
+/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
+/* The handle of the new .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_FREE
+ * Free a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_FREE 0xa5
+
+/* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0
+
+/* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE
+ * Set the mapping table for a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6
+
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
+ * handle)
+ */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_OFST 4
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_LEN 32
+
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE
+ * Get the mapping table for a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7
+
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36
+/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
+ * handle)
+ */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_OFST 4
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_LEN 32
+
+
+/***********************************/
+/* MC_CMD_GET_VECTOR_CFG
+ * Get Interrupt Vector config for this PF.
+ */
+#define MC_CMD_GET_VECTOR_CFG 0xbf
+
+/* MC_CMD_GET_VECTOR_CFG_IN msgrequest */
+#define MC_CMD_GET_VECTOR_CFG_IN_LEN 0
+
+/* MC_CMD_GET_VECTOR_CFG_OUT msgresponse */
+#define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12
+/* Base absolute interrupt vector number. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
+/* Number of interrupt vectors allocated to this PF. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
+/* Number of interrupt vectors to allocate per VF. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8
+
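+/* Usage sketch (illustrative only): commands with a zero-length request can
+ * pass NULL/0 for the input buffer.
+ *
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VECTOR_CFG_OUT_LEN);
+ *	size_t outlen;
+ *
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_GET_VECTOR_CFG, NULL, 0,
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	if (rc == 0 && outlen >= MC_CMD_GET_VECTOR_CFG_OUT_LEN)
+ *		vecs_per_pf = MCDI_DWORD(outbuf,
+ *					 GET_VECTOR_CFG_OUT_VECS_PER_PF);
+ */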
+
+/***********************************/
+/* MC_CMD_SET_VECTOR_CFG
+ * Set Interrupt Vector config for this PF.
+ */
+#define MC_CMD_SET_VECTOR_CFG 0xc0
+
+/* MC_CMD_SET_VECTOR_CFG_IN msgrequest */
+#define MC_CMD_SET_VECTOR_CFG_IN_LEN 12
+/* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to
+ * let the system find a suitable base.
+ */
+#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
+/* Number of interrupt vectors to allocate to this PF. */
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
+/* Number of interrupt vectors to allocate per VF. */
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8
+
+/* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
+#define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_CLASS_STATS
+ * Retrieve rmon rx class statistics
+ */
+#define MC_CMD_RMON_RX_CLASS_STATS 0xc3
+
+/* MC_CMD_RMON_RX_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MAXNUM 63
+
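+/* Usage sketch (illustrative only): responses declared with LENMIN/LENMAX
+ * and a LEN(num) macro are variable-length; the entry count follows from
+ * the actual response length, and entries can be read with MCDI_ARRAY_DWORD
+ * (assuming that helper from mcdi.h). "class" and "stats" are placeholders.
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_RMON_RX_CLASS_STATS_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMAX);
+ *	size_t outlen;
+ *	unsigned int i, count;
+ *
+ *	MCDI_POPULATE_DWORD_1(inbuf, RMON_RX_CLASS_STATS_IN_FLAGS,
+ *			      RMON_RX_CLASS_STATS_IN_CLASS, class);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_RMON_RX_CLASS_STATS, inbuf,
+ *			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ *	count = outlen / MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_LEN;
+ *	for (i = 0; i < count; i++)
+ *		stats[i] = MCDI_ARRAY_DWORD(outbuf,
+ *					    RMON_RX_CLASS_STATS_OUT_BUFFER, i);
+ */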
+
+/***********************************/
+/* MC_CMD_RMON_TX_CLASS_STATS
+ * Retrieve rmon tx class statistics
+ */
+#define MC_CMD_RMON_TX_CLASS_STATS 0xc4
+
+/* MC_CMD_RMON_TX_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_SUPER_CLASS_STATS
+ * Retrieve rmon rx super_class statistics
+ */
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS 0xc5
+
+/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_LBN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_SUPER_CLASS_STATS
+ * Retrieve rmon tx super_class statistics
+ */
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS 0xc6
+
+/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_LBN 4
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS
+ * Add qid to class for statistics collection
+ */
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS 0xc7
+
+/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN msgrequest */
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_LEN 12
+/* class */
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
+/* qid */
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_QID_OFST 4
+/* flags */
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_LBN 8
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
+
+/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS
+ * Add qid to class for statistics collection
+ */
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS 0xc8
+
+/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN msgrequest */
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_LEN 12
+/* class */
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
+/* qid */
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_QID_OFST 4
+/* flags */
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_LBN 8
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
+
+/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS
+ * Add qid to class for statistics collection
+ */
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS 0xc9
+
+/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN msgrequest */
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_LEN 12
+/* class */
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
+/* qid */
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_QID_OFST 4
+/* flags */
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_LBN 8
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
+
+/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RMON_ALLOC_CLASS
+ * Allocate an rmon class
+ */
+#define MC_CMD_RMON_ALLOC_CLASS 0xca
+
+/* MC_CMD_RMON_ALLOC_CLASS_IN msgrequest */
+#define MC_CMD_RMON_ALLOC_CLASS_IN_LEN 0
+
+/* MC_CMD_RMON_ALLOC_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_ALLOC_CLASS_OUT_LEN 4
+/* class */
+#define MC_CMD_RMON_ALLOC_CLASS_OUT_CLASS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_RMON_DEALLOC_CLASS
+ * Deallocate an rmon class
+ */
+#define MC_CMD_RMON_DEALLOC_CLASS 0xcb
+
+/* MC_CMD_RMON_DEALLOC_CLASS_IN msgrequest */
+#define MC_CMD_RMON_DEALLOC_CLASS_IN_LEN 4
+/* class */
+#define MC_CMD_RMON_DEALLOC_CLASS_IN_CLASS_OFST 0
+
+/* MC_CMD_RMON_DEALLOC_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_DEALLOC_CLASS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RMON_ALLOC_SUPER_CLASS
+ * Allocate an rmon super_class
+ */
+#define MC_CMD_RMON_ALLOC_SUPER_CLASS 0xcc
+
+/* MC_CMD_RMON_ALLOC_SUPER_CLASS_IN msgrequest */
+#define MC_CMD_RMON_ALLOC_SUPER_CLASS_IN_LEN 0
+
+/* MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_LEN 4
+/* super_class */
+#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_SUPER_CLASS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_RMON_DEALLOC_SUPER_CLASS
+ * Deallocate an rmon super_class
+ */
+#define MC_CMD_RMON_DEALLOC_SUPER_CLASS 0xcd
+
+/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN msgrequest */
+#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_LEN 4
+/* super_class */
+#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_SUPER_CLASS_OFST 0
+
+/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_UP_CONV_STATS
+ * Retrieve up converter statistics
+ */
+#define MC_CMD_RMON_RX_UP_CONV_STATS 0xce
+
+/* MC_CMD_RMON_RX_UP_CONV_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_LBN 0
+#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_WIDTH 2
+#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_LBN 2
+#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_UP_CONV_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_IPI_STATS
+ * Retrieve rx ipi stats
+ */
+#define MC_CMD_RMON_RX_IPI_STATS 0xcf
+
+/* MC_CMD_RMON_RX_IPI_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_IPI_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_IPI_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_LBN 0
+#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_WIDTH 5
+#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_LBN 5
+#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_IPI_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS
+ * Retrieve rx ipsec cntxt_ptr indexed stats
+ */
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS 0xd0
+
+/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_IPSEC_PORT_STATS
+ * Retrieve rx ipsec port indexed stats
+ */
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS 0xd1
+
+/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_LBN 0
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_LBN 2
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS
+ * Retrieve rx ipsec overflow
+ */
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS 0xd2
+
+/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_LBN 2
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS
+ * Add a MAC address to a v-port
+ */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+/* MAC address to add */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT_LEN 0
+
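+/* Usage sketch (illustrative only): the 6-byte MACADDR field is copied in
+ * via MCDI_PTR; "vport_id" and "mac_addr" are placeholders.
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);
+ *
+ *	MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, vport_id);
+ *	memcpy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR),
+ *	       mac_addr, ETH_ALEN);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
+ *			  sizeof(inbuf), NULL, 0, NULL);
+ */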
+
+/***********************************/
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS
+ * Delete a MAC address from a v-port
+ */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+/* MAC address to delete */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES
+ * Get the MAC addresses on a v-port
+ */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES 0xaa
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX 250
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num))
+/* The number of MAC addresses returned */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
+/* Array of MAC addresses */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MINNUM 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM 41
+
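+/* Usage sketch (illustrative only): walking the returned MAC array; each
+ * entry is MACADDR_LEN (6) bytes and the count comes from MACADDR_COUNT.
+ * "vport_id" is a placeholder.
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
+ *	size_t outlen;
+ *	unsigned int i, count;
+ *	const u8 *addr;
+ *
+ *	MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID, vport_id);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf,
+ *			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ *	count = MCDI_DWORD(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT);
+ *	for (i = 0; i < count; i++)
+ *		addr = MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR) +
+ *		       i * MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN;
+ */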
+
+/***********************************/
+/* MC_CMD_DUMP_BUFTBL_ENTRIES
+ * Dump buffer table entries, mainly for command client debug use. Dumps
+ * absolute entries and does not use chunk handles. All entries must be in
+ * range and used for queue page mapping, although the latter restriction
+ * may be lifted in future.
+ */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
+
+/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
+/* Index of the first buffer table entry. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
+/* Number of buffer table entries to dump. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
+
+/* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
+/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM 21
+
+
+/***********************************/
+/* MC_CMD_SET_RXDP_CONFIG
+ * Set global RXDP configuration settings
+ */
+#define MC_CMD_SET_RXDP_CONFIG 0xc1
+
+/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
+#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
+#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
+
+/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_RXDP_CONFIG
+ * Get global RXDP configuration settings
+ */
+#define MC_CMD_GET_RXDP_CONFIG 0xc2
+
+/* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */
+#define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
+#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_CLASS_DROPS_STATS
+ * Retrieve rx class drop stats
+ */
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS 0xd3
+
+/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS
+ * Retrieve rx super class drop stats
+ */
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS 0xd4
+
+/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_LBN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_ERRORS_STATS
+ * Retrieve rxdp errors
+ */
+#define MC_CMD_RMON_RX_ERRORS_STATS 0xd5
+
+/* MC_CMD_RMON_RX_ERRORS_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_ERRORS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_ERRORS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_LBN 0
+#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_WIDTH 11
+#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_LBN 11
+#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_ERRORS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_OVERFLOW_STATS
+ * Retrieve rxdp overflow
+ */
+#define MC_CMD_RMON_RX_OVERFLOW_STATS 0xd6
+
+/* MC_CMD_RMON_RX_OVERFLOW_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_OVERFLOW_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_IPI_STATS
+ * Retrieve tx ipi stats
+ */
+#define MC_CMD_RMON_TX_IPI_STATS 0xd7
+
+/* MC_CMD_RMON_TX_IPI_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_IPI_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_IPI_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_LBN 0
+#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_WIDTH 5
+#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_LBN 5
+#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_IPI_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS
+ * Retrieve tx ipsec counters by cntxt_ptr
+ */
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS 0xd8
+
+/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_IPSEC_PORT_STATS
+ * Retrieve tx ipsec counters by port
+ */
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS 0xd9
+
+/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_LBN 0
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_LBN 2
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS
+ * Retrieve tx ipsec overflow
+ */
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS 0xda
+
+/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_LBN 2
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_NOWHERE_STATS
+ * Retrieve tx nowhere stats
+ */
+#define MC_CMD_RMON_TX_NOWHERE_STATS 0xdb
+
+/* MC_CMD_RMON_TX_NOWHERE_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_NOWHERE_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS
+ * Retrieve tx nowhere qbb stats
+ */
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS 0xdc
+
+/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_LBN 0
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_WIDTH 3
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_LBN 3
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_ERRORS_STATS
+ * Retrieve txdp errors
+ */
+#define MC_CMD_RMON_TX_ERRORS_STATS 0xdd
+
+/* MC_CMD_RMON_TX_ERRORS_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_LBN 0
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_WIDTH 11
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_LBN 11
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_ERRORS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_OVERFLOW_STATS
+ * Retrieve txdp overflow
+ */
+#define MC_CMD_RMON_TX_OVERFLOW_STATS 0xde
+
+/* MC_CMD_RMON_TX_OVERFLOW_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_OVERFLOW_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_COLLECT_CLASS_STATS
+ * Explicitly collect class stats at the specified evb port
+ */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS 0xdf
+
+/* MC_CMD_RMON_COLLECT_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_LEN 4
+/* The port id associated with the vport/pport at which to collect class stats
+ */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_PORT_ID_OFST 0
+
+/* MC_CMD_RMON_COLLECT_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_LEN 4
+/* class */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_CLASS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS
+ * Explicitly collect super class stats at the specified evb port
+ */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS 0xe0
+
+/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_LEN 4
+/* The port id associated with the vport/pport at which to collect super
+ * class stats
+ */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_PORT_ID_OFST 0
+
+/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_LEN 4
+/* super_class */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_SUPER_CLASS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_GET_CLOCK
+ * Return the system and DPCPU clock frequencies.
+ */
+#define MC_CMD_GET_CLOCK 0xac
+
+/* MC_CMD_GET_CLOCK_IN msgrequest */
+#define MC_CMD_GET_CLOCK_IN_LEN 0
+
+/* MC_CMD_GET_CLOCK_OUT msgresponse */
+#define MC_CMD_GET_CLOCK_OUT_LEN 8
+/* System frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0
+/* DPCPU frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4
+
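+/* Usage sketch (illustrative only): reading both frequencies from the
+ * response.
+ *
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
+ *	size_t outlen;
+ *
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	if (rc == 0 && outlen >= MC_CMD_GET_CLOCK_OUT_LEN) {
+ *		sys_mhz = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
+ *		dpcpu_mhz = MCDI_DWORD(outbuf, GET_CLOCK_OUT_DPCPU_FREQ);
+ *	}
+ */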
+
+/***********************************/
+/* MC_CMD_SET_CLOCK
+ * Control the system and DPCPU clock frequencies. Changes are lost on
+ * reboot.
+ */
+#define MC_CMD_SET_CLOCK 0xad
+
+/* MC_CMD_SET_CLOCK_IN msgrequest */
+#define MC_CMD_SET_CLOCK_IN_LEN 12
+/* Requested system frequency in MHz; 0 leaves unchanged. */
+#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
+/* Requested inter-core frequency in MHz; 0 leaves unchanged. */
+#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
+/* Requested DPCPU frequency in MHz; 0 leaves unchanged. */
+#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
+
+/* MC_CMD_SET_CLOCK_OUT msgresponse */
+#define MC_CMD_SET_CLOCK_OUT_LEN 12
+/* Resulting system frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
+/* Resulting inter-core frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
+/* Resulting DPCPU frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
+
+
+/***********************************/
+/* MC_CMD_DPCPU_RPC
+ * Send an arbitrary DPCPU message.
+ */
+#define MC_CMD_DPCPU_RPC 0xae
+
+/* MC_CMD_DPCPU_RPC_IN msgrequest */
+#define MC_CMD_DPCPU_RPC_IN_LEN 36
+#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
+/* enum: RxDPCPU */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x0
+/* enum: TxDPCPU0 */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1
+/* enum: TxDPCPU1 */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2
+/* The first 8 bits [39:32] of DATA are consumed by the MC-DPCPU protocol
+ * and must be initialised to zero.
+ */
+#define MC_CMD_DPCPU_RPC_IN_DATA_OFST 4
+#define MC_CMD_DPCPU_RPC_IN_DATA_LEN 32
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_LBN 48
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_LBN 80
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12
+#define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24
+/* Register data to write. Only valid in write/write-read. */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16
+/* Register address. */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20
+
+/* MC_CMD_DPCPU_RPC_OUT msgresponse */
+#define MC_CMD_DPCPU_RPC_OUT_LEN 36
+#define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0
+/* DATA */
+#define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4
+#define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32
+#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_LBN 32
+#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_WIDTH 16
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_LBN 48
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12
+#define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24
+
+
+/***********************************/
+/* MC_CMD_TRIGGER_INTERRUPT
+ * Trigger an interrupt by prodding the BIU.
+ */
+#define MC_CMD_TRIGGER_INTERRUPT 0xe3
+
+/* MC_CMD_TRIGGER_INTERRUPT_IN msgrequest */
+#define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
+/* Interrupt level relative to the base for this function. */
+#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0
+
+/* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */
+#define MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DUMP_DO
+ * Take a dump of the DUT state
+ */
+#define MC_CMD_DUMP_DO 0xe8
+
+/* MC_CMD_DUMP_DO_IN msgrequest */
+#define MC_CMD_DUMP_DO_IN_LEN 52
+#define MC_CMD_DUMP_DO_IN_PADDING_OFST 0
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST 4
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+
+/* MC_CMD_DUMP_DO_OUT msgresponse */
+#define MC_CMD_DUMP_DO_OUT_LEN 4
+#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED
+ * Configure unsolicited dumps
+ */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9
+
+/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_OFST 4
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_OFST 28
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPFILE_DST */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+
+
+/***********************************/
+/* MC_CMD_SET_PSU
+ * Adjusts power supply parameters. This is a warranty-voiding operation.
+ * Returns: ENOENT if the parameter or rail specified does not exist, EINVAL if
+ * the parameter is out of range.
+ */
+#define MC_CMD_SET_PSU 0xea
+
+/* MC_CMD_SET_PSU_IN msgrequest */
+#define MC_CMD_SET_PSU_IN_LEN 12
+#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
+#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_OFST 4
+#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */
+/* Desired value, e.g. voltage in mV */
+#define MC_CMD_SET_PSU_IN_VALUE_OFST 8
+
+/* MC_CMD_SET_PSU_OUT msgresponse */
+#define MC_CMD_SET_PSU_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_FUNCTION_INFO
+ * Get function information: PF and VF number.
+ */
+#define MC_CMD_GET_FUNCTION_INFO 0xec
+
+/* MC_CMD_GET_FUNCTION_INFO_IN msgrequest */
+#define MC_CMD_GET_FUNCTION_INFO_IN_LEN 0
+
+/* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */
+#define MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8
+#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0
+#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4
+
+
+/***********************************/
+/* MC_CMD_ENABLE_OFFLINE_BIST
+ * Enters offline BIST mode. All queues are torn down, the chip enters
+ * quiescent mode, and the calling function gets exclusive MCDI ownership.
+ * The only way out is a reboot.
+ */
+#define MC_CMD_ENABLE_OFFLINE_BIST 0xed
+
+/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */
+#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0
+
+/* MC_CMD_ENABLE_OFFLINE_BIST_OUT msgresponse */
+#define MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_START_KR_EYE_PLOT
+ * Start a KR Serdes eye diagram plot on a given lane. The lane must have a
+ * valid signal.
+ */
+#define MC_CMD_START_KR_EYE_PLOT 0xee
+
+/* MC_CMD_START_KR_EYE_PLOT_IN msgrequest */
+#define MC_CMD_START_KR_EYE_PLOT_IN_LEN 4
+#define MC_CMD_START_KR_EYE_PLOT_IN_LANE_OFST 0
+
+/* MC_CMD_START_KR_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_START_KR_EYE_PLOT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_POLL_KR_EYE_PLOT
+ * Poll a KR Serdes eye diagram plot. Returns one row of BER data. The caller
+ * should issue this command repeatedly after starting the eye plot, until no
+ * more data is returned.
+ */
+#define MC_CMD_POLL_KR_EYE_PLOT 0xef
+
+/* MC_CMD_POLL_KR_EYE_PLOT_IN msgrequest */
+#define MC_CMD_POLL_KR_EYE_PLOT_IN_LEN 0
+
+/* MC_CMD_POLL_KR_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMIN 0
+#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMAX 252
+#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+
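The start/poll pairing above implies a simple drain loop on the host side. A sketch, using the driver's own RPC helpers (hypothetical function; the raw 16-bit sample handling is an assumption):

static int example_drain_eye_plot(struct efx_nic *efx, u32 lane)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_START_KR_EYE_PLOT_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMAX);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, START_KR_EYE_PLOT_IN_LANE, lane);
	rc = efx_mcdi_rpc(efx, MC_CMD_START_KR_EYE_PLOT,
			  inbuf, sizeof(inbuf), NULL, 0, NULL);
	if (rc)
		return rc;

	/* Each poll returns one row of BER samples; an empty response
	 * (outlen == 0) marks the end of the plot. */
	do {
		rc = efx_mcdi_rpc(efx, MC_CMD_POLL_KR_EYE_PLOT, NULL, 0,
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		/* ... consume outlen / 2 16-bit samples from outbuf ... */
	} while (outlen != 0);

	return 0;
}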
+
+/***********************************/
+/* MC_CMD_READ_FUSES
+ * Read data programmed into the device's One-Time-Programmable (OTP) fuses.
+ */
+#define MC_CMD_READ_FUSES 0xf0
+
+/* MC_CMD_READ_FUSES_IN msgrequest */
+#define MC_CMD_READ_FUSES_IN_LEN 8
+/* Offset in OTP to read */
+#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
+/* Length of data to read in bytes */
+#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
+
+/* MC_CMD_READ_FUSES_OUT msgresponse */
+#define MC_CMD_READ_FUSES_OUT_LENMIN 4
+#define MC_CMD_READ_FUSES_OUT_LENMAX 252
+#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
+/* Length of returned OTP data in bytes */
+#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
+/* Returned data */
+#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4
+#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1
+#define MC_CMD_READ_FUSES_OUT_DATA_MINNUM 0
+#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM 248
+
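Because an MCDI response is capped at 252 bytes (OUT_LENMAX above), reading a larger OTP region has to be done in chunks of at most DATA_MAXNUM bytes. A sketch of the implied loop (hypothetical helper; the check against OUT_LENGTH is an assumption about sensible error handling):

static int example_read_otp(struct efx_nic *efx, u32 offset,
			    u8 *data, size_t len)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_FUSES_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_READ_FUSES_OUT_LENMAX);
	size_t done, chunk, outlen;
	int rc;

	for (done = 0; done < len; done += chunk) {
		chunk = min_t(size_t, len - done,
			      MC_CMD_READ_FUSES_OUT_DATA_MAXNUM);
		MCDI_SET_DWORD(inbuf, READ_FUSES_IN_OFFSET, offset + done);
		MCDI_SET_DWORD(inbuf, READ_FUSES_IN_LENGTH, chunk);
		rc = efx_mcdi_rpc(efx, MC_CMD_READ_FUSES,
				  inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		if (MCDI_DWORD(outbuf, READ_FUSES_OUT_LENGTH) != chunk)
			return -EIO;
		memcpy(data + done, MCDI_PTR(outbuf, READ_FUSES_OUT_DATA),
		       chunk);
	}
	return 0;
}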
+
+/***********************************/
+/* MC_CMD_KR_TUNE
+ * Get or set KR Serdes RXEQ and TX Driver settings
+ */
+#define MC_CMD_KR_TUNE 0xf1
+
+/* MC_CMD_KR_TUNE_IN msgrequest */
+#define MC_CMD_KR_TUNE_IN_LENMIN 4
+#define MC_CMD_KR_TUNE_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_LEN 1
+/* enum: Get current RXEQ settings */
+#define MC_CMD_KR_TUNE_IN_RXEQ_GET 0x0
+/* enum: Override RXEQ settings */
+#define MC_CMD_KR_TUNE_IN_RXEQ_SET 0x1
+/* enum: Get current TX Driver settings */
+#define MC_CMD_KR_TUNE_IN_TXEQ_GET 0x2
+/* enum: Override TX Driver settings */
+#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3
+/* enum: Force KR Serdes reset / recalibration */
+#define MC_CMD_KR_TUNE_IN_RECAL 0x4
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3
+/* Arguments specific to the operation */
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_OFST 4
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_LEN 4
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MINNUM 0
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MAXNUM 62
+
+/* MC_CMD_KR_TUNE_OUT msgresponse */
+#define MC_CMD_KR_TUNE_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_RXEQ_GET_IN msgrequest */
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* RXEQ Parameter */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: Attenuation (0-15) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0
+/* enum: CTLE Boost (0-15) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1
+/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2
+/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3
+/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4
+/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5
+/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 11
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+
+/* MC_CMD_KR_TUNE_RXEQ_SET_IN msgrequest */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMIN 8
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_LEN 3
+/* RXEQ Parameter */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_OFST 4
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_ID */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 3
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 11
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_LBN 12
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 4
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_RXEQ_SET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_RXEQ_SET_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_RECAL_IN msgrequest */
+#define MC_CMD_KR_TUNE_RECAL_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_RECAL_OUT msgresponse */
+#define MC_CMD_KR_TUNE_RECAL_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PCIE_TUNE
+ * Get or set PCIE Serdes RXEQ and TX Driver settings
+ */
+#define MC_CMD_PCIE_TUNE 0xf2
+
+/* MC_CMD_PCIE_TUNE_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_IN_LENMIN 4
+#define MC_CMD_PCIE_TUNE_IN_LENMAX 252
+#define MC_CMD_PCIE_TUNE_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1
+/* enum: Get current RXEQ settings */
+#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0
+/* enum: Override RXEQ settings */
+#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1
+/* enum: Get current TX Driver settings */
+#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2
+/* enum: Override TX Driver settings */
+#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3
+/* Arguments specific to the operation */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_OFST 4
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_LEN 4
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MINNUM 0
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM 62
+
+/* MC_CMD_PCIE_TUNE_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* RXEQ Parameter */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: Attenuation (0-15) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0
+/* enum: CTLE Boost (0-15) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1
+/* enum: DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2
+/* enum: DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3
+/* enum: DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4
+/* enum: DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5
+/* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 4
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x8 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 12
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+
+/* MC_CMD_PCIE_TUNE_TXEQ_GET_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_TXEQ_GET_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* TXEQ Parameter */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: TxMargin (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0
+/* enum: TxSwing (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1
+/* enum: De-emphasis coefficient C(-1) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2
+/* enum: De-emphasis coefficient C(0) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3
+/* enum: De-emphasis coefficient C(+1) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4
+/* Enum values, see field(s): */
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_LBN 12
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 12
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+
+
+/***********************************/
+/* MC_CMD_LICENSING
+ * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ */
+#define MC_CMD_LICENSING 0xf3
+
+/* MC_CMD_LICENSING_IN msgrequest */
+#define MC_CMD_LICENSING_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_LICENSING_IN_OP_OFST 0
+/* enum: re-read and apply licenses after a license key partition update; note
+ * that this operation returns a zero-length response
+ */
+#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0
+/* enum: report counts of installed licenses */
+#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1
+
+/* MC_CMD_LICENSING_OUT msgresponse */
+#define MC_CMD_LICENSING_OUT_LEN 28
+/* count of application keys which are valid */
+#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0
+/* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with
+ * MC_CMD_FC_OP_LICENSE)
+ */
+#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4
+/* count of application keys which are invalid due to being blacklisted */
+#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8
+/* count of application keys which are invalid due to being unverifiable */
+#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12
+/* count of application keys which are invalid due to being for the wrong node
+ */
+#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16
+/* licensing state (for diagnostics; the exact meaning of the bits in this
+ * field is private to the firmware)
+ */
+#define MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20
+/* licensing subsystem self-test report (for manftest) */
+#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24
+/* enum: licensing subsystem self-test failed */
+#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0
+/* enum: licensing subsystem self-test passed */
+#define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1
+
+
+/***********************************/
+/* MC_CMD_MC2MC_PROXY
+ * Execute an arbitrary MCDI command on the slave MC of a dual-core device.
+ * This will fail on a single-core system.
+ */
+#define MC_CMD_MC2MC_PROXY 0xf4
#endif /* MCDI_PCOL_H */
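Tying the header together: a typical consumer of these definitions declares a response buffer sized by the _OUT_LEN macro, issues the RPC, and extracts fields by name, exactly as the driver changes below do. A sketch for MC_CMD_GET_FUNCTION_INFO (hypothetical helper, mirroring the shape of efx_mcdi_port_get_number() further down):

static int example_get_function_info(struct efx_nic *efx, u32 *pf, u32 *vf)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_FUNCTION_INFO_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_FUNCTION_INFO_OUT_LEN)
		return -EIO;

	*pf = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
	*vf = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);
	return 0;
}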
diff --git a/drivers/net/ethernet/sfc/mcdi_phy.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 13cb40fe90c1..8d33da6697fb 100644
--- a/drivers/net/ethernet/sfc/mcdi_phy.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2009-2010 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2009-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -36,7 +36,7 @@ struct efx_mcdi_phy_data {
static int
efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
{
- u8 outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN);
size_t outlen;
int rc;
@@ -78,7 +78,7 @@ static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
u32 flags, u32 loopback_mode,
u32 loopback_speed)
{
- u8 inbuf[MC_CMD_SET_LINK_IN_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN);
int rc;
BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0);
@@ -102,7 +102,7 @@ fail:
static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
{
- u8 outbuf[MC_CMD_GET_LOOPBACK_MODES_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN);
size_t outlen;
int rc;
@@ -111,7 +111,8 @@ static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
if (rc)
goto fail;
- if (outlen < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) {
+ if (outlen < (MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST +
+ MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN)) {
rc = -EIO;
goto fail;
}
@@ -125,16 +126,16 @@ fail:
return rc;
}
-int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
- unsigned int prtad, unsigned int devad, u16 addr,
- u16 *value_out, u32 *status_out)
+static int efx_mcdi_mdio_read(struct net_device *net_dev,
+ int prtad, int devad, u16 addr)
{
- u8 inbuf[MC_CMD_MDIO_READ_IN_LEN];
- u8 outbuf[MC_CMD_MDIO_READ_OUT_LEN];
+ struct efx_nic *efx = netdev_priv(net_dev);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN);
size_t outlen;
int rc;
- MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, bus);
+ MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, efx->mdio_bus);
MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad);
MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad);
MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr);
@@ -144,25 +145,27 @@ int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
if (rc)
goto fail;
- *value_out = (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
- *status_out = MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS);
- return 0;
+ if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) !=
+ MC_CMD_MDIO_STATUS_GOOD)
+ return -EIO;
+
+ return (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
fail:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
-int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus,
- unsigned int prtad, unsigned int devad, u16 addr,
- u16 value, u32 *status_out)
+static int efx_mcdi_mdio_write(struct net_device *net_dev,
+ int prtad, int devad, u16 addr, u16 value)
{
- u8 inbuf[MC_CMD_MDIO_WRITE_IN_LEN];
- u8 outbuf[MC_CMD_MDIO_WRITE_OUT_LEN];
+ struct efx_nic *efx = netdev_priv(net_dev);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_WRITE_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_WRITE_OUT_LEN);
size_t outlen;
int rc;
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, bus);
+ MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, efx->mdio_bus);
MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad);
MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad);
MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr);
@@ -173,7 +176,10 @@ int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus,
if (rc)
goto fail;
- *status_out = MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS);
+ if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) !=
+ MC_CMD_MDIO_STATUS_GOOD)
+ return -EIO;
+
return 0;
fail:
@@ -304,10 +310,37 @@ static u32 mcdi_to_ethtool_media(u32 media)
}
}
+static void efx_mcdi_phy_decode_link(struct efx_nic *efx,
+ struct efx_link_state *link_state,
+ u32 speed, u32 flags, u32 fcntl)
+{
+ switch (fcntl) {
+ case MC_CMD_FCNTL_AUTO:
+ WARN_ON(1); /* This is not a link mode */
+ link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX;
+ break;
+ case MC_CMD_FCNTL_BIDIR:
+ link_state->fc = EFX_FC_TX | EFX_FC_RX;
+ break;
+ case MC_CMD_FCNTL_RESPOND:
+ link_state->fc = EFX_FC_RX;
+ break;
+ default:
+ WARN_ON(1);
+ case MC_CMD_FCNTL_OFF:
+ link_state->fc = 0;
+ break;
+ }
+
+ link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
+ link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
+ link_state->speed = speed;
+}
+
static int efx_mcdi_phy_probe(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_data;
- u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
u32 caps;
int rc;
@@ -403,7 +436,7 @@ fail:
return rc;
}
-int efx_mcdi_phy_reconfigure(struct efx_nic *efx)
+int efx_mcdi_port_reconfigure(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 caps = (efx->link_advertising ?
@@ -414,37 +447,10 @@ int efx_mcdi_phy_reconfigure(struct efx_nic *efx)
efx->loopback_mode, 0);
}
-void efx_mcdi_phy_decode_link(struct efx_nic *efx,
- struct efx_link_state *link_state,
- u32 speed, u32 flags, u32 fcntl)
-{
- switch (fcntl) {
- case MC_CMD_FCNTL_AUTO:
- WARN_ON(1); /* This is not a link mode */
- link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX;
- break;
- case MC_CMD_FCNTL_BIDIR:
- link_state->fc = EFX_FC_TX | EFX_FC_RX;
- break;
- case MC_CMD_FCNTL_RESPOND:
- link_state->fc = EFX_FC_RX;
- break;
- default:
- WARN_ON(1);
- case MC_CMD_FCNTL_OFF:
- link_state->fc = 0;
- break;
- }
-
- link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
- link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
- link_state->speed = speed;
-}
-
/* Verify that the forced flow control settings (!EFX_FC_AUTO) are
* supported by the link partner. Warn the user if this isn't the case
*/
-void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
+static void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 rmtadv;
@@ -472,7 +478,7 @@ void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
static bool efx_mcdi_phy_poll(struct efx_nic *efx)
{
struct efx_link_state old_state = efx->link_state;
- u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
int rc;
WARN_ON(!mutex_is_locked(&efx->mac_lock));
@@ -507,7 +513,7 @@ static void efx_mcdi_phy_remove(struct efx_nic *efx)
static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
- u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
int rc;
ecmd->supported =
@@ -579,7 +585,7 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
static int efx_mcdi_phy_test_alive(struct efx_nic *efx)
{
- u8 outbuf[MC_CMD_GET_PHY_STATE_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN);
size_t outlen;
int rc;
@@ -615,17 +621,15 @@ static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode,
unsigned int retry, i, count = 0;
size_t outlen;
u32 status;
- u8 *buf, *ptr;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_SFT9001_LEN);
+ u8 *ptr;
int rc;
- buf = kzalloc(0x100, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0);
- MCDI_SET_DWORD(buf, START_BIST_IN_TYPE, bist_mode);
- rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST, buf, MC_CMD_START_BIST_IN_LEN,
- NULL, 0, NULL);
+ MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_mode);
+ rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST,
+ inbuf, MC_CMD_START_BIST_IN_LEN, NULL, 0, NULL);
if (rc)
goto out;
@@ -633,11 +637,11 @@ static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode,
for (retry = 0; retry < 100; ++retry) {
BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
- buf, 0x100, &outlen);
+ outbuf, sizeof(outbuf), &outlen);
if (rc)
goto out;
- status = MCDI_DWORD(buf, POLL_BIST_OUT_RESULT);
+ status = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
if (status != MC_CMD_POLL_BIST_RUNNING)
goto finished;
@@ -654,7 +658,7 @@ finished:
if (efx->phy_type == PHY_TYPE_SFT9001B &&
(bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT ||
bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) {
- ptr = MCDI_PTR(buf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
+ ptr = MCDI_PTR(outbuf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
if (status == MC_CMD_POLL_BIST_PASSED &&
outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) {
for (i = 0; i < 8; i++) {
@@ -668,8 +672,6 @@ finished:
rc = count;
out:
- kfree(buf);
-
return rc;
}
@@ -744,8 +746,8 @@ static const char *efx_mcdi_phy_test_name(struct efx_nic *efx,
static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx,
struct ethtool_eeprom *ee, u8 *data)
{
- u8 outbuf[MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX];
- u8 inbuf[MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN);
size_t outlen;
int rc;
unsigned int payload_len;
@@ -785,8 +787,7 @@ static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx,
space_remaining : payload_len;
memcpy(user_data,
- outbuf + page_off +
- MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST,
+ MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA) + page_off,
to_copy);
space_remaining -= to_copy;
@@ -813,10 +814,10 @@ static int efx_mcdi_phy_get_module_info(struct efx_nic *efx,
}
}
-const struct efx_phy_operations efx_mcdi_phy_ops = {
+static const struct efx_phy_operations efx_mcdi_phy_ops = {
.probe = efx_mcdi_phy_probe,
.init = efx_port_dummy_op_int,
- .reconfigure = efx_mcdi_phy_reconfigure,
+ .reconfigure = efx_mcdi_port_reconfigure,
.poll = efx_mcdi_phy_poll,
.fini = efx_port_dummy_op_void,
.remove = efx_mcdi_phy_remove,
@@ -828,3 +829,199 @@ const struct efx_phy_operations efx_mcdi_phy_ops = {
.get_module_eeprom = efx_mcdi_phy_get_module_eeprom,
.get_module_info = efx_mcdi_phy_get_module_info,
};
+
+u32 efx_mcdi_phy_get_caps(struct efx_nic *efx)
+{
+ struct efx_mcdi_phy_data *phy_data = efx->phy_data;
+
+ return phy_data->supported_cap;
+}
+
+static unsigned int efx_mcdi_event_link_speed[] = {
+ [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
+ [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
+};
+
+void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
+{
+ u32 flags, fcntl, speed, lpa;
+
+ speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
+ EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
+ speed = efx_mcdi_event_link_speed[speed];
+
+ flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
+ fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
+ lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);
+
+ /* efx->link_state is only modified by efx_mcdi_phy_get_link(),
+ * which is only run after flushing the event queues. Therefore, it
+ * is safe to modify the link state outside of the mac_lock here.
+ */
+ efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);
+
+ efx_mcdi_phy_check_fcntl(efx, lpa);
+
+ efx_link_status_changed(efx);
+}
+
+int efx_mcdi_set_mac(struct efx_nic *efx)
+{
+ u32 fcntl;
+ MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN);
+
+ BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
+
+ memcpy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
+ efx->net_dev->dev_addr, ETH_ALEN);
+
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
+ EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
+
+ /* Set simple MAC filter for Siena */
+ MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT,
+ SET_MAC_IN_REJECT_UNCST, efx->unicast_filter);
+
+ switch (efx->wanted_fc) {
+ case EFX_FC_RX | EFX_FC_TX:
+ fcntl = MC_CMD_FCNTL_BIDIR;
+ break;
+ case EFX_FC_RX:
+ fcntl = MC_CMD_FCNTL_RESPOND;
+ break;
+ default:
+ fcntl = MC_CMD_FCNTL_OFF;
+ break;
+ }
+ if (efx->wanted_fc & EFX_FC_AUTO)
+ fcntl = MC_CMD_FCNTL_AUTO;
+ if (efx->fc_disable)
+ fcntl = MC_CMD_FCNTL_OFF;
+
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
+
+ return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes),
+ NULL, 0, NULL);
+}
+
+bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
+ size_t outlength;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), &outlength);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+ __func__, rc);
+ return true;
+ }
+
+ return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
+}
+
+static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
+ u32 dma_len, int enable, int clear)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
+ int rc;
+ int period = enable ? 1000 : 0;
+
+ BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
+
+ MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, dma_addr);
+ MCDI_POPULATE_DWORD_7(inbuf, MAC_STATS_IN_CMD,
+ MAC_STATS_IN_DMA, !!enable,
+ MAC_STATS_IN_CLEAR, clear,
+ MAC_STATS_IN_PERIODIC_CHANGE, 1,
+ MAC_STATS_IN_PERIODIC_ENABLE, !!enable,
+ MAC_STATS_IN_PERIODIC_CLEAR, 0,
+ MAC_STATS_IN_PERIODIC_NOEVENT, 1,
+ MAC_STATS_IN_PERIOD_MS, period);
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n",
+ __func__, enable ? "enable" : "disable", rc);
+ return rc;
+}
+
+void efx_mcdi_mac_start_stats(struct efx_nic *efx)
+{
+ __le64 *dma_stats = efx->stats_buffer.addr;
+
+ dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+
+ efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr,
+ MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0);
+}
+
+void efx_mcdi_mac_stop_stats(struct efx_nic *efx)
+{
+ efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
+}
+
+int efx_mcdi_port_probe(struct efx_nic *efx)
+{
+ int rc;
+
+ /* Hook in PHY operations table */
+ efx->phy_op = &efx_mcdi_phy_ops;
+
+ /* Set up MDIO structure for PHY */
+ efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+ efx->mdio.mdio_read = efx_mcdi_mdio_read;
+ efx->mdio.mdio_write = efx_mcdi_mdio_write;
+
+ /* Fill out MDIO structure, loopback modes, and initial link state */
+ rc = efx->phy_op->probe(efx);
+ if (rc != 0)
+ return rc;
+
+ /* Allocate buffer for stats */
+ rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
+ MC_CMD_MAC_NSTATS * sizeof(u64), GFP_KERNEL);
+ if (rc)
+ return rc;
+ netif_dbg(efx, probe, efx->net_dev,
+ "stats buffer at %llx (virt %p phys %llx)\n",
+ (u64)efx->stats_buffer.dma_addr,
+ efx->stats_buffer.addr,
+ (u64)virt_to_phys(efx->stats_buffer.addr));
+
+ efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);
+
+ return 0;
+}
+
+void efx_mcdi_port_remove(struct efx_nic *efx)
+{
+ efx->phy_op->remove(efx);
+ efx_nic_free_buffer(efx, &efx->stats_buffer);
+}
+
+/* Get physical port number (EF10 only; on Siena it is the same as the PF
+ * number)
+ */
+int efx_mcdi_port_get_number(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PORT_ASSIGNMENT, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ return rc;
+
+ return MCDI_DWORD(outbuf, GET_PORT_ASSIGNMENT_OUT_PORT);
+}
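One consequence of the refactoring above: efx_mcdi_mdio_read()/efx_mcdi_mdio_write() now match the struct mdio_if_info callback signatures from <linux/mdio.h> (value or negative errno), so once efx_mcdi_port_probe() has installed them, the PHY can be driven through the generic MDIO interface. A sketch (hypothetical helper, assuming the standard mdio45 register constants and that efx->mdio.prtad has been filled in during probe):

#include <linux/mdio.h>

static int example_read_phy_devid(struct efx_nic *efx, u16 *devid)
{
	/* Goes through efx_mcdi_mdio_read() via the ops installed in
	 * efx_mcdi_port_probe(); a negative return is an errno. */
	int rc = efx->mdio.mdio_read(efx->net_dev, efx->mdio.prtad,
				     MDIO_MMD_PMAPMD, MDIO_DEVID1);
	if (rc < 0)
		return rc;
	*devid = rc;
	return 0;
}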
diff --git a/drivers/net/ethernet/sfc/mdio_10g.c b/drivers/net/ethernet/sfc/mdio_10g.c
index 9acfd6696ffb..8ff954c59efa 100644
--- a/drivers/net/ethernet/sfc/mdio_10g.c
+++ b/drivers/net/ethernet/sfc/mdio_10g.c
@@ -1,5 +1,5 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/mdio_10g.h b/drivers/net/ethernet/sfc/mdio_10g.h
index a97dbbd2de99..16824fecc5ee 100644
--- a/drivers/net/ethernet/sfc/mdio_10g.h
+++ b/drivers/net/ethernet/sfc/mdio_10g.h
@@ -1,5 +1,5 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c
index 08f825b71ac8..a77a8bd2dd70 100644
--- a/drivers/net/ethernet/sfc/mtd.c
+++ b/drivers/net/ethernet/sfc/mtd.c
@@ -1,194 +1,32 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
-#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
-#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include "net_driver.h"
-#include "spi.h"
#include "efx.h"
-#include "nic.h"
-#include "mcdi.h"
-#include "mcdi_pcol.h"
-
-#define EFX_SPI_VERIFY_BUF_LEN 16
-
-struct efx_mtd_partition {
- struct mtd_info mtd;
- union {
- struct {
- bool updating;
- u8 nvram_type;
- u16 fw_subtype;
- } mcdi;
- size_t offset;
- };
- const char *type_name;
- char name[IFNAMSIZ + 20];
-};
-
-struct efx_mtd_ops {
- int (*read)(struct mtd_info *mtd, loff_t start, size_t len,
- size_t *retlen, u8 *buffer);
- int (*erase)(struct mtd_info *mtd, loff_t start, size_t len);
- int (*write)(struct mtd_info *mtd, loff_t start, size_t len,
- size_t *retlen, const u8 *buffer);
- int (*sync)(struct mtd_info *mtd);
-};
-
-struct efx_mtd {
- struct list_head node;
- struct efx_nic *efx;
- const struct efx_spi_device *spi;
- const char *name;
- const struct efx_mtd_ops *ops;
- size_t n_parts;
- struct efx_mtd_partition part[0];
-};
-
-#define efx_for_each_partition(part, efx_mtd) \
- for ((part) = &(efx_mtd)->part[0]; \
- (part) != &(efx_mtd)->part[(efx_mtd)->n_parts]; \
- (part)++)
#define to_efx_mtd_partition(mtd) \
container_of(mtd, struct efx_mtd_partition, mtd)
-static int falcon_mtd_probe(struct efx_nic *efx);
-static int siena_mtd_probe(struct efx_nic *efx);
-
-/* SPI utilities */
-
-static int
-efx_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible)
-{
- struct efx_mtd *efx_mtd = part->mtd.priv;
- const struct efx_spi_device *spi = efx_mtd->spi;
- struct efx_nic *efx = efx_mtd->efx;
- u8 status;
- int rc, i;
-
- /* Wait up to 4s for flash/EEPROM to finish a slow operation. */
- for (i = 0; i < 40; i++) {
- __set_current_state(uninterruptible ?
- TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
- schedule_timeout(HZ / 10);
- rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
- &status, sizeof(status));
- if (rc)
- return rc;
- if (!(status & SPI_STATUS_NRDY))
- return 0;
- if (signal_pending(current))
- return -EINTR;
- }
- pr_err("%s: timed out waiting for %s\n", part->name, efx_mtd->name);
- return -ETIMEDOUT;
-}
-
-static int
-efx_spi_unlock(struct efx_nic *efx, const struct efx_spi_device *spi)
-{
- const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
- SPI_STATUS_BP0);
- u8 status;
- int rc;
-
- rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
- &status, sizeof(status));
- if (rc)
- return rc;
-
- if (!(status & unlock_mask))
- return 0; /* already unlocked */
-
- rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
- if (rc)
- return rc;
- rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
- if (rc)
- return rc;
-
- status &= ~unlock_mask;
- rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
- NULL, sizeof(status));
- if (rc)
- return rc;
- rc = falcon_spi_wait_write(efx, spi);
- if (rc)
- return rc;
-
- return 0;
-}
-
-static int
-efx_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len)
-{
- struct efx_mtd *efx_mtd = part->mtd.priv;
- const struct efx_spi_device *spi = efx_mtd->spi;
- struct efx_nic *efx = efx_mtd->efx;
- unsigned pos, block_len;
- u8 empty[EFX_SPI_VERIFY_BUF_LEN];
- u8 buffer[EFX_SPI_VERIFY_BUF_LEN];
- int rc;
-
- if (len != spi->erase_size)
- return -EINVAL;
-
- if (spi->erase_command == 0)
- return -EOPNOTSUPP;
-
- rc = efx_spi_unlock(efx, spi);
- if (rc)
- return rc;
- rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
- if (rc)
- return rc;
- rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
- NULL, 0);
- if (rc)
- return rc;
- rc = efx_spi_slow_wait(part, false);
-
- /* Verify the entire region has been wiped */
- memset(empty, 0xff, sizeof(empty));
- for (pos = 0; pos < len; pos += block_len) {
- block_len = min(len - pos, sizeof(buffer));
- rc = falcon_spi_read(efx, spi, start + pos, block_len,
- NULL, buffer);
- if (rc)
- return rc;
- if (memcmp(empty, buffer, block_len))
- return -EIO;
-
- /* Avoid locking up the system */
- cond_resched();
- if (signal_pending(current))
- return -EINTR;
- }
-
- return rc;
-}
-
/* MTD interface */
static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
{
- struct efx_mtd *efx_mtd = mtd->priv;
+ struct efx_nic *efx = mtd->priv;
int rc;
- rc = efx_mtd->ops->erase(mtd, erase->addr, erase->len);
+ rc = efx->type->mtd_erase(mtd, erase->addr, erase->len);
if (rc == 0) {
erase->state = MTD_ERASE_DONE;
} else {
@@ -202,13 +40,13 @@ static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
static void efx_mtd_sync(struct mtd_info *mtd)
{
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
+ struct efx_nic *efx = mtd->priv;
int rc;
- rc = efx_mtd->ops->sync(mtd);
+ rc = efx->type->mtd_sync(mtd);
if (rc)
pr_err("%s: %s sync failed (%d)\n",
- part->name, efx_mtd->name, rc);
+ part->name, part->dev_type_name, rc);
}
static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
@@ -222,62 +60,44 @@ static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
ssleep(1);
}
WARN_ON(rc);
+ list_del(&part->node);
}
-static void efx_mtd_remove_device(struct efx_mtd *efx_mtd)
-{
- struct efx_mtd_partition *part;
-
- efx_for_each_partition(part, efx_mtd)
- efx_mtd_remove_partition(part);
- list_del(&efx_mtd->node);
- kfree(efx_mtd);
-}
-
-static void efx_mtd_rename_device(struct efx_mtd *efx_mtd)
-{
- struct efx_mtd_partition *part;
-
- efx_for_each_partition(part, efx_mtd)
- if (efx_nic_rev(efx_mtd->efx) >= EFX_REV_SIENA_A0)
- snprintf(part->name, sizeof(part->name),
- "%s %s:%02x", efx_mtd->efx->name,
- part->type_name, part->mcdi.fw_subtype);
- else
- snprintf(part->name, sizeof(part->name),
- "%s %s", efx_mtd->efx->name,
- part->type_name);
-}
-
-static int efx_mtd_probe_device(struct efx_nic *efx, struct efx_mtd *efx_mtd)
+int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
+ size_t n_parts, size_t sizeof_part)
{
struct efx_mtd_partition *part;
+ size_t i;
- efx_mtd->efx = efx;
+ for (i = 0; i < n_parts; i++) {
+ part = (struct efx_mtd_partition *)((char *)parts +
+ i * sizeof_part);
- efx_mtd_rename_device(efx_mtd);
-
- efx_for_each_partition(part, efx_mtd) {
part->mtd.writesize = 1;
part->mtd.owner = THIS_MODULE;
- part->mtd.priv = efx_mtd;
+ part->mtd.priv = efx;
part->mtd.name = part->name;
part->mtd._erase = efx_mtd_erase;
- part->mtd._read = efx_mtd->ops->read;
- part->mtd._write = efx_mtd->ops->write;
+ part->mtd._read = efx->type->mtd_read;
+ part->mtd._write = efx->type->mtd_write;
part->mtd._sync = efx_mtd_sync;
+ efx->type->mtd_rename(part);
+
if (mtd_device_register(&part->mtd, NULL, 0))
goto fail;
+
+ /* Add to list in order - efx_mtd_remove() depends on this */
+ list_add_tail(&part->node, &efx->mtd_list);
}
- list_add(&efx_mtd->node, &efx->mtd_list);
return 0;
fail:
- while (part != &efx_mtd->part[0]) {
- --part;
+ while (i--) {
+ part = (struct efx_mtd_partition *)((char *)parts +
+ i * sizeof_part);
efx_mtd_remove_partition(part);
}
/* Failure is unlikely here, but probably means we're out of memory */
@@ -286,410 +106,28 @@ fail:
void efx_mtd_remove(struct efx_nic *efx)
{
- struct efx_mtd *efx_mtd, *next;
+ struct efx_mtd_partition *parts, *part, *next;
WARN_ON(efx_dev_registered(efx));
- list_for_each_entry_safe(efx_mtd, next, &efx->mtd_list, node)
- efx_mtd_remove_device(efx_mtd);
-}
-
-void efx_mtd_rename(struct efx_nic *efx)
-{
- struct efx_mtd *efx_mtd;
-
- ASSERT_RTNL();
-
- list_for_each_entry(efx_mtd, &efx->mtd_list, node)
- efx_mtd_rename_device(efx_mtd);
-}
-
-int efx_mtd_probe(struct efx_nic *efx)
-{
- if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
- return siena_mtd_probe(efx);
- else
- return falcon_mtd_probe(efx);
-}
-
-/* Implementation of MTD operations for Falcon */
-
-static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
- size_t len, size_t *retlen, u8 *buffer)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- const struct efx_spi_device *spi = efx_mtd->spi;
- struct efx_nic *efx = efx_mtd->efx;
- struct falcon_nic_data *nic_data = efx->nic_data;
- int rc;
-
- rc = mutex_lock_interruptible(&nic_data->spi_lock);
- if (rc)
- return rc;
- rc = falcon_spi_read(efx, spi, part->offset + start, len,
- retlen, buffer);
- mutex_unlock(&nic_data->spi_lock);
- return rc;
-}
-
-static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- struct efx_nic *efx = efx_mtd->efx;
- struct falcon_nic_data *nic_data = efx->nic_data;
- int rc;
-
- rc = mutex_lock_interruptible(&nic_data->spi_lock);
- if (rc)
- return rc;
- rc = efx_spi_erase(part, part->offset + start, len);
- mutex_unlock(&nic_data->spi_lock);
- return rc;
-}
-
-static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
- size_t len, size_t *retlen, const u8 *buffer)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- const struct efx_spi_device *spi = efx_mtd->spi;
- struct efx_nic *efx = efx_mtd->efx;
- struct falcon_nic_data *nic_data = efx->nic_data;
- int rc;
-
- rc = mutex_lock_interruptible(&nic_data->spi_lock);
- if (rc)
- return rc;
- rc = falcon_spi_write(efx, spi, part->offset + start, len,
- retlen, buffer);
- mutex_unlock(&nic_data->spi_lock);
- return rc;
-}
-
-static int falcon_mtd_sync(struct mtd_info *mtd)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- struct efx_nic *efx = efx_mtd->efx;
- struct falcon_nic_data *nic_data = efx->nic_data;
- int rc;
-
- mutex_lock(&nic_data->spi_lock);
- rc = efx_spi_slow_wait(part, true);
- mutex_unlock(&nic_data->spi_lock);
- return rc;
-}
-
-static const struct efx_mtd_ops falcon_mtd_ops = {
- .read = falcon_mtd_read,
- .erase = falcon_mtd_erase,
- .write = falcon_mtd_write,
- .sync = falcon_mtd_sync,
-};
-
-static int falcon_mtd_probe(struct efx_nic *efx)
-{
- struct falcon_nic_data *nic_data = efx->nic_data;
- struct efx_spi_device *spi;
- struct efx_mtd *efx_mtd;
- int rc = -ENODEV;
-
- ASSERT_RTNL();
-
- spi = &nic_data->spi_flash;
- if (efx_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
- efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
- GFP_KERNEL);
- if (!efx_mtd)
- return -ENOMEM;
-
- efx_mtd->spi = spi;
- efx_mtd->name = "flash";
- efx_mtd->ops = &falcon_mtd_ops;
-
- efx_mtd->n_parts = 1;
- efx_mtd->part[0].mtd.type = MTD_NORFLASH;
- efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH;
- efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
- efx_mtd->part[0].mtd.erasesize = spi->erase_size;
- efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START;
- efx_mtd->part[0].type_name = "sfc_flash_bootrom";
-
- rc = efx_mtd_probe_device(efx, efx_mtd);
- if (rc) {
- kfree(efx_mtd);
- return rc;
- }
- }
-
- spi = &nic_data->spi_eeprom;
- if (efx_spi_present(spi) && spi->size > EFX_EEPROM_BOOTCONFIG_START) {
- efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
- GFP_KERNEL);
- if (!efx_mtd)
- return -ENOMEM;
-
- efx_mtd->spi = spi;
- efx_mtd->name = "EEPROM";
- efx_mtd->ops = &falcon_mtd_ops;
-
- efx_mtd->n_parts = 1;
- efx_mtd->part[0].mtd.type = MTD_RAM;
- efx_mtd->part[0].mtd.flags = MTD_CAP_RAM;
- efx_mtd->part[0].mtd.size =
- min(spi->size, EFX_EEPROM_BOOTCONFIG_END) -
- EFX_EEPROM_BOOTCONFIG_START;
- efx_mtd->part[0].mtd.erasesize = spi->erase_size;
- efx_mtd->part[0].offset = EFX_EEPROM_BOOTCONFIG_START;
- efx_mtd->part[0].type_name = "sfc_bootconfig";
-
- rc = efx_mtd_probe_device(efx, efx_mtd);
- if (rc) {
- kfree(efx_mtd);
- return rc;
- }
- }
-
- return rc;
-}
-
-/* Implementation of MTD operations for Siena */
-
-static int siena_mtd_read(struct mtd_info *mtd, loff_t start,
- size_t len, size_t *retlen, u8 *buffer)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- struct efx_nic *efx = efx_mtd->efx;
- loff_t offset = start;
- loff_t end = min_t(loff_t, start + len, mtd->size);
- size_t chunk;
- int rc = 0;
-
- while (offset < end) {
- chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
- rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset,
- buffer, chunk);
- if (rc)
- goto out;
- offset += chunk;
- buffer += chunk;
- }
-out:
- *retlen = offset - start;
- return rc;
-}
-
-static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- struct efx_nic *efx = efx_mtd->efx;
- loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
- loff_t end = min_t(loff_t, start + len, mtd->size);
- size_t chunk = part->mtd.erasesize;
- int rc = 0;
-
- if (!part->mcdi.updating) {
- rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
- if (rc)
- goto out;
- part->mcdi.updating = true;
- }
-
- /* The MCDI interface can in fact do multiple erase blocks at once;
- * but erasing may be slow, so we make multiple calls here to avoid
- * tripping the MCDI RPC timeout. */
- while (offset < end) {
- rc = efx_mcdi_nvram_erase(efx, part->mcdi.nvram_type, offset,
- chunk);
- if (rc)
- goto out;
- offset += chunk;
- }
-out:
- return rc;
-}
-
-static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
- size_t len, size_t *retlen, const u8 *buffer)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- struct efx_nic *efx = efx_mtd->efx;
- loff_t offset = start;
- loff_t end = min_t(loff_t, start + len, mtd->size);
- size_t chunk;
- int rc = 0;
-
- if (!part->mcdi.updating) {
- rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
- if (rc)
- goto out;
- part->mcdi.updating = true;
- }
-
- while (offset < end) {
- chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
- rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset,
- buffer, chunk);
- if (rc)
- goto out;
- offset += chunk;
- buffer += chunk;
- }
-out:
- *retlen = offset - start;
- return rc;
-}
-
-static int siena_mtd_sync(struct mtd_info *mtd)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- struct efx_nic *efx = efx_mtd->efx;
- int rc = 0;
-
- if (part->mcdi.updating) {
- part->mcdi.updating = false;
- rc = efx_mcdi_nvram_update_finish(efx, part->mcdi.nvram_type);
- }
-
- return rc;
-}
-
-static const struct efx_mtd_ops siena_mtd_ops = {
- .read = siena_mtd_read,
- .erase = siena_mtd_erase,
- .write = siena_mtd_write,
- .sync = siena_mtd_sync,
-};
-
-struct siena_nvram_type_info {
- int port;
- const char *name;
-};
+ if (list_empty(&efx->mtd_list))
+ return;
-static const struct siena_nvram_type_info siena_nvram_types[] = {
- [MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" },
- [MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" },
- [MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" },
- [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0] = { 0, "sfc_static_cfg" },
- [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1] = { 1, "sfc_static_cfg" },
- [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0] = { 0, "sfc_dynamic_cfg" },
- [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1] = { 1, "sfc_dynamic_cfg" },
- [MC_CMD_NVRAM_TYPE_EXP_ROM] = { 0, "sfc_exp_rom" },
- [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0] = { 0, "sfc_exp_rom_cfg" },
- [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" },
- [MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" },
- [MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" },
- [MC_CMD_NVRAM_TYPE_FPGA] = { 0, "sfc_fpga" },
-};
+ parts = list_first_entry(&efx->mtd_list, struct efx_mtd_partition,
+ node);
-static int siena_mtd_probe_partition(struct efx_nic *efx,
- struct efx_mtd *efx_mtd,
- unsigned int part_id,
- unsigned int type)
-{
- struct efx_mtd_partition *part = &efx_mtd->part[part_id];
- const struct siena_nvram_type_info *info;
- size_t size, erase_size;
- bool protected;
- int rc;
-
- if (type >= ARRAY_SIZE(siena_nvram_types) ||
- siena_nvram_types[type].name == NULL)
- return -ENODEV;
-
- info = &siena_nvram_types[type];
-
- if (info->port != efx_port_num(efx))
- return -ENODEV;
-
- rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
- if (rc)
- return rc;
- if (protected)
- return -ENODEV; /* hide it */
-
- part->mcdi.nvram_type = type;
- part->type_name = info->name;
-
- part->mtd.type = MTD_NORFLASH;
- part->mtd.flags = MTD_CAP_NORFLASH;
- part->mtd.size = size;
- part->mtd.erasesize = erase_size;
+ list_for_each_entry_safe(part, next, &efx->mtd_list, node)
+ efx_mtd_remove_partition(part);
- return 0;
+ kfree(parts);
}
-static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
- struct efx_mtd *efx_mtd)
+void efx_mtd_rename(struct efx_nic *efx)
{
struct efx_mtd_partition *part;
- uint16_t fw_subtype_list[
- MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM];
- int rc;
-
- rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL);
- if (rc)
- return rc;
-
- efx_for_each_partition(part, efx_mtd)
- part->mcdi.fw_subtype = fw_subtype_list[part->mcdi.nvram_type];
-
- return 0;
-}
-
-static int siena_mtd_probe(struct efx_nic *efx)
-{
- struct efx_mtd *efx_mtd;
- int rc = -ENODEV;
- u32 nvram_types;
- unsigned int type;
ASSERT_RTNL();
- rc = efx_mcdi_nvram_types(efx, &nvram_types);
- if (rc)
- return rc;
-
- efx_mtd = kzalloc(sizeof(*efx_mtd) +
- hweight32(nvram_types) * sizeof(efx_mtd->part[0]),
- GFP_KERNEL);
- if (!efx_mtd)
- return -ENOMEM;
-
- efx_mtd->name = "Siena NVRAM manager";
-
- efx_mtd->ops = &siena_mtd_ops;
-
- type = 0;
- efx_mtd->n_parts = 0;
-
- while (nvram_types != 0) {
- if (nvram_types & 1) {
- rc = siena_mtd_probe_partition(efx, efx_mtd,
- efx_mtd->n_parts, type);
- if (rc == 0)
- efx_mtd->n_parts++;
- else if (rc != -ENODEV)
- goto fail;
- }
- type++;
- nvram_types >>= 1;
- }
-
- rc = siena_mtd_get_fw_subtypes(efx, efx_mtd);
- if (rc)
- goto fail;
-
- rc = efx_mtd_probe_device(efx, efx_mtd);
-fail:
- if (rc)
- kfree(efx_mtd);
- return rc;
+ list_for_each_entry(part, &efx->mtd_list, node)
+ efx->type->mtd_rename(part);
}
-
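The new efx_mtd_add() takes a sizeof_part stride so that each NIC type can embed struct efx_mtd_partition at the start of a larger, type-specific partition record and still register a plain array. A sketch of the intended calling pattern (hypothetical caller and struct; the private field is illustrative only):

struct example_mtd_partition {
	struct efx_mtd_partition common;	/* must come first */
	unsigned int nvram_type;		/* hypothetical private state */
};

static int example_mtd_probe(struct efx_nic *efx, size_t n_parts)
{
	struct example_mtd_partition *parts;
	size_t i;

	parts = kcalloc(n_parts, sizeof(*parts), GFP_KERNEL);
	if (!parts)
		return -ENOMEM;

	for (i = 0; i < n_parts; i++) {
		/* ... fill in parts[i].common.mtd and private fields ... */
	}

	/* efx_mtd_add() steps through the array in sizeof(*parts) strides,
	 * registering the MTD embedded in each record. */
	return efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
}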
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index f4c7e6b67743..b172ed133055 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2011 Solarflare Communications Inc.
+ * Copyright 2005-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -27,9 +27,11 @@
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/i2c.h>
+#include <linux/mtd/mtd.h>
#include "enum.h"
#include "bitfield.h"
+#include "filter.h"
/**************************************************************************
*
@@ -37,7 +39,7 @@
*
**************************************************************************/
-#define EFX_DRIVER_VERSION "3.2"
+#define EFX_DRIVER_VERSION "4.0"
#ifdef DEBUG
#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
@@ -93,21 +95,36 @@ struct efx_ptp_data;
struct efx_self_tests;
/**
- * struct efx_special_buffer - An Efx special buffer
- * @addr: CPU base address of the buffer
+ * struct efx_buffer - A general-purpose DMA buffer
+ * @addr: host base address of the buffer
* @dma_addr: DMA base address of the buffer
* @len: Buffer length, in bytes
- * @index: Buffer index within controller;s buffer table
- * @entries: Number of buffer table entries
*
- * Special buffers are used for the event queues and the TX and RX
- * descriptor queues for each channel. They are *not* used for the
- * actual transmit and receive buffers.
+ * The NIC uses these buffers for its interrupt status registers and
+ * MAC stats dumps.
*/
-struct efx_special_buffer {
+struct efx_buffer {
void *addr;
dma_addr_t dma_addr;
unsigned int len;
+};
+
+/**
+ * struct efx_special_buffer - DMA buffer entered into buffer table
+ * @buf: Standard &struct efx_buffer
+ * @index: Buffer index within controller's buffer table
+ * @entries: Number of buffer table entries
+ *
+ * The NIC has a buffer table that maps buffers of size %EFX_BUF_SIZE.
+ * Event and descriptor rings are addressed via one or more buffer
+ * table entries (and so can be physically non-contiguous, although we
+ * currently do not take advantage of that). On Falcon and Siena we
+ * have to take care of allocating and initialising the entries
+ * ourselves. On later hardware this is managed by the firmware and
+ * @index and @entries are left as 0.
+ */
+struct efx_special_buffer {
+ struct efx_buffer buf;
unsigned int index;
unsigned int entries;
};
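With the DMA fields now wrapped in an embedded struct efx_buffer, ring users reach the mapping through @buf rather than directly. A sketch of the access pattern implied by this split (hypothetical helper, assuming the event-ring layout described above):

static efx_qword_t *example_event(struct efx_channel *channel,
				  unsigned int index)
{
	/* The host-visible ring memory now lives in the general-purpose
	 * buffer embedded in the special buffer. */
	return ((efx_qword_t *)channel->eventq.buf.addr) +
	       (index & channel->eventq_mask);
}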
@@ -118,6 +135,7 @@ struct efx_special_buffer {
* freed when descriptor completes
* @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be
* freed when descriptor completes.
+ * @option: When @flags & %EFX_TX_BUF_OPTION, a NIC-specific option descriptor.
* @dma_addr: DMA address of the fragment.
* @flags: Flags for allocation and DMA mapping type
* @len: Length of this fragment.
@@ -129,7 +147,10 @@ struct efx_tx_buffer {
const struct sk_buff *skb;
void *heap_buf;
};
- dma_addr_t dma_addr;
+ union {
+ efx_qword_t option;
+ dma_addr_t dma_addr;
+ };
unsigned short flags;
unsigned short len;
unsigned short unmap_len;
@@ -138,6 +159,7 @@ struct efx_tx_buffer {
#define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */
#define EFX_TX_BUF_HEAP 4 /* buffer was allocated with kmalloc() */
#define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */
+#define EFX_TX_BUF_OPTION 0x10 /* empty buffer for option descriptor */
/**
* struct efx_tx_queue - An Efx TX queue
@@ -169,6 +191,7 @@ struct efx_tx_buffer {
* variable indicates that the queue is empty. This is to
* avoid cache-line ping-pong between the xmit path and the
* completion path.
+ * @merge_events: Number of TX merged completion events
* @insert_count: Current insert pointer
* This is the number of buffers that have been added to the
* software ring.
@@ -205,6 +228,7 @@ struct efx_tx_queue {
/* Members used mainly on the completion path */
unsigned int read_count ____cacheline_aligned_in_smp;
unsigned int old_write_count;
+ unsigned int merge_events;
/* Members used only on the xmit path */
unsigned int insert_count ____cacheline_aligned_in_smp;
@@ -244,6 +268,7 @@ struct efx_rx_buffer {
#define EFX_RX_PKT_CSUMMED 0x0002
#define EFX_RX_PKT_DISCARD 0x0004
#define EFX_RX_PKT_TCP 0x0040
+#define EFX_RX_PKT_PREFIX_LEN 0x0080 /* length is in prefix only */
/**
* struct efx_rx_page_state - Page-based rx buffer state
@@ -271,13 +296,14 @@ struct efx_rx_page_state {
* @buffer: The software buffer ring
* @rxd: The hardware descriptor ring
* @ptr_mask: The size of the ring minus 1.
- * @enabled: Receive queue enabled indicator.
+ * @refill_enabled: Enable refill whenever fill level is low
* @flush_pending: Set when a RX flush is pending. Has the same lifetime as
* @rxq_flush_pending.
* @added_count: Number of buffers added to the receive queue.
* @notified_count: Number of buffers given to NIC (<= @added_count).
* @removed_count: Number of buffers removed from the receive queue.
- * @scatter_n: Number of buffers used by current packet
+ * @scatter_n: Used by NIC-specific receive code.
+ * @scatter_len: Used by NIC-specific receive code.
* @page_ring: The ring to store DMA mapped pages for reuse.
* @page_add: Counter to calculate the write pointer for the recycle ring.
* @page_remove: Counter to calculate the read pointer for the recycle ring.
@@ -302,13 +328,14 @@ struct efx_rx_queue {
struct efx_rx_buffer *buffer;
struct efx_special_buffer rxd;
unsigned int ptr_mask;
- bool enabled;
+ bool refill_enabled;
bool flush_pending;
unsigned int added_count;
unsigned int notified_count;
unsigned int removed_count;
unsigned int scatter_n;
+ unsigned int scatter_len;
struct page **page_ring;
unsigned int page_add;
unsigned int page_remove;
@@ -325,22 +352,6 @@ struct efx_rx_queue {
unsigned int slow_fill_count;
};
-/**
- * struct efx_buffer - An Efx general-purpose buffer
- * @addr: host base address of the buffer
- * @dma_addr: DMA base address of the buffer
- * @len: Buffer length, in bytes
- *
- * The NIC uses these buffers for its interrupt status registers and
- * MAC stats dumps.
- */
-struct efx_buffer {
- void *addr;
- dma_addr_t dma_addr;
- unsigned int len;
-};
-
-
enum efx_rx_alloc_method {
RX_ALLOC_METHOD_AUTO = 0,
RX_ALLOC_METHOD_SKB = 1,
@@ -357,12 +368,12 @@ enum efx_rx_alloc_method {
* @efx: Associated Efx NIC
* @channel: Channel instance number
* @type: Channel type definition
+ * @eventq_init: Event queue initialised flag
* @enabled: Channel enabled indicator
* @irq: IRQ number (MSI and MSI-X only)
* @irq_moderation: IRQ moderation value (in hardware ticks)
* @napi_dev: Net device used with NAPI
* @napi_str: NAPI control structure
- * @work_pending: Is work pending via NAPI?
* @eventq: Event queue buffer
* @eventq_mask: Event queue pointer mask
* @eventq_read_ptr: Event queue read pointer
@@ -378,6 +389,8 @@ enum efx_rx_alloc_method {
* @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
* @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to
* lack of descriptors
+ * @n_rx_merge_events: Number of RX merged completion events
+ * @n_rx_merge_packets: Number of RX packets completed by merged events
* @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
* __efx_rx_packet(), or zero if there is none
* @rx_pkt_index: Ring index of first buffer for next packet to be delivered
@@ -389,12 +402,12 @@ struct efx_channel {
struct efx_nic *efx;
int channel;
const struct efx_channel_type *type;
+ bool eventq_init;
bool enabled;
int irq;
unsigned int irq_moderation;
struct net_device *napi_dev;
struct napi_struct napi_str;
- bool work_pending;
struct efx_special_buffer eventq;
unsigned int eventq_mask;
unsigned int eventq_read_ptr;
@@ -414,6 +427,8 @@ struct efx_channel {
unsigned n_rx_overlength;
unsigned n_skbuff_leaks;
unsigned int n_rx_nodesc_trunc;
+ unsigned int n_rx_merge_events;
+ unsigned int n_rx_merge_packets;
unsigned int rx_pkt_n_frags;
unsigned int rx_pkt_index;
@@ -423,6 +438,21 @@ struct efx_channel {
};
/**
+ * struct efx_msi_context - Context for each MSI
+ * @efx: The associated NIC
+ * @index: Index of the channel/IRQ
+ * @name: Name of the channel/IRQ
+ *
+ * Unlike &struct efx_channel, this is never reallocated and is always
+ * safe for the IRQ handler to access.
+ */
+struct efx_msi_context {
+ struct efx_nic *efx;
+ unsigned int index;
+ char name[IFNAMSIZ + 6];
+};
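A minimal sketch (handler name hypothetical) of the intended usage: the handler dereferences the stable MSI context rather than a channel pointer that may be reallocated, and honours the @irq_soft_enabled flag documented below.

static irqreturn_t example_msi_handler(int irq, void *dev_id)
{
	struct efx_msi_context *context = dev_id;
	struct efx_nic *efx = context->efx;

	if (!ACCESS_ONCE(efx->irq_soft_enabled))
		return IRQ_HANDLED;	/* acknowledge but do nothing else */

	/* ... schedule NAPI for channel context->index ... */
	return IRQ_HANDLED;
}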
+
+/**
* struct efx_channel_type - distinguishes traffic and extra channels
* @handle_no_channel: Handle failure to allocate an extra channel
* @pre_probe: Set up extra state prior to initialisation
@@ -579,75 +609,17 @@ static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode)
return !!(mode & ~PHY_MODE_TX_DISABLED);
}
-/*
- * Efx extended statistics
- *
- * Not all statistics are provided by all supported MACs. The purpose
- * is this structure is to contain the raw statistics provided by each
- * MAC.
+/**
+ * struct efx_hw_stat_desc - Description of a hardware statistic
+ * @name: Name of the statistic as visible through ethtool, or %NULL if
+ * it should not be exposed
+ * @dma_width: Width in bits (0 for non-DMA statistics)
+ * @offset: Offset within stats (ignored for non-DMA statistics)
*/
-struct efx_mac_stats {
- u64 tx_bytes;
- u64 tx_good_bytes;
- u64 tx_bad_bytes;
- u64 tx_packets;
- u64 tx_bad;
- u64 tx_pause;
- u64 tx_control;
- u64 tx_unicast;
- u64 tx_multicast;
- u64 tx_broadcast;
- u64 tx_lt64;
- u64 tx_64;
- u64 tx_65_to_127;
- u64 tx_128_to_255;
- u64 tx_256_to_511;
- u64 tx_512_to_1023;
- u64 tx_1024_to_15xx;
- u64 tx_15xx_to_jumbo;
- u64 tx_gtjumbo;
- u64 tx_collision;
- u64 tx_single_collision;
- u64 tx_multiple_collision;
- u64 tx_excessive_collision;
- u64 tx_deferred;
- u64 tx_late_collision;
- u64 tx_excessive_deferred;
- u64 tx_non_tcpudp;
- u64 tx_mac_src_error;
- u64 tx_ip_src_error;
- u64 rx_bytes;
- u64 rx_good_bytes;
- u64 rx_bad_bytes;
- u64 rx_packets;
- u64 rx_good;
- u64 rx_bad;
- u64 rx_pause;
- u64 rx_control;
- u64 rx_unicast;
- u64 rx_multicast;
- u64 rx_broadcast;
- u64 rx_lt64;
- u64 rx_64;
- u64 rx_65_to_127;
- u64 rx_128_to_255;
- u64 rx_256_to_511;
- u64 rx_512_to_1023;
- u64 rx_1024_to_15xx;
- u64 rx_15xx_to_jumbo;
- u64 rx_gtjumbo;
- u64 rx_bad_lt64;
- u64 rx_bad_64_to_15xx;
- u64 rx_bad_15xx_to_jumbo;
- u64 rx_bad_gtjumbo;
- u64 rx_overflow;
- u64 rx_missed;
- u64 rx_false_carrier;
- u64 rx_symbol_error;
- u64 rx_align_error;
- u64 rx_length_error;
- u64 rx_internal_error;
- u64 rx_good_lt64;
+struct efx_hw_stat_desc {
+ const char *name;
+ u16 dma_width;
+ u16 offset;
};
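For example (names and offsets illustrative only), a table of two statistics under the new scheme might look like:

static const struct efx_hw_stat_desc example_stats[] = {
	/* 64-bit counter DMAd by hardware at byte offset 0 of the dump */
	{ .name = "tx_bytes", .dma_width = 64, .offset = 0 },
	/* Non-DMA statistic kept out of ethtool (name is NULL) */
	{ .name = NULL, .dma_width = 0, .offset = 0 },
};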
/* Number of bits used in a multicast filter hash address */
@@ -662,7 +634,6 @@ union efx_multicast_hash {
efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
};
-struct efx_filter_state;
struct efx_vf;
struct vfdi_status;
@@ -672,7 +643,6 @@ struct vfdi_status;
* @pci_dev: The PCI device
* @type: Controller type attributes
* @legacy_irq: IRQ number
- * @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)?
* @workqueue: Workqueue for port reconfigures and the HW monitor.
* Work items do not hold and must not acquire RTNL.
* @workqueue_name: Name of workqueue
@@ -689,7 +659,7 @@ struct vfdi_status;
* @tx_queue: TX DMA queues
* @rx_queue: RX DMA queues
* @channel: Channels
- * @channel_name: Names for channels and their IRQs
+ * @msi_context: Context for each MSI
* @extra_channel_types: Types of extra (non-traffic) channels that
* should be allocated for this NIC
* @rxq_entries: Size of receive queues requested by user.
@@ -707,17 +677,25 @@ struct vfdi_status;
* @rx_buffer_order: Order (log2) of number of pages for each RX buffer
* @rx_buffer_truesize: Amortised allocation size of an RX buffer,
* for use in sk_buff::truesize
+ * @rx_prefix_size: Size of RX prefix before packet data
+ * @rx_packet_hash_offset: Offset of RX flow hash from start of packet data
+ * (valid only if @rx_prefix_size != 0; always negative)
+ * @rx_packet_len_offset: Offset of RX packet length from start of packet data
+ * (valid only for NICs that set %EFX_RX_PKT_PREFIX_LEN; always negative)
* @rx_hash_key: Toeplitz hash key for RSS
* @rx_indir_table: Indirection table for RSS
* @rx_scatter: Scatter mode enabled for receives
* @int_error_count: Number of internal errors seen recently
* @int_error_expire: Time at which error count will be expired
+ * @irq_soft_enabled: Are IRQs soft-enabled? If not, IRQ handler will
+ * acknowledge but do nothing else.
* @irq_status: Interrupt status buffer
* @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
* @irq_level: IRQ level/index for IRQs not triggered by an event queue
* @selftest_work: Work item for asynchronous self-test
* @mtd_list: List of MTDs attached to the NIC
* @nic_data: Hardware dependent state
+ * @mcdi: Management-Controller-to-Driver Interface state
* @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
* efx_monitor() and efx_reconfigure_port()
* @port_enabled: Port enabled indicator.
@@ -737,8 +715,10 @@ struct vfdi_status;
* @link_advertising: Autonegotiation advertising flags
* @link_state: Current state of the link
* @n_link_state_changes: Number of times the link has changed state
- * @promiscuous: Promiscuous flag. Protected by netif_tx_lock.
- * @multicast_hash: Multicast hash table
+ * @unicast_filter: Flag for Falcon-arch simple unicast filter.
+ * Protected by @mac_lock.
+ * @multicast_hash: Multicast hash table for Falcon-arch.
+ * Protected by @mac_lock.
* @wanted_fc: Wanted flow control flags
* @fc_disable: When non-zero flow control is disabled. Typically used to
* ensure that network back pressure doesn't delay dma queue flushes.
@@ -747,7 +727,12 @@ struct vfdi_status;
* @loopback_mode: Loopback status
* @loopback_modes: Supported loopback mode bitmask
* @loopback_selftest: Offline self-test private state
- * @drain_pending: Count of RX and TX queues that haven't been flushed and drained.
+ * @filter_lock: Filter table lock
+ * @filter_state: Architecture-dependent filter table state
+ * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
+ * indexed by filter ID
+ * @rps_expire_index: Next index to check for expiry in @rps_flow_id
+ * @active_queues: Count of RX and TX queues that haven't been flushed and drained.
* @rxq_flush_pending: Count of number of receive queues that need to be flushed.
* Decremented when the efx_flush_rx_queue() is called.
* @rxq_flush_outstanding: Count of number of RX flushes started but not yet
@@ -771,12 +756,8 @@ struct vfdi_status;
* @last_irq_cpu: Last CPU to handle a possible test interrupt. This
* field is used by efx_test_interrupts() to verify that an
* interrupt has occurred.
- * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
- * @mac_stats: MAC statistics. These include all statistics the MACs
- * can provide. Generic code converts these into a standard
- * &struct net_device_stats.
- * @stats_lock: Statistics update lock. Serialises statistics fetches
- * and access to @mac_stats.
+ * @stats_lock: Statistics update lock. Must be held when calling
+ * efx_nic_type::{update,start,stop}_stats.
*
* This is stored in the private area of the &struct net_device.
*/
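To make the sign convention of the new RX prefix offsets concrete, a sketch (helper hypothetical; the hash is assumed little-endian here) of reading the flow hash given a pointer to the start of packet data:

static u32 example_rx_hash(const struct efx_nic *efx, const u8 *data)
{
	/* data points just past the rx_prefix_size bytes of prefix, so
	 * rx_packet_hash_offset is negative and indexes back into it
	 */
	if (!efx->rx_prefix_size)
		return 0;
	return le32_to_cpup((const __le32 *)
			    (data + efx->rx_packet_hash_offset));
}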
@@ -788,7 +769,6 @@ struct efx_nic {
unsigned int port_num;
const struct efx_nic_type *type;
int legacy_irq;
- bool legacy_irq_enabled;
bool eeh_disabled_legacy_irq;
struct workqueue_struct *workqueue;
char workqueue_name[16];
@@ -806,7 +786,7 @@ struct efx_nic {
unsigned long reset_pending;
struct efx_channel *channel[EFX_MAX_CHANNELS];
- char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6];
+ struct efx_msi_context msi_context[EFX_MAX_CHANNELS];
const struct efx_channel_type *
extra_channel_type[EFX_MAX_EXTRA_CHANNELS];
@@ -819,6 +799,8 @@ struct efx_nic {
unsigned rx_dc_base;
unsigned sram_lim_qw;
unsigned next_buffer_table;
+
+ unsigned int max_channels;
unsigned n_channels;
unsigned n_rx_channels;
unsigned rss_spread;
@@ -830,6 +812,9 @@ struct efx_nic {
unsigned int rx_page_buf_step;
unsigned int rx_bufs_per_page;
unsigned int rx_pages_per_batch;
+ unsigned int rx_prefix_size;
+ int rx_packet_hash_offset;
+ int rx_packet_len_offset;
u8 rx_hash_key[40];
u32 rx_indir_table[128];
bool rx_scatter;
@@ -837,6 +822,7 @@ struct efx_nic {
unsigned int_error_count;
unsigned long int_error_expire;
+ bool irq_soft_enabled;
struct efx_buffer irq_status;
unsigned irq_zero_count;
unsigned irq_level;
@@ -847,6 +833,7 @@ struct efx_nic {
#endif
void *nic_data;
+ struct efx_mcdi_data *mcdi;
struct mutex mac_lock;
struct work_struct mac_work;
@@ -868,7 +855,7 @@ struct efx_nic {
struct efx_link_state link_state;
unsigned int n_link_state_changes;
- bool promiscuous;
+ bool unicast_filter;
union efx_multicast_hash multicast_hash;
u8 wanted_fc;
unsigned fc_disable;
@@ -879,9 +866,14 @@ struct efx_nic {
void *loopback_selftest;
- struct efx_filter_state *filter_state;
+ spinlock_t filter_lock;
+ void *filter_state;
+#ifdef CONFIG_RFS_ACCEL
+ u32 *rps_flow_id;
+ unsigned int rps_expire_index;
+#endif
- atomic_t drain_pending;
+ atomic_t active_queues;
atomic_t rxq_flush_pending;
atomic_t rxq_flush_outstanding;
wait_queue_head_t flush_wq;
@@ -907,8 +899,6 @@ struct efx_nic {
struct delayed_work monitor_work ____cacheline_aligned_in_smp;
spinlock_t biu_lock;
int last_irq_cpu;
- unsigned n_rx_nodesc_drop_cnt;
- struct efx_mac_stats mac_stats;
spinlock_t stats_lock;
};
@@ -922,8 +912,17 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
return efx->port_num;
}
+struct efx_mtd_partition {
+ struct list_head node;
+ struct mtd_info mtd;
+ const char *dev_type_name;
+ const char *type_name;
+ char name[IFNAMSIZ + 20];
+};
+
/**
* struct efx_nic_type - Efx device type definition
+ * @mem_map_size: Get memory BAR mapped size
* @probe: Probe the controller
* @remove: Free resources allocated by probe()
* @init: Initialise the controller
@@ -938,47 +937,118 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
* @probe_port: Probe the MAC and PHY
* @remove_port: Free resources allocated by probe_port()
* @handle_global_event: Handle a "global" event (may be %NULL)
+ * @fini_dmaq: Flush and finalise DMA queues (RX and TX queues)
* @prepare_flush: Prepare the hardware for flushing the DMA queues
- * @finish_flush: Clean up after flushing the DMA queues
- * @update_stats: Update statistics not provided by event handling
+ * (for Falcon architecture)
+ * @finish_flush: Clean up after flushing the DMA queues (for Falcon
+ * architecture)
+ * @describe_stats: Describe statistics for ethtool
+ * @update_stats: Update statistics not provided by event handling.
+ * Either argument may be %NULL.
* @start_stats: Start the regular fetching of statistics
* @stop_stats: Stop the regular fetching of statistics
* @set_id_led: Set state of identifying LED or revert to automatic function
* @push_irq_moderation: Apply interrupt moderation value
* @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY
+ * @prepare_enable_fc_tx: Prepare MAC to enable pause frame TX (may be %NULL)
* @reconfigure_mac: Push MAC address, MTU, flow control and filter settings
* to the hardware. Serialised by the mac_lock.
* @check_mac_fault: Check MAC fault state. True if fault present.
* @get_wol: Get WoL configuration from driver state
* @set_wol: Push WoL configuration to the NIC
* @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
- * @test_chip: Test registers. Should use efx_nic_test_registers(), and is
+ * @test_chip: Test registers. May use efx_farch_test_registers(), and is
* expected to reset the NIC.
* @test_nvram: Test validity of NVRAM contents
+ * @mcdi_request: Send an MCDI request with the given header and SDU.
+ * The SDU length may be any value from 0 up to the protocol-
+ * defined maximum, but its buffer will be padded to a multiple
+ * of 4 bytes.
+ * @mcdi_poll_response: Test whether an MCDI response is available.
+ * @mcdi_read_response: Read the MCDI response PDU. The offset will
+ * be a multiple of 4. The length may not be, but the buffer
+ * will be padded so it is safe to round up.
+ * @mcdi_poll_reboot: Test whether the MCDI has rebooted. If so,
+ * return an appropriate error code for aborting any current
+ * request; otherwise return 0.
+ * @irq_enable_master: Enable IRQs on the NIC. Each event queue must
+ * be separately enabled after this.
+ * @irq_test_generate: Generate a test IRQ
+ * @irq_disable_non_ev: Disable non-event IRQs on the NIC. Each event
+ * queue must be separately disabled before this.
+ * @irq_handle_msi: Handle MSI for a channel. The @dev_id argument is
+ * a pointer to the &struct efx_msi_context for the channel.
+ * @irq_handle_legacy: Handle legacy interrupt. The @dev_id argument
+ * is a pointer to the &struct efx_nic.
+ * @tx_probe: Allocate resources for TX queue
+ * @tx_init: Initialise TX queue on the NIC
+ * @tx_remove: Free resources for TX queue
+ * @tx_write: Write TX descriptors and doorbell
+ * @rx_push_indir_table: Write RSS indirection table to the NIC
+ * @rx_probe: Allocate resources for RX queue
+ * @rx_init: Initialise RX queue on the NIC
+ * @rx_remove: Free resources for RX queue
+ * @rx_write: Write RX descriptors and doorbell
+ * @rx_defer_refill: Generate a refill reminder event
+ * @ev_probe: Allocate resources for event queue
+ * @ev_init: Initialise event queue on the NIC
+ * @ev_fini: Deinitialise event queue on the NIC
+ * @ev_remove: Free resources for event queue
+ * @ev_process: Process events for a queue, up to the given NAPI quota
+ * @ev_read_ack: Acknowledge read events on a queue, rearming its IRQ
+ * @ev_test_generate: Generate a test event
+ * @filter_table_probe: Probe filter capabilities and set up filter software state
+ * @filter_table_restore: Restore filters removed from hardware
+ * @filter_table_remove: Remove filters from hardware and tear down software state
+ * @filter_update_rx_scatter: Update filters after change to rx scatter setting
+ * @filter_insert: Add or replace a filter
+ * @filter_remove_safe: Remove a filter by ID, carefully
+ * @filter_get_safe: Retrieve a filter by ID, carefully
+ * @filter_clear_rx: Remove RX filters by priority
+ * @filter_count_rx_used: Get the number of filters in use at a given priority
+ * @filter_get_rx_id_limit: Get maximum value of a filter ID, plus 1
+ * @filter_get_rx_ids: Get list of RX filters at a given priority
+ * @filter_rfs_insert: Add or replace a filter for RFS. This must be
+ * atomic. The hardware change may be asynchronous but should
+ * not be delayed for long. It may fail if this can't be done
+ * atomically.
+ * @filter_rfs_expire_one: Consider expiring a filter inserted for RFS.
+ * This must check whether the specified table entry is used by RFS
+ * and that rps_may_expire_flow() returns true for it.
+ * @mtd_probe: Probe and add MTD partitions associated with this net device,
+ * using efx_mtd_add()
+ * @mtd_rename: Set an MTD partition name using the net device name
+ * @mtd_read: Read from an MTD partition
+ * @mtd_erase: Erase part of an MTD partition
+ * @mtd_write: Write to an MTD partition
+ * @mtd_sync: Wait for write-back to complete on MTD partition. This
+ * also notifies the driver that a writer has finished using this
+ * partition.
* @revision: Hardware architecture revision
- * @mem_map_size: Memory BAR mapped size
* @txd_ptr_tbl_base: TX descriptor ring base address
* @rxd_ptr_tbl_base: RX descriptor ring base address
* @buf_tbl_base: Buffer table base address
* @evq_ptr_tbl_base: Event queue pointer table base address
* @evq_rptr_tbl_base: Event queue read-pointer table base address
* @max_dma_mask: Maximum possible DMA mask
- * @rx_buffer_hash_size: Size of hash at start of RX packet
+ * @rx_prefix_size: Size of RX prefix before packet data
+ * @rx_hash_offset: Offset of RX flow hash within prefix
* @rx_buffer_padding: Size of padding at end of RX packet
- * @can_rx_scatter: NIC is able to scatter packet to multiple buffers
+ * @can_rx_scatter: NIC is able to scatter packets to multiple buffers
+ * @always_rx_scatter: NIC will always scatter packets to multiple buffers
* @max_interrupt_mode: Highest capability interrupt mode supported
* from &enum efx_init_mode.
- * @phys_addr_channels: Number of channels with physically addressed
- * descriptors
* @timer_period_max: Maximum period of interrupt timer (in ticks)
* @offload_features: net_device feature flags for protocol offload
* features implemented in hardware
+ * @mcdi_max_ver: Maximum MCDI version supported
*/
struct efx_nic_type {
+ unsigned int (*mem_map_size)(struct efx_nic *efx);
int (*probe)(struct efx_nic *efx);
void (*remove)(struct efx_nic *efx);
int (*init)(struct efx_nic *efx);
- void (*dimension_resources)(struct efx_nic *efx);
+ int (*dimension_resources)(struct efx_nic *efx);
void (*fini)(struct efx_nic *efx);
void (*monitor)(struct efx_nic *efx);
enum reset_type (*map_reset_reason)(enum reset_type reason);
@@ -987,14 +1057,18 @@ struct efx_nic_type {
int (*probe_port)(struct efx_nic *efx);
void (*remove_port)(struct efx_nic *efx);
bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
+ int (*fini_dmaq)(struct efx_nic *efx);
void (*prepare_flush)(struct efx_nic *efx);
void (*finish_flush)(struct efx_nic *efx);
- void (*update_stats)(struct efx_nic *efx);
+ size_t (*describe_stats)(struct efx_nic *efx, u8 *names);
+ size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats);
void (*start_stats)(struct efx_nic *efx);
void (*stop_stats)(struct efx_nic *efx);
void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
void (*push_irq_moderation)(struct efx_channel *channel);
int (*reconfigure_port)(struct efx_nic *efx);
+ void (*prepare_enable_fc_tx)(struct efx_nic *efx);
int (*reconfigure_mac)(struct efx_nic *efx);
bool (*check_mac_fault)(struct efx_nic *efx);
void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
@@ -1002,22 +1076,90 @@ struct efx_nic_type {
void (*resume_wol)(struct efx_nic *efx);
int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests);
int (*test_nvram)(struct efx_nic *efx);
+ void (*mcdi_request)(struct efx_nic *efx,
+ const efx_dword_t *hdr, size_t hdr_len,
+ const efx_dword_t *sdu, size_t sdu_len);
+ bool (*mcdi_poll_response)(struct efx_nic *efx);
+ void (*mcdi_read_response)(struct efx_nic *efx, efx_dword_t *pdu,
+ size_t pdu_offset, size_t pdu_len);
+ int (*mcdi_poll_reboot)(struct efx_nic *efx);
+ void (*irq_enable_master)(struct efx_nic *efx);
+ void (*irq_test_generate)(struct efx_nic *efx);
+ void (*irq_disable_non_ev)(struct efx_nic *efx);
+ irqreturn_t (*irq_handle_msi)(int irq, void *dev_id);
+ irqreturn_t (*irq_handle_legacy)(int irq, void *dev_id);
+ int (*tx_probe)(struct efx_tx_queue *tx_queue);
+ void (*tx_init)(struct efx_tx_queue *tx_queue);
+ void (*tx_remove)(struct efx_tx_queue *tx_queue);
+ void (*tx_write)(struct efx_tx_queue *tx_queue);
+ void (*rx_push_indir_table)(struct efx_nic *efx);
+ int (*rx_probe)(struct efx_rx_queue *rx_queue);
+ void (*rx_init)(struct efx_rx_queue *rx_queue);
+ void (*rx_remove)(struct efx_rx_queue *rx_queue);
+ void (*rx_write)(struct efx_rx_queue *rx_queue);
+ void (*rx_defer_refill)(struct efx_rx_queue *rx_queue);
+ int (*ev_probe)(struct efx_channel *channel);
+ int (*ev_init)(struct efx_channel *channel);
+ void (*ev_fini)(struct efx_channel *channel);
+ void (*ev_remove)(struct efx_channel *channel);
+ int (*ev_process)(struct efx_channel *channel, int quota);
+ void (*ev_read_ack)(struct efx_channel *channel);
+ void (*ev_test_generate)(struct efx_channel *channel);
+ int (*filter_table_probe)(struct efx_nic *efx);
+ void (*filter_table_restore)(struct efx_nic *efx);
+ void (*filter_table_remove)(struct efx_nic *efx);
+ void (*filter_update_rx_scatter)(struct efx_nic *efx);
+ s32 (*filter_insert)(struct efx_nic *efx,
+ struct efx_filter_spec *spec, bool replace);
+ int (*filter_remove_safe)(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id);
+ int (*filter_get_safe)(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *);
+ void (*filter_clear_rx)(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+ u32 (*filter_count_rx_used)(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+ u32 (*filter_get_rx_id_limit)(struct efx_nic *efx);
+ s32 (*filter_get_rx_ids)(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size);
+#ifdef CONFIG_RFS_ACCEL
+ s32 (*filter_rfs_insert)(struct efx_nic *efx,
+ struct efx_filter_spec *spec);
+ bool (*filter_rfs_expire_one)(struct efx_nic *efx, u32 flow_id,
+ unsigned int index);
+#endif
+#ifdef CONFIG_SFC_MTD
+ int (*mtd_probe)(struct efx_nic *efx);
+ void (*mtd_rename)(struct efx_mtd_partition *part);
+ int (*mtd_read)(struct mtd_info *mtd, loff_t start, size_t len,
+ size_t *retlen, u8 *buffer);
+ int (*mtd_erase)(struct mtd_info *mtd, loff_t start, size_t len);
+ int (*mtd_write)(struct mtd_info *mtd, loff_t start, size_t len,
+ size_t *retlen, const u8 *buffer);
+ int (*mtd_sync)(struct mtd_info *mtd);
+#endif
+ void (*ptp_write_host_time)(struct efx_nic *efx, u32 host_time);
int revision;
- unsigned int mem_map_size;
unsigned int txd_ptr_tbl_base;
unsigned int rxd_ptr_tbl_base;
unsigned int buf_tbl_base;
unsigned int evq_ptr_tbl_base;
unsigned int evq_rptr_tbl_base;
u64 max_dma_mask;
- unsigned int rx_buffer_hash_size;
+ unsigned int rx_prefix_size;
+ unsigned int rx_hash_offset;
unsigned int rx_buffer_padding;
bool can_rx_scatter;
+ bool always_rx_scatter;
unsigned int max_interrupt_mode;
- unsigned int phys_addr_channels;
unsigned int timer_period_max;
netdev_features_t offload_features;
+ int mcdi_max_ver;
+ unsigned int max_rx_ip_filters;
};
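As an illustration of the padding contract spelled out for @mcdi_request and @mcdi_read_response above (caller name hypothetical; real callers sleep or time out rather than spin):

static void example_mcdi_round_trip(struct efx_nic *efx,
				    const efx_dword_t *hdr, size_t hdr_len,
				    const efx_dword_t *sdu, size_t sdu_len,
				    efx_dword_t *resp, size_t resp_len)
{
	/* The SDU buffer must be padded to a multiple of 4 bytes */
	efx->type->mcdi_request(efx, hdr, hdr_len, sdu, sdu_len);

	while (!efx->type->mcdi_poll_response(efx))
		cpu_relax();

	/* Response buffers are padded, so rounding up is safe */
	efx->type->mcdi_read_response(efx, resp, 0, round_up(resp_len, 4));
}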
/**************************************************************************
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 56ed3bc71e00..e7dbd2dd202e 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2011 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -19,295 +19,22 @@
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
-#include "regs.h"
+#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"
/**************************************************************************
*
- * Configurable values
- *
- **************************************************************************
- */
-
-/* This is set to 16 for a good reason. In summary, if larger than
- * 16, the descriptor cache holds more than a default socket
- * buffer's worth of packets (for UDP we can only have at most one
- * socket buffer's worth outstanding). This combined with the fact
- * that we only get 1 TX event per descriptor cache means the NIC
- * goes idle.
- */
-#define TX_DC_ENTRIES 16
-#define TX_DC_ENTRIES_ORDER 1
-
-#define RX_DC_ENTRIES 64
-#define RX_DC_ENTRIES_ORDER 3
-
-/* If EFX_MAX_INT_ERRORS internal errors occur within
- * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
- * disable it.
- */
-#define EFX_INT_ERROR_EXPIRE 3600
-#define EFX_MAX_INT_ERRORS 5
-
-/* Depth of RX flush request fifo */
-#define EFX_RX_FLUSH_COUNT 4
-
-/* Driver generated events */
-#define _EFX_CHANNEL_MAGIC_TEST 0x000101
-#define _EFX_CHANNEL_MAGIC_FILL 0x000102
-#define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103
-#define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104
-
-#define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data))
-#define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8)
-
-#define EFX_CHANNEL_MAGIC_TEST(_channel) \
- _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
-#define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \
- _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \
- efx_rx_queue_index(_rx_queue))
-#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \
- _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \
- efx_rx_queue_index(_rx_queue))
-#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \
- _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
- (_tx_queue)->queue)
-
-static void efx_magic_event(struct efx_channel *channel, u32 magic);
-
-/**************************************************************************
- *
- * Solarstorm hardware access
- *
- **************************************************************************/
-
-static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
- unsigned int index)
-{
- efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
- value, index);
-}
-
-/* Read the current event from the event queue */
-static inline efx_qword_t *efx_event(struct efx_channel *channel,
- unsigned int index)
-{
- return ((efx_qword_t *) (channel->eventq.addr)) +
- (index & channel->eventq_mask);
-}
-
-/* See if an event is present
- *
- * We check both the high and low dword of the event for all ones. We
- * wrote all ones when we cleared the event, and no valid event can
- * have all ones in either its high or low dwords. This approach is
- * robust against reordering.
- *
- * Note that using a single 64-bit comparison is incorrect; even
- * though the CPU read will be atomic, the DMA write may not be.
- */
-static inline int efx_event_present(efx_qword_t *event)
-{
- return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
- EFX_DWORD_IS_ALL_ONES(event->dword[1]));
-}
-
-static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
- const efx_oword_t *mask)
-{
- return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
- ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
-}
-
-int efx_nic_test_registers(struct efx_nic *efx,
- const struct efx_nic_register_test *regs,
- size_t n_regs)
-{
- unsigned address = 0, i, j;
- efx_oword_t mask, imask, original, reg, buf;
-
- for (i = 0; i < n_regs; ++i) {
- address = regs[i].address;
- mask = imask = regs[i].mask;
- EFX_INVERT_OWORD(imask);
-
- efx_reado(efx, &original, address);
-
- /* bit sweep on and off */
- for (j = 0; j < 128; j++) {
- if (!EFX_EXTRACT_OWORD32(mask, j, j))
- continue;
-
- /* Test this testable bit can be set in isolation */
- EFX_AND_OWORD(reg, original, mask);
- EFX_SET_OWORD32(reg, j, j, 1);
-
- efx_writeo(efx, &reg, address);
- efx_reado(efx, &buf, address);
-
- if (efx_masked_compare_oword(&reg, &buf, &mask))
- goto fail;
-
- /* Test this testable bit can be cleared in isolation */
- EFX_OR_OWORD(reg, original, mask);
- EFX_SET_OWORD32(reg, j, j, 0);
-
- efx_writeo(efx, &reg, address);
- efx_reado(efx, &buf, address);
-
- if (efx_masked_compare_oword(&reg, &buf, &mask))
- goto fail;
- }
-
- efx_writeo(efx, &original, address);
- }
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev,
- "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
- " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
- EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
- return -EIO;
-}
-
-/**************************************************************************
- *
- * Special buffer handling
- * Special buffers are used for event queues and the TX and RX
- * descriptor rings.
- *
- *************************************************************************/
-
-/*
- * Initialise a special buffer
- *
- * This will define a buffer (previously allocated via
- * efx_alloc_special_buffer()) in the buffer table, allowing
- * it to be used for event queues, descriptor rings etc.
- */
-static void
-efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
-{
- efx_qword_t buf_desc;
- unsigned int index;
- dma_addr_t dma_addr;
- int i;
-
- EFX_BUG_ON_PARANOID(!buffer->addr);
-
- /* Write buffer descriptors to NIC */
- for (i = 0; i < buffer->entries; i++) {
- index = buffer->index + i;
- dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE);
- netif_dbg(efx, probe, efx->net_dev,
- "mapping special buffer %d at %llx\n",
- index, (unsigned long long)dma_addr);
- EFX_POPULATE_QWORD_3(buf_desc,
- FRF_AZ_BUF_ADR_REGION, 0,
- FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
- FRF_AZ_BUF_OWNER_ID_FBUF, 0);
- efx_write_buf_tbl(efx, &buf_desc, index);
- }
-}
-
-/* Unmaps a buffer and clears the buffer table entries */
-static void
-efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
-{
- efx_oword_t buf_tbl_upd;
- unsigned int start = buffer->index;
- unsigned int end = (buffer->index + buffer->entries - 1);
-
- if (!buffer->entries)
- return;
-
- netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
- buffer->index, buffer->index + buffer->entries - 1);
-
- EFX_POPULATE_OWORD_4(buf_tbl_upd,
- FRF_AZ_BUF_UPD_CMD, 0,
- FRF_AZ_BUF_CLR_CMD, 1,
- FRF_AZ_BUF_CLR_END_ID, end,
- FRF_AZ_BUF_CLR_START_ID, start);
- efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
-}
-
-/*
- * Allocate a new special buffer
- *
- * This allocates memory for a new buffer, clears it and allocates a
- * new buffer ID range. It does not write into the buffer table.
- *
- * This call will allocate 4KB buffers, since 8KB buffers can't be
- * used for event queues and descriptor rings.
- */
-static int efx_alloc_special_buffer(struct efx_nic *efx,
- struct efx_special_buffer *buffer,
- unsigned int len)
-{
- len = ALIGN(len, EFX_BUF_SIZE);
-
- buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
- &buffer->dma_addr, GFP_KERNEL);
- if (!buffer->addr)
- return -ENOMEM;
- buffer->len = len;
- buffer->entries = len / EFX_BUF_SIZE;
- BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));
-
- /* Select new buffer ID */
- buffer->index = efx->next_buffer_table;
- efx->next_buffer_table += buffer->entries;
-#ifdef CONFIG_SFC_SRIOV
- BUG_ON(efx_sriov_enabled(efx) &&
- efx->vf_buftbl_base < efx->next_buffer_table);
-#endif
-
- netif_dbg(efx, probe, efx->net_dev,
- "allocating special buffers %d-%d at %llx+%x "
- "(virt %p phys %llx)\n", buffer->index,
- buffer->index + buffer->entries - 1,
- (u64)buffer->dma_addr, len,
- buffer->addr, (u64)virt_to_phys(buffer->addr));
-
- return 0;
-}
-
-static void
-efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
-{
- if (!buffer->addr)
- return;
-
- netif_dbg(efx, hw, efx->net_dev,
- "deallocating special buffers %d-%d at %llx+%x "
- "(virt %p phys %llx)\n", buffer->index,
- buffer->index + buffer->entries - 1,
- (u64)buffer->dma_addr, buffer->len,
- buffer->addr, (u64)virt_to_phys(buffer->addr));
-
- dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
- buffer->dma_addr);
- buffer->addr = NULL;
- buffer->entries = 0;
-}
-
-/**************************************************************************
- *
* Generic buffer handling
* These buffers are used for interrupt status, MAC stats, etc.
*
**************************************************************************/
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
- unsigned int len)
+ unsigned int len, gfp_t gfp_flags)
{
- buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
- &buffer->dma_addr,
- GFP_ATOMIC | __GFP_ZERO);
+ buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len,
+ &buffer->dma_addr, gfp_flags);
if (!buffer->addr)
return -ENOMEM;
buffer->len = len;
@@ -323,1057 +50,6 @@ void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
}
}
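A sketch of the updated calling convention (buffer size illustrative): with the new gfp_flags argument, process-context callers can pass GFP_KERNEL instead of the previously hard-coded GFP_ATOMIC.

static int example_alloc_irq_status(struct efx_nic *efx)
{
	int rc;

	/* Zeroed, DMA-coherent allocation; may sleep with GFP_KERNEL */
	rc = efx_nic_alloc_buffer(efx, &efx->irq_status,
				  sizeof(efx_oword_t), GFP_KERNEL);
	if (rc)
		return rc;

	/* ... hand efx->irq_status.dma_addr to the NIC ... */

	efx_nic_free_buffer(efx, &efx->irq_status);
	return 0;
}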
-/**************************************************************************
- *
- * TX path
- *
- **************************************************************************/
-
-/* Returns a pointer to the specified transmit descriptor in the TX
- * descriptor queue belonging to the specified channel.
- */
-static inline efx_qword_t *
-efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
-{
- return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
-}
-
-/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
-static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
-{
- unsigned write_ptr;
- efx_dword_t reg;
-
- write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
- EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
- efx_writed_page(tx_queue->efx, &reg,
- FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
-}
-
-/* Write pointer and first descriptor for TX descriptor ring */
-static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
- const efx_qword_t *txd)
-{
- unsigned write_ptr;
- efx_oword_t reg;
-
- BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
- BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
-
- write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
- EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
- FRF_AZ_TX_DESC_WPTR, write_ptr);
- reg.qword[0] = *txd;
- efx_writeo_page(tx_queue->efx, &reg,
- FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
-}
-
-static inline bool
-efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
-{
- unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
-
- if (empty_read_count == 0)
- return false;
-
- tx_queue->empty_read_count = 0;
- return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
- && tx_queue->write_count - write_count == 1;
-}
-
-/* For each entry inserted into the software descriptor ring, create a
- * descriptor in the hardware TX descriptor ring (in host memory), and
- * write a doorbell.
- */
-void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
-{
-
- struct efx_tx_buffer *buffer;
- efx_qword_t *txd;
- unsigned write_ptr;
- unsigned old_write_count = tx_queue->write_count;
-
- BUG_ON(tx_queue->write_count == tx_queue->insert_count);
-
- do {
- write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
- buffer = &tx_queue->buffer[write_ptr];
- txd = efx_tx_desc(tx_queue, write_ptr);
- ++tx_queue->write_count;
-
- /* Create TX descriptor ring entry */
- BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
- EFX_POPULATE_QWORD_4(*txd,
- FSF_AZ_TX_KER_CONT,
- buffer->flags & EFX_TX_BUF_CONT,
- FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
- FSF_AZ_TX_KER_BUF_REGION, 0,
- FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
- } while (tx_queue->write_count != tx_queue->insert_count);
-
- wmb(); /* Ensure descriptors are written before they are fetched */
-
- if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
- txd = efx_tx_desc(tx_queue,
- old_write_count & tx_queue->ptr_mask);
- efx_push_tx_desc(tx_queue, txd);
- ++tx_queue->pushes;
- } else {
- efx_notify_tx_desc(tx_queue);
- }
-}
-
-/* Allocate hardware resources for a TX queue */
-int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
-{
- struct efx_nic *efx = tx_queue->efx;
- unsigned entries;
-
- entries = tx_queue->ptr_mask + 1;
- return efx_alloc_special_buffer(efx, &tx_queue->txd,
- entries * sizeof(efx_qword_t));
-}
-
-void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
-{
- struct efx_nic *efx = tx_queue->efx;
- efx_oword_t reg;
-
- /* Pin TX descriptor ring */
- efx_init_special_buffer(efx, &tx_queue->txd);
-
- /* Push TX descriptor ring to card */
- EFX_POPULATE_OWORD_10(reg,
- FRF_AZ_TX_DESCQ_EN, 1,
- FRF_AZ_TX_ISCSI_DDIG_EN, 0,
- FRF_AZ_TX_ISCSI_HDIG_EN, 0,
- FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
- FRF_AZ_TX_DESCQ_EVQ_ID,
- tx_queue->channel->channel,
- FRF_AZ_TX_DESCQ_OWNER_ID, 0,
- FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
- FRF_AZ_TX_DESCQ_SIZE,
- __ffs(tx_queue->txd.entries),
- FRF_AZ_TX_DESCQ_TYPE, 0,
- FRF_BZ_TX_NON_IP_DROP_DIS, 1);
-
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
- EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
- EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
- !csum);
- }
-
- efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
- tx_queue->queue);
-
- if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
- /* Only 128 bits in this register */
- BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
-
- efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
- if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
- __clear_bit_le(tx_queue->queue, &reg);
- else
- __set_bit_le(tx_queue->queue, &reg);
- efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
- }
-
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- EFX_POPULATE_OWORD_1(reg,
- FRF_BZ_TX_PACE,
- (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
- FFE_BZ_TX_PACE_OFF :
- FFE_BZ_TX_PACE_RESERVED);
- efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
- tx_queue->queue);
- }
-}
-
-static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
-{
- struct efx_nic *efx = tx_queue->efx;
- efx_oword_t tx_flush_descq;
-
- WARN_ON(atomic_read(&tx_queue->flush_outstanding));
- atomic_set(&tx_queue->flush_outstanding, 1);
-
- EFX_POPULATE_OWORD_2(tx_flush_descq,
- FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
- FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
- efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
-}
-
-void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
-{
- struct efx_nic *efx = tx_queue->efx;
- efx_oword_t tx_desc_ptr;
-
- /* Remove TX descriptor ring from card */
- EFX_ZERO_OWORD(tx_desc_ptr);
- efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
- tx_queue->queue);
-
- /* Unpin TX descriptor ring */
- efx_fini_special_buffer(efx, &tx_queue->txd);
-}
-
-/* Free buffers backing TX queue */
-void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
-{
- efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
-}
-
-/**************************************************************************
- *
- * RX path
- *
- **************************************************************************/
-
-/* Returns a pointer to the specified descriptor in the RX descriptor queue */
-static inline efx_qword_t *
-efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
-{
- return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
-}
-
-/* This creates an entry in the RX descriptor queue */
-static inline void
-efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
-{
- struct efx_rx_buffer *rx_buf;
- efx_qword_t *rxd;
-
- rxd = efx_rx_desc(rx_queue, index);
- rx_buf = efx_rx_buffer(rx_queue, index);
- EFX_POPULATE_QWORD_3(*rxd,
- FSF_AZ_RX_KER_BUF_SIZE,
- rx_buf->len -
- rx_queue->efx->type->rx_buffer_padding,
- FSF_AZ_RX_KER_BUF_REGION, 0,
- FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
-}
-
-/* This writes to the RX_DESC_WPTR register for the specified receive
- * descriptor ring.
- */
-void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
-{
- struct efx_nic *efx = rx_queue->efx;
- efx_dword_t reg;
- unsigned write_ptr;
-
- while (rx_queue->notified_count != rx_queue->added_count) {
- efx_build_rx_desc(
- rx_queue,
- rx_queue->notified_count & rx_queue->ptr_mask);
- ++rx_queue->notified_count;
- }
-
- wmb();
- write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
- EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
- efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
- efx_rx_queue_index(rx_queue));
-}
-
-int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
-{
- struct efx_nic *efx = rx_queue->efx;
- unsigned entries;
-
- entries = rx_queue->ptr_mask + 1;
- return efx_alloc_special_buffer(efx, &rx_queue->rxd,
- entries * sizeof(efx_qword_t));
-}
-
-void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
-{
- efx_oword_t rx_desc_ptr;
- struct efx_nic *efx = rx_queue->efx;
- bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
- bool iscsi_digest_en = is_b0;
- bool jumbo_en;
-
- /* For kernel-mode queues in Falcon A1, the JUMBO flag enables
- * DMA to continue after a PCIe page boundary (and scattering
- * is not possible). In Falcon B0 and Siena, it enables
- * scatter.
- */
- jumbo_en = !is_b0 || efx->rx_scatter;
-
- netif_dbg(efx, hw, efx->net_dev,
- "RX queue %d ring in special buffers %d-%d\n",
- efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
- rx_queue->rxd.index + rx_queue->rxd.entries - 1);
-
- rx_queue->scatter_n = 0;
-
- /* Pin RX descriptor ring */
- efx_init_special_buffer(efx, &rx_queue->rxd);
-
- /* Push RX descriptor ring to card */
- EFX_POPULATE_OWORD_10(rx_desc_ptr,
- FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
- FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
- FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
- FRF_AZ_RX_DESCQ_EVQ_ID,
- efx_rx_queue_channel(rx_queue)->channel,
- FRF_AZ_RX_DESCQ_OWNER_ID, 0,
- FRF_AZ_RX_DESCQ_LABEL,
- efx_rx_queue_index(rx_queue),
- FRF_AZ_RX_DESCQ_SIZE,
- __ffs(rx_queue->rxd.entries),
- FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
- FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
- FRF_AZ_RX_DESCQ_EN, 1);
- efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
- efx_rx_queue_index(rx_queue));
-}
-
-static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
-{
- struct efx_nic *efx = rx_queue->efx;
- efx_oword_t rx_flush_descq;
-
- EFX_POPULATE_OWORD_2(rx_flush_descq,
- FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
- FRF_AZ_RX_FLUSH_DESCQ,
- efx_rx_queue_index(rx_queue));
- efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
-}
-
-void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
-{
- efx_oword_t rx_desc_ptr;
- struct efx_nic *efx = rx_queue->efx;
-
- /* Remove RX descriptor ring from card */
- EFX_ZERO_OWORD(rx_desc_ptr);
- efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
- efx_rx_queue_index(rx_queue));
-
- /* Unpin RX descriptor ring */
- efx_fini_special_buffer(efx, &rx_queue->rxd);
-}
-
-/* Free buffers backing RX queue */
-void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
-{
- efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
-}
-
-/**************************************************************************
- *
- * Flush handling
- *
- **************************************************************************/
-
-/* efx_nic_flush_queues() must be woken up when all flushes are completed,
- * or more RX flushes can be kicked off.
- */
-static bool efx_flush_wake(struct efx_nic *efx)
-{
- /* Ensure that all updates are visible to efx_nic_flush_queues() */
- smp_mb();
-
- return (atomic_read(&efx->drain_pending) == 0 ||
- (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
- && atomic_read(&efx->rxq_flush_pending) > 0));
-}
-
-static bool efx_check_tx_flush_complete(struct efx_nic *efx)
-{
- bool i = true;
- efx_oword_t txd_ptr_tbl;
- struct efx_channel *channel;
- struct efx_tx_queue *tx_queue;
-
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- efx_reado_table(efx, &txd_ptr_tbl,
- FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
- if (EFX_OWORD_FIELD(txd_ptr_tbl,
- FRF_AZ_TX_DESCQ_FLUSH) ||
- EFX_OWORD_FIELD(txd_ptr_tbl,
- FRF_AZ_TX_DESCQ_EN)) {
- netif_dbg(efx, hw, efx->net_dev,
- "flush did not complete on TXQ %d\n",
- tx_queue->queue);
- i = false;
- } else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
- 1, 0)) {
- /* The flush is complete, but we didn't
- * receive a flush completion event
- */
- netif_dbg(efx, hw, efx->net_dev,
- "flush complete on TXQ %d, so drain "
- "the queue\n", tx_queue->queue);
- /* Don't need to increment drain_pending as it
- * has already been incremented for the queues
- * which did not drain
- */
- efx_magic_event(channel,
- EFX_CHANNEL_MAGIC_TX_DRAIN(
- tx_queue));
- }
- }
- }
-
- return i;
-}
-
-/* Flush all the transmit queues, and continue flushing receive queues until
- * they're all flushed. Wait for the DRAIN events to be recieved so that there
- * are no more RX and TX events left on any channel. */
-int efx_nic_flush_queues(struct efx_nic *efx)
-{
- unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
- struct efx_channel *channel;
- struct efx_rx_queue *rx_queue;
- struct efx_tx_queue *tx_queue;
- int rc = 0;
-
- efx->type->prepare_flush(efx);
-
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- atomic_inc(&efx->drain_pending);
- efx_flush_tx_queue(tx_queue);
- }
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- atomic_inc(&efx->drain_pending);
- rx_queue->flush_pending = true;
- atomic_inc(&efx->rxq_flush_pending);
- }
- }
-
- while (timeout && atomic_read(&efx->drain_pending) > 0) {
- /* If SRIOV is enabled, then offload receive queue flushing to
- * the firmware (though we will still have to poll for
- * completion). If that fails, fall back to the old scheme.
- */
- if (efx_sriov_enabled(efx)) {
- rc = efx_mcdi_flush_rxqs(efx);
- if (!rc)
- goto wait;
- }
-
- /* The hardware supports four concurrent rx flushes, each of
- * which may need to be retried if there is an outstanding
- * descriptor fetch
- */
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- if (atomic_read(&efx->rxq_flush_outstanding) >=
- EFX_RX_FLUSH_COUNT)
- break;
-
- if (rx_queue->flush_pending) {
- rx_queue->flush_pending = false;
- atomic_dec(&efx->rxq_flush_pending);
- atomic_inc(&efx->rxq_flush_outstanding);
- efx_flush_rx_queue(rx_queue);
- }
- }
- }
-
- wait:
- timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx),
- timeout);
- }
-
- if (atomic_read(&efx->drain_pending) &&
- !efx_check_tx_flush_complete(efx)) {
- netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
- "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
- atomic_read(&efx->rxq_flush_outstanding),
- atomic_read(&efx->rxq_flush_pending));
- rc = -ETIMEDOUT;
-
- atomic_set(&efx->drain_pending, 0);
- atomic_set(&efx->rxq_flush_pending, 0);
- atomic_set(&efx->rxq_flush_outstanding, 0);
- }
-
- efx->type->finish_flush(efx);
-
- return rc;
-}
-
-/**************************************************************************
- *
- * Event queue processing
- * Event queues are processed by per-channel tasklets.
- *
- **************************************************************************/
-
-/* Update a channel's event queue's read pointer (RPTR) register
- *
- * This writes the EVQ_RPTR_REG register for the specified channel's
- * event queue.
- */
-void efx_nic_eventq_read_ack(struct efx_channel *channel)
-{
- efx_dword_t reg;
- struct efx_nic *efx = channel->efx;
-
- EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
- channel->eventq_read_ptr & channel->eventq_mask);
-
- /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
- * of 4 bytes, but it is really 16 bytes just like later revisions.
- */
- efx_writed(efx, &reg,
- efx->type->evq_rptr_tbl_base +
- FR_BZ_EVQ_RPTR_STEP * channel->channel);
-}
-
-/* Use HW to insert a SW defined event */
-void efx_generate_event(struct efx_nic *efx, unsigned int evq,
- efx_qword_t *event)
-{
- efx_oword_t drv_ev_reg;
-
- BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
- FRF_AZ_DRV_EV_DATA_WIDTH != 64);
- drv_ev_reg.u32[0] = event->u32[0];
- drv_ev_reg.u32[1] = event->u32[1];
- drv_ev_reg.u32[2] = 0;
- drv_ev_reg.u32[3] = 0;
- EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
- efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
-}
-
-static void efx_magic_event(struct efx_channel *channel, u32 magic)
-{
- efx_qword_t event;
-
- EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
- FSE_AZ_EV_CODE_DRV_GEN_EV,
- FSF_AZ_DRV_GEN_EV_MAGIC, magic);
- efx_generate_event(channel->efx, channel->channel, &event);
-}
-
-/* Handle a transmit completion event
- *
- * The NIC batches TX completion events; the message we receive is of
- * the form "complete all TX events up to this index".
- */
-static int
-efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
-{
- unsigned int tx_ev_desc_ptr;
- unsigned int tx_ev_q_label;
- struct efx_tx_queue *tx_queue;
- struct efx_nic *efx = channel->efx;
- int tx_packets = 0;
-
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
- return 0;
-
- if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
- /* Transmit completion */
- tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
- tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
- tx_queue = efx_channel_get_tx_queue(
- channel, tx_ev_q_label % EFX_TXQ_TYPES);
- tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
- tx_queue->ptr_mask);
- efx_xmit_done(tx_queue, tx_ev_desc_ptr);
- } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
- /* Rewrite the FIFO write pointer */
- tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
- tx_queue = efx_channel_get_tx_queue(
- channel, tx_ev_q_label % EFX_TXQ_TYPES);
-
- netif_tx_lock(efx->net_dev);
- efx_notify_tx_desc(tx_queue);
- netif_tx_unlock(efx->net_dev);
- } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
- EFX_WORKAROUND_10727(efx)) {
- efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
- } else {
- netif_err(efx, tx_err, efx->net_dev,
- "channel %d unexpected TX event "
- EFX_QWORD_FMT"\n", channel->channel,
- EFX_QWORD_VAL(*event));
- }
-
- return tx_packets;
-}
-
-/* Detect errors included in the rx_evt_pkt_ok bit. */
-static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
- const efx_qword_t *event)
-{
- struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
- struct efx_nic *efx = rx_queue->efx;
- bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
- bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
- bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
- bool rx_ev_other_err, rx_ev_pause_frm;
- bool rx_ev_hdr_type, rx_ev_mcast_pkt;
- unsigned rx_ev_pkt_type;
-
- rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
- rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
- rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
- rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
- rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
- FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
- rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
- FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
- rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
- FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
- rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
- rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
- rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
- 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
- rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
-
- /* Every error apart from tobe_disc and pause_frm */
- rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
- rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
- rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
-
- /* Count errors that are not in MAC stats. Ignore expected
- * checksum errors during self-test. */
- if (rx_ev_frm_trunc)
- ++channel->n_rx_frm_trunc;
- else if (rx_ev_tobe_disc)
- ++channel->n_rx_tobe_disc;
- else if (!efx->loopback_selftest) {
- if (rx_ev_ip_hdr_chksum_err)
- ++channel->n_rx_ip_hdr_chksum_err;
- else if (rx_ev_tcp_udp_chksum_err)
- ++channel->n_rx_tcp_udp_chksum_err;
- }
-
- /* TOBE_DISC is expected on unicast mismatches; don't print out an
- * error message. FRM_TRUNC indicates RXDP dropped the packet due
- * to a FIFO overflow.
- */
-#ifdef DEBUG
- if (rx_ev_other_err && net_ratelimit()) {
- netif_dbg(efx, rx_err, efx->net_dev,
- " RX queue %d unexpected RX event "
- EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
- efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
- rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
- rx_ev_ip_hdr_chksum_err ?
- " [IP_HDR_CHKSUM_ERR]" : "",
- rx_ev_tcp_udp_chksum_err ?
- " [TCP_UDP_CHKSUM_ERR]" : "",
- rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
- rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
- rx_ev_drib_nib ? " [DRIB_NIB]" : "",
- rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
- rx_ev_pause_frm ? " [PAUSE]" : "");
- }
-#endif
-
- /* The frame must be discarded if any of these are true. */
- return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
- rx_ev_tobe_disc | rx_ev_pause_frm) ?
- EFX_RX_PKT_DISCARD : 0;
-}
-
-/* Handle receive events that are not in-order. Return true if this
- * can be handled as a partial packet discard, false if it's more
- * serious.
- */
-static bool
-efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
-{
- struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
- struct efx_nic *efx = rx_queue->efx;
- unsigned expected, dropped;
-
- if (rx_queue->scatter_n &&
- index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
- rx_queue->ptr_mask)) {
- ++channel->n_rx_nodesc_trunc;
- return true;
- }
-
- expected = rx_queue->removed_count & rx_queue->ptr_mask;
- dropped = (index - expected) & rx_queue->ptr_mask;
- netif_info(efx, rx_err, efx->net_dev,
- "dropped %d events (index=%d expected=%d)\n",
- dropped, index, expected);
-
- efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
- RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
- return false;
-}
-
-/* Handle a packet received event
- *
- * The NIC gives a "discard" flag if it's a unicast packet with the
- * wrong destination address
- * Also "is multicast" and "matches multicast filter" flags can be used to
- * discard non-matching multicast packets.
- */
-static void
-efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
-{
- unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
- unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
- unsigned expected_ptr;
- bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
- u16 flags;
- struct efx_rx_queue *rx_queue;
- struct efx_nic *efx = channel->efx;
-
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
- return;
-
- rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
- rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
- WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
- channel->channel);
-
- rx_queue = efx_channel_get_rx_queue(channel);
-
- rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
- expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
- rx_queue->ptr_mask);
-
- /* Check for partial drops and other errors */
- if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
- unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
- if (rx_ev_desc_ptr != expected_ptr &&
- !efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
- return;
-
- /* Discard all pending fragments */
- if (rx_queue->scatter_n) {
- efx_rx_packet(
- rx_queue,
- rx_queue->removed_count & rx_queue->ptr_mask,
- rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
- rx_queue->removed_count += rx_queue->scatter_n;
- rx_queue->scatter_n = 0;
- }
-
- /* Return if there is no new fragment */
- if (rx_ev_desc_ptr != expected_ptr)
- return;
-
- /* Discard new fragment if not SOP */
- if (!rx_ev_sop) {
- efx_rx_packet(
- rx_queue,
- rx_queue->removed_count & rx_queue->ptr_mask,
- 1, 0, EFX_RX_PKT_DISCARD);
- ++rx_queue->removed_count;
- return;
- }
- }
-
- ++rx_queue->scatter_n;
- if (rx_ev_cont)
- return;
-
- rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
- rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
- rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
-
- if (likely(rx_ev_pkt_ok)) {
- /* If packet is marked as OK then we can rely on the
- * hardware checksum and classification.
- */
- flags = 0;
- switch (rx_ev_hdr_type) {
- case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
- flags |= EFX_RX_PKT_TCP;
- /* fall through */
- case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
- flags |= EFX_RX_PKT_CSUMMED;
- /* fall through */
- case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
- case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
- break;
- }
- } else {
- flags = efx_handle_rx_not_ok(rx_queue, event);
- }
-
- /* Detect multicast packets that didn't match the filter */
- rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
- if (rx_ev_mcast_pkt) {
- unsigned int rx_ev_mcast_hash_match =
- EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
-
- if (unlikely(!rx_ev_mcast_hash_match)) {
- ++channel->n_rx_mcast_mismatch;
- flags |= EFX_RX_PKT_DISCARD;
- }
- }
-
- channel->irq_mod_score += 2;
-
- /* Handle received packet */
- efx_rx_packet(rx_queue,
- rx_queue->removed_count & rx_queue->ptr_mask,
- rx_queue->scatter_n, rx_ev_byte_cnt, flags);
- rx_queue->removed_count += rx_queue->scatter_n;
- rx_queue->scatter_n = 0;
-}
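
The scatter handling above is a small state machine: every in-order event for
the queue adds one descriptor to the in-progress packet, and only an event
without the "jumbo continuation" flag completes it. A compact illustration of
the bookkeeping (toy flags, not the FSF_AZ_* event fields):

    #include <stdbool.h>
    #include <stdio.h>

    struct ev { bool sop, cont; };   /* start-of-packet / continuation */

    int main(void)
    {
        /* A 3-fragment packet followed by a single-fragment packet */
        struct ev evs[] = {
            { true, true }, { false, true }, { false, false },
            { true, false },
        };
        unsigned scatter_n = 0;

        for (int i = 0; i < 4; i++) {
            ++scatter_n;               /* this event adds one fragment */
            if (evs[i].cont)
                continue;              /* more fragments to come */
            printf("packet of %u fragment(s)\n", scatter_n);
            scatter_n = 0;             /* hand off and reset */
        }
        return 0;
    }
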
-
-/* If this flush done event corresponds to a &struct efx_tx_queue, then
- * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
- * of all transmit completions.
- */
-static void
-efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
-{
- struct efx_tx_queue *tx_queue;
- int qid;
-
- qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
- if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
- tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
- qid % EFX_TXQ_TYPES);
- if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
- efx_magic_event(tx_queue->channel,
- EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
- }
- }
-}
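
The flush event carries a flat TX queue id, which the code above splits into
a channel number and a queue type by division and remainder. A toy
illustration; the EFX_TXQ_TYPES value here is assumed for the example, not
quoted from the driver:

    #include <stdio.h>

    #define EFX_TXQ_TYPES 4   /* assumed per-channel queue count */

    int main(void)
    {
        int qid = 10;   /* flat id from the flush event */

        printf("channel %d, queue type %d\n",
               qid / EFX_TXQ_TYPES,    /* -> 2 */
               qid % EFX_TXQ_TYPES);   /* -> 2 */
        return 0;
    }
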
-
-/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
- * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
- * the RX queue back to the mask of RX queues in need of flushing.
- */
-static void
-efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
-{
- struct efx_channel *channel;
- struct efx_rx_queue *rx_queue;
- int qid;
- bool failed;
-
- qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
- failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
- if (qid >= efx->n_channels)
- return;
- channel = efx_get_channel(efx, qid);
- if (!efx_channel_has_rx_queue(channel))
- return;
- rx_queue = efx_channel_get_rx_queue(channel);
-
- if (failed) {
- netif_info(efx, hw, efx->net_dev,
- "RXQ %d flush retry\n", qid);
- rx_queue->flush_pending = true;
- atomic_inc(&efx->rxq_flush_pending);
- } else {
- efx_magic_event(efx_rx_queue_channel(rx_queue),
- EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
- }
- atomic_dec(&efx->rxq_flush_outstanding);
- if (efx_flush_wake(efx))
- wake_up(&efx->flush_wq);
-}
-
-static void
-efx_handle_drain_event(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
-
- WARN_ON(atomic_read(&efx->drain_pending) == 0);
- atomic_dec(&efx->drain_pending);
- if (efx_flush_wake(efx))
- wake_up(&efx->flush_wq);
-}
-
-static void
-efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
-{
- struct efx_nic *efx = channel->efx;
- struct efx_rx_queue *rx_queue =
- efx_channel_has_rx_queue(channel) ?
- efx_channel_get_rx_queue(channel) : NULL;
- unsigned magic, code;
-
- magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
- code = _EFX_CHANNEL_MAGIC_CODE(magic);
-
- if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
- channel->event_test_cpu = raw_smp_processor_id();
- } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
- /* The queue must be empty, so we won't receive any rx
- * events and efx_process_channel() won't refill the
- * queue. Refill it here. */
- efx_fast_push_rx_descriptors(rx_queue);
- } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
- rx_queue->enabled = false;
- efx_handle_drain_event(channel);
- } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
- efx_handle_drain_event(channel);
- } else {
- netif_dbg(efx, hw, efx->net_dev, "channel %d received "
- "generated event "EFX_QWORD_FMT"\n",
- channel->channel, EFX_QWORD_VAL(*event));
- }
-}
-
-static void
-efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
-{
- struct efx_nic *efx = channel->efx;
- unsigned int ev_sub_code;
- unsigned int ev_sub_data;
-
- ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
- ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
-
- switch (ev_sub_code) {
- case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
- netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
- channel->channel, ev_sub_data);
- efx_handle_tx_flush_done(efx, event);
- efx_sriov_tx_flush_done(efx, event);
- break;
- case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
- netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
- channel->channel, ev_sub_data);
- efx_handle_rx_flush_done(efx, event);
- efx_sriov_rx_flush_done(efx, event);
- break;
- case FSE_AZ_EVQ_INIT_DONE_EV:
- netif_dbg(efx, hw, efx->net_dev,
- "channel %d EVQ %d initialised\n",
- channel->channel, ev_sub_data);
- break;
- case FSE_AZ_SRM_UPD_DONE_EV:
- netif_vdbg(efx, hw, efx->net_dev,
- "channel %d SRAM update done\n", channel->channel);
- break;
- case FSE_AZ_WAKE_UP_EV:
- netif_vdbg(efx, hw, efx->net_dev,
- "channel %d RXQ %d wakeup event\n",
- channel->channel, ev_sub_data);
- break;
- case FSE_AZ_TIMER_EV:
- netif_vdbg(efx, hw, efx->net_dev,
- "channel %d RX queue %d timer expired\n",
- channel->channel, ev_sub_data);
- break;
- case FSE_AA_RX_RECOVER_EV:
- netif_err(efx, rx_err, efx->net_dev,
- "channel %d seen DRIVER RX_RESET event. "
- "Resetting.\n", channel->channel);
- atomic_inc(&efx->rx_reset);
- efx_schedule_reset(efx,
- EFX_WORKAROUND_6555(efx) ?
- RESET_TYPE_RX_RECOVERY :
- RESET_TYPE_DISABLE);
- break;
- case FSE_BZ_RX_DSC_ERROR_EV:
- if (ev_sub_data < EFX_VI_BASE) {
- netif_err(efx, rx_err, efx->net_dev,
- "RX DMA Q %d reports descriptor fetch error."
- " RX Q %d is disabled.\n", ev_sub_data,
- ev_sub_data);
- efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
- } else
- efx_sriov_desc_fetch_err(efx, ev_sub_data);
- break;
- case FSE_BZ_TX_DSC_ERROR_EV:
- if (ev_sub_data < EFX_VI_BASE) {
- netif_err(efx, tx_err, efx->net_dev,
- "TX DMA Q %d reports descriptor fetch error."
- " TX Q %d is disabled.\n", ev_sub_data,
- ev_sub_data);
- efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
- } else
- efx_sriov_desc_fetch_err(efx, ev_sub_data);
- break;
- default:
- netif_vdbg(efx, hw, efx->net_dev,
- "channel %d unknown driver event code %d "
- "data %04x\n", channel->channel, ev_sub_code,
- ev_sub_data);
- break;
- }
-}
-
-int efx_nic_process_eventq(struct efx_channel *channel, int budget)
-{
- struct efx_nic *efx = channel->efx;
- unsigned int read_ptr;
- efx_qword_t event, *p_event;
- int ev_code;
- int tx_packets = 0;
- int spent = 0;
-
- read_ptr = channel->eventq_read_ptr;
-
- for (;;) {
- p_event = efx_event(channel, read_ptr);
- event = *p_event;
-
- if (!efx_event_present(&event))
- /* End of events */
- break;
-
- netif_vdbg(channel->efx, intr, channel->efx->net_dev,
- "channel %d event is "EFX_QWORD_FMT"\n",
- channel->channel, EFX_QWORD_VAL(event));
-
- /* Clear this event by marking it all ones */
- EFX_SET_QWORD(*p_event);
-
- ++read_ptr;
-
- ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
-
- switch (ev_code) {
- case FSE_AZ_EV_CODE_RX_EV:
- efx_handle_rx_event(channel, &event);
- if (++spent == budget)
- goto out;
- break;
- case FSE_AZ_EV_CODE_TX_EV:
- tx_packets += efx_handle_tx_event(channel, &event);
- if (tx_packets > efx->txq_entries) {
- spent = budget;
- goto out;
- }
- break;
- case FSE_AZ_EV_CODE_DRV_GEN_EV:
- efx_handle_generated_event(channel, &event);
- break;
- case FSE_AZ_EV_CODE_DRIVER_EV:
- efx_handle_driver_event(channel, &event);
- break;
- case FSE_CZ_EV_CODE_USER_EV:
- efx_sriov_event(channel, &event);
- break;
- case FSE_CZ_EV_CODE_MCDI_EV:
- efx_mcdi_process_event(channel, &event);
- break;
- case FSE_AZ_EV_CODE_GLOBAL_EV:
- if (efx->type->handle_global_event &&
- efx->type->handle_global_event(channel, &event))
- break;
- /* else fall through */
- default:
- netif_err(channel->efx, hw, channel->efx->net_dev,
- "channel %d unknown event type %d (data "
- EFX_QWORD_FMT ")\n", channel->channel,
- ev_code, EFX_QWORD_VAL(event));
- }
- }
-
-out:
- channel->eventq_read_ptr = read_ptr;
- return spent;
-}
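
efx_nic_process_eventq() follows the usual NAPI contract: consume at most
"budget" RX events and report how many were spent, so the caller can tell a
drained queue (spent < budget) from one that still needs polling
(spent == budget). A freestanding sketch of that contract, independent of the
sfc internals:

    #include <stdbool.h>
    #include <stdio.h>

    struct ring { int pending; };   /* toy stand-in for an event queue */

    static bool event_present(const struct ring *r) { return r->pending > 0; }
    static void handle_event(struct ring *r) { --r->pending; }

    /* Consume at most 'budget' events; return how many were spent */
    static int process_events(struct ring *r, int budget)
    {
        int spent = 0;

        while (spent < budget && event_present(r)) {
            handle_event(r);
            ++spent;
        }
        return spent;   /* spent == budget => caller should poll again */
    }

    int main(void)
    {
        struct ring r = { .pending = 7 };

        printf("spent %d\n", process_events(&r, 4));   /* 4: re-poll */
        printf("spent %d\n", process_events(&r, 4));   /* 3: drained */
        return 0;
    }
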
-
/* Check whether an event is present in the eventq at the current
* read pointer. Only useful for self-test.
*/
@@ -1382,323 +58,18 @@ bool efx_nic_event_present(struct efx_channel *channel)
return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
}
-/* Allocate buffer table entries for event queue */
-int efx_nic_probe_eventq(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
- unsigned entries;
-
- entries = channel->eventq_mask + 1;
- return efx_alloc_special_buffer(efx, &channel->eventq,
- entries * sizeof(efx_qword_t));
-}
-
-void efx_nic_init_eventq(struct efx_channel *channel)
-{
- efx_oword_t reg;
- struct efx_nic *efx = channel->efx;
-
- netif_dbg(efx, hw, efx->net_dev,
- "channel %d event queue in special buffers %d-%d\n",
- channel->channel, channel->eventq.index,
- channel->eventq.index + channel->eventq.entries - 1);
-
- if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
- EFX_POPULATE_OWORD_3(reg,
- FRF_CZ_TIMER_Q_EN, 1,
- FRF_CZ_HOST_NOTIFY_MODE, 0,
- FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
- efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
- }
-
- /* Pin event queue buffer */
- efx_init_special_buffer(efx, &channel->eventq);
-
- /* Fill event queue with all ones (i.e. empty events) */
- memset(channel->eventq.addr, 0xff, channel->eventq.len);
-
- /* Push event queue to card */
- EFX_POPULATE_OWORD_3(reg,
- FRF_AZ_EVQ_EN, 1,
- FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
- FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
- efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
- channel->channel);
-
- efx->type->push_irq_moderation(channel);
-}
-
-void efx_nic_fini_eventq(struct efx_channel *channel)
-{
- efx_oword_t reg;
- struct efx_nic *efx = channel->efx;
-
- /* Remove event queue from card */
- EFX_ZERO_OWORD(reg);
- efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
- channel->channel);
- if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
- efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
-
- /* Unpin event queue */
- efx_fini_special_buffer(efx, &channel->eventq);
-}
-
-/* Free buffers backing event queue */
-void efx_nic_remove_eventq(struct efx_channel *channel)
-{
- efx_free_special_buffer(channel->efx, &channel->eventq);
-}
-
-
void efx_nic_event_test_start(struct efx_channel *channel)
{
channel->event_test_cpu = -1;
smp_wmb();
- efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
+ channel->efx->type->ev_test_generate(channel);
}
-void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
-{
- efx_magic_event(efx_rx_queue_channel(rx_queue),
- EFX_CHANNEL_MAGIC_FILL(rx_queue));
-}
-
-/**************************************************************************
- *
- * Hardware interrupts
- * The hardware interrupt handler does very little work; all the event
- * queue processing is carried out by per-channel tasklets.
- *
- **************************************************************************/
-
-/* Enable/disable/generate interrupts */
-static inline void efx_nic_interrupts(struct efx_nic *efx,
- bool enabled, bool force)
-{
- efx_oword_t int_en_reg_ker;
-
- EFX_POPULATE_OWORD_3(int_en_reg_ker,
- FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
- FRF_AZ_KER_INT_KER, force,
- FRF_AZ_DRV_INT_EN_KER, enabled);
- efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
-}
-
-void efx_nic_enable_interrupts(struct efx_nic *efx)
-{
- EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
- wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
-
- efx_nic_interrupts(efx, true, false);
-}
-
-void efx_nic_disable_interrupts(struct efx_nic *efx)
-{
- /* Disable interrupts */
- efx_nic_interrupts(efx, false, false);
-}
-
-/* Generate a test interrupt
- * Interrupts must already have been enabled, otherwise nasty things
- * may happen.
- */
void efx_nic_irq_test_start(struct efx_nic *efx)
{
efx->last_irq_cpu = -1;
smp_wmb();
- efx_nic_interrupts(efx, true, true);
-}
-
-/* Process a fatal interrupt
- * Disable bus mastering ASAP and schedule a reset
- */
-irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
-{
- struct falcon_nic_data *nic_data = efx->nic_data;
- efx_oword_t *int_ker = efx->irq_status.addr;
- efx_oword_t fatal_intr;
- int error, mem_perr;
-
- efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
- error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
-
- netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
- EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
- EFX_OWORD_VAL(fatal_intr),
- error ? "disabling bus mastering" : "no recognised error");
-
-	/* If this is a memory parity error, dump which blocks are offending */
- mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
- EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
- if (mem_perr) {
- efx_oword_t reg;
- efx_reado(efx, &reg, FR_AZ_MEM_STAT);
- netif_err(efx, hw, efx->net_dev,
- "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
- EFX_OWORD_VAL(reg));
- }
-
- /* Disable both devices */
- pci_clear_master(efx->pci_dev);
- if (efx_nic_is_dual_func(efx))
- pci_clear_master(nic_data->pci_dev2);
- efx_nic_disable_interrupts(efx);
-
- /* Count errors and reset or disable the NIC accordingly */
- if (efx->int_error_count == 0 ||
- time_after(jiffies, efx->int_error_expire)) {
- efx->int_error_count = 0;
- efx->int_error_expire =
- jiffies + EFX_INT_ERROR_EXPIRE * HZ;
- }
- if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
- netif_err(efx, hw, efx->net_dev,
- "SYSTEM ERROR - reset scheduled\n");
- efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
- } else {
- netif_err(efx, hw, efx->net_dev,
-			  "SYSTEM ERROR - max number of errors seen. "
-			  "NIC will be disabled\n");
- efx_schedule_reset(efx, RESET_TYPE_DISABLE);
- }
-
- return IRQ_HANDLED;
-}
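
The error accounting in efx_nic_fatal_interrupt() is a sliding window: the
counter restarts whenever the window has expired, and only repeated faults
inside one window escalate from a reset to disabling the NIC. A userspace
restatement (window length and threshold are illustrative; the driver's
EFX_INT_ERROR_EXPIRE and EFX_MAX_INT_ERRORS set the real values):

    #include <stdio.h>
    #include <time.h>

    #define ERROR_EXPIRE_S 3600   /* illustrative window length */
    #define MAX_ERRORS     4      /* illustrative escalation threshold */

    static int error_count;
    static time_t error_expire;

    /* Returns 1 to schedule a reset, 0 to disable the NIC */
    static int note_fatal_error(time_t now)
    {
        if (error_count == 0 || now > error_expire) {
            error_count = 0;   /* start a new window */
            error_expire = now + ERROR_EXPIRE_S;
        }
        return ++error_count < MAX_ERRORS;
    }

    int main(void)
    {
        time_t now = time(NULL);
        for (int i = 0; i < 5; i++)   /* errors 1-3 reset, then disable */
            printf("%s\n", note_fatal_error(now) ? "reset" : "disable");
        return 0;
    }
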
-
-/* Handle a legacy interrupt
- * Acknowledge the interrupt and schedule event queue processing.
- */
-static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
-{
- struct efx_nic *efx = dev_id;
- efx_oword_t *int_ker = efx->irq_status.addr;
- irqreturn_t result = IRQ_NONE;
- struct efx_channel *channel;
- efx_dword_t reg;
- u32 queues;
- int syserr;
-
- /* Could this be ours? If interrupts are disabled then the
- * channel state may not be valid.
- */
- if (!efx->legacy_irq_enabled)
- return result;
-
- /* Read the ISR which also ACKs the interrupts */
- efx_readd(efx, &reg, FR_BZ_INT_ISR0);
- queues = EFX_EXTRACT_DWORD(reg, 0, 31);
-
- /* Legacy interrupts are disabled too late by the EEH kernel
- * code. Disable them earlier.
- * If an EEH error occurred, the read will have returned all ones.
- */
- if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
- !efx->eeh_disabled_legacy_irq) {
- disable_irq_nosync(efx->legacy_irq);
- efx->eeh_disabled_legacy_irq = true;
- }
-
- /* Handle non-event-queue sources */
- if (queues & (1U << efx->irq_level)) {
- syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
- if (unlikely(syserr))
- return efx_nic_fatal_interrupt(efx);
- efx->last_irq_cpu = raw_smp_processor_id();
- }
-
- if (queues != 0) {
- if (EFX_WORKAROUND_15783(efx))
- efx->irq_zero_count = 0;
-
- /* Schedule processing of any interrupting queues */
- efx_for_each_channel(channel, efx) {
- if (queues & 1)
- efx_schedule_channel_irq(channel);
- queues >>= 1;
- }
- result = IRQ_HANDLED;
-
- } else if (EFX_WORKAROUND_15783(efx)) {
- efx_qword_t *event;
-
- /* We can't return IRQ_HANDLED more than once on seeing ISR=0
- * because this might be a shared interrupt. */
- if (efx->irq_zero_count++ == 0)
- result = IRQ_HANDLED;
-
- /* Ensure we schedule or rearm all event queues */
- efx_for_each_channel(channel, efx) {
- event = efx_event(channel, channel->eventq_read_ptr);
- if (efx_event_present(event))
- efx_schedule_channel_irq(channel);
- else
- efx_nic_eventq_read_ack(channel);
- }
- }
-
- if (result == IRQ_HANDLED)
- netif_vdbg(efx, intr, efx->net_dev,
- "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
- irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
-
- return result;
-}
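
Scheduling channels from the ISR value above uses a common bitmap-walk idiom:
test bit 0, shift right, advance the channel number. Isolated from the
driver:

    #include <stdio.h>

    int main(void)
    {
        unsigned queues = 0x0b;   /* ISR says channels 0, 1 and 3 fired */

        for (int channel = 0; queues; channel++, queues >>= 1)
            if (queues & 1)
                printf("schedule channel %d\n", channel);
        return 0;
    }
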
-
-/* Handle an MSI interrupt
- *
- * Handle an MSI hardware interrupt. This routine schedules event
- * queue processing. No interrupt acknowledgement cycle is necessary.
- * Also, we never need to check that the interrupt is for us, since
- * MSI interrupts cannot be shared.
- */
-static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
-{
- struct efx_channel *channel = *(struct efx_channel **)dev_id;
- struct efx_nic *efx = channel->efx;
- efx_oword_t *int_ker = efx->irq_status.addr;
- int syserr;
-
- netif_vdbg(efx, intr, efx->net_dev,
- "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
- irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
-
- /* Handle non-event-queue sources */
- if (channel->channel == efx->irq_level) {
- syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
- if (unlikely(syserr))
- return efx_nic_fatal_interrupt(efx);
- efx->last_irq_cpu = raw_smp_processor_id();
- }
-
- /* Schedule processing of the channel */
- efx_schedule_channel_irq(channel);
-
- return IRQ_HANDLED;
-}
-
-
-/* Set up the RSS indirection table.
- * This maps from the packet's hash value to an RX queue.
- */
-void efx_nic_push_rx_indir_table(struct efx_nic *efx)
-{
- size_t i = 0;
- efx_dword_t dword;
-
- if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
- return;
-
- BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
- FR_BZ_RX_INDIRECTION_TBL_ROWS);
-
- for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
- EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
- efx->rx_indir_table[i]);
- efx_writed(efx, &dword,
- FR_BZ_RX_INDIRECTION_TBL +
- FR_BZ_RX_INDIRECTION_TBL_STEP * i);
- }
+ efx->type->irq_test_generate(efx);
}
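
For context on the table pushed by efx_nic_push_rx_indir_table() above: RSS
indirection is just an array lookup, where low bits of the packet hash select
a row and the row names an RX queue; a round-robin fill is the usual default
spread. A userspace sketch (the 128-row size matches
FR_BZ_RX_INDIRECTION_TBL_ROWS on this hardware, but treat it as an assumption
here):

    #include <stdio.h>

    #define INDIR_ROWS 128   /* assumed indirection table size */

    int main(void)
    {
        unsigned table[INDIR_ROWS];
        unsigned n_rx_queues = 6;

        /* Default round-robin spread of queues across the rows */
        for (unsigned i = 0; i < INDIR_ROWS; i++)
            table[i] = i % n_rx_queues;

        unsigned hash = 0x9e3779b9;   /* example packet hash */
        printf("hash 0x%x -> RXQ %u\n", hash, table[hash % INDIR_ROWS]);
        return 0;
    }
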
/* Hook interrupt handler(s)
@@ -1711,13 +82,8 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
int rc;
if (!EFX_INT_MODE_USE_MSI(efx)) {
- irq_handler_t handler;
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
- handler = efx_legacy_interrupt;
- else
- handler = falcon_legacy_interrupt_a1;
-
- rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
+ rc = request_irq(efx->legacy_irq,
+ efx->type->irq_handle_legacy, IRQF_SHARED,
efx->name, efx);
if (rc) {
netif_err(efx, drv, efx->net_dev,
@@ -1742,10 +108,10 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
/* Hook MSI or MSI-X interrupt */
n_irqs = 0;
efx_for_each_channel(channel, efx) {
- rc = request_irq(channel->irq, efx_msi_interrupt,
+ rc = request_irq(channel->irq, efx->type->irq_handle_msi,
IRQF_PROBE_SHARED, /* Not shared */
- efx->channel_name[channel->channel],
- &efx->channel[channel->channel]);
+ efx->msi_context[channel->channel].name,
+ &efx->msi_context[channel->channel]);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed to hook IRQ %d\n", channel->irq);
@@ -1774,7 +140,7 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
efx_for_each_channel(channel, efx) {
if (n_irqs-- == 0)
break;
- free_irq(channel->irq, &efx->channel[channel->channel]);
+ free_irq(channel->irq, &efx->msi_context[channel->channel]);
}
fail1:
return rc;
@@ -1783,7 +149,6 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
void efx_nic_fini_interrupt(struct efx_nic *efx)
{
struct efx_channel *channel;
- efx_oword_t reg;
#ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
@@ -1792,167 +157,13 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
/* Disable MSI/MSI-X interrupts */
efx_for_each_channel(channel, efx)
- free_irq(channel->irq, &efx->channel[channel->channel]);
-
- /* ACK legacy interrupt */
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
- efx_reado(efx, &reg, FR_BZ_INT_ISR0);
- else
- falcon_irq_ack_a1(efx);
+ free_irq(channel->irq, &efx->msi_context[channel->channel]);
/* Disable legacy interrupt */
if (efx->legacy_irq)
free_irq(efx->legacy_irq, efx);
}
-/* Looks at available SRAM resources and works out how many queues we
- * can support, and where things like descriptor caches should live.
- *
- * SRAM is split up as follows:
- * 0 buftbl entries for channels
- * efx->vf_buftbl_base buftbl entries for SR-IOV
- * efx->rx_dc_base RX descriptor caches
- * efx->tx_dc_base TX descriptor caches
- */
-void efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
-{
- unsigned vi_count, buftbl_min;
-
- /* Account for the buffer table entries backing the datapath channels
- * and the descriptor caches for those channels.
- */
- buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
- efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
- efx->n_channels * EFX_MAX_EVQ_SIZE)
- * sizeof(efx_qword_t) / EFX_BUF_SIZE);
- vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
-
-#ifdef CONFIG_SFC_SRIOV
- if (efx_sriov_wanted(efx)) {
- unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
-
- efx->vf_buftbl_base = buftbl_min;
-
- vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
- vi_count = max(vi_count, EFX_VI_BASE);
- buftbl_free = (sram_lim_qw - buftbl_min -
- vi_count * vi_dc_entries);
-
- entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
- efx_vf_size(efx));
- vf_limit = min(buftbl_free / entries_per_vf,
- (1024U - EFX_VI_BASE) >> efx->vi_scale);
-
- if (efx->vf_count > vf_limit) {
- netif_err(efx, probe, efx->net_dev,
-				  "Reducing VF count from %d to %d\n",
- efx->vf_count, vf_limit);
- efx->vf_count = vf_limit;
- }
- vi_count += efx->vf_count * efx_vf_size(efx);
- }
-#endif
-
- efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
- efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
-}
-
-u32 efx_nic_fpga_ver(struct efx_nic *efx)
-{
- efx_oword_t altera_build;
- efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
- return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
-}
-
-void efx_nic_init_common(struct efx_nic *efx)
-{
- efx_oword_t temp;
-
- /* Set positions of descriptor caches in SRAM. */
- EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
- efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
- EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
- efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
-
- /* Set TX descriptor cache size. */
- BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
- EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
- efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
-
- /* Set RX descriptor cache size. Set low watermark to size-8, as
-	 * this allows the most efficient prefetching.
- */
- BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
- EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
- efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
- EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
- efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
-
- /* Program INT_KER address */
- EFX_POPULATE_OWORD_2(temp,
- FRF_AZ_NORM_INT_VEC_DIS_KER,
- EFX_INT_MODE_USE_MSI(efx),
- FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
- efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
-
- if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
- /* Use an interrupt level unused by event queues */
- efx->irq_level = 0x1f;
- else
- /* Use a valid MSI-X vector */
- efx->irq_level = 0;
-
- /* Enable all the genuinely fatal interrupts. (They are still
- * masked by the overall interrupt mask, controlled by
- * falcon_interrupts()).
- *
- * Note: All other fatal interrupts are enabled
- */
- EFX_POPULATE_OWORD_3(temp,
- FRF_AZ_ILL_ADR_INT_KER_EN, 1,
- FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
- FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
- if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
- EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
- EFX_INVERT_OWORD(temp);
- efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
-
- efx_nic_push_rx_indir_table(efx);
-
- /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
- * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
- */
- efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
- /* Enable SW_EV to inherit in char driver - assume harmless here */
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
- /* Prefetch threshold 2 => fetch when descriptor cache half empty */
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
- /* Disable hardware watchdog which can misfire */
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
- /* Squash TX of packets of 16 bytes or less */
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
- EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
- efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
-
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- EFX_POPULATE_OWORD_4(temp,
- /* Default values */
- FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
- FRF_BZ_TX_PACE_SB_AF, 0xb,
- FRF_BZ_TX_PACE_FB_BASE, 0,
- /* Allow large pace values in the
- * fast bin. */
- FRF_BZ_TX_PACE_BIN_TH,
- FFE_BZ_TX_PACE_RESERVED);
- efx_writeo(efx, &temp, FR_BZ_TX_PACE);
- }
-}
-
/* Register dump */
#define REGISTER_REVISION_A 1
@@ -2217,3 +428,86 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
}
}
}
+
+/**
+ * efx_nic_describe_stats - Describe supported statistics for ethtool
+ * @desc: Array of &struct efx_hw_stat_desc describing the statistics
+ * @count: Length of the @desc array
+ * @mask: Bitmask of which elements of @desc are enabled
+ * @names: Buffer to copy names to, or %NULL. The names are copied
+ * starting at intervals of %ETH_GSTRING_LEN bytes.
+ *
+ * Returns the number of visible statistics, i.e. the number of set
+ * bits in the first @count bits of @mask for which a name is defined.
+ */
+size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask, u8 *names)
+{
+ size_t visible = 0;
+ size_t index;
+
+ for_each_set_bit(index, mask, count) {
+ if (desc[index].name) {
+ if (names) {
+ strlcpy(names, desc[index].name,
+ ETH_GSTRING_LEN);
+ names += ETH_GSTRING_LEN;
+ }
+ ++visible;
+ }
+ }
+
+ return visible;
+}
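
A typical caller invokes efx_nic_describe_stats() twice: once with
names == NULL to learn how many strings ethtool needs, then again to fill the
buffer. A simplified userspace restatement of the walk (strncpy stands in for
the kernel's strlcpy, and the mask is a single word for brevity):

    #include <stdio.h>
    #include <string.h>

    #define ETH_GSTRING_LEN 32

    struct hw_stat_desc { const char *name; };   /* simplified */

    static size_t describe_stats(const struct hw_stat_desc *desc, size_t count,
                                 unsigned long mask, char *names)
    {
        size_t visible = 0;

        for (size_t i = 0; i < count; i++) {
            if (!(mask & (1UL << i)) || !desc[i].name)
                continue;   /* disabled or nameless: not visible */
            if (names) {
                strncpy(names, desc[i].name, ETH_GSTRING_LEN - 1);
                names[ETH_GSTRING_LEN - 1] = '\0';
                names += ETH_GSTRING_LEN;
            }
            ++visible;
        }
        return visible;
    }

    int main(void)
    {
        struct hw_stat_desc desc[] = {
            { "tx_bytes" }, { NULL /* no name: hidden */ }, { "rx_bytes" },
        };
        char names[3][ETH_GSTRING_LEN];

        size_t n = describe_stats(desc, 3, 0x7, NULL);   /* sizing: 2 */
        describe_stats(desc, 3, 0x7, &names[0][0]);      /* fill */
        printf("%zu stats: %s, %s\n", n, names[0], names[1]);
        return 0;
    }
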
+
+/**
+ * efx_nic_update_stats - Convert statistics DMA buffer to array of u64
+ * @desc: Array of &struct efx_hw_stat_desc describing the DMA buffer
+ * layout. DMA widths of 0, 16, 32 and 64 are supported; where
+ * the width is specified as 0 the corresponding element of
+ * @stats is not updated.
+ * @count: Length of the @desc array
+ * @mask: Bitmask of which elements of @desc are enabled
+ * @stats: Buffer to update with the converted statistics. The length
+ * of this array must be at least the number of set bits in the
+ * first @count bits of @mask.
+ * @dma_buf: DMA buffer containing hardware statistics
+ * @accumulate: If set, the converted values will be added rather than
+ * directly stored to the corresponding elements of @stats
+ */
+void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask,
+ u64 *stats, const void *dma_buf, bool accumulate)
+{
+ size_t index;
+
+ for_each_set_bit(index, mask, count) {
+ if (desc[index].dma_width) {
+ const void *addr = dma_buf + desc[index].offset;
+ u64 val;
+
+ switch (desc[index].dma_width) {
+ case 16:
+ val = le16_to_cpup((__le16 *)addr);
+ break;
+ case 32:
+ val = le32_to_cpup((__le32 *)addr);
+ break;
+ case 64:
+ val = le64_to_cpup((__le64 *)addr);
+ break;
+ default:
+ WARN_ON(1);
+ val = 0;
+ break;
+ }
+
+ if (accumulate)
+ *stats += val;
+ else
+ *stats = val;
+ }
+
+ ++stats;
+ }
+}
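
The width switch above just decodes a little-endian counter of 16, 32 or 64
bits out of the DMA buffer. A freestanding sketch of the same decode (memcpy
replaces le*_to_cpup() and assumes a little-endian host, so it is
illustrative only):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct stat_desc { unsigned offset, dma_width; };   /* simplified */

    int main(void)
    {
        /* Fake DMA buffer: 16-bit counter at 0, 32-bit counter at 4 */
        uint8_t dma_buf[8] = { 0x34, 0x12, 0, 0, 0x78, 0x56, 0x34, 0x12 };
        struct stat_desc desc[] = { { 0, 16 }, { 4, 32 } };
        uint64_t stats[2] = { 0, 0 };

        for (int i = 0; i < 2; i++) {
            uint64_t val = 0;
            memcpy(&val, dma_buf + desc[i].offset, desc[i].dma_width / 8);
            stats[i] += val;   /* the 'accumulate' variant */
        }
        printf("0x%llx 0x%llx\n",   /* 0x1234 0x12345678 */
               (unsigned long long)stats[0], (unsigned long long)stats[1]);
        return 0;
    }
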
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index d63c2991a751..4b1e188f7a2f 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2011 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -16,17 +16,13 @@
#include "net_driver.h"
#include "efx.h"
#include "mcdi.h"
-#include "spi.h"
-
-/*
- * Falcon hardware control
- */
enum {
EFX_REV_FALCON_A0 = 0,
EFX_REV_FALCON_A1 = 1,
EFX_REV_FALCON_B0 = 2,
EFX_REV_SIENA_A0 = 3,
+ EFX_REV_HUNT_A0 = 4,
};
static inline int efx_nic_rev(struct efx_nic *efx)
@@ -34,7 +30,7 @@ static inline int efx_nic_rev(struct efx_nic *efx)
return efx->type->revision;
}
-extern u32 efx_nic_fpga_ver(struct efx_nic *efx);
+extern u32 efx_farch_fpga_ver(struct efx_nic *efx);
/* NIC has two interlinked PCI functions for the same port. */
static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
@@ -42,6 +38,65 @@ static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
return efx_nic_rev(efx) < EFX_REV_FALCON_B0;
}
+/* Read the current event from the event queue */
+static inline efx_qword_t *efx_event(struct efx_channel *channel,
+ unsigned int index)
+{
+ return ((efx_qword_t *) (channel->eventq.buf.addr)) +
+ (index & channel->eventq_mask);
+}
+
+/* See if an event is present
+ *
+ * We check both the high and low dword of the event for all ones. We
+ * wrote all ones when we cleared the event, and no valid event can
+ * have all ones in either its high or low dwords. This approach is
+ * robust against reordering.
+ *
+ * Note that using a single 64-bit comparison is incorrect; even
+ * though the CPU read will be atomic, the DMA write may not be.
+ */
+static inline int efx_event_present(efx_qword_t *event)
+{
+ return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
+ EFX_DWORD_IS_ALL_ONES(event->dword[1]));
+}
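
To make the comment concrete: the queue memory starts life memset to 0xff and
each consumed event is rewritten to all ones, so "all ones in either dword"
reliably means "no event yet", even if the NIC's 64-bit DMA write lands one
32-bit half before the other. A toy version with plain types (not the
driver's efx_qword_t):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Toy stand-in for efx_qword_t: two independently written dwords */
    union qword { uint64_t u64; uint32_t dword[2]; };

    static int event_present(const union qword *ev)
    {
        /* Check each half separately; a single 64-bit compare could
         * accept a torn, half-written event */
        return ev->dword[0] != 0xffffffffu && ev->dword[1] != 0xffffffffu;
    }

    int main(void)
    {
        union qword ev;

        memset(&ev, 0xff, sizeof(ev));        /* "empty" slot */
        printf("%d\n", event_present(&ev));   /* 0 */
        ev.dword[0] = 0x12345678;             /* torn write in progress */
        printf("%d\n", event_present(&ev));   /* still 0 */
        ev.dword[1] = 0x9abcdef0;             /* second half lands */
        printf("%d\n", event_present(&ev));   /* 1 */
        return 0;
    }
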
+
+/* Returns a pointer to the specified transmit descriptor in the TX
+ * descriptor queue belonging to the specified channel.
+ */
+static inline efx_qword_t *
+efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
+{
+ return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
+}
+
+/* Decide whether to push a TX descriptor to the NIC vs merely writing
+ * the doorbell. This can reduce latency when we are adding a single
+ * descriptor to an empty queue, but is otherwise pointless. Further,
+ * Falcon and Siena have hardware bugs (SF bug 33851) that may be
+ * triggered if we don't check this.
+ */
+static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
+ unsigned int write_count)
+{
+ unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+
+ if (empty_read_count == 0)
+ return false;
+
+ tx_queue->empty_read_count = 0;
+ return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
+ && tx_queue->write_count - write_count == 1;
+}
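
empty_read_count packs a validity flag (EFX_EMPTY_COUNT_VALID, a single high
bit) alongside a snapshot of the write count; the XOR/mask test asks whether
the queue was last seen empty at exactly the current write position. A toy
demonstration of that packing, leaving out the driver's additional
one-new-descriptor check:

    #include <stdio.h>

    #define EMPTY_COUNT_VALID 0x80000000u   /* high bit: snapshot valid */

    int main(void)
    {
        unsigned write_count = 41;   /* count before the new descriptor */
        /* Completion path saw the queue empty at write position 41 */
        unsigned empty_read_count = 41 | EMPTY_COUNT_VALID;

        /* Same comparison as efx_nic_may_push_tx_desc() */
        int may_push = ((empty_read_count ^ write_count)
                        & ~EMPTY_COUNT_VALID) == 0;

        printf("push descriptor: %s\n", may_push ? "yes" : "no");
        return 0;
    }
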
+
+/* Returns a pointer to the specified descriptor in the RX descriptor queue */
+static inline efx_qword_t *
+efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
+{
+ return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index;
+}
+
enum {
PHY_TYPE_NONE = 0,
PHY_TYPE_TXC43128 = 1,
@@ -59,9 +114,6 @@ enum {
(1 << LOOPBACK_XGXS) | \
(1 << LOOPBACK_XAUI))
-#define FALCON_GMAC_LOOPBACKS \
- (1 << LOOPBACK_GMAC)
-
/* Alignment of PCIe DMA boundaries (4KB) */
#define EFX_PAGE_SIZE 4096
/* Size and alignment of buffer table entries (same) */
@@ -105,13 +157,96 @@ struct falcon_board {
};
/**
+ * struct falcon_spi_device - a Falcon SPI (Serial Peripheral Interface) device
+ * @device_id: Controller's id for the device
+ * @size: Size (in bytes)
+ * @addr_len: Number of address bytes in read/write commands
+ * @munge_address: Flag whether addresses should be munged.
+ * Some devices with 9-bit addresses (e.g. AT25040A EEPROM)
+ * use bit 3 of the command byte as address bit A8, rather
+ * than having a two-byte address. If this flag is set, then
+ * commands should be munged in this way.
+ * @erase_command: Erase command (or 0 if sector erase not needed).
+ * @erase_size: Erase sector size (in bytes)
+ * Erase commands affect sectors with this size and alignment.
+ * This must be a power of two.
+ * @block_size: Write block size (in bytes).
+ * Write commands are limited to blocks with this size and alignment.
+ */
+struct falcon_spi_device {
+ int device_id;
+ unsigned int size;
+ unsigned int addr_len;
+ unsigned int munge_address:1;
+ u8 erase_command;
+ unsigned int erase_size;
+ unsigned int block_size;
+};
+
+static inline bool falcon_spi_present(const struct falcon_spi_device *spi)
+{
+ return spi->size != 0;
+}
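
The @munge_address scheme can be shown concretely: on a 9-bit-address part
such as the AT25040A, address bit A8 rides in bit 3 of the command byte
instead of a second address byte. A hypothetical encoding helper (the opcode
is a typical SPI READ value, not taken from this driver):

    #include <stdint.h>
    #include <stdio.h>

    /* Fold A8 of a 9-bit address into bit 3 of the command byte */
    static uint8_t munge_command(uint8_t cmd, unsigned addr)
    {
        return cmd | (uint8_t)(((addr >> 8) & 1) << 3);
    }

    int main(void)
    {
        uint8_t read_cmd = 0x03;   /* common SPI READ opcode */

        printf("0x%02x 0x%02x\n",
               munge_command(read_cmd, 0x0ff),    /* A8=0 -> 0x03 */
               munge_command(read_cmd, 0x100));   /* A8=1 -> 0x0b */
        return 0;
    }
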
+
+enum {
+ FALCON_STAT_tx_bytes,
+ FALCON_STAT_tx_packets,
+ FALCON_STAT_tx_pause,
+ FALCON_STAT_tx_control,
+ FALCON_STAT_tx_unicast,
+ FALCON_STAT_tx_multicast,
+ FALCON_STAT_tx_broadcast,
+ FALCON_STAT_tx_lt64,
+ FALCON_STAT_tx_64,
+ FALCON_STAT_tx_65_to_127,
+ FALCON_STAT_tx_128_to_255,
+ FALCON_STAT_tx_256_to_511,
+ FALCON_STAT_tx_512_to_1023,
+ FALCON_STAT_tx_1024_to_15xx,
+ FALCON_STAT_tx_15xx_to_jumbo,
+ FALCON_STAT_tx_gtjumbo,
+ FALCON_STAT_tx_non_tcpudp,
+ FALCON_STAT_tx_mac_src_error,
+ FALCON_STAT_tx_ip_src_error,
+ FALCON_STAT_rx_bytes,
+ FALCON_STAT_rx_good_bytes,
+ FALCON_STAT_rx_bad_bytes,
+ FALCON_STAT_rx_packets,
+ FALCON_STAT_rx_good,
+ FALCON_STAT_rx_bad,
+ FALCON_STAT_rx_pause,
+ FALCON_STAT_rx_control,
+ FALCON_STAT_rx_unicast,
+ FALCON_STAT_rx_multicast,
+ FALCON_STAT_rx_broadcast,
+ FALCON_STAT_rx_lt64,
+ FALCON_STAT_rx_64,
+ FALCON_STAT_rx_65_to_127,
+ FALCON_STAT_rx_128_to_255,
+ FALCON_STAT_rx_256_to_511,
+ FALCON_STAT_rx_512_to_1023,
+ FALCON_STAT_rx_1024_to_15xx,
+ FALCON_STAT_rx_15xx_to_jumbo,
+ FALCON_STAT_rx_gtjumbo,
+ FALCON_STAT_rx_bad_lt64,
+ FALCON_STAT_rx_bad_gtjumbo,
+ FALCON_STAT_rx_overflow,
+ FALCON_STAT_rx_symbol_error,
+ FALCON_STAT_rx_align_error,
+ FALCON_STAT_rx_length_error,
+ FALCON_STAT_rx_internal_error,
+ FALCON_STAT_rx_nodesc_drop_cnt,
+ FALCON_STAT_COUNT
+};
+
+/**
* struct falcon_nic_data - Falcon NIC state
* @pci_dev2: Secondary function of Falcon A
* @board: Board state and functions
+ * @stats: Hardware statistics
* @stats_disable_count: Nest count for disabling statistics fetches
* @stats_pending: Is there a pending DMA of MAC statistics.
* @stats_timer: A timer for regularly fetching MAC statistics.
- * @stats_dma_done: Pointer to the flag which indicates DMA completion.
* @spi_flash: SPI flash device
* @spi_eeprom: SPI EEPROM device
* @spi_lock: SPI bus lock
@@ -121,12 +256,12 @@ struct falcon_board {
struct falcon_nic_data {
struct pci_dev *pci_dev2;
struct falcon_board board;
+ u64 stats[FALCON_STAT_COUNT];
unsigned int stats_disable_count;
bool stats_pending;
struct timer_list stats_timer;
- u32 *stats_dma_done;
- struct efx_spi_device spi_flash;
- struct efx_spi_device spi_eeprom;
+ struct falcon_spi_device spi_flash;
+ struct falcon_spi_device spi_eeprom;
struct mutex spi_lock;
struct mutex mdio_lock;
bool xmac_poll_required;
@@ -138,29 +273,148 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
return &data->board;
}
+enum {
+ SIENA_STAT_tx_bytes,
+ SIENA_STAT_tx_good_bytes,
+ SIENA_STAT_tx_bad_bytes,
+ SIENA_STAT_tx_packets,
+ SIENA_STAT_tx_bad,
+ SIENA_STAT_tx_pause,
+ SIENA_STAT_tx_control,
+ SIENA_STAT_tx_unicast,
+ SIENA_STAT_tx_multicast,
+ SIENA_STAT_tx_broadcast,
+ SIENA_STAT_tx_lt64,
+ SIENA_STAT_tx_64,
+ SIENA_STAT_tx_65_to_127,
+ SIENA_STAT_tx_128_to_255,
+ SIENA_STAT_tx_256_to_511,
+ SIENA_STAT_tx_512_to_1023,
+ SIENA_STAT_tx_1024_to_15xx,
+ SIENA_STAT_tx_15xx_to_jumbo,
+ SIENA_STAT_tx_gtjumbo,
+ SIENA_STAT_tx_collision,
+ SIENA_STAT_tx_single_collision,
+ SIENA_STAT_tx_multiple_collision,
+ SIENA_STAT_tx_excessive_collision,
+ SIENA_STAT_tx_deferred,
+ SIENA_STAT_tx_late_collision,
+ SIENA_STAT_tx_excessive_deferred,
+ SIENA_STAT_tx_non_tcpudp,
+ SIENA_STAT_tx_mac_src_error,
+ SIENA_STAT_tx_ip_src_error,
+ SIENA_STAT_rx_bytes,
+ SIENA_STAT_rx_good_bytes,
+ SIENA_STAT_rx_bad_bytes,
+ SIENA_STAT_rx_packets,
+ SIENA_STAT_rx_good,
+ SIENA_STAT_rx_bad,
+ SIENA_STAT_rx_pause,
+ SIENA_STAT_rx_control,
+ SIENA_STAT_rx_unicast,
+ SIENA_STAT_rx_multicast,
+ SIENA_STAT_rx_broadcast,
+ SIENA_STAT_rx_lt64,
+ SIENA_STAT_rx_64,
+ SIENA_STAT_rx_65_to_127,
+ SIENA_STAT_rx_128_to_255,
+ SIENA_STAT_rx_256_to_511,
+ SIENA_STAT_rx_512_to_1023,
+ SIENA_STAT_rx_1024_to_15xx,
+ SIENA_STAT_rx_15xx_to_jumbo,
+ SIENA_STAT_rx_gtjumbo,
+ SIENA_STAT_rx_bad_gtjumbo,
+ SIENA_STAT_rx_overflow,
+ SIENA_STAT_rx_false_carrier,
+ SIENA_STAT_rx_symbol_error,
+ SIENA_STAT_rx_align_error,
+ SIENA_STAT_rx_length_error,
+ SIENA_STAT_rx_internal_error,
+ SIENA_STAT_rx_nodesc_drop_cnt,
+ SIENA_STAT_COUNT
+};
+
/**
* struct siena_nic_data - Siena NIC state
- * @mcdi: Management-Controller-to-Driver Interface
* @wol_filter_id: Wake-on-LAN packet filter id
- * @hwmon: Hardware monitor state
+ * @stats: Hardware statistics
*/
struct siena_nic_data {
- struct efx_mcdi_iface mcdi;
int wol_filter_id;
-#ifdef CONFIG_SFC_MCDI_MON
- struct efx_mcdi_mon hwmon;
-#endif
+ u64 stats[SIENA_STAT_COUNT];
};
-#ifdef CONFIG_SFC_MCDI_MON
-static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
-{
- struct siena_nic_data *nic_data;
- EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
- nic_data = efx->nic_data;
- return &nic_data->hwmon;
-}
-#endif
+enum {
+ EF10_STAT_tx_bytes,
+ EF10_STAT_tx_packets,
+ EF10_STAT_tx_pause,
+ EF10_STAT_tx_control,
+ EF10_STAT_tx_unicast,
+ EF10_STAT_tx_multicast,
+ EF10_STAT_tx_broadcast,
+ EF10_STAT_tx_lt64,
+ EF10_STAT_tx_64,
+ EF10_STAT_tx_65_to_127,
+ EF10_STAT_tx_128_to_255,
+ EF10_STAT_tx_256_to_511,
+ EF10_STAT_tx_512_to_1023,
+ EF10_STAT_tx_1024_to_15xx,
+ EF10_STAT_tx_15xx_to_jumbo,
+ EF10_STAT_rx_bytes,
+ EF10_STAT_rx_bytes_minus_good_bytes,
+ EF10_STAT_rx_good_bytes,
+ EF10_STAT_rx_bad_bytes,
+ EF10_STAT_rx_packets,
+ EF10_STAT_rx_good,
+ EF10_STAT_rx_bad,
+ EF10_STAT_rx_pause,
+ EF10_STAT_rx_control,
+ EF10_STAT_rx_unicast,
+ EF10_STAT_rx_multicast,
+ EF10_STAT_rx_broadcast,
+ EF10_STAT_rx_lt64,
+ EF10_STAT_rx_64,
+ EF10_STAT_rx_65_to_127,
+ EF10_STAT_rx_128_to_255,
+ EF10_STAT_rx_256_to_511,
+ EF10_STAT_rx_512_to_1023,
+ EF10_STAT_rx_1024_to_15xx,
+ EF10_STAT_rx_15xx_to_jumbo,
+ EF10_STAT_rx_gtjumbo,
+ EF10_STAT_rx_bad_gtjumbo,
+ EF10_STAT_rx_overflow,
+ EF10_STAT_rx_align_error,
+ EF10_STAT_rx_length_error,
+ EF10_STAT_rx_nodesc_drops,
+ EF10_STAT_COUNT
+};
+
+/**
+ * struct efx_ef10_nic_data - EF10 architecture NIC state
+ * @mcdi_buf: DMA buffer for MCDI
+ * @warm_boot_count: Last seen MC warm boot count
+ * @vi_base: Absolute index of first VI in this function
+ * @n_allocated_vis: Number of VIs allocated to this function
+ * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot
+ * @must_restore_filters: Flag: filters have yet to be restored after MC reboot
+ * @rx_rss_context: Firmware handle for our RSS context
+ * @stats: Hardware statistics
+ * @workaround_35388: Flag: firmware supports workaround for bug 35388
+ * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
+ * %MC_CMD_GET_CAPABILITIES response)
+ */
+struct efx_ef10_nic_data {
+ struct efx_buffer mcdi_buf;
+ u16 warm_boot_count;
+ unsigned int vi_base;
+ unsigned int n_allocated_vis;
+ bool must_realloc_vis;
+ bool must_restore_filters;
+ u32 rx_rss_context;
+ u64 stats[EF10_STAT_COUNT];
+ bool workaround_35388;
+ u32 datapath_caps;
+};
/*
* On the SFC9000 family each port is associated with 1 PCI physical
@@ -263,6 +517,7 @@ extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
extern const struct efx_nic_type falcon_a1_nic_type;
extern const struct efx_nic_type falcon_b0_nic_type;
extern const struct efx_nic_type siena_a0_nic_type;
+extern const struct efx_nic_type efx_hunt_a0_nic_type;
/**************************************************************************
*
@@ -274,35 +529,123 @@ extern const struct efx_nic_type siena_a0_nic_type;
extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
/* TX data path */
-extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue);
-extern void efx_nic_init_tx(struct efx_tx_queue *tx_queue);
-extern void efx_nic_fini_tx(struct efx_tx_queue *tx_queue);
-extern void efx_nic_remove_tx(struct efx_tx_queue *tx_queue);
-extern void efx_nic_push_buffers(struct efx_tx_queue *tx_queue);
+static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
+{
+ return tx_queue->efx->type->tx_probe(tx_queue);
+}
+static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
+{
+ tx_queue->efx->type->tx_init(tx_queue);
+}
+static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
+{
+ tx_queue->efx->type->tx_remove(tx_queue);
+}
+static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
+{
+ tx_queue->efx->type->tx_write(tx_queue);
+}
/* RX data path */
-extern int efx_nic_probe_rx(struct efx_rx_queue *rx_queue);
-extern void efx_nic_init_rx(struct efx_rx_queue *rx_queue);
-extern void efx_nic_fini_rx(struct efx_rx_queue *rx_queue);
-extern void efx_nic_remove_rx(struct efx_rx_queue *rx_queue);
-extern void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue);
-extern void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue);
+static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
+{
+ return rx_queue->efx->type->rx_probe(rx_queue);
+}
+static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
+{
+ rx_queue->efx->type->rx_init(rx_queue);
+}
+static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
+{
+ rx_queue->efx->type->rx_remove(rx_queue);
+}
+static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
+{
+ rx_queue->efx->type->rx_write(rx_queue);
+}
+static inline void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
+{
+ rx_queue->efx->type->rx_defer_refill(rx_queue);
+}
/* Event data path */
-extern int efx_nic_probe_eventq(struct efx_channel *channel);
-extern void efx_nic_init_eventq(struct efx_channel *channel);
-extern void efx_nic_fini_eventq(struct efx_channel *channel);
-extern void efx_nic_remove_eventq(struct efx_channel *channel);
-extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota);
-extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
-extern bool efx_nic_event_present(struct efx_channel *channel);
+static inline int efx_nic_probe_eventq(struct efx_channel *channel)
+{
+ return channel->efx->type->ev_probe(channel);
+}
+static inline int efx_nic_init_eventq(struct efx_channel *channel)
+{
+ return channel->efx->type->ev_init(channel);
+}
+static inline void efx_nic_fini_eventq(struct efx_channel *channel)
+{
+ channel->efx->type->ev_fini(channel);
+}
+static inline void efx_nic_remove_eventq(struct efx_channel *channel)
+{
+ channel->efx->type->ev_remove(channel);
+}
+static inline int
+efx_nic_process_eventq(struct efx_channel *channel, int quota)
+{
+ return channel->efx->type->ev_process(channel, quota);
+}
+static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
+{
+ channel->efx->type->ev_read_ack(channel);
+}
+extern void efx_nic_event_test_start(struct efx_channel *channel);
-/* MAC/PHY */
-extern void falcon_drain_tx_fifo(struct efx_nic *efx);
-extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
-extern bool falcon_xmac_check_fault(struct efx_nic *efx);
-extern int falcon_reconfigure_xmac(struct efx_nic *efx);
-extern void falcon_update_stats_xmac(struct efx_nic *efx);
+/* Falcon/Siena queue operations */
+extern int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
+extern void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
+extern void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
+extern void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
+extern void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
+extern int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
+extern void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
+extern void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
+extern void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
+extern void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
+extern void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
+extern int efx_farch_ev_probe(struct efx_channel *channel);
+extern int efx_farch_ev_init(struct efx_channel *channel);
+extern void efx_farch_ev_fini(struct efx_channel *channel);
+extern void efx_farch_ev_remove(struct efx_channel *channel);
+extern int efx_farch_ev_process(struct efx_channel *channel, int quota);
+extern void efx_farch_ev_read_ack(struct efx_channel *channel);
+extern void efx_farch_ev_test_generate(struct efx_channel *channel);
+
+/* Falcon/Siena filter operations */
+extern int efx_farch_filter_table_probe(struct efx_nic *efx);
+extern void efx_farch_filter_table_restore(struct efx_nic *efx);
+extern void efx_farch_filter_table_remove(struct efx_nic *efx);
+extern void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
+extern s32 efx_farch_filter_insert(struct efx_nic *efx,
+ struct efx_filter_spec *spec, bool replace);
+extern int efx_farch_filter_remove_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id);
+extern int efx_farch_filter_get_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *);
+extern void efx_farch_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+extern u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+extern u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
+extern s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size);
+#ifdef CONFIG_RFS_ACCEL
+extern s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
+ struct efx_filter_spec *spec);
+extern bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
+ unsigned int index);
+#endif
+extern void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
+
+extern bool efx_nic_event_present(struct efx_channel *channel);
/* Some statistics are computed as A - B where A and B each increase
* linearly with some hardware counter(s) and the counters are read
@@ -322,16 +665,18 @@ static inline void efx_update_diff_stat(u64 *stat, u64 diff)
*stat = diff;
}
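
efx_update_diff_stat() only ever moves the stored value forward, which
filters out the transient dips that occur when counter A is sampled slightly
before counter B. A userspace restatement of that guard and a short run (the
signed-difference comparison is assumed to match the kernel inline, whose
body is only partly visible in this hunk):

    #include <stdint.h>
    #include <stdio.h>

    static void update_diff_stat(uint64_t *stat, uint64_t diff)
    {
        /* Accept only increases; a momentarily smaller A - B is noise */
        if ((int64_t)(diff - *stat) > 0)
            *stat = diff;
    }

    int main(void)
    {
        uint64_t stat = 0;
        uint64_t samples[] = { 100, 98 /* torn read */, 120 };

        for (int i = 0; i < 3; i++) {
            update_diff_stat(&stat, samples[i]);
            printf("%llu\n", (unsigned long long)stat);   /* 100 100 120 */
        }
        return 0;
    }
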
-/* Interrupts and test events */
+/* Interrupts */
extern int efx_nic_init_interrupt(struct efx_nic *efx);
-extern void efx_nic_enable_interrupts(struct efx_nic *efx);
-extern void efx_nic_event_test_start(struct efx_channel *channel);
extern void efx_nic_irq_test_start(struct efx_nic *efx);
-extern void efx_nic_disable_interrupts(struct efx_nic *efx);
extern void efx_nic_fini_interrupt(struct efx_nic *efx);
-extern irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx);
-extern irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id);
-extern void falcon_irq_ack_a1(struct efx_nic *efx);
+
+/* Falcon/Siena interrupts */
+extern void efx_farch_irq_enable_master(struct efx_nic *efx);
+extern void efx_farch_irq_test_generate(struct efx_nic *efx);
+extern void efx_farch_irq_disable_master(struct efx_nic *efx);
+extern irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
+extern irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
+extern irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
{
@@ -345,69 +690,47 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
/* Global Resources */
extern int efx_nic_flush_queues(struct efx_nic *efx);
extern void siena_prepare_flush(struct efx_nic *efx);
+extern int efx_farch_fini_dmaq(struct efx_nic *efx);
extern void siena_finish_flush(struct efx_nic *efx);
extern void falcon_start_nic_stats(struct efx_nic *efx);
extern void falcon_stop_nic_stats(struct efx_nic *efx);
-extern void falcon_setup_xaui(struct efx_nic *efx);
extern int falcon_reset_xaui(struct efx_nic *efx);
-extern void
-efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
-extern void efx_nic_init_common(struct efx_nic *efx);
-extern void efx_nic_push_rx_indir_table(struct efx_nic *efx);
+extern void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
+extern void efx_farch_init_common(struct efx_nic *efx);
+extern void efx_ef10_handle_drain_event(struct efx_nic *efx);
+static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx)
+{
+ efx->type->rx_push_indir_table(efx);
+}
+extern void efx_farch_rx_push_indir_table(struct efx_nic *efx);
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
- unsigned int len);
+ unsigned int len, gfp_t gfp_flags);
void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);
/* Tests */
-struct efx_nic_register_test {
+struct efx_farch_register_test {
unsigned address;
efx_oword_t mask;
};
-extern int efx_nic_test_registers(struct efx_nic *efx,
- const struct efx_nic_register_test *regs,
- size_t n_regs);
+extern int efx_farch_test_registers(struct efx_nic *efx,
+ const struct efx_farch_register_test *regs,
+ size_t n_regs);
extern size_t efx_nic_get_regs_len(struct efx_nic *efx);
extern void efx_nic_get_regs(struct efx_nic *efx, void *buf);
-/**************************************************************************
- *
- * Falcon MAC stats
- *
- **************************************************************************
- */
+extern size_t
+efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask, u8 *names);
+extern void
+efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask,
+ u64 *stats, const void *dma_buf, bool accumulate);
+
+#define EFX_MAX_FLUSH_TIME 5000
-#define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset)
-#define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH)
-
-/* Retrieve statistic from statistics block */
-#define FALCON_STAT(efx, falcon_stat, efx_stat) do { \
- if (FALCON_STAT_WIDTH(falcon_stat) == 16) \
- (efx)->mac_stats.efx_stat += le16_to_cpu( \
- *((__force __le16 *) \
- (efx->stats_buffer.addr + \
- FALCON_STAT_OFFSET(falcon_stat)))); \
- else if (FALCON_STAT_WIDTH(falcon_stat) == 32) \
- (efx)->mac_stats.efx_stat += le32_to_cpu( \
- *((__force __le32 *) \
- (efx->stats_buffer.addr + \
- FALCON_STAT_OFFSET(falcon_stat)))); \
- else \
- (efx)->mac_stats.efx_stat += le64_to_cpu( \
- *((__force __le64 *) \
- (efx->stats_buffer.addr + \
- FALCON_STAT_OFFSET(falcon_stat)))); \
- } while (0)
-
-#define FALCON_MAC_STATS_SIZE 0x100
-
-#define MAC_DATA_LBN 0
-#define MAC_DATA_WIDTH 32
-
-extern void efx_generate_event(struct efx_nic *efx, unsigned int evq,
- efx_qword_t *event);
-
-extern void falcon_poll_xmac(struct efx_nic *efx);
+extern void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
+ efx_qword_t *event);
#endif /* EFX_NIC_H */
diff --git a/drivers/net/ethernet/sfc/phy.h b/drivers/net/ethernet/sfc/phy.h
index 11d148cd8441..45eeb7075156 100644
--- a/drivers/net/ethernet/sfc/phy.h
+++ b/drivers/net/ethernet/sfc/phy.h
@@ -1,5 +1,5 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2007-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
@@ -47,21 +47,4 @@ extern const struct efx_phy_operations falcon_txc_phy_ops;
extern void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
extern void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
-/****************************************************************************
- * Siena managed PHYs
- */
-extern const struct efx_phy_operations efx_mcdi_phy_ops;
-
-extern int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
- unsigned int prtad, unsigned int devad,
- u16 addr, u16 *value_out, u32 *status_out);
-extern int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus,
- unsigned int prtad, unsigned int devad,
- u16 addr, u16 value, u32 *status_out);
-extern void efx_mcdi_phy_decode_link(struct efx_nic *efx,
- struct efx_link_state *link_state,
- u32 speed, u32 flags, u32 fcntl);
-extern int efx_mcdi_phy_reconfigure(struct efx_nic *efx);
-extern void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa);
-
#endif
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index b495394a6dfa..03acf57df045 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2011 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2011-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -46,7 +46,7 @@
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "io.h"
-#include "regs.h"
+#include "farch_regs.h"
#include "nic.h"
/* Maximum number of events expected to make up a PTP event */
@@ -294,8 +294,7 @@ struct efx_ptp_data {
struct work_struct pps_work;
struct workqueue_struct *pps_workwq;
bool nic_ts_enabled;
- u8 txbuf[ALIGN(MC_CMD_PTP_IN_TRANSMIT_LEN(
- MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM), 4)];
+ MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX);
struct efx_ptp_timeset
timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM];
};
@@ -311,9 +310,10 @@ static int efx_phc_enable(struct ptp_clock_info *ptp,
/* Enable MCDI PTP support. */
static int efx_ptp_enable(struct efx_nic *efx)
{
- u8 inbuf[MC_CMD_PTP_IN_ENABLE_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN);
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE,
efx->ptp_data->channel->channel);
MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode);
@@ -329,9 +329,10 @@ static int efx_ptp_enable(struct efx_nic *efx)
*/
static int efx_ptp_disable(struct efx_nic *efx)
{
- u8 inbuf[MC_CMD_PTP_IN_DISABLE_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN);
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
NULL, 0, NULL);
}
@@ -389,14 +390,14 @@ static void efx_ptp_send_times(struct efx_nic *efx,
host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS |
now.ts_real.tv_nsec);
/* Update host time in NIC memory */
- _efx_writed(efx, cpu_to_le32(host_time),
- FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
+ efx->type->ptp_write_host_time(efx, host_time);
}
*last_time = now;
}
/* Read a timeset from the MC's results and partial process. */
-static void efx_ptp_read_timeset(u8 *data, struct efx_ptp_timeset *timeset)
+static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data),
+ struct efx_ptp_timeset *timeset)
{
unsigned start_ns, end_ns;
@@ -425,12 +426,14 @@ static void efx_ptp_read_timeset(u8 *data, struct efx_ptp_timeset *timeset)
* busy. A number of readings are taken so that, hopefully, at least one good
* synchronisation will be seen in the results.
*/
-static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
- size_t response_length,
- const struct pps_event_time *last_time)
+static int
+efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
+ size_t response_length,
+ const struct pps_event_time *last_time)
{
- unsigned number_readings = (response_length /
- MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN);
+ unsigned number_readings =
+ MCDI_VAR_ARRAY_LEN(response_length,
+ PTP_OUT_SYNCHRONIZE_TIMESET);
unsigned i;
unsigned total;
unsigned ngood = 0;
@@ -447,8 +450,10 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
* appear to be erroneous.
*/
for (i = 0; i < number_readings; i++) {
- efx_ptp_read_timeset(synch_buf, &ptp->timeset[i]);
- synch_buf += MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN;
+ efx_ptp_read_timeset(
+ MCDI_ARRAY_STRUCT_PTR(synch_buf,
+ PTP_OUT_SYNCHRONIZE_TIMESET, i),
+ &ptp->timeset[i]);
}
/* Find the last good host-MC synchronization result. The MC times
@@ -518,7 +523,7 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
{
struct efx_ptp_data *ptp = efx->ptp_data;
- u8 synch_buf[MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX];
+ MCDI_DECLARE_BUF(synch_buf, MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX);
size_t response_length;
int rc;
unsigned long timeout;
@@ -527,17 +532,17 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
int *start = ptp->start.addr;
MCDI_SET_DWORD(synch_buf, PTP_IN_OP, MC_CMD_PTP_OP_SYNCHRONIZE);
+ MCDI_SET_DWORD(synch_buf, PTP_IN_PERIPH_ID, 0);
MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_NUMTIMESETS,
num_readings);
- MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR_LO,
- (u32)ptp->start.dma_addr);
- MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR_HI,
- (u32)((u64)ptp->start.dma_addr >> 32));
+ MCDI_SET_QWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR,
+ ptp->start.dma_addr);
/* Clear flag that signals MC ready */
ACCESS_ONCE(*start) = 0;
- efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
- MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
+ rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
+ MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
+ EFX_BUG_ON_PARANOID(rc);
/* Wait for start from MCDI (or timeout) */
timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS);
@@ -564,15 +569,15 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
/* Transmit a PTP packet, via the MCDI interface, to the wire. */
static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
{
- u8 *txbuf = efx->ptp_data->txbuf;
+ struct efx_ptp_data *ptp_data = efx->ptp_data;
struct skb_shared_hwtstamps timestamps;
int rc = -EIO;
- /* MCDI driver requires word aligned lengths */
- size_t len = ALIGN(MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len), 4);
- u8 txtime[MC_CMD_PTP_OUT_TRANSMIT_LEN];
+ MCDI_DECLARE_BUF(txtime, MC_CMD_PTP_OUT_TRANSMIT_LEN);
+ size_t len;
- MCDI_SET_DWORD(txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT);
- MCDI_SET_DWORD(txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len);
+ MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT);
+ MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len);
if (skb_shinfo(skb)->nr_frags != 0) {
rc = skb_linearize(skb);
if (rc != 0)
@@ -585,10 +590,12 @@ static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
goto fail;
}
skb_copy_from_linear_data(skb,
- &txbuf[MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST],
- len);
- rc = efx_mcdi_rpc(efx, MC_CMD_PTP, txbuf, len, txtime,
- sizeof(txtime), &len);
+ MCDI_PTR(ptp_data->txbuf,
+ PTP_IN_TRANSMIT_PACKET),
+ skb->len);
+ rc = efx_mcdi_rpc(efx, MC_CMD_PTP,
+ ptp_data->txbuf, MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len),
+ txtime, sizeof(txtime), &len);
if (rc != 0)
goto fail;
@@ -872,7 +879,7 @@ static int efx_ptp_probe_channel(struct efx_channel *channel)
if (!efx->ptp_data)
return -ENOMEM;
- rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int));
+ rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int), GFP_KERNEL);
if (rc != 0)
goto fail1;
@@ -1359,7 +1366,7 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
struct efx_ptp_data,
phc_clock_info);
struct efx_nic *efx = ptp_data->channel->efx;
- u8 inadj[MC_CMD_PTP_IN_ADJUST_LEN];
+ MCDI_DECLARE_BUF(inadj, MC_CMD_PTP_IN_ADJUST_LEN);
s64 adjustment_ns;
int rc;
@@ -1373,9 +1380,8 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
(PPB_EXTRA_BITS + MAX_PPB_BITS));
MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
- MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_FREQ_LO, (u32)adjustment_ns);
- MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_FREQ_HI,
- (u32)(adjustment_ns >> 32));
+ MCDI_SET_DWORD(inadj, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_QWORD(inadj, PTP_IN_ADJUST_FREQ, adjustment_ns);
MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_SECONDS, 0);
MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_NANOSECONDS, 0);
rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inadj, sizeof(inadj),
@@ -1394,11 +1400,11 @@ static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
phc_clock_info);
struct efx_nic *efx = ptp_data->channel->efx;
struct timespec delta_ts = ns_to_timespec(delta);
- u8 inbuf[MC_CMD_PTP_IN_ADJUST_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ADJUST_LEN);
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
- MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_FREQ_LO, 0);
- MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_FREQ_HI, 0);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, 0);
MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec);
MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec);
return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
@@ -1411,11 +1417,12 @@ static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
struct efx_ptp_data,
phc_clock_info);
struct efx_nic *efx = ptp_data->channel->efx;
- u8 inbuf[MC_CMD_PTP_IN_READ_NIC_TIME_LEN];
- u8 outbuf[MC_CMD_PTP_OUT_READ_NIC_TIME_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_READ_NIC_TIME_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_READ_NIC_TIME_LEN);
int rc;
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), NULL);
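
The recurring change through ptp.c above -- raw `u8 inbuf[...]` arrays becoming
MCDI_DECLARE_BUF(), and open-coded _LO/_HI dword pairs collapsing into a single
MCDI_SET_QWORD() -- is easier to follow with a sketch of what such helpers can
look like. This is a hypothetical simplification, not the driver's actual
mcdi.h; the real macros derive offsets from field names rather than taking
them as arguments:

	/* Sketch: type the request buffer as little-endian dwords so
	 * every field store is 4-byte aligned and endian-safe.
	 */
	#include <linux/kernel.h>

	typedef struct { __le32 u32[1]; } efx_dword_t;

	#define MCDI_DECLARE_BUF(_name, _len)				\
		efx_dword_t _name[DIV_ROUND_UP(_len, 4)]

	/* Store a 32-bit field at a byte offset (the real macro takes
	 * a field name, e.g. MC_CMD_PTP_IN_OP, and looks up its OFST). */
	#define MCDI_SET_DWORD_OFST(_buf, _ofst, _value)		\
		((_buf)[(_ofst) / 4].u32[0] = cpu_to_le32(_value))

	/* A 64-bit field is two adjacent dwords, replacing the
	 * _LO/_HI pairs visible in the old code above. */
	#define MCDI_SET_QWORD_OFST(_buf, _ofst, _value)	do {	\
		MCDI_SET_DWORD_OFST(_buf, (_ofst),			\
				    (u64)(_value) & 0xffffffff);	\
		MCDI_SET_DWORD_OFST(_buf, (_ofst) + 4,			\
				    (u64)(_value) >> 32);		\
	} while (0)

Note also that every PTP request now sets PTP_IN_PERIPH_ID to 0; the field is
harmless on Siena and presumably added in preparation for newer multi-function
hardware.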
diff --git a/drivers/net/ethernet/sfc/qt202x_phy.c b/drivers/net/ethernet/sfc/qt202x_phy.c
index 326a28637f3c..efa3612affca 100644
--- a/drivers/net/ethernet/sfc/qt202x_phy.c
+++ b/drivers/net/ethernet/sfc/qt202x_phy.c
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2006-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 6af9cfda50fb..4a596725023f 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2011 Solarflare Communications Inc.
+ * Copyright 2005-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -21,6 +21,7 @@
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
+#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"
@@ -60,13 +61,12 @@ static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
return page_address(buf->page) + buf->page_offset;
}
-static inline u32 efx_rx_buf_hash(const u8 *eh)
+static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
{
- /* The ethernet header is always directly after any hash. */
-#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
- return __le32_to_cpup((const __le32 *)(eh - 4));
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
- const u8 *data = eh - 4;
+ const u8 *data = eh + efx->rx_packet_hash_offset;
return (u32)data[0] |
(u32)data[1] << 8 |
(u32)data[2] << 16 |
@@ -326,6 +326,9 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
unsigned int fill_level, batch_size;
int space, rc = 0;
+ if (!rx_queue->refill_enabled)
+ return;
+
/* Calculate current fill level, and exit if we don't need to fill */
fill_level = (rx_queue->added_count - rx_queue->removed_count);
EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
@@ -435,7 +438,7 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
}
if (efx->net_dev->features & NETIF_F_RXHASH)
- skb->rxhash = efx_rx_buf_hash(eh);
+ skb->rxhash = efx_rx_buf_hash(efx, eh);
skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
@@ -523,10 +526,11 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
/* Validate the number of fragments and completed length */
if (n_frags == 1) {
- efx_rx_packet__check_len(rx_queue, rx_buf, len);
+ if (!(flags & EFX_RX_PKT_PREFIX_LEN))
+ efx_rx_packet__check_len(rx_queue, rx_buf, len);
} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
- unlikely(len <= (n_frags - 1) * EFX_RX_USR_BUF_SIZE) ||
- unlikely(len > n_frags * EFX_RX_USR_BUF_SIZE) ||
+ unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
+ unlikely(len > n_frags * efx->rx_dma_len) ||
unlikely(!efx->rx_scatter)) {
/* If this isn't an explicit discard request, either
* the hardware or the driver is broken.
@@ -551,7 +555,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
return;
}
- if (n_frags == 1)
+ if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
rx_buf->len = len;
/* Release and/or sync the DMA mapping - assumes all RX buffers
@@ -564,8 +568,8 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
*/
prefetch(efx_rx_buf_va(rx_buf));
- rx_buf->page_offset += efx->type->rx_buffer_hash_size;
- rx_buf->len -= efx->type->rx_buffer_hash_size;
+ rx_buf->page_offset += efx->rx_prefix_size;
+ rx_buf->len -= efx->rx_prefix_size;
if (n_frags > 1) {
/* Release/sync DMA mapping for additional fragments.
@@ -577,9 +581,9 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
if (--tail_frags == 0)
break;
- efx_sync_rx_buffer(efx, rx_buf, EFX_RX_USR_BUF_SIZE);
+ efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
}
- rx_buf->len = len - (n_frags - 1) * EFX_RX_USR_BUF_SIZE;
+ rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
}
@@ -630,6 +634,13 @@ void __efx_rx_packet(struct efx_channel *channel)
efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
u8 *eh = efx_rx_buf_va(rx_buf);
+ /* Read length from the prefix if necessary. This already
+ * excludes the length of the prefix itself.
+ */
+ if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
+ rx_buf->len = le16_to_cpup((__le16 *)
+ (eh + efx->rx_packet_len_offset));
+
/* If we're in loopback test, then pass the packet directly to the
* loopback layer, and free the rx_buf here
*/
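
The rx.c hunks replace compile-time knowledge of the RX prefix
(rx_buffer_hash_size, EFX_RX_USR_BUF_SIZE) with per-NIC fields
(efx->rx_prefix_size, rx_packet_hash_offset, rx_packet_len_offset,
efx->rx_dma_len). A minimal sketch of the resulting prefix parsing follows;
the helper names are illustrative, not the driver's, and the offsets are
relative to the Ethernet header and may be negative, since the prefix
precedes it (the old code hard-coded "eh - 4" for the hash):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	static inline u32 rx_prefix_hash(const u8 *eh, int hash_offset)
	{
		/* e.g. hash_offset == -4 on Falcon-architecture NICs */
		return le32_to_cpup((const __le32 *)(eh + hash_offset));
	}

	static inline u16 rx_prefix_len(const u8 *eh, int len_offset)
	{
		/* Excludes the prefix itself, as the comment above notes */
		return le16_to_cpup((const __le16 *)(eh + len_offset));
	}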
@@ -738,9 +749,9 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
rx_queue->max_fill = max_fill;
rx_queue->fast_fill_trigger = trigger;
+ rx_queue->refill_enabled = true;
/* Set up RX descriptor ring */
- rx_queue->enabled = true;
efx_nic_init_rx(rx_queue);
}
@@ -753,11 +764,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
"shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
- /* A flush failure might have left rx_queue->enabled */
- rx_queue->enabled = false;
-
del_timer_sync(&rx_queue->slow_fill);
- efx_nic_fini_rx(rx_queue);
/* Release RX buffers from the current read ptr to the write ptr */
if (rx_queue->buffer) {
@@ -803,3 +810,130 @@ module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
"RX descriptor ring refill threshold (%)");
+#ifdef CONFIG_RFS_ACCEL
+
+int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_channel *channel;
+ struct efx_filter_spec spec;
+ const struct iphdr *ip;
+ const __be16 *ports;
+ int nhoff;
+ int rc;
+
+ nhoff = skb_network_offset(skb);
+
+ if (skb->protocol == htons(ETH_P_8021Q)) {
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) <
+ nhoff + sizeof(struct vlan_hdr));
+	if (((const struct vlan_hdr *)(skb->data + nhoff))->
+	    h_vlan_encapsulated_proto != htons(ETH_P_IP))
+ return -EPROTONOSUPPORT;
+
+ /* This is IP over 802.1q VLAN. We can't filter on the
+ * IP 5-tuple and the vlan together, so just strip the
+ * vlan header and filter on the IP part.
+ */
+ nhoff += sizeof(struct vlan_hdr);
+ } else if (skb->protocol != htons(ETH_P_IP)) {
+ return -EPROTONOSUPPORT;
+ }
+
+ /* RFS must validate the IP header length before calling us */
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
+ ip = (const struct iphdr *)(skb->data + nhoff);
+ if (ip_is_fragment(ip))
+ return -EPROTONOSUPPORT;
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
+ ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
+ efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
+ rxq_index);
+ rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
+ ip->daddr, ports[1], ip->saddr, ports[0]);
+ if (rc)
+ return rc;
+
+ rc = efx->type->filter_rfs_insert(efx, &spec);
+ if (rc < 0)
+ return rc;
+
+ /* Remember this so we can check whether to expire the filter later */
+ efx->rps_flow_id[rc] = flow_id;
+ channel = efx_get_channel(efx, skb_get_rx_queue(skb));
+ ++channel->rfs_filters_added;
+
+ netif_info(efx, rx_status, efx->net_dev,
+ "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
+ (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
+ &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
+ rxq_index, flow_id, rc);
+
+ return rc;
+}
+
+bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
+{
+ bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
+ unsigned int index, size;
+ u32 flow_id;
+
+ if (!spin_trylock_bh(&efx->filter_lock))
+ return false;
+
+ expire_one = efx->type->filter_rfs_expire_one;
+ index = efx->rps_expire_index;
+ size = efx->type->max_rx_ip_filters;
+ while (quota--) {
+ flow_id = efx->rps_flow_id[index];
+ if (expire_one(efx, flow_id, index))
+ netif_info(efx, rx_status, efx->net_dev,
+ "expired filter %d [flow %u]\n",
+ index, flow_id);
+ if (++index == size)
+ index = 0;
+ }
+ efx->rps_expire_index = index;
+
+ spin_unlock_bh(&efx->filter_lock);
+ return true;
+}
+
+#endif /* CONFIG_RFS_ACCEL */
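
efx_filter_rfs() deliberately matches the ndo_rx_flow_steer prototype, and
__efx_filter_rfs_expire() takes a small quota so that stale steering filters
age out a few at a time from a polling context. A sketch of the wiring
(quota value and sketch names are assumptions, not the driver's):

	#include <linux/netdevice.h>

	#ifdef CONFIG_RFS_ACCEL
	static const struct net_device_ops efx_netdev_ops_sketch = {
		/* ... ndo_open, ndo_stop, ndo_start_xmit, ... */
		.ndo_rx_flow_steer	= efx_filter_rfs,
	};

	/* Called periodically, e.g. from NAPI poll; the quota of 100
	 * is illustrative, not the driver's actual pacing. */
	static void efx_rfs_age_sketch(struct efx_nic *efx)
	{
		__efx_filter_rfs_expire(efx, 100);
	}
	#endif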
+
+/**
+ * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
+ * @spec: Specification to test
+ *
+ * Return: %true if the specification is a non-drop RX filter that
+ * matches a local MAC address I/G bit value of 1 or matches a local
+ * IPv4 or IPv6 address value in the respective multicast address
+ * range. Otherwise %false.
+ */
+bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
+{
+ if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
+ spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
+ return false;
+
+ if (spec->match_flags &
+ (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
+ is_multicast_ether_addr(spec->loc_mac))
+ return true;
+
+ if ((spec->match_flags &
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
+ if (spec->ether_type == htons(ETH_P_IP) &&
+ ipv4_is_multicast(spec->loc_host[0]))
+ return true;
+ if (spec->ether_type == htons(ETH_P_IPV6) &&
+ ((const u8 *)spec->loc_host)[0] == 0xff)
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 2069f51b2aa9..144bbff5a4ae 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -447,14 +447,7 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
static int efx_poll_loopback(struct efx_nic *efx)
{
struct efx_loopback_state *state = efx->loopback_selftest;
- struct efx_channel *channel;
- /* NAPI polling is not enabled, so process channels
- * synchronously */
- efx_for_each_channel(channel, efx) {
- if (channel->work_pending)
- efx_process_channel_now(channel);
- }
return atomic_read(&state->rx_good) == state->packet_count;
}
@@ -586,10 +579,6 @@ static int efx_wait_for_link(struct efx_nic *efx)
mutex_lock(&efx->mac_lock);
efx->type->monitor(efx);
mutex_unlock(&efx->mac_lock);
- } else {
- struct efx_channel *channel = efx_get_channel(efx, 0);
- if (channel->work_pending)
- efx_process_channel_now(channel);
}
mutex_lock(&efx->mac_lock);
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index aed24b736059..87698ae0bf75 100644
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 8c91775e3c5f..d034bcd124ef 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -18,8 +18,7 @@
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
-#include "spi.h"
-#include "regs.h"
+#include "farch_regs.h"
#include "io.h"
#include "phy.h"
#include "workarounds.h"
@@ -30,7 +29,6 @@
/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
static void siena_init_wol(struct efx_nic *efx);
-static int siena_reset_hw(struct efx_nic *efx, enum reset_type method);
static void siena_push_irq_moderation(struct efx_channel *channel)
@@ -52,81 +50,6 @@ static void siena_push_irq_moderation(struct efx_channel *channel)
channel->channel);
}
-static int siena_mdio_write(struct net_device *net_dev,
- int prtad, int devad, u16 addr, u16 value)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- uint32_t status;
- int rc;
-
- rc = efx_mcdi_mdio_write(efx, efx->mdio_bus, prtad, devad,
- addr, value, &status);
- if (rc)
- return rc;
- if (status != MC_CMD_MDIO_STATUS_GOOD)
- return -EIO;
-
- return 0;
-}
-
-static int siena_mdio_read(struct net_device *net_dev,
- int prtad, int devad, u16 addr)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- uint16_t value;
- uint32_t status;
- int rc;
-
- rc = efx_mcdi_mdio_read(efx, efx->mdio_bus, prtad, devad,
- addr, &value, &status);
- if (rc)
- return rc;
- if (status != MC_CMD_MDIO_STATUS_GOOD)
- return -EIO;
-
- return (int)value;
-}
-
-/* This call is responsible for hooking in the MAC and PHY operations */
-static int siena_probe_port(struct efx_nic *efx)
-{
- int rc;
-
- /* Hook in PHY operations table */
- efx->phy_op = &efx_mcdi_phy_ops;
-
- /* Set up MDIO structure for PHY */
- efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
- efx->mdio.mdio_read = siena_mdio_read;
- efx->mdio.mdio_write = siena_mdio_write;
-
- /* Fill out MDIO structure, loopback modes, and initial link state */
- rc = efx->phy_op->probe(efx);
- if (rc != 0)
- return rc;
-
- /* Allocate buffer for stats */
- rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
- MC_CMD_MAC_NSTATS * sizeof(u64));
- if (rc)
- return rc;
- netif_dbg(efx, probe, efx->net_dev,
- "stats buffer at %llx (virt %p phys %llx)\n",
- (u64)efx->stats_buffer.dma_addr,
- efx->stats_buffer.addr,
- (u64)virt_to_phys(efx->stats_buffer.addr));
-
- efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);
-
- return 0;
-}
-
-static void siena_remove_port(struct efx_nic *efx)
-{
- efx->phy_op->remove(efx);
- efx_nic_free_buffer(efx, &efx->stats_buffer);
-}
-
void siena_prepare_flush(struct efx_nic *efx)
{
if (efx->fc_disable++ == 0)
@@ -139,7 +62,7 @@ void siena_finish_flush(struct efx_nic *efx)
efx_mcdi_set_mac(efx);
}
-static const struct efx_nic_register_test siena_register_tests[] = {
+static const struct efx_farch_register_test siena_register_tests[] = {
{ FR_AZ_ADR_REGION,
EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
{ FR_CZ_USR_EV_CFG,
@@ -178,16 +101,16 @@ static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
/* Reset the chip immediately so that it is completely
* quiescent regardless of what any VF driver does.
*/
- rc = siena_reset_hw(efx, reset_method);
+ rc = efx_mcdi_reset(efx, reset_method);
if (rc)
goto out;
tests->registers =
- efx_nic_test_registers(efx, siena_register_tests,
- ARRAY_SIZE(siena_register_tests))
+ efx_farch_test_registers(efx, siena_register_tests,
+ ARRAY_SIZE(siena_register_tests))
? -1 : 1;
- rc = siena_reset_hw(efx, reset_method);
+ rc = efx_mcdi_reset(efx, reset_method);
out:
rc2 = efx_reset_up(efx, reset_method, rc == 0);
return rc ? rc : rc2;
@@ -200,11 +123,6 @@ out:
**************************************************************************
*/
-static enum reset_type siena_map_reset_reason(enum reset_type reason)
-{
- return RESET_TYPE_RECOVER_OR_ALL;
-}
-
static int siena_map_reset_flags(u32 *flags)
{
enum {
@@ -230,21 +148,6 @@ static int siena_map_reset_flags(u32 *flags)
return -EINVAL;
}
-static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
-{
- int rc;
-
- /* Recover from a failed assertion pre-reset */
- rc = efx_mcdi_handle_assertion(efx);
- if (rc)
- return rc;
-
- if (method == RESET_TYPE_WORLD)
- return efx_mcdi_reset_mc(efx);
- else
- return efx_mcdi_reset_port(efx);
-}
-
#ifdef CONFIG_EEH
/* When a PCI device is isolated from the bus, a subsequent MMIO read is
* required for the kernel EEH mechanisms to notice. As the Solarflare driver
@@ -274,19 +177,25 @@ static int siena_probe_nvconfig(struct efx_nic *efx)
return rc;
}
-static void siena_dimension_resources(struct efx_nic *efx)
+static int siena_dimension_resources(struct efx_nic *efx)
{
/* Each port has a small block of internal SRAM dedicated to
* the buffer table and descriptor caches. In theory we can
* map both blocks to one port, but we don't.
*/
- efx_nic_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2);
+ efx_farch_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2);
+ return 0;
+}
+
+static unsigned int siena_mem_map_size(struct efx_nic *efx)
+{
+ return FR_CZ_MC_TREG_SMEM +
+ FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS;
}
static int siena_probe_nic(struct efx_nic *efx)
{
struct siena_nic_data *nic_data;
- bool already_attached = false;
efx_oword_t reg;
int rc;
@@ -296,38 +205,24 @@ static int siena_probe_nic(struct efx_nic *efx)
return -ENOMEM;
efx->nic_data = nic_data;
- if (efx_nic_fpga_ver(efx) != 0) {
+ if (efx_farch_fpga_ver(efx) != 0) {
netif_err(efx, probe, efx->net_dev,
"Siena FPGA not supported\n");
rc = -ENODEV;
goto fail1;
}
+ efx->max_channels = EFX_MAX_CHANNELS;
+
efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
- efx_mcdi_init(efx);
-
- /* Recover from a failed assertion before probing */
- rc = efx_mcdi_handle_assertion(efx);
+ rc = efx_mcdi_init(efx);
if (rc)
goto fail1;
- /* Let the BMC know that the driver is now in charge of link and
- * filter settings. We must do this before we reset the NIC */
- rc = efx_mcdi_drv_attach(efx, true, &already_attached);
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "Unable to register driver with MCPU\n");
- goto fail2;
- }
- if (already_attached)
- /* Not a fatal error */
- netif_err(efx, probe, efx->net_dev,
- "Host already registered with MCPU\n");
-
/* Now we can reset the NIC */
- rc = siena_reset_hw(efx, RESET_TYPE_ALL);
+ rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
if (rc) {
netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
goto fail3;
@@ -336,7 +231,8 @@ static int siena_probe_nic(struct efx_nic *efx)
siena_init_wol(efx);
/* Allocate memory for INT_KER */
- rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
+ rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
+ GFP_KERNEL);
if (rc)
goto fail4;
BUG_ON(efx->irq_status.dma_addr & 0x0f);
@@ -371,8 +267,7 @@ fail5:
efx_nic_free_buffer(efx, &efx->irq_status);
fail4:
fail3:
- efx_mcdi_drv_attach(efx, false, NULL);
-fail2:
+ efx_mcdi_fini(efx);
fail1:
kfree(efx->nic_data);
return rc;
@@ -448,7 +343,7 @@ static int siena_init_nic(struct efx_nic *efx)
EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1);
efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG);
- efx_nic_init_common(efx);
+ efx_farch_init_common(efx);
return 0;
}
@@ -458,144 +353,192 @@ static void siena_remove_nic(struct efx_nic *efx)
efx_nic_free_buffer(efx, &efx->irq_status);
- siena_reset_hw(efx, RESET_TYPE_ALL);
+ efx_mcdi_reset(efx, RESET_TYPE_ALL);
- /* Relinquish the device back to the BMC */
- efx_mcdi_drv_attach(efx, false, NULL);
+ efx_mcdi_fini(efx);
/* Tear down the private nic state */
kfree(efx->nic_data);
efx->nic_data = NULL;
}
-#define STATS_GENERATION_INVALID ((__force __le64)(-1))
+#define SIENA_DMA_STAT(ext_name, mcdi_name) \
+ [SIENA_STAT_ ## ext_name] = \
+ { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
+#define SIENA_OTHER_STAT(ext_name) \
+ [SIENA_STAT_ ## ext_name] = { #ext_name, 0, 0 }
+
+static const struct efx_hw_stat_desc siena_stat_desc[SIENA_STAT_COUNT] = {
+ SIENA_DMA_STAT(tx_bytes, TX_BYTES),
+ SIENA_OTHER_STAT(tx_good_bytes),
+ SIENA_DMA_STAT(tx_bad_bytes, TX_BAD_BYTES),
+ SIENA_DMA_STAT(tx_packets, TX_PKTS),
+ SIENA_DMA_STAT(tx_bad, TX_BAD_FCS_PKTS),
+ SIENA_DMA_STAT(tx_pause, TX_PAUSE_PKTS),
+ SIENA_DMA_STAT(tx_control, TX_CONTROL_PKTS),
+ SIENA_DMA_STAT(tx_unicast, TX_UNICAST_PKTS),
+ SIENA_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS),
+ SIENA_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS),
+ SIENA_DMA_STAT(tx_lt64, TX_LT64_PKTS),
+ SIENA_DMA_STAT(tx_64, TX_64_PKTS),
+ SIENA_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS),
+ SIENA_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS),
+ SIENA_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS),
+ SIENA_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS),
+ SIENA_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
+ SIENA_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
+ SIENA_DMA_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS),
+ SIENA_OTHER_STAT(tx_collision),
+ SIENA_DMA_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS),
+ SIENA_DMA_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS),
+ SIENA_DMA_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS),
+ SIENA_DMA_STAT(tx_deferred, TX_DEFERRED_PKTS),
+ SIENA_DMA_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS),
+ SIENA_DMA_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS),
+ SIENA_DMA_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS),
+ SIENA_DMA_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS),
+ SIENA_DMA_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS),
+ SIENA_DMA_STAT(rx_bytes, RX_BYTES),
+ SIENA_OTHER_STAT(rx_good_bytes),
+ SIENA_DMA_STAT(rx_bad_bytes, RX_BAD_BYTES),
+ SIENA_DMA_STAT(rx_packets, RX_PKTS),
+ SIENA_DMA_STAT(rx_good, RX_GOOD_PKTS),
+ SIENA_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS),
+ SIENA_DMA_STAT(rx_pause, RX_PAUSE_PKTS),
+ SIENA_DMA_STAT(rx_control, RX_CONTROL_PKTS),
+ SIENA_DMA_STAT(rx_unicast, RX_UNICAST_PKTS),
+ SIENA_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS),
+ SIENA_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS),
+ SIENA_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS),
+ SIENA_DMA_STAT(rx_64, RX_64_PKTS),
+ SIENA_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
+ SIENA_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
+ SIENA_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
+ SIENA_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
+ SIENA_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
+ SIENA_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
+ SIENA_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
+ SIENA_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
+ SIENA_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
+ SIENA_DMA_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS),
+ SIENA_DMA_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS),
+ SIENA_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
+ SIENA_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
+ SIENA_DMA_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS),
+ SIENA_DMA_STAT(rx_nodesc_drop_cnt, RX_NODESC_DROPS),
+};
+static const unsigned long siena_stat_mask[] = {
+ [0 ... BITS_TO_LONGS(SIENA_STAT_COUNT) - 1] = ~0UL,
+};
+
+static size_t siena_describe_nic_stats(struct efx_nic *efx, u8 *names)
+{
+ return efx_nic_describe_stats(siena_stat_desc, SIENA_STAT_COUNT,
+ siena_stat_mask, names);
+}
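
The stat descriptors are pure data: each SIENA_DMA_STAT() entry records a
name, the width in bits of the field the MC DMAs, and its byte offset in the
stats buffer, while SIENA_OTHER_STAT() marks a value the driver derives
itself. Expanding two entries by hand (assuming efx_hw_stat_desc carries
{ name, dma_width, offset }):

	static const struct efx_hw_stat_desc expansion_example[] = {
		/* SIENA_DMA_STAT(tx_bytes, TX_BYTES) becomes: */
		[SIENA_STAT_tx_bytes] =
			{ "tx_bytes", 64, 8 * MC_CMD_MAC_TX_BYTES },
		/* SIENA_OTHER_STAT(tx_good_bytes): dma_width 0 means
		 * not DMA'd; computed in siena_try_update_nic_stats() */
		[SIENA_STAT_tx_good_bytes] = { "tx_good_bytes", 0, 0 },
	};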
static int siena_try_update_nic_stats(struct efx_nic *efx)
{
+ struct siena_nic_data *nic_data = efx->nic_data;
+ u64 *stats = nic_data->stats;
__le64 *dma_stats;
- struct efx_mac_stats *mac_stats;
__le64 generation_start, generation_end;
- mac_stats = &efx->mac_stats;
dma_stats = efx->stats_buffer.addr;
generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
- if (generation_end == STATS_GENERATION_INVALID)
+ if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
return 0;
rmb();
-
-#define MAC_STAT(M, D) \
- mac_stats->M = le64_to_cpu(dma_stats[MC_CMD_MAC_ ## D])
-
- MAC_STAT(tx_bytes, TX_BYTES);
- MAC_STAT(tx_bad_bytes, TX_BAD_BYTES);
- efx_update_diff_stat(&mac_stats->tx_good_bytes,
- mac_stats->tx_bytes - mac_stats->tx_bad_bytes);
- MAC_STAT(tx_packets, TX_PKTS);
- MAC_STAT(tx_bad, TX_BAD_FCS_PKTS);
- MAC_STAT(tx_pause, TX_PAUSE_PKTS);
- MAC_STAT(tx_control, TX_CONTROL_PKTS);
- MAC_STAT(tx_unicast, TX_UNICAST_PKTS);
- MAC_STAT(tx_multicast, TX_MULTICAST_PKTS);
- MAC_STAT(tx_broadcast, TX_BROADCAST_PKTS);
- MAC_STAT(tx_lt64, TX_LT64_PKTS);
- MAC_STAT(tx_64, TX_64_PKTS);
- MAC_STAT(tx_65_to_127, TX_65_TO_127_PKTS);
- MAC_STAT(tx_128_to_255, TX_128_TO_255_PKTS);
- MAC_STAT(tx_256_to_511, TX_256_TO_511_PKTS);
- MAC_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS);
- MAC_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS);
- MAC_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS);
- MAC_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS);
- mac_stats->tx_collision = 0;
- MAC_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS);
- MAC_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS);
- MAC_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS);
- MAC_STAT(tx_deferred, TX_DEFERRED_PKTS);
- MAC_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS);
- mac_stats->tx_collision = (mac_stats->tx_single_collision +
- mac_stats->tx_multiple_collision +
- mac_stats->tx_excessive_collision +
- mac_stats->tx_late_collision);
- MAC_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS);
- MAC_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS);
- MAC_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS);
- MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS);
- MAC_STAT(rx_bytes, RX_BYTES);
- MAC_STAT(rx_bad_bytes, RX_BAD_BYTES);
- efx_update_diff_stat(&mac_stats->rx_good_bytes,
- mac_stats->rx_bytes - mac_stats->rx_bad_bytes);
- MAC_STAT(rx_packets, RX_PKTS);
- MAC_STAT(rx_good, RX_GOOD_PKTS);
- MAC_STAT(rx_bad, RX_BAD_FCS_PKTS);
- MAC_STAT(rx_pause, RX_PAUSE_PKTS);
- MAC_STAT(rx_control, RX_CONTROL_PKTS);
- MAC_STAT(rx_unicast, RX_UNICAST_PKTS);
- MAC_STAT(rx_multicast, RX_MULTICAST_PKTS);
- MAC_STAT(rx_broadcast, RX_BROADCAST_PKTS);
- MAC_STAT(rx_lt64, RX_UNDERSIZE_PKTS);
- MAC_STAT(rx_64, RX_64_PKTS);
- MAC_STAT(rx_65_to_127, RX_65_TO_127_PKTS);
- MAC_STAT(rx_128_to_255, RX_128_TO_255_PKTS);
- MAC_STAT(rx_256_to_511, RX_256_TO_511_PKTS);
- MAC_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS);
- MAC_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS);
- MAC_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS);
- MAC_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS);
- mac_stats->rx_bad_lt64 = 0;
- mac_stats->rx_bad_64_to_15xx = 0;
- mac_stats->rx_bad_15xx_to_jumbo = 0;
- MAC_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS);
- MAC_STAT(rx_overflow, RX_OVERFLOW_PKTS);
- mac_stats->rx_missed = 0;
- MAC_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS);
- MAC_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS);
- MAC_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS);
- MAC_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS);
- MAC_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS);
- mac_stats->rx_good_lt64 = 0;
-
- efx->n_rx_nodesc_drop_cnt =
- le64_to_cpu(dma_stats[MC_CMD_MAC_RX_NODESC_DROPS]);
-
-#undef MAC_STAT
-
+ efx_nic_update_stats(siena_stat_desc, SIENA_STAT_COUNT, siena_stat_mask,
+ stats, efx->stats_buffer.addr, false);
rmb();
generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
if (generation_end != generation_start)
return -EAGAIN;
+ /* Update derived statistics */
+ efx_update_diff_stat(&stats[SIENA_STAT_tx_good_bytes],
+ stats[SIENA_STAT_tx_bytes] -
+ stats[SIENA_STAT_tx_bad_bytes]);
+ stats[SIENA_STAT_tx_collision] =
+ stats[SIENA_STAT_tx_single_collision] +
+ stats[SIENA_STAT_tx_multiple_collision] +
+ stats[SIENA_STAT_tx_excessive_collision] +
+ stats[SIENA_STAT_tx_late_collision];
+ efx_update_diff_stat(&stats[SIENA_STAT_rx_good_bytes],
+ stats[SIENA_STAT_rx_bytes] -
+ stats[SIENA_STAT_rx_bad_bytes]);
return 0;
}
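
The generation check above implements a lock-free snapshot: the MC is
expected to bump GENERATION_START, DMA the statistics, then write a matching
GENERATION_END, so the driver reads the two markers in the reverse order
around its copy and retries on mismatch. Reduced to its essentials, as a
sketch under that assumption:

	static int stats_snapshot_sketch(const __le64 *dma_stats,
					 u64 *out, unsigned int count)
	{
		__le64 gen_end, gen_start;
		unsigned int i;

		gen_end = dma_stats[MC_CMD_MAC_GENERATION_END];
		if (gen_end == EFX_MC_STATS_GENERATION_INVALID)
			return 0;	/* MC has never written stats */
		rmb();			/* read stats after gen_end */
		for (i = 0; i < count; i++)
			out[i] = le64_to_cpu(dma_stats[i]);
		rmb();			/* read gen_start after stats */
		gen_start = dma_stats[MC_CMD_MAC_GENERATION_START];
		return gen_end == gen_start ? 0 : -EAGAIN;
	}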
-static void siena_update_nic_stats(struct efx_nic *efx)
+static size_t siena_update_nic_stats(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
{
+ struct siena_nic_data *nic_data = efx->nic_data;
+ u64 *stats = nic_data->stats;
int retry;
/* If we're unlucky enough to read statistics during the DMA, wait
* up to 10ms for it to finish (typically takes <500us) */
for (retry = 0; retry < 100; ++retry) {
if (siena_try_update_nic_stats(efx) == 0)
- return;
+ break;
udelay(100);
}
- /* Use the old values instead */
+ if (full_stats)
+ memcpy(full_stats, stats, sizeof(u64) * SIENA_STAT_COUNT);
+
+ if (core_stats) {
+ core_stats->rx_packets = stats[SIENA_STAT_rx_packets];
+ core_stats->tx_packets = stats[SIENA_STAT_tx_packets];
+ core_stats->rx_bytes = stats[SIENA_STAT_rx_bytes];
+ core_stats->tx_bytes = stats[SIENA_STAT_tx_bytes];
+ core_stats->rx_dropped = stats[SIENA_STAT_rx_nodesc_drop_cnt];
+ core_stats->multicast = stats[SIENA_STAT_rx_multicast];
+ core_stats->collisions = stats[SIENA_STAT_tx_collision];
+ core_stats->rx_length_errors =
+ stats[SIENA_STAT_rx_gtjumbo] +
+ stats[SIENA_STAT_rx_length_error];
+ core_stats->rx_crc_errors = stats[SIENA_STAT_rx_bad];
+ core_stats->rx_frame_errors = stats[SIENA_STAT_rx_align_error];
+ core_stats->rx_fifo_errors = stats[SIENA_STAT_rx_overflow];
+ core_stats->tx_window_errors =
+ stats[SIENA_STAT_tx_late_collision];
+
+ core_stats->rx_errors = (core_stats->rx_length_errors +
+ core_stats->rx_crc_errors +
+ core_stats->rx_frame_errors +
+ stats[SIENA_STAT_rx_symbol_error]);
+ core_stats->tx_errors = (core_stats->tx_window_errors +
+ stats[SIENA_STAT_tx_bad]);
+ }
+
+ return SIENA_STAT_COUNT;
}
-static void siena_start_nic_stats(struct efx_nic *efx)
+static int siena_mac_reconfigure(struct efx_nic *efx)
{
- __le64 *dma_stats = efx->stats_buffer.addr;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MCAST_HASH_IN_LEN);
+ int rc;
- dma_stats[MC_CMD_MAC_GENERATION_END] = STATS_GENERATION_INVALID;
+ BUILD_BUG_ON(MC_CMD_SET_MCAST_HASH_IN_LEN !=
+ MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST +
+ sizeof(efx->multicast_hash));
- efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr,
- MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0);
-}
+ efx_farch_filter_sync_rx_mode(efx);
-static void siena_stop_nic_stats(struct efx_nic *efx)
-{
- efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+ rc = efx_mcdi_set_mac(efx);
+ if (rc != 0)
+ return rc;
+
+ memcpy(MCDI_PTR(inbuf, SET_MCAST_HASH_IN_HASH0),
+ efx->multicast_hash.byte, sizeof(efx->multicast_hash));
+ return efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
}
/**************************************************************************
@@ -669,6 +612,241 @@ static void siena_init_wol(struct efx_nic *efx)
}
}
+/**************************************************************************
+ *
+ * MCDI
+ *
+ **************************************************************************
+ */
+
+#define MCDI_PDU(efx) \
+ (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST)
+#define MCDI_DOORBELL(efx) \
+ (efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST)
+#define MCDI_STATUS(efx) \
+ (efx_port_num(efx) ? MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST)
+
+static void siena_mcdi_request(struct efx_nic *efx,
+ const efx_dword_t *hdr, size_t hdr_len,
+ const efx_dword_t *sdu, size_t sdu_len)
+{
+ unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+ unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
+ unsigned int i;
+ unsigned int inlen_dw = DIV_ROUND_UP(sdu_len, 4);
+
+ EFX_BUG_ON_PARANOID(hdr_len != 4);
+
+ efx_writed(efx, hdr, pdu);
+
+ for (i = 0; i < inlen_dw; i++)
+ efx_writed(efx, &sdu[i], pdu + hdr_len + 4 * i);
+
+ /* Ensure the request is written out before the doorbell */
+ wmb();
+
+ /* ring the doorbell with a distinctive value */
+ _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
+}
+
+static bool siena_mcdi_poll_response(struct efx_nic *efx)
+{
+ unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+ efx_dword_t hdr;
+
+ efx_readd(efx, &hdr, pdu);
+
+ /* All 1's indicates that shared memory is in reset (and is
+	 * not a valid hdr). Wait for it to come out of reset before
+	 * completing the command.
+ */
+ return EFX_DWORD_FIELD(hdr, EFX_DWORD_0) != 0xffffffff &&
+ EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
+}
+
+static void siena_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
+ size_t offset, size_t outlen)
+{
+ unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+ unsigned int outlen_dw = DIV_ROUND_UP(outlen, 4);
+ int i;
+
+ for (i = 0; i < outlen_dw; i++)
+ efx_readd(efx, &outbuf[i], pdu + offset + 4 * i);
+}
+
+static int siena_mcdi_poll_reboot(struct efx_nic *efx)
+{
+ struct siena_nic_data *nic_data = efx->nic_data;
+ unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx);
+ efx_dword_t reg;
+ u32 value;
+
+ efx_readd(efx, &reg, addr);
+ value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
+
+ if (value == 0)
+ return 0;
+
+ EFX_ZERO_DWORD(reg);
+ efx_writed(efx, &reg, addr);
+
+ /* MAC statistics have been cleared on the NIC; clear the local
+ * copies that we update with efx_update_diff_stat().
+ */
+ nic_data->stats[SIENA_STAT_tx_good_bytes] = 0;
+ nic_data->stats[SIENA_STAT_rx_good_bytes] = 0;
+
+ if (value == MC_STATUS_DWORD_ASSERT)
+ return -EINTR;
+ else
+ return -EIO;
+}
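
These four hooks split the Siena-specific MCDI transport (shared-memory PDU
plus doorbell register) out of the generic MCDI state machine. A polled
round trip through them might look like the sketch below; the real
efx_mcdi_rpc() additionally handles timeouts, event-driven completion and
error translation:

	static int mcdi_poll_rpc_sketch(struct efx_nic *efx,
					const efx_dword_t *hdr, size_t hdr_len,
					const efx_dword_t *sdu, size_t sdu_len,
					efx_dword_t *outbuf, size_t outlen)
	{
		int rc;

		efx->type->mcdi_request(efx, hdr, hdr_len, sdu, sdu_len);

		while (!efx->type->mcdi_poll_response(efx))
			udelay(10);		/* illustrative pacing */

		rc = efx->type->mcdi_poll_reboot(efx);
		if (rc)				/* -EINTR or -EIO above */
			return rc;

		/* Response header occupies the first hdr_len bytes */
		efx->type->mcdi_read_response(efx, outbuf, hdr_len, outlen);
		return 0;
	}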
+
+/**************************************************************************
+ *
+ * MTD
+ *
+ **************************************************************************
+ */
+
+#ifdef CONFIG_SFC_MTD
+
+struct siena_nvram_type_info {
+ int port;
+ const char *name;
+};
+
+static const struct siena_nvram_type_info siena_nvram_types[] = {
+ [MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" },
+ [MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" },
+ [MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" },
+ [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0] = { 0, "sfc_static_cfg" },
+ [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1] = { 1, "sfc_static_cfg" },
+ [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0] = { 0, "sfc_dynamic_cfg" },
+ [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1] = { 1, "sfc_dynamic_cfg" },
+ [MC_CMD_NVRAM_TYPE_EXP_ROM] = { 0, "sfc_exp_rom" },
+ [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0] = { 0, "sfc_exp_rom_cfg" },
+ [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" },
+ [MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" },
+ [MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" },
+ [MC_CMD_NVRAM_TYPE_FPGA] = { 0, "sfc_fpga" },
+};
+
+static int siena_mtd_probe_partition(struct efx_nic *efx,
+ struct efx_mcdi_mtd_partition *part,
+ unsigned int type)
+{
+ const struct siena_nvram_type_info *info;
+ size_t size, erase_size;
+ bool protected;
+ int rc;
+
+ if (type >= ARRAY_SIZE(siena_nvram_types) ||
+ siena_nvram_types[type].name == NULL)
+ return -ENODEV;
+
+ info = &siena_nvram_types[type];
+
+ if (info->port != efx_port_num(efx))
+ return -ENODEV;
+
+ rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
+ if (rc)
+ return rc;
+ if (protected)
+ return -ENODEV; /* hide it */
+
+ part->nvram_type = type;
+ part->common.dev_type_name = "Siena NVRAM manager";
+ part->common.type_name = info->name;
+
+ part->common.mtd.type = MTD_NORFLASH;
+ part->common.mtd.flags = MTD_CAP_NORFLASH;
+ part->common.mtd.size = size;
+ part->common.mtd.erasesize = erase_size;
+
+ return 0;
+}
+
+static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
+ struct efx_mcdi_mtd_partition *parts,
+ size_t n_parts)
+{
+ uint16_t fw_subtype_list[
+ MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM];
+ size_t i;
+ int rc;
+
+ rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < n_parts; i++)
+ parts[i].fw_subtype = fw_subtype_list[parts[i].nvram_type];
+
+ return 0;
+}
+
+static int siena_mtd_probe(struct efx_nic *efx)
+{
+ struct efx_mcdi_mtd_partition *parts;
+ u32 nvram_types;
+ unsigned int type;
+ size_t n_parts;
+ int rc;
+
+ ASSERT_RTNL();
+
+ rc = efx_mcdi_nvram_types(efx, &nvram_types);
+ if (rc)
+ return rc;
+
+ parts = kcalloc(hweight32(nvram_types), sizeof(*parts), GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+ type = 0;
+ n_parts = 0;
+
+ while (nvram_types != 0) {
+ if (nvram_types & 1) {
+ rc = siena_mtd_probe_partition(efx, &parts[n_parts],
+ type);
+ if (rc == 0)
+ n_parts++;
+ else if (rc != -ENODEV)
+ goto fail;
+ }
+ type++;
+ nvram_types >>= 1;
+ }
+
+ rc = siena_mtd_get_fw_subtypes(efx, parts, n_parts);
+ if (rc)
+ goto fail;
+
+ rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
+fail:
+ if (rc)
+ kfree(parts);
+ return rc;
+}
+
+#endif /* CONFIG_SFC_MTD */
+
+/**************************************************************************
+ *
+ * PTP
+ *
+ **************************************************************************
+ */
+
+static void siena_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
+{
+ _efx_writed(efx, cpu_to_le32(host_time),
+ FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
+}
/**************************************************************************
*
@@ -678,6 +856,7 @@ static void siena_init_wol(struct efx_nic *efx)
*/
const struct efx_nic_type siena_a0_nic_type = {
+ .mem_map_size = siena_mem_map_size,
.probe = siena_probe_nic,
.remove = siena_remove_nic,
.init = siena_init_nic,
@@ -688,44 +867,94 @@ const struct efx_nic_type siena_a0_nic_type = {
#else
.monitor = NULL,
#endif
- .map_reset_reason = siena_map_reset_reason,
+ .map_reset_reason = efx_mcdi_map_reset_reason,
.map_reset_flags = siena_map_reset_flags,
- .reset = siena_reset_hw,
- .probe_port = siena_probe_port,
- .remove_port = siena_remove_port,
+ .reset = efx_mcdi_reset,
+ .probe_port = efx_mcdi_port_probe,
+ .remove_port = efx_mcdi_port_remove,
+ .fini_dmaq = efx_farch_fini_dmaq,
.prepare_flush = siena_prepare_flush,
.finish_flush = siena_finish_flush,
+ .describe_stats = siena_describe_nic_stats,
.update_stats = siena_update_nic_stats,
- .start_stats = siena_start_nic_stats,
- .stop_stats = siena_stop_nic_stats,
+ .start_stats = efx_mcdi_mac_start_stats,
+ .stop_stats = efx_mcdi_mac_stop_stats,
.set_id_led = efx_mcdi_set_id_led,
.push_irq_moderation = siena_push_irq_moderation,
- .reconfigure_mac = efx_mcdi_mac_reconfigure,
+ .reconfigure_mac = siena_mac_reconfigure,
.check_mac_fault = efx_mcdi_mac_check_fault,
- .reconfigure_port = efx_mcdi_phy_reconfigure,
+ .reconfigure_port = efx_mcdi_port_reconfigure,
.get_wol = siena_get_wol,
.set_wol = siena_set_wol,
.resume_wol = siena_init_wol,
.test_chip = siena_test_chip,
.test_nvram = efx_mcdi_nvram_test_all,
+ .mcdi_request = siena_mcdi_request,
+ .mcdi_poll_response = siena_mcdi_poll_response,
+ .mcdi_read_response = siena_mcdi_read_response,
+ .mcdi_poll_reboot = siena_mcdi_poll_reboot,
+ .irq_enable_master = efx_farch_irq_enable_master,
+ .irq_test_generate = efx_farch_irq_test_generate,
+ .irq_disable_non_ev = efx_farch_irq_disable_master,
+ .irq_handle_msi = efx_farch_msi_interrupt,
+ .irq_handle_legacy = efx_farch_legacy_interrupt,
+ .tx_probe = efx_farch_tx_probe,
+ .tx_init = efx_farch_tx_init,
+ .tx_remove = efx_farch_tx_remove,
+ .tx_write = efx_farch_tx_write,
+ .rx_push_indir_table = efx_farch_rx_push_indir_table,
+ .rx_probe = efx_farch_rx_probe,
+ .rx_init = efx_farch_rx_init,
+ .rx_remove = efx_farch_rx_remove,
+ .rx_write = efx_farch_rx_write,
+ .rx_defer_refill = efx_farch_rx_defer_refill,
+ .ev_probe = efx_farch_ev_probe,
+ .ev_init = efx_farch_ev_init,
+ .ev_fini = efx_farch_ev_fini,
+ .ev_remove = efx_farch_ev_remove,
+ .ev_process = efx_farch_ev_process,
+ .ev_read_ack = efx_farch_ev_read_ack,
+ .ev_test_generate = efx_farch_ev_test_generate,
+ .filter_table_probe = efx_farch_filter_table_probe,
+ .filter_table_restore = efx_farch_filter_table_restore,
+ .filter_table_remove = efx_farch_filter_table_remove,
+ .filter_update_rx_scatter = efx_farch_filter_update_rx_scatter,
+ .filter_insert = efx_farch_filter_insert,
+ .filter_remove_safe = efx_farch_filter_remove_safe,
+ .filter_get_safe = efx_farch_filter_get_safe,
+ .filter_clear_rx = efx_farch_filter_clear_rx,
+ .filter_count_rx_used = efx_farch_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_farch_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_insert = efx_farch_filter_rfs_insert,
+ .filter_rfs_expire_one = efx_farch_filter_rfs_expire_one,
+#endif
+#ifdef CONFIG_SFC_MTD
+ .mtd_probe = siena_mtd_probe,
+ .mtd_rename = efx_mcdi_mtd_rename,
+ .mtd_read = efx_mcdi_mtd_read,
+ .mtd_erase = efx_mcdi_mtd_erase,
+ .mtd_write = efx_mcdi_mtd_write,
+ .mtd_sync = efx_mcdi_mtd_sync,
+#endif
+ .ptp_write_host_time = siena_ptp_write_host_time,
.revision = EFX_REV_SIENA_A0,
- .mem_map_size = (FR_CZ_MC_TREG_SMEM +
- FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
- .rx_buffer_hash_size = 0x10,
+ .rx_prefix_size = FS_BZ_RX_PREFIX_SIZE,
+ .rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST,
.rx_buffer_padding = 0,
.can_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
- .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
- * interrupt handler only supports 32
- * channels */
.timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH,
.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXHASH | NETIF_F_NTUPLE),
+ .mcdi_max_ver = 1,
+ .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
};
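
The greatly expanded method table is the heart of this refactor: code that
used to call Falcon-architecture helpers directly now indirects through
efx->type, which is what lets the efx_farch_* implementations coexist with a
different NIC architecture behind the same interface. The calling convention
in the common code is simply a trampoline:

	/* Sketch of a typical indirection through the method table */
	static inline int efx_nic_probe_tx_sketch(struct efx_tx_queue *tx_queue)
	{
		return tx_queue->efx->type->tx_probe(tx_queue);
	}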
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index 90f8d1604f5f..0c38f926871e 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2010-2011 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2010-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -15,7 +15,7 @@
#include "mcdi.h"
#include "filter.h"
#include "mcdi_pcol.h"
-#include "regs.h"
+#include "farch_regs.h"
#include "vfdi.h"
/* Number of longs required to track all the VIs in a VF */
@@ -197,8 +197,8 @@ static unsigned abs_index(struct efx_vf *vf, unsigned index)
static int efx_sriov_cmd(struct efx_nic *efx, bool enable,
unsigned *vi_scale_out, unsigned *vf_total_out)
{
- u8 inbuf[MC_CMD_SRIOV_IN_LEN];
- u8 outbuf[MC_CMD_SRIOV_OUT_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SRIOV_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_SRIOV_OUT_LEN);
unsigned vi_scale, vf_total;
size_t outlen;
int rc;
@@ -240,64 +240,55 @@ static void efx_sriov_usrev(struct efx_nic *efx, bool enabled)
static int efx_sriov_memcpy(struct efx_nic *efx, struct efx_memcpy_req *req,
unsigned int count)
{
- u8 *inbuf, *record;
- unsigned int used;
- u32 from_rid, from_hi, from_lo;
+ MCDI_DECLARE_BUF(inbuf, MCDI_CTL_SDU_LEN_MAX_V1);
+ MCDI_DECLARE_STRUCT_PTR(record);
+ unsigned int index, used;
+ u64 from_addr;
+ u32 from_rid;
int rc;
mb(); /* Finish writing source/reading dest before DMA starts */
- used = MC_CMD_MEMCPY_IN_LEN(count);
- if (WARN_ON(used > MCDI_CTL_SDU_LEN_MAX))
+ if (WARN_ON(count > MC_CMD_MEMCPY_IN_RECORD_MAXNUM))
return -ENOBUFS;
+ used = MC_CMD_MEMCPY_IN_LEN(count);
- /* Allocate room for the largest request */
- inbuf = kzalloc(MCDI_CTL_SDU_LEN_MAX, GFP_KERNEL);
- if (inbuf == NULL)
- return -ENOMEM;
-
- record = inbuf;
- MCDI_SET_DWORD(record, MEMCPY_IN_RECORD, count);
- while (count-- > 0) {
+ for (index = 0; index < count; index++) {
+ record = MCDI_ARRAY_STRUCT_PTR(inbuf, MEMCPY_IN_RECORD, index);
+ MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_NUM_RECORDS,
+ count);
MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_RID,
req->to_rid);
- MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO,
- (u32)req->to_addr);
- MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI,
- (u32)(req->to_addr >> 32));
+ MCDI_SET_QWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR,
+ req->to_addr);
if (req->from_buf == NULL) {
from_rid = req->from_rid;
- from_lo = (u32)req->from_addr;
- from_hi = (u32)(req->from_addr >> 32);
+ from_addr = req->from_addr;
} else {
- if (WARN_ON(used + req->length > MCDI_CTL_SDU_LEN_MAX)) {
+ if (WARN_ON(used + req->length >
+ MCDI_CTL_SDU_LEN_MAX_V1)) {
rc = -ENOBUFS;
goto out;
}
from_rid = MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE;
- from_lo = used;
- from_hi = 0;
- memcpy(inbuf + used, req->from_buf, req->length);
+ from_addr = used;
+ memcpy(_MCDI_PTR(inbuf, used), req->from_buf,
+ req->length);
used += req->length;
}
MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_RID, from_rid);
- MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO,
- from_lo);
- MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI,
- from_hi);
+ MCDI_SET_QWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR,
+ from_addr);
MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_LENGTH,
req->length);
++req;
- record += MC_CMD_MEMCPY_IN_RECORD_LEN;
}
rc = efx_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, used, NULL, 0, NULL);
out:
- kfree(inbuf);
-
mb(); /* Don't write source/read dest before DMA is complete */
return rc;
@@ -473,8 +464,9 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf)
VFDI_EV_SEQ, (vf->msg_seqno & 0xff),
VFDI_EV_TYPE, VFDI_EV_TYPE_STATUS);
++vf->msg_seqno;
- efx_generate_event(efx, EFX_VI_BASE + vf->index * efx_vf_size(efx),
- &event);
+ efx_farch_generate_event(efx,
+ EFX_VI_BASE + vf->index * efx_vf_size(efx),
+ &event);
}
static void efx_sriov_bufs(struct efx_nic *efx, unsigned offset,
@@ -684,16 +676,12 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
unsigned vf_offset = EFX_VI_BASE + vf->index * efx_vf_size(efx);
unsigned timeout = HZ;
unsigned index, rxqs_count;
- __le32 *rxqs;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX);
int rc;
BUILD_BUG_ON(VF_MAX_RX_QUEUES >
MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
- rxqs = kmalloc(count * sizeof(*rxqs), GFP_KERNEL);
- if (rxqs == NULL)
- return VFDI_RC_ENOMEM;
-
rtnl_lock();
siena_prepare_flush(efx);
rtnl_unlock();
@@ -708,14 +696,19 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
vf_offset + index);
efx_writeo(efx, &reg, FR_AZ_TX_FLUSH_DESCQ);
}
- if (test_bit(index, vf->rxq_mask))
- rxqs[rxqs_count++] = cpu_to_le32(vf_offset + index);
+ if (test_bit(index, vf->rxq_mask)) {
+ MCDI_SET_ARRAY_DWORD(
+ inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
+ rxqs_count, vf_offset + index);
+ rxqs_count++;
+ }
}
atomic_set(&vf->rxq_retry_count, 0);
while (timeout && (vf->rxq_count || vf->txq_count)) {
- rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)rxqs,
- rxqs_count * sizeof(*rxqs), NULL, 0, NULL);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
+ MC_CMD_FLUSH_RX_QUEUES_IN_LEN(rxqs_count),
+ NULL, 0, NULL);
WARN_ON(rc < 0);
timeout = wait_event_timeout(vf->flush_waitq,
@@ -725,8 +718,10 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
for (index = 0; index < count; ++index) {
if (test_and_clear_bit(index, vf->rxq_retry_mask)) {
atomic_dec(&vf->rxq_retry_count);
- rxqs[rxqs_count++] =
- cpu_to_le32(vf_offset + index);
+ MCDI_SET_ARRAY_DWORD(
+ inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
+ rxqs_count, vf_offset + index);
+ rxqs_count++;
}
}
}
@@ -749,7 +744,6 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
}
efx_sriov_bufs(efx, vf->buftbl_base, NULL,
EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx));
- kfree(rxqs);
efx_vfdi_flush_clear(vf);
vf->evq0_count = 0;
@@ -1004,7 +998,7 @@ static void efx_sriov_reset_vf_work(struct work_struct *work)
struct efx_nic *efx = vf->efx;
struct efx_buffer buf;
- if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE)) {
+ if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) {
efx_sriov_reset_vf(vf, &buf);
efx_nic_free_buffer(efx, &buf);
}
@@ -1248,7 +1242,8 @@ static int efx_sriov_vfs_init(struct efx_nic *efx)
pci_domain_nr(pci_dev->bus), pci_dev->bus->number,
PCI_SLOT(devfn), PCI_FUNC(devfn));
- rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE);
+ rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE,
+ GFP_KERNEL);
if (rc)
goto fail;
@@ -1280,7 +1275,8 @@ int efx_sriov_init(struct efx_nic *efx)
if (rc)
goto fail_cmd;
- rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status));
+ rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status),
+ GFP_KERNEL);
if (rc)
goto fail_status;
vfdi_status = efx->vfdi_status.addr;
@@ -1535,7 +1531,7 @@ void efx_sriov_reset(struct efx_nic *efx)
efx_sriov_usrev(efx, true);
(void)efx_sriov_cmd(efx, true, NULL, NULL);
- if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE))
+ if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO))
return;
for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
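
The efx_sriov_memcpy() rewrite above drops the kzalloc'd scratch buffer in
favour of an on-stack MCDI buffer and addresses each fixed-size MEMCPY record
through MCDI_ARRAY_STRUCT_PTR(). The underlying arithmetic is base plus
stride; a hypothetical simplification (the real macro derives both the array
offset and the record length from the field name):

	#define MCDI_ARRAY_PTR_SKETCH(_buf, _array_ofst, _record_len, _index) \
		((efx_dword_t *)((u8 *)(_buf) +				\
				 (_array_ofst) + (_record_len) * (_index)))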
diff --git a/drivers/net/ethernet/sfc/spi.h b/drivers/net/ethernet/sfc/spi.h
deleted file mode 100644
index 5431a1bbff5c..000000000000
--- a/drivers/net/ethernet/sfc/spi.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2005 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#ifndef EFX_SPI_H
-#define EFX_SPI_H
-
-#include "net_driver.h"
-
-/**************************************************************************
- *
- * Basic SPI command set and bit definitions
- *
- *************************************************************************/
-
-#define SPI_WRSR 0x01 /* Write status register */
-#define SPI_WRITE 0x02 /* Write data to memory array */
-#define SPI_READ 0x03 /* Read data from memory array */
-#define SPI_WRDI 0x04 /* Reset write enable latch */
-#define SPI_RDSR 0x05 /* Read status register */
-#define SPI_WREN 0x06 /* Set write enable latch */
-#define SPI_SST_EWSR 0x50 /* SST: Enable write to status register */
-
-#define SPI_STATUS_WPEN 0x80 /* Write-protect pin enabled */
-#define SPI_STATUS_BP2 0x10 /* Block protection bit 2 */
-#define SPI_STATUS_BP1 0x08 /* Block protection bit 1 */
-#define SPI_STATUS_BP0 0x04 /* Block protection bit 0 */
-#define SPI_STATUS_WEN 0x02 /* State of the write enable latch */
-#define SPI_STATUS_NRDY 0x01 /* Device busy flag */
-
-/**
- * struct efx_spi_device - an Efx SPI (Serial Peripheral Interface) device
- * @device_id: Controller's id for the device
- * @size: Size (in bytes)
- * @addr_len: Number of address bytes in read/write commands
- * @munge_address: Flag whether addresses should be munged.
- * Some devices with 9-bit addresses (e.g. AT25040A EEPROM)
- * use bit 3 of the command byte as address bit A8, rather
- * than having a two-byte address. If this flag is set, then
- * commands should be munged in this way.
- * @erase_command: Erase command (or 0 if sector erase not needed).
- * @erase_size: Erase sector size (in bytes)
- * Erase commands affect sectors with this size and alignment.
- * This must be a power of two.
- * @block_size: Write block size (in bytes).
- * Write commands are limited to blocks with this size and alignment.
- */
-struct efx_spi_device {
- int device_id;
- unsigned int size;
- unsigned int addr_len;
- unsigned int munge_address:1;
- u8 erase_command;
- unsigned int erase_size;
- unsigned int block_size;
-};
-
-static inline bool efx_spi_present(const struct efx_spi_device *spi)
-{
- return spi->size != 0;
-}
-
-int falcon_spi_cmd(struct efx_nic *efx,
- const struct efx_spi_device *spi, unsigned int command,
- int address, const void *in, void *out, size_t len);
-int falcon_spi_wait_write(struct efx_nic *efx,
- const struct efx_spi_device *spi);
-int falcon_spi_read(struct efx_nic *efx,
- const struct efx_spi_device *spi, loff_t start,
- size_t len, size_t *retlen, u8 *buffer);
-int falcon_spi_write(struct efx_nic *efx,
- const struct efx_spi_device *spi, loff_t start,
- size_t len, size_t *retlen, const u8 *buffer);
-
-/*
- * SFC4000 flash is partitioned into:
- * 0-0x400 chip and board config (see falcon_hwdefs.h)
- * 0x400-0x8000 unused (or may contain VPD if EEPROM not present)
- * 0x8000-end boot code (mapped to PCI expansion ROM)
- * SFC4000 small EEPROM (size < 0x400) is used for VPD only.
- * SFC4000 large EEPROM (size >= 0x400) is partitioned into:
- * 0-0x400 chip and board config
- * configurable VPD
- * 0x800-0x1800 boot config
- * Aside from the chip and board config, all of these are optional and may
- * be absent or truncated depending on the devices used.
- */
-#define FALCON_NVCONFIG_END 0x400U
-#define FALCON_FLASH_BOOTCODE_START 0x8000U
-#define EFX_EEPROM_BOOTCONFIG_START 0x800U
-#define EFX_EEPROM_BOOTCONFIG_END 0x1800U
-
-#endif /* EFX_SPI_H */
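
For context on the command set deleted above: writes to these serial EEPROM/flash parts are a three-step sequence, which is what the removed falcon_spi_* helpers implemented. A hypothetical sketch using the deleted declarations (example_spi_write_block is not a function from this driver):

	static int example_spi_write_block(struct efx_nic *efx,
					   const struct efx_spi_device *spi,
					   int addr, const u8 *data, size_t len)
	{
		int rc;

		/* 1. Set the write enable latch. */
		rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
		if (rc)
			return rc;
		/* 2. Program at most one write block. */
		rc = falcon_spi_cmd(efx, spi, SPI_WRITE, addr, data, NULL, len);
		if (rc)
			return rc;
		/* 3. Poll the status register until SPI_STATUS_NRDY clears. */
		return falcon_spi_wait_write(efx, spi);
	}
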
diff --git a/drivers/net/ethernet/sfc/tenxpress.c b/drivers/net/ethernet/sfc/tenxpress.c
index d37cb5017129..2c90e6b31575 100644
--- a/drivers/net/ethernet/sfc/tenxpress.c
+++ b/drivers/net/ethernet/sfc/tenxpress.c
@@ -1,5 +1,5 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2007-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 5e090e54298e..2ac91c5b5eea 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2010 Solarflare Communications Inc.
+ * Copyright 2005-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -306,7 +306,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
while (read_ptr != stop_index) {
struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
- if (unlikely(buffer->len == 0)) {
+
+ if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
+ unlikely(buffer->len == 0)) {
netif_err(efx, tx_err, efx->net_dev,
"TX queue %d spurious TX completion id %x\n",
tx_queue->queue, read_ptr);
@@ -437,6 +439,9 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
+ if (pkts_compl > 1)
+ ++tx_queue->merge_events;
+
/* See if we need to restart the netif queue. This memory
* barrier ensures that we write read_count (inside
* efx_dequeue_buffers()) before reading the queue status.
@@ -543,10 +548,13 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->initialised = true;
}
-void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
+void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
struct efx_tx_buffer *buffer;
+ netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ "shutting down TX queue %d\n", tx_queue->queue);
+
if (!tx_queue->buffer)
return;
@@ -561,22 +569,6 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
netdev_tx_reset_queue(tx_queue->core_txq);
}
-void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
-{
- if (!tx_queue->initialised)
- return;
-
- netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
- "shutting down TX queue %d\n", tx_queue->queue);
-
- tx_queue->initialised = false;
-
- /* Flush TX queue, remove descriptor ring */
- efx_nic_fini_tx(tx_queue);
-
- efx_release_tx_buffers(tx_queue);
-}
-
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
int i;
@@ -708,7 +700,8 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;
if (unlikely(!page_buf->addr) &&
- efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE))
+ efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
+ GFP_ATOMIC))
return NULL;
result = (u8 *)page_buf->addr + offset;
diff --git a/drivers/net/ethernet/sfc/txc43128_phy.c b/drivers/net/ethernet/sfc/txc43128_phy.c
index 29bb3f9941c0..3d5ee3259885 100644
--- a/drivers/net/ethernet/sfc/txc43128_phy.c
+++ b/drivers/net/ethernet/sfc/txc43128_phy.c
@@ -1,5 +1,5 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/vfdi.h b/drivers/net/ethernet/sfc/vfdi.h
index 225557caaf5a..ae044f44936a 100644
--- a/drivers/net/ethernet/sfc/vfdi.h
+++ b/drivers/net/ethernet/sfc/vfdi.h
@@ -1,5 +1,5 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2010-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/workarounds.h b/drivers/net/ethernet/sfc/workarounds.h
index e4dd3a7f304b..2310b75d4ec2 100644
--- a/drivers/net/ethernet/sfc/workarounds.h
+++ b/drivers/net/ethernet/sfc/workarounds.h
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -15,27 +15,15 @@
* Bug numbers are from Solarflare's Bugzilla.
*/
-#define EFX_WORKAROUND_ALWAYS(efx) 1
#define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1)
#define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0)
#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
#define EFX_WORKAROUND_10G(efx) 1
-/* XAUI resets if link not detected */
-#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
-/* RX PCIe double split performance issue */
-#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS
/* Bit-bashed I2C reads cause performance drop */
#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
-/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
- * or a PCIe error (bug 11028) */
-#define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS
-/* Transmit flow control may get disabled */
-#define EFX_WORKAROUND_11482 EFX_WORKAROUND_FALCON_AB
/* Truncated IPv4 packets can confuse the TX packet parser */
#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB
-/* Legacy ISR read can return zero once */
-#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
/* Legacy interrupt storm when interrupt fifo fills */
#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
@@ -56,4 +44,10 @@
/* Leak overlength packets rather than free */
#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
+/* Lockup when writing event block registers at gen2/gen3 */
+#define EFX_EF10_WORKAROUND_35388(efx) \
+ (((struct efx_ef10_nic_data *)efx->nic_data)->workaround_35388)
+#define EFX_WORKAROUND_35388(efx) \
+ (efx_nic_rev(efx) == EFX_REV_HUNT_A0 && EFX_EF10_WORKAROUND_35388(efx))
+
#endif /* EFX_WORKAROUNDS_H */
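
These macros read as predicates at the site where each erratum matters, and the new EFX_WORKAROUND_35388() keeps that shape while also consulting per-NIC data. A hedged usage sketch (the function name and the branch contents are placeholders, not code from this diff):

	static void example_write_evq_doorbell(struct efx_nic *efx, u32 value)
	{
		if (EFX_WORKAROUND_35388(efx)) {
			/* take the erratum-safe path, e.g. a split write */
		} else {
			/* normal single MMIO write */
		}
	}
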
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 9f5f35e041ac..770036bc2d87 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -212,9 +212,8 @@ static void meth_check_link(struct net_device *dev)
static int meth_init_tx_ring(struct meth_private *priv)
{
/* Init TX ring */
- priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE,
- &priv->tx_ring_dma,
- GFP_ATOMIC | __GFP_ZERO);
+ priv->tx_ring = dma_zalloc_coherent(NULL, TX_RING_BUFFER_SIZE,
+ &priv->tx_ring_dma, GFP_ATOMIC);
if (!priv->tx_ring)
return -ENOMEM;
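
dma_zalloc_coherent() is just dma_alloc_coherent() with zeroing folded in, so this conversion drops the open-coded __GFP_ZERO without changing behaviour. The wrapper (as defined in <linux/dma-mapping.h> of this era, reproduced for reference) amounts to:

	static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
						dma_addr_t *dma_handle, gfp_t flag)
	{
		return dma_alloc_coherent(dev, size, dma_handle,
					  flag | __GFP_ZERO);
	}
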
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 02df0894690d..ee18e6f7b4fe 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1770,9 +1770,6 @@ static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
struct sis190_private *tp = netdev_priv(dev);
unsigned long flags;
- if (regs->len > SIS190_REGS_SIZE)
- regs->len = SIS190_REGS_SIZE;
-
spin_lock_irqsave(&tp->lock, flags);
memcpy_fromio(p, tp->mmio_addr, regs->len);
spin_unlock_irqrestore(&tp->lock, flags);
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index f5d7ad75e479..b7a39305472b 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1309,23 +1309,9 @@ static void sis900_timer(unsigned long data)
struct sis900_private *sis_priv = netdev_priv(net_dev);
struct mii_phy *mii_phy = sis_priv->mii;
static const int next_tick = 5*HZ;
+ int speed = 0, duplex = 0;
u16 status;
- if (!sis_priv->autong_complete){
- int uninitialized_var(speed), duplex = 0;
-
- sis900_read_mode(net_dev, &speed, &duplex);
- if (duplex){
- sis900_set_mode(sis_priv, speed, duplex);
- sis630_set_eq(net_dev, sis_priv->chipset_rev);
- netif_carrier_on(net_dev);
- }
-
- sis_priv->timer.expires = jiffies + HZ;
- add_timer(&sis_priv->timer);
- return;
- }
-
status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
@@ -1336,8 +1322,16 @@ static void sis900_timer(unsigned long data)
status = sis900_default_phy(net_dev);
mii_phy = sis_priv->mii;
- if (status & MII_STAT_LINK)
- sis900_check_mode(net_dev, mii_phy);
+ if (status & MII_STAT_LINK) {
+ WARN_ON(!(status & MII_STAT_AUTO_DONE));
+
+ sis900_read_mode(net_dev, &speed, &duplex);
+ if (duplex) {
+ sis900_set_mode(sis_priv, speed, duplex);
+ sis630_set_eq(net_dev, sis_priv->chipset_rev);
+ netif_carrier_on(net_dev);
+ }
+ }
} else {
/* Link ON -> OFF */
if (!(status & MII_STAT_LINK)){
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 345558fe7367..afe01c4088a3 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -2067,7 +2067,7 @@ static int smc911x_drv_probe(struct platform_device *pdev)
lp->netdev = ndev;
#ifdef SMC_DYNAMIC_BUS_CONFIG
{
- struct smc911x_platdata *pd = pdev->dev.platform_data;
+ struct smc911x_platdata *pd = dev_get_platdata(&pdev->dev);
if (!pd) {
ret = -EINVAL;
goto release_both;
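
This and the similar hunks that follow are mechanical conversions: dev_get_platdata() is the preferred accessor for what used to be read straight from dev.platform_data. Its definition (from <linux/device.h>, shown for reference) is simply:

	static inline void *dev_get_platdata(const struct device *dev)
	{
		return dev->platform_data;
	}
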
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index cde13be7c7de..73be7f3982e6 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2202,7 +2202,7 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device *
*/
static int smc_drv_probe(struct platform_device *pdev)
{
- struct smc91x_platdata *pd = pdev->dev.platform_data;
+ struct smc91x_platdata *pd = dev_get_platdata(&pdev->dev);
struct smc_local *lp;
struct net_device *ndev;
struct resource *res, *ires;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index a1419211585b..5fdbc2686eb3 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2374,7 +2374,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct net_device *dev;
struct smsc911x_data *pdata;
- struct smsc911x_platform_config *config = pdev->dev.platform_data;
+ struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev);
struct resource *res, *irq_res;
unsigned int intcfg = 0;
int res_size, irq_flags;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index c922fde929a1..f16a9bdf45bb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -70,7 +70,6 @@ struct stmmac_priv {
struct net_device *dev;
struct device *device;
struct mac_device_info *hw;
- int no_csum_insertion;
spinlock_t lock;
struct phy_device *phydev ____cacheline_aligned_in_smp;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index def7e75e1d57..76ad214b4036 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -45,8 +45,8 @@ static void stmmac_config_sub_second_increment(void __iomem *ioaddr)
data = (1000000000ULL / 50000000);
/* 0.465ns accuracy */
- if (value & PTP_TCR_TSCTRLSSR)
- data = (data * 100) / 465;
+ if (!(value & PTP_TCR_TSCTRLSSR))
+ data = (data * 1000) / 465;
writel(data, ioaddr + PTP_SSIR);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 0a9bb9d30c3f..8d4ccd35a016 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1224,8 +1224,9 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
*/
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
- if (likely(priv->plat->force_sf_dma_mode ||
- ((priv->plat->tx_coe) && (!priv->no_csum_insertion)))) {
+ if (priv->plat->force_thresh_dma_mode)
+ priv->hw->dma->dma_mode(priv->ioaddr, tc, tc);
+ else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
/*
* In case of GMAC, SF mode can be enabled
* to perform the TX COE in HW. This depends on:
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 1c83a44c547b..7a0072003f34 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -83,6 +83,11 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
dma_cfg->mixed_burst =
of_property_read_bool(np, "snps,mixed-burst");
}
+ plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
+ if (plat->force_thresh_dma_mode) {
+ plat->force_sf_dma_mode = 0;
+ pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.");
+ }
return 0;
}
@@ -113,14 +117,11 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
const char *mac = NULL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
addr = devm_ioremap_resource(dev, res);
if (IS_ERR(addr))
return PTR_ERR(addr);
- plat_dat = pdev->dev.platform_data;
+ plat_dat = dev_get_platdata(&pdev->dev);
if (pdev->dev.of_node) {
if (!plat_dat)
plat_dat = devm_kzalloc(&pdev->dev,
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 52b2adf63cbf..f28460ce24a7 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9360,7 +9360,7 @@ static ssize_t show_port_phy(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *plat_dev = to_platform_device(dev);
- struct niu_parent *p = plat_dev->dev.platform_data;
+ struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
u32 port_phy = p->port_phy;
char *orig_buf = buf;
int i;
@@ -9390,7 +9390,7 @@ static ssize_t show_plat_type(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *plat_dev = to_platform_device(dev);
- struct niu_parent *p = plat_dev->dev.platform_data;
+ struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
const char *type_str;
switch (p->plat_type) {
@@ -9419,7 +9419,7 @@ static ssize_t __show_chan_per_port(struct device *dev,
int rx)
{
struct platform_device *plat_dev = to_platform_device(dev);
- struct niu_parent *p = plat_dev->dev.platform_data;
+ struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
char *orig_buf = buf;
u8 *arr;
int i;
@@ -9452,7 +9452,7 @@ static ssize_t show_num_ports(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *plat_dev = to_platform_device(dev);
- struct niu_parent *p = plat_dev->dev.platform_data;
+ struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
return sprintf(buf, "%d\n", p->num_ports);
}
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 0d43fa9ff980..7217ee5d6273 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1239,7 +1239,7 @@ static int bigmac_sbus_probe(struct platform_device *op)
static int bigmac_sbus_remove(struct platform_device *op)
{
- struct bigmac *bp = dev_get_drvdata(&op->dev);
+ struct bigmac *bp = platform_get_drvdata(op);
struct device *parent = op->dev.parent;
struct net_device *net_dev = bp->dev;
struct platform_device *qec_op;
@@ -1259,8 +1259,6 @@ static int bigmac_sbus_remove(struct platform_device *op)
free_netdev(net_dev);
- dev_set_drvdata(&op->dev, NULL);
-
return 0;
}
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 171f5b0809c4..e37b587b3860 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2798,7 +2798,7 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
goto err_out_free_coherent;
}
- dev_set_drvdata(&op->dev, hp);
+ platform_set_drvdata(op, hp);
if (qfe_slot != -1)
printk(KERN_INFO "%s: Quattro HME slot %d (SBUS) 10/100baseT Ethernet ",
@@ -3111,7 +3111,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
goto err_out_iounmap;
}
- dev_set_drvdata(&pdev->dev, hp);
+ pci_set_drvdata(pdev, hp);
if (!qfe_slot) {
struct pci_dev *qpdev = qp->quattro_dev;
@@ -3159,7 +3159,7 @@ err_out:
static void happy_meal_pci_remove(struct pci_dev *pdev)
{
- struct happy_meal *hp = dev_get_drvdata(&pdev->dev);
+ struct happy_meal *hp = pci_get_drvdata(pdev);
struct net_device *net_dev = hp->dev;
unregister_netdev(net_dev);
@@ -3171,7 +3171,7 @@ static void happy_meal_pci_remove(struct pci_dev *pdev)
free_netdev(net_dev);
- dev_set_drvdata(&pdev->dev, NULL);
+ pci_set_drvdata(pdev, NULL);
}
static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = {
@@ -3231,7 +3231,7 @@ static int hme_sbus_probe(struct platform_device *op)
static int hme_sbus_remove(struct platform_device *op)
{
- struct happy_meal *hp = dev_get_drvdata(&op->dev);
+ struct happy_meal *hp = platform_get_drvdata(op);
struct net_device *net_dev = hp->dev;
unregister_netdev(net_dev);
@@ -3250,8 +3250,6 @@ static int hme_sbus_remove(struct platform_device *op)
free_netdev(net_dev);
- dev_set_drvdata(&op->dev, NULL);
-
return 0;
}
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 31bbbca341a7..2dc16b6efaf0 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -636,7 +636,7 @@ static void cpmac_hw_stop(struct net_device *dev)
{
int i;
struct cpmac_priv *priv = netdev_priv(dev);
- struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data;
+ struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);
ar7_device_reset(pdata->reset_bit);
cpmac_write(priv->regs, CPMAC_RX_CONTROL,
@@ -659,7 +659,7 @@ static void cpmac_hw_start(struct net_device *dev)
{
int i;
struct cpmac_priv *priv = netdev_priv(dev);
- struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data;
+ struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);
ar7_device_reset(pdata->reset_bit);
for (i = 0; i < 8; i++) {
@@ -1118,7 +1118,7 @@ static int cpmac_probe(struct platform_device *pdev)
struct net_device *dev;
struct plat_cpmac_data *pdata;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (external_switch || dumb_switch) {
strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 22a7a4336211..79974e31187a 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -34,9 +34,9 @@
#include <linux/of_device.h>
#include <linux/if_vlan.h>
-#include <linux/platform_data/cpsw.h>
#include <linux/pinctrl/consumer.h>
+#include "cpsw.h"
#include "cpsw_ale.h"
#include "cpts.h"
#include "davinci_cpdma.h"
@@ -82,6 +82,8 @@ do { \
#define CPSW_VERSION_1 0x19010a
#define CPSW_VERSION_2 0x19010c
+#define CPSW_VERSION_3 0x19010f
+#define CPSW_VERSION_4 0x190112
#define HOST_PORT_NUM 0
#define SLIVER_SIZE 0x40
@@ -91,6 +93,7 @@ do { \
#define CPSW1_SLAVE_SIZE 0x040
#define CPSW1_CPDMA_OFFSET 0x100
#define CPSW1_STATERAM_OFFSET 0x200
+#define CPSW1_HW_STATS 0x400
#define CPSW1_CPTS_OFFSET 0x500
#define CPSW1_ALE_OFFSET 0x600
#define CPSW1_SLIVER_OFFSET 0x700
@@ -99,6 +102,7 @@ do { \
#define CPSW2_SLAVE_OFFSET 0x200
#define CPSW2_SLAVE_SIZE 0x100
#define CPSW2_CPDMA_OFFSET 0x800
+#define CPSW2_HW_STATS 0x900
#define CPSW2_STATERAM_OFFSET 0xa00
#define CPSW2_CPTS_OFFSET 0xc00
#define CPSW2_ALE_OFFSET 0xd00
@@ -299,6 +303,44 @@ struct cpsw_sliver_regs {
u32 rx_pri_map;
};
+struct cpsw_hw_stats {
+ u32 rxgoodframes;
+ u32 rxbroadcastframes;
+ u32 rxmulticastframes;
+ u32 rxpauseframes;
+ u32 rxcrcerrors;
+ u32 rxaligncodeerrors;
+ u32 rxoversizedframes;
+ u32 rxjabberframes;
+ u32 rxundersizedframes;
+ u32 rxfragments;
+ u32 __pad_0[2];
+ u32 rxoctets;
+ u32 txgoodframes;
+ u32 txbroadcastframes;
+ u32 txmulticastframes;
+ u32 txpauseframes;
+ u32 txdeferredframes;
+ u32 txcollisionframes;
+ u32 txsinglecollframes;
+ u32 txmultcollframes;
+ u32 txexcessivecollisions;
+ u32 txlatecollisions;
+ u32 txunderrun;
+ u32 txcarriersenseerrors;
+ u32 txoctets;
+ u32 octetframes64;
+ u32 octetframes65t127;
+ u32 octetframes128t255;
+ u32 octetframes256t511;
+ u32 octetframes512t1023;
+ u32 octetframes1024tup;
+ u32 netoctets;
+ u32 rxsofoverruns;
+ u32 rxmofoverruns;
+ u32 rxdmaoverruns;
+};
+
struct cpsw_slave {
void __iomem *regs;
struct cpsw_sliver_regs __iomem *sliver;
@@ -332,6 +374,7 @@ struct cpsw_priv {
struct cpsw_platform_data data;
struct cpsw_ss_regs __iomem *regs;
struct cpsw_wr_regs __iomem *wr_regs;
+ u8 __iomem *hw_stats;
struct cpsw_host_regs __iomem *host_port_regs;
u32 msg_enable;
u32 version;
@@ -354,6 +397,94 @@ struct cpsw_priv {
u32 emac_port;
};
+struct cpsw_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int type;
+ int sizeof_stat;
+ int stat_offset;
+};
+
+enum {
+ CPSW_STATS,
+ CPDMA_RX_STATS,
+ CPDMA_TX_STATS,
+};
+
+#define CPSW_STAT(m) CPSW_STATS, \
+ sizeof(((struct cpsw_hw_stats *)0)->m), \
+ offsetof(struct cpsw_hw_stats, m)
+#define CPDMA_RX_STAT(m) CPDMA_RX_STATS, \
+ sizeof(((struct cpdma_chan_stats *)0)->m), \
+ offsetof(struct cpdma_chan_stats, m)
+#define CPDMA_TX_STAT(m) CPDMA_TX_STATS, \
+ sizeof(((struct cpdma_chan_stats *)0)->m), \
+ offsetof(struct cpdma_chan_stats, m)
+
+static const struct cpsw_stats cpsw_gstrings_stats[] = {
+ { "Good Rx Frames", CPSW_STAT(rxgoodframes) },
+ { "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
+ { "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
+ { "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
+ { "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
+ { "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
+ { "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
+ { "Rx Jabbers", CPSW_STAT(rxjabberframes) },
+ { "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
+ { "Rx Fragments", CPSW_STAT(rxfragments) },
+ { "Rx Octets", CPSW_STAT(rxoctets) },
+ { "Good Tx Frames", CPSW_STAT(txgoodframes) },
+ { "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
+ { "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
+ { "Pause Tx Frames", CPSW_STAT(txpauseframes) },
+ { "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
+ { "Collisions", CPSW_STAT(txcollisionframes) },
+ { "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
+ { "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
+ { "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
+ { "Late Collisions", CPSW_STAT(txlatecollisions) },
+ { "Tx Underrun", CPSW_STAT(txunderrun) },
+ { "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
+ { "Tx Octets", CPSW_STAT(txoctets) },
+ { "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
+ { "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
+ { "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
+ { "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
+ { "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
+ { "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
+ { "Net Octets", CPSW_STAT(netoctets) },
+ { "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
+ { "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
+ { "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
+ { "Rx DMA chan: head_enqueue", CPDMA_RX_STAT(head_enqueue) },
+ { "Rx DMA chan: tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
+ { "Rx DMA chan: pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
+ { "Rx DMA chan: misqueued", CPDMA_RX_STAT(misqueued) },
+ { "Rx DMA chan: desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
+ { "Rx DMA chan: pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
+ { "Rx DMA chan: runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
+ { "Rx DMA chan: runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
+ { "Rx DMA chan: empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
+ { "Rx DMA chan: busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
+ { "Rx DMA chan: good_dequeue", CPDMA_RX_STAT(good_dequeue) },
+ { "Rx DMA chan: requeue", CPDMA_RX_STAT(requeue) },
+ { "Rx DMA chan: teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
+ { "Tx DMA chan: head_enqueue", CPDMA_TX_STAT(head_enqueue) },
+ { "Tx DMA chan: tail_enqueue", CPDMA_TX_STAT(tail_enqueue) },
+ { "Tx DMA chan: pad_enqueue", CPDMA_TX_STAT(pad_enqueue) },
+ { "Tx DMA chan: misqueued", CPDMA_TX_STAT(misqueued) },
+ { "Tx DMA chan: desc_alloc_fail", CPDMA_TX_STAT(desc_alloc_fail) },
+ { "Tx DMA chan: pad_alloc_fail", CPDMA_TX_STAT(pad_alloc_fail) },
+ { "Tx DMA chan: runt_receive_buf", CPDMA_TX_STAT(runt_receive_buff) },
+ { "Tx DMA chan: runt_transmit_buf", CPDMA_TX_STAT(runt_transmit_buff) },
+ { "Tx DMA chan: empty_dequeue", CPDMA_TX_STAT(empty_dequeue) },
+ { "Tx DMA chan: busy_dequeue", CPDMA_TX_STAT(busy_dequeue) },
+ { "Tx DMA chan: good_dequeue", CPDMA_TX_STAT(good_dequeue) },
+ { "Tx DMA chan: requeue", CPDMA_TX_STAT(requeue) },
+ { "Tx DMA chan: teardown_dequeue", CPDMA_TX_STAT(teardown_dequeue) },
+};
+
+#define CPSW_STATS_LEN ARRAY_SIZE(cpsw_gstrings_stats)
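
The CPSW_STAT()/CPDMA_*_STAT() macros expand to a (type, size, offset) triple so a single table can describe counters living in two different structures; sizeof(((struct cpsw_hw_stats *)0)->m) is the usual null-pointer idiom for taking a member's size without an instance. As an illustration, the first table entry written out by hand would be roughly (offsets assume the struct layout above):

	static const struct cpsw_stats example_expanded = {
		"Good Rx Frames", CPSW_STATS,
		sizeof(((struct cpsw_hw_stats *)0)->rxgoodframes),	/* == 4 */
		offsetof(struct cpsw_hw_stats, rxgoodframes),		/* == 0 */
	};

Once the ethtool_ops later in this file are wired up, the whole table is reported by `ethtool -S <iface>`.
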
+
#define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi)
#define for_each_slave(priv, func, arg...) \
do { \
@@ -723,6 +854,69 @@ static int cpsw_set_coalesce(struct net_device *ndev,
return 0;
}
+static int cpsw_get_sset_count(struct net_device *ndev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return CPSW_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < CPSW_STATS_LEN; i++) {
+ memcpy(p, cpsw_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static void cpsw_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpdma_chan_stats rx_stats;
+ struct cpdma_chan_stats tx_stats;
+ u32 val;
+ u8 *p;
+ int i;
+
+ /* Collect Davinci CPDMA stats for Rx and Tx Channel */
+ cpdma_chan_get_stats(priv->rxch, &rx_stats);
+ cpdma_chan_get_stats(priv->txch, &tx_stats);
+
+ for (i = 0; i < CPSW_STATS_LEN; i++) {
+ switch (cpsw_gstrings_stats[i].type) {
+ case CPSW_STATS:
+ val = readl(priv->hw_stats +
+ cpsw_gstrings_stats[i].stat_offset);
+ data[i] = val;
+ break;
+
+ case CPDMA_RX_STATS:
+ p = (u8 *)&rx_stats +
+ cpsw_gstrings_stats[i].stat_offset;
+ data[i] = *(u32 *)p;
+ break;
+
+ case CPDMA_TX_STATS:
+ p = (u8 *)&tx_stats +
+ cpsw_gstrings_stats[i].stat_offset;
+ data[i] = *(u32 *)p;
+ break;
+ }
+ }
+}
+
static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
{
static char *leader = "........................................";
@@ -799,6 +993,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
break;
case CPSW_VERSION_2:
+ case CPSW_VERSION_3:
+ case CPSW_VERSION_4:
slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
break;
}
@@ -1232,6 +1428,33 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
}
+static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct sockaddr *addr = (struct sockaddr *)p;
+ int flags = 0;
+ u16 vid = 0;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (priv->data.dual_emac) {
+ vid = priv->slaves[priv->emac_port].port_vlan;
+ flags = ALE_VLAN;
+ }
+
+ cpsw_ale_del_ucast(priv->ale, priv->mac_addr, priv->host_port,
+ flags, vid);
+ cpsw_ale_add_ucast(priv->ale, addr->sa_data, priv->host_port,
+ flags, vid);
+
+ memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
+ memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
+ for_each_slave(priv, cpsw_set_slave_mac, priv);
+
+ return 0;
+}
+
static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
@@ -1326,6 +1549,7 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_stop = cpsw_ndo_stop,
.ndo_start_xmit = cpsw_ndo_start_xmit,
.ndo_change_rx_flags = cpsw_ndo_change_rx_flags,
+ .ndo_set_mac_address = cpsw_ndo_set_mac_address,
.ndo_do_ioctl = cpsw_ndo_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
@@ -1416,6 +1640,29 @@ static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
return -EOPNOTSUPP;
}
+static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int slave_no = cpsw_slave_index(priv);
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (priv->slaves[slave_no].phy)
+ phy_ethtool_get_wol(priv->slaves[slave_no].phy, wol);
+}
+
+static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int slave_no = cpsw_slave_index(priv);
+
+ if (priv->slaves[slave_no].phy)
+ return phy_ethtool_set_wol(priv->slaves[slave_no].phy, wol);
+ else
+ return -EOPNOTSUPP;
+}
+
static const struct ethtool_ops cpsw_ethtool_ops = {
.get_drvinfo = cpsw_get_drvinfo,
.get_msglevel = cpsw_get_msglevel,
@@ -1426,6 +1673,11 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
.set_settings = cpsw_set_settings,
.get_coalesce = cpsw_get_coalesce,
.set_coalesce = cpsw_set_coalesce,
+ .get_sset_count = cpsw_get_sset_count,
+ .get_strings = cpsw_get_strings,
+ .get_ethtool_stats = cpsw_get_ethtool_stats,
+ .get_wol = cpsw_get_wol,
+ .set_wol = cpsw_set_wol,
};
static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
@@ -1623,6 +1875,7 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
priv_sl2->host_port = priv->host_port;
priv_sl2->host_port_regs = priv->host_port_regs;
priv_sl2->wr_regs = priv->wr_regs;
+ priv_sl2->hw_stats = priv->hw_stats;
priv_sl2->dma = priv->dma;
priv_sl2->txch = priv->txch;
priv_sl2->rxch = priv->rxch;
@@ -1780,7 +2033,8 @@ static int cpsw_probe(struct platform_device *pdev)
switch (priv->version) {
case CPSW_VERSION_1:
priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
- priv->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET;
+ priv->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET;
+ priv->hw_stats = ss_regs + CPSW1_HW_STATS;
dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET;
dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET;
ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET;
@@ -1790,8 +2044,11 @@ static int cpsw_probe(struct platform_device *pdev)
dma_params.desc_mem_phys = 0;
break;
case CPSW_VERSION_2:
+ case CPSW_VERSION_3:
+ case CPSW_VERSION_4:
priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
- priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET;
+ priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET;
+ priv->hw_stats = ss_regs + CPSW2_HW_STATS;
dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET;
dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET;
ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET;
diff --git a/include/linux/platform_data/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index bb3cd58d71e3..eb3e101ec048 100644
--- a/include/linux/platform_data/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -1,11 +1,10 @@
-/*
- * Texas Instruments Ethernet Switch Driver
+/* Texas Instruments Ethernet Switch Driver
*
- * Copyright (C) 2012 Texas Instruments
+ * Copyright (C) 2013 Texas Instruments
*
* This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
@@ -22,14 +21,13 @@ struct cpsw_slave_data {
int phy_if;
u8 mac_addr[ETH_ALEN];
u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */
-
};
struct cpsw_platform_data {
+ struct cpsw_slave_data *slave_data;
u32 ss_reg_ofs; /* Subsystem control register offset */
u32 channels; /* number of cpdma channels (symmetric) */
u32 slaves; /* number of slave cpgmac ports */
- struct cpsw_slave_data *slave_data;
u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
u32 cpts_clock_mult; /* convert input clock ticks to nanoseconds */
u32 cpts_clock_shift; /* convert input clock ticks to nanoseconds */
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 031ebc81b50c..90a79462c869 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -591,6 +591,7 @@ int cpdma_chan_get_stats(struct cpdma_chan *chan,
spin_unlock_irqrestore(&chan->lock, flags);
return 0;
}
+EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);
int cpdma_chan_dump(struct cpdma_chan *chan)
{
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 1a222bce4bd7..67df09ea9d04 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1761,7 +1761,7 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
const u8 *mac_addr;
if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node)
- return pdev->dev.platform_data;
+ return dev_get_platdata(&pdev->dev);
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 16ddfc348062..4ec92659a100 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -314,7 +314,7 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
static int davinci_mdio_probe(struct platform_device *pdev)
{
- struct mdio_platform_data *pdata = pdev->dev.platform_data;
+ struct mdio_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct device *dev = &pdev->dev;
struct davinci_mdio_data *data;
struct resource *res;
@@ -421,8 +421,7 @@ bail_out:
static int davinci_mdio_remove(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct davinci_mdio_data *data = dev_get_drvdata(dev);
+ struct davinci_mdio_data *data = platform_get_drvdata(pdev);
if (data->bus) {
mdiobus_unregister(data->bus);
@@ -434,8 +433,6 @@ static int davinci_mdio_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- dev_set_drvdata(dev, NULL);
-
kfree(data);
return 0;
diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig
index 098b1c42b393..4083ba8839e1 100644
--- a/drivers/net/ethernet/tile/Kconfig
+++ b/drivers/net/ethernet/tile/Kconfig
@@ -15,3 +15,14 @@ config TILE_NET
To compile this driver as a module, choose M here: the module
will be called tile_net.
+
+config PTP_1588_CLOCK_TILEGX
+ tristate "Tilera TILE-Gx mPIPE as PTP clock"
+ select PTP_1588_CLOCK
+ depends on TILE_NET
+ depends on TILEGX
+ ---help---
+ This driver adds support for using the mPIPE as a PTP
+ clock. This clock is only useful if your PTP programs are
+ getting hardware time stamps on the PTP Ethernet packets
+ using the SO_TIMESTAMPING API.
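
The SO_TIMESTAMPING API the help text refers to is driven from userspace roughly as follows; a minimal sketch, assuming a libc that exposes SO_TIMESTAMPING via <sys/socket.h> (see Documentation/networking/timestamping.txt for the full recipe):

	#include <sys/socket.h>
	#include <linux/net_tstamp.h>

	/* Ask the kernel for raw hardware receive timestamps on this socket. */
	static int example_enable_hw_rx_timestamps(int sock)
	{
		int flags = SOF_TIMESTAMPING_RX_HARDWARE |
			    SOF_TIMESTAMPING_RAW_HARDWARE;

		return setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
				  &flags, sizeof(flags));
	}
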
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index f3c2d034b32c..949076f4e6ae 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -36,7 +36,10 @@
#include <linux/io.h>
#include <linux/ctype.h>
#include <linux/ip.h>
+#include <linux/ipv6.h>
#include <linux/tcp.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
#include <asm/checksum.h>
#include <asm/homecache.h>
@@ -76,6 +79,9 @@
#define MAX_FRAGS (MAX_SKB_FRAGS + 1)
+/* The "kinds" of buffer stacks (small/large/jumbo). */
+#define MAX_KINDS 3
+
/* Size of completions data to allocate.
* ISSUE: Probably more than needed since we don't use all the channels.
*/
@@ -130,29 +136,31 @@ struct tile_net_tx_wake {
/* Info for a specific cpu. */
struct tile_net_info {
- /* The NAPI struct. */
- struct napi_struct napi;
- /* Packet queue. */
- gxio_mpipe_iqueue_t iqueue;
/* Our cpu. */
int my_cpu;
- /* True if iqueue is valid. */
- bool has_iqueue;
- /* NAPI flags. */
- bool napi_added;
- bool napi_enabled;
- /* Number of small sk_buffs which must still be provided. */
- unsigned int num_needed_small_buffers;
- /* Number of large sk_buffs which must still be provided. */
- unsigned int num_needed_large_buffers;
/* A timer for handling egress completions. */
struct hrtimer egress_timer;
/* True if "egress_timer" is scheduled. */
bool egress_timer_scheduled;
- /* Comps for each egress channel. */
- struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS];
- /* Transmit wake timer for each egress channel. */
- struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS];
+ struct info_mpipe {
+ /* Packet queue. */
+ gxio_mpipe_iqueue_t iqueue;
+ /* The NAPI struct. */
+ struct napi_struct napi;
+ /* Number of buffers (by kind) which must still be provided. */
+ unsigned int num_needed_buffers[MAX_KINDS];
+ /* instance id. */
+ int instance;
+ /* True if iqueue is valid. */
+ bool has_iqueue;
+ /* NAPI flags. */
+ bool napi_added;
+ bool napi_enabled;
+ /* Comps for each egress channel. */
+ struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS];
+ /* Transmit wake timer for each egress channel. */
+ struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS];
+ } mpipe[NR_MPIPE_MAX];
};
/* Info for egress on a particular egress channel. */
@@ -177,19 +185,67 @@ struct tile_net_priv {
int loopify_channel;
/* The egress channel (channel or loopify_channel). */
int echannel;
- /* Total stats. */
- struct net_device_stats stats;
+ /* mPIPE instance, 0 or 1. */
+ int instance;
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+ /* The timestamp config. */
+ struct hwtstamp_config stamp_cfg;
+#endif
};
-/* Egress info, indexed by "priv->echannel" (lazily created as needed). */
-static struct tile_net_egress egress_for_echannel[TILE_NET_CHANNELS];
+static struct mpipe_data {
+ /* The ingress irq. */
+ int ingress_irq;
-/* Devices currently associated with each channel.
- * NOTE: The array entry can become NULL after ifconfig down, but
- * we do not free the underlying net_device structures, so it is
- * safe to use a pointer after reading it from this array.
- */
-static struct net_device *tile_net_devs_for_channel[TILE_NET_CHANNELS];
+ /* The "context" for all devices. */
+ gxio_mpipe_context_t context;
+
+ /* Egress info, indexed by "priv->echannel"
+ * (lazily created as needed).
+ */
+ struct tile_net_egress
+ egress_for_echannel[TILE_NET_CHANNELS];
+
+ /* Devices currently associated with each channel.
+ * NOTE: The array entry can become NULL after ifconfig down, but
+ * we do not free the underlying net_device structures, so it is
+ * safe to use a pointer after reading it from this array.
+ */
+ struct net_device
+ *tile_net_devs_for_channel[TILE_NET_CHANNELS];
+
+ /* The actual memory allocated for the buffer stacks. */
+ void *buffer_stack_vas[MAX_KINDS];
+
+ /* The amount of memory allocated for each buffer stack. */
+ size_t buffer_stack_bytes[MAX_KINDS];
+
+ /* The first buffer stack index
+ * (small = +0, large = +1, jumbo = +2).
+ */
+ int first_buffer_stack;
+
+ /* The buckets. */
+ int first_bucket;
+ int num_buckets;
+
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+ /* PTP-specific data. */
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info caps;
+
+ /* Lock for ptp accessors. */
+ struct mutex ptp_lock;
+#endif
+
+} mpipe_data[NR_MPIPE_MAX] = {
+ [0 ... (NR_MPIPE_MAX - 1)] {
+ .ingress_irq = -1,
+ .first_buffer_stack = -1,
+ .first_bucket = -1,
+ .num_buckets = 1
+ }
+};
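
The [0 ... (NR_MPIPE_MAX - 1)] form above is GCC's designated range initializer: every element of the array gets the same non-default values, which matters here because the sentinels are -1 rather than 0. A standalone illustration of the construct:

	struct example {
		int irq;
		int bucket;
	};

	/* All four elements start as { .irq = -1, .bucket = -1 }. */
	static struct example examples[4] = {
		[0 ... 3] = { .irq = -1, .bucket = -1 },
	};
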
/* A mutex for "tile_net_devs_for_channel". */
static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);
@@ -197,34 +253,17 @@ static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);
/* The per-cpu info. */
static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info);
-/* The "context" for all devices. */
-static gxio_mpipe_context_t context;
-/* Buffer sizes and mpipe enum codes for buffer stacks.
+/* The buffer size enums for each buffer stack.
* See arch/tile/include/gxio/mpipe.h for the set of possible values.
+ * We avoid the "10384" size because it can induce "false chaining"
+ * on "cut-through" jumbo packets.
*/
-#define BUFFER_SIZE_SMALL_ENUM GXIO_MPIPE_BUFFER_SIZE_128
-#define BUFFER_SIZE_SMALL 128
-#define BUFFER_SIZE_LARGE_ENUM GXIO_MPIPE_BUFFER_SIZE_1664
-#define BUFFER_SIZE_LARGE 1664
-
-/* The small/large "buffer stacks". */
-static int small_buffer_stack = -1;
-static int large_buffer_stack = -1;
-
-/* Amount of memory allocated for each buffer stack. */
-static size_t buffer_stack_size;
-
-/* The actual memory allocated for the buffer stacks. */
-static void *small_buffer_stack_va;
-static void *large_buffer_stack_va;
-
-/* The buckets. */
-static int first_bucket = -1;
-static int num_buckets = 1;
-
-/* The ingress irq. */
-static int ingress_irq = -1;
+static gxio_mpipe_buffer_size_enum_t buffer_size_enums[MAX_KINDS] = {
+ GXIO_MPIPE_BUFFER_SIZE_128,
+ GXIO_MPIPE_BUFFER_SIZE_1664,
+ GXIO_MPIPE_BUFFER_SIZE_16384
+};
/* Text value of tile_net.cpus if passed as a module parameter. */
static char *network_cpus_string;
@@ -232,11 +271,21 @@ static char *network_cpus_string;
/* The actual cpus in "network_cpus". */
static struct cpumask network_cpus_map;
-/* If "loopify=LINK" was specified, this is "LINK". */
+/* If "tile_net.loopify=LINK" was specified, this is "LINK". */
static char *loopify_link_name;
-/* If "tile_net.custom" was specified, this is non-NULL. */
-static char *custom_str;
+/* If "tile_net.custom" was specified, this is true. */
+static bool custom_flag;
+
+/* If "tile_net.jumbo=NUM" was specified, this is "NUM". */
+static uint jumbo_num;
+
+/* Obtain mpipe instance from struct tile_net_priv given struct net_device. */
+static inline int mpipe_instance(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ return priv->instance;
+}
/* The "tile_net.cpus" argument specifies the cpus that are dedicated
* to handle ingress packets.
@@ -289,9 +338,15 @@ MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress");
/* The "tile_net.custom" argument causes us to ignore the "conventional"
* classifier metadata, in particular, the "l2_offset".
*/
-module_param_named(custom, custom_str, charp, 0444);
+module_param_named(custom, custom_flag, bool, 0444);
MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier");
+/* The "tile_net.jumbo" argument causes us to support "jumbo" packets,
+ * and to allocate the given number of "jumbo" buffers.
+ */
+module_param_named(jumbo, jumbo_num, uint, 0444);
+MODULE_PARM_DESC(jumbo, "the number of buffers to support jumbo packets");
+
/* Atomically update a statistics field.
* Note that on TILE-Gx, this operation is fire-and-forget on the
* issuing core (single-cycle dispatch) and takes only a few cycles
@@ -305,15 +360,16 @@ static void tile_net_stats_add(unsigned long value, unsigned long *field)
}
/* Allocate and push a buffer. */
-static bool tile_net_provide_buffer(bool small)
+static bool tile_net_provide_buffer(int instance, int kind)
{
- int stack = small ? small_buffer_stack : large_buffer_stack;
+ struct mpipe_data *md = &mpipe_data[instance];
+ gxio_mpipe_buffer_size_enum_t bse = buffer_size_enums[kind];
+ size_t bs = gxio_mpipe_buffer_size_enum_to_buffer_size(bse);
const unsigned long buffer_alignment = 128;
struct sk_buff *skb;
int len;
- len = sizeof(struct sk_buff **) + buffer_alignment;
- len += (small ? BUFFER_SIZE_SMALL : BUFFER_SIZE_LARGE);
+ len = sizeof(struct sk_buff **) + buffer_alignment + bs;
skb = dev_alloc_skb(len);
if (skb == NULL)
return false;
@@ -328,7 +384,7 @@ static bool tile_net_provide_buffer(bool small)
/* Make sure "skb" and the back-pointer have been flushed. */
wmb();
- gxio_mpipe_push_buffer(&context, stack,
+ gxio_mpipe_push_buffer(&md->context, md->first_buffer_stack + kind,
(void *)va_to_tile_io_addr(skb->data));
return true;
@@ -354,11 +410,14 @@ static struct sk_buff *mpipe_buf_to_skb(void *va)
return skb;
}
-static void tile_net_pop_all_buffers(int stack)
+static void tile_net_pop_all_buffers(int instance, int stack)
{
+ struct mpipe_data *md = &mpipe_data[instance];
+
for (;;) {
tile_io_addr_t addr =
- (tile_io_addr_t)gxio_mpipe_pop_buffer(&context, stack);
+ (tile_io_addr_t)gxio_mpipe_pop_buffer(&md->context,
+ stack);
if (addr == 0)
break;
dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr)));
@@ -369,24 +428,111 @@ static void tile_net_pop_all_buffers(int stack)
static void tile_net_provide_needed_buffers(void)
{
struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ int instance, kind;
+ for (instance = 0; instance < NR_MPIPE_MAX &&
+ info->mpipe[instance].has_iqueue; instance++) {
+ for (kind = 0; kind < MAX_KINDS; kind++) {
+ while (info->mpipe[instance].num_needed_buffers[kind]
+ != 0) {
+ if (!tile_net_provide_buffer(instance, kind)) {
+ pr_notice("Tile %d still needs"
+ " some buffers\n",
+ info->my_cpu);
+ return;
+ }
+ info->mpipe[instance].
+ num_needed_buffers[kind]--;
+ }
+ }
+ }
+}
- while (info->num_needed_small_buffers != 0) {
- if (!tile_net_provide_buffer(true))
- goto oops;
- info->num_needed_small_buffers--;
+/* Get RX timestamp, and store it in the skb. */
+static void tile_rx_timestamp(struct tile_net_priv *priv, struct sk_buff *skb,
+ gxio_mpipe_idesc_t *idesc)
+{
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+ if (unlikely(priv->stamp_cfg.rx_filter != HWTSTAMP_FILTER_NONE)) {
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ktime_set(idesc->time_stamp_sec,
+ idesc->time_stamp_ns);
}
+#endif
+}
- while (info->num_needed_large_buffers != 0) {
- if (!tile_net_provide_buffer(false))
- goto oops;
- info->num_needed_large_buffers--;
+/* Get TX timestamp, and store it in the skb. */
+static void tile_tx_timestamp(struct sk_buff *skb, int instance)
+{
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+ struct skb_shared_info *shtx = skb_shinfo(skb);
+ if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) {
+ struct mpipe_data *md = &mpipe_data[instance];
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct timespec ts;
+
+ shtx->tx_flags |= SKBTX_IN_PROGRESS;
+ gxio_mpipe_get_timestamp(&md->context, &ts);
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ skb_tstamp_tx(skb, &shhwtstamps);
}
+#endif
+}
- return;
+/* Use ioctl() to enable or disable TX or RX timestamping. */
+static int tile_hwtstamp_ioctl(struct net_device *dev, struct ifreq *rq,
+ int cmd)
+{
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+ struct hwtstamp_config config;
+ struct tile_net_priv *priv = netdev_priv(dev);
-oops:
- /* Add a description to the page allocation failure dump. */
- pr_notice("Tile %d still needs some buffers\n", info->my_cpu);
+ if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ if (config.flags) /* reserved for future extensions */
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ priv->stamp_cfg = config;
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
}
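
The ioctl above implements the standard SIOCSHWTSTAMP handshake, so existing PTP tooling drives it unchanged. A hedged userspace sketch of that handshake (example_hwtstamp_on is illustrative, not part of this driver):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	static int example_hwtstamp_on(int sock, const char *ifname)
	{
		struct hwtstamp_config cfg = {
			.tx_type   = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_ALL,
		};
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name) - 1);
		ifr.ifr_data = (char *)&cfg;

		/* The driver may upgrade rx_filter and copies the result back. */
		return ioctl(sock, SIOCSHWTSTAMP, &ifr);
	}
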
static inline bool filter_packet(struct net_device *dev, void *buf)
@@ -398,7 +544,7 @@ static inline bool filter_packet(struct net_device *dev, void *buf)
/* Filter out packets that aren't for us. */
if (!(dev->flags & IFF_PROMISC) &&
!is_multicast_ether_addr(buf) &&
- compare_ether_addr(dev->dev_addr, buf) != 0)
+ !ether_addr_equal(dev->dev_addr, buf))
return true;
return false;
@@ -409,6 +555,7 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
{
struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
struct tile_net_priv *priv = netdev_priv(dev);
+ int instance = priv->instance;
/* Encode the actual packet length. */
skb_put(skb, len);
@@ -419,47 +566,52 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
if (idesc->cs && idesc->csum_seed_val == 0xFFFF)
skb->ip_summed = CHECKSUM_UNNECESSARY;
- netif_receive_skb(skb);
+ /* Get RX timestamp from idesc. */
+ tile_rx_timestamp(priv, skb, idesc);
+
+ napi_gro_receive(&info->mpipe[instance].napi, skb);
/* Update stats. */
- tile_net_stats_add(1, &priv->stats.rx_packets);
- tile_net_stats_add(len, &priv->stats.rx_bytes);
+ tile_net_stats_add(1, &dev->stats.rx_packets);
+ tile_net_stats_add(len, &dev->stats.rx_bytes);
/* Need a new buffer. */
- if (idesc->size == BUFFER_SIZE_SMALL_ENUM)
- info->num_needed_small_buffers++;
+ if (idesc->size == buffer_size_enums[0])
+ info->mpipe[instance].num_needed_buffers[0]++;
+ else if (idesc->size == buffer_size_enums[1])
+ info->mpipe[instance].num_needed_buffers[1]++;
else
- info->num_needed_large_buffers++;
+ info->mpipe[instance].num_needed_buffers[2]++;
}
/* Handle a packet. Return true if "processed", false if "filtered". */
-static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
+static bool tile_net_handle_packet(int instance, gxio_mpipe_idesc_t *idesc)
{
struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
- struct net_device *dev = tile_net_devs_for_channel[idesc->channel];
+ struct mpipe_data *md = &mpipe_data[instance];
+ struct net_device *dev = md->tile_net_devs_for_channel[idesc->channel];
uint8_t l2_offset;
void *va;
void *buf;
unsigned long len;
bool filter;
- /* Drop packets for which no buffer was available.
- * NOTE: This happens under heavy load.
+ /* Drop packets for which no buffer was available (which can
+ * happen under heavy load), or for which the me/tr/ce flags
+ * are set (which can happen for jumbo cut-through packets,
+ * or with a customized classifier).
*/
- if (idesc->be) {
- struct tile_net_priv *priv = netdev_priv(dev);
- tile_net_stats_add(1, &priv->stats.rx_dropped);
- gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
- if (net_ratelimit())
- pr_info("Dropping packet (insufficient buffers).\n");
- return false;
+ if (idesc->be || idesc->me || idesc->tr || idesc->ce) {
+ if (dev)
+ tile_net_stats_add(1, &dev->stats.rx_errors);
+ goto drop;
}
/* Get the "l2_offset", if allowed. */
- l2_offset = custom_str ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc);
+ l2_offset = custom_flag ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc);
- /* Get the raw buffer VA (includes "headroom"). */
- va = tile_io_addr_to_va((unsigned long)(long)idesc->va);
+ /* Get the VA (including NET_IP_ALIGN bytes of "headroom"). */
+ va = tile_io_addr_to_va((unsigned long)idesc->va);
/* Get the actual packet start/length. */
buf = va + l2_offset;
@@ -470,7 +622,10 @@ static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
filter = filter_packet(dev, buf);
if (filter) {
- gxio_mpipe_iqueue_drop(&info->iqueue, idesc);
+ if (dev)
+ tile_net_stats_add(1, &dev->stats.rx_dropped);
+drop:
+ gxio_mpipe_iqueue_drop(&info->mpipe[instance].iqueue, idesc);
} else {
struct sk_buff *skb = mpipe_buf_to_skb(va);
@@ -480,7 +635,7 @@ static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
tile_net_receive_skb(dev, skb, idesc, len);
}
- gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
+ gxio_mpipe_iqueue_consume(&info->mpipe[instance].iqueue, idesc);
return !filter;
}
@@ -501,14 +656,20 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
unsigned int work = 0;
gxio_mpipe_idesc_t *idesc;
- int i, n;
-
- /* Process packets. */
- while ((n = gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc)) > 0) {
+ int instance, i, n;
+ struct mpipe_data *md;
+ struct info_mpipe *info_mpipe =
+ container_of(napi, struct info_mpipe, napi);
+
+ instance = info_mpipe->instance;
+ while ((n = gxio_mpipe_iqueue_try_peek(
+ &info_mpipe->iqueue,
+ &idesc)) > 0) {
for (i = 0; i < n; i++) {
if (i == TILE_NET_BATCH)
goto done;
- if (tile_net_handle_packet(idesc + i)) {
+ if (tile_net_handle_packet(instance,
+ idesc + i)) {
if (++work >= budget)
goto done;
}
@@ -516,14 +677,16 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
}
/* There are no packets left. */
- napi_complete(&info->napi);
+ napi_complete(&info_mpipe->napi);
+ md = &mpipe_data[instance];
/* Re-enable hypervisor interrupts. */
- gxio_mpipe_enable_notif_ring_interrupt(&context, info->iqueue.ring);
+ gxio_mpipe_enable_notif_ring_interrupt(
+ &md->context, info->mpipe[instance].iqueue.ring);
/* HACK: Avoid the "rotting packet" problem. */
- if (gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc) > 0)
- napi_schedule(&info->napi);
+ if (gxio_mpipe_iqueue_try_peek(&info_mpipe->iqueue, &idesc) > 0)
+ napi_schedule(&info_mpipe->napi);
/* ISSUE: Handle completions? */
@@ -533,11 +696,11 @@ done:
return work;
}
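
Worth pausing on the poll routine above: it now recovers all of its per-instance state from the embedded NAPI struct via container_of(), instead of from a global. Below is a minimal stand-alone model of that pointer recovery; the struct names mirror the driver's, but the code is illustrative, not the kernel's.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct napi_struct { int weight; };

struct info_mpipe {
	int instance;
	struct napi_struct napi;	/* embedded member */
};

static void poll(struct napi_struct *napi)
{
	/* Walk back from the member to the enclosing structure. */
	struct info_mpipe *im = container_of(napi, struct info_mpipe, napi);

	printf("polling mpipe instance %d\n", im->instance);
}

int main(void)
{
	struct info_mpipe im = { .instance = 1, .napi = { .weight = 64 } };

	poll(&im.napi);		/* prints "polling mpipe instance 1" */
	return 0;
}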
-/* Handle an ingress interrupt on the current cpu. */
-static irqreturn_t tile_net_handle_ingress_irq(int irq, void *unused)
+/* Handle an ingress interrupt from an instance on the current cpu. */
+static irqreturn_t tile_net_handle_ingress_irq(int irq, void *id)
{
struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
- napi_schedule(&info->napi);
+ napi_schedule(&info->mpipe[(uint64_t)id].napi);
return IRQ_HANDLED;
}
@@ -579,7 +742,9 @@ static void tile_net_schedule_tx_wake_timer(struct net_device *dev,
{
struct tile_net_info *info = &per_cpu(per_cpu_info, tx_queue_idx);
struct tile_net_priv *priv = netdev_priv(dev);
- struct tile_net_tx_wake *tx_wake = &info->tx_wake[priv->echannel];
+ int instance = priv->instance;
+ struct tile_net_tx_wake *tx_wake =
+ &info->mpipe[instance].tx_wake[priv->echannel];
hrtimer_start(&tx_wake->timer,
ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL),
@@ -617,7 +782,7 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
unsigned long irqflags;
bool pending = false;
- int i;
+ int i, instance;
local_irq_save(irqflags);
@@ -625,13 +790,19 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
info->egress_timer_scheduled = false;
/* Free all possible comps for this tile. */
- for (i = 0; i < TILE_NET_CHANNELS; i++) {
- struct tile_net_egress *egress = &egress_for_echannel[i];
- struct tile_net_comps *comps = info->comps_for_echannel[i];
- if (comps->comp_last >= comps->comp_next)
- continue;
- tile_net_free_comps(egress->equeue, comps, -1, true);
- pending = pending || (comps->comp_last < comps->comp_next);
+ for (instance = 0; instance < NR_MPIPE_MAX &&
+ info->mpipe[instance].has_iqueue; instance++) {
+ for (i = 0; i < TILE_NET_CHANNELS; i++) {
+ struct tile_net_egress *egress =
+ &mpipe_data[instance].egress_for_echannel[i];
+ struct tile_net_comps *comps =
+ info->mpipe[instance].comps_for_echannel[i];
+ if (!egress || comps->comp_last >= comps->comp_next)
+ continue;
+ tile_net_free_comps(egress->equeue, comps, -1, true);
+ pending = pending ||
+ (comps->comp_last < comps->comp_next);
+ }
}
/* Reschedule timer if needed. */
@@ -643,37 +814,112 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
return HRTIMER_NORESTART;
}
-/* Helper function for "tile_net_update()".
- * "dev" (i.e. arg) is the device being brought up or down,
- * or NULL if all devices are now down.
- */
-static void tile_net_update_cpu(void *arg)
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+
+/* PTP clock operations. */
+
+static int ptp_mpipe_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
- struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
- struct net_device *dev = arg;
+ int ret = 0;
+ struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
+ mutex_lock(&md->ptp_lock);
+ if (gxio_mpipe_adjust_timestamp_freq(&md->context, ppb))
+ ret = -EINVAL;
+ mutex_unlock(&md->ptp_lock);
+ return ret;
+}
- if (!info->has_iqueue)
- return;
+static int ptp_mpipe_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ int ret = 0;
+ struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
+ mutex_lock(&md->ptp_lock);
+ if (gxio_mpipe_adjust_timestamp(&md->context, delta))
+ ret = -EBUSY;
+ mutex_unlock(&md->ptp_lock);
+ return ret;
+}
- if (dev != NULL) {
- if (!info->napi_added) {
- netif_napi_add(dev, &info->napi,
- tile_net_poll, TILE_NET_WEIGHT);
- info->napi_added = true;
- }
- if (!info->napi_enabled) {
- napi_enable(&info->napi);
- info->napi_enabled = true;
- }
- enable_percpu_irq(ingress_irq, 0);
- } else {
- disable_percpu_irq(ingress_irq);
- if (info->napi_enabled) {
- napi_disable(&info->napi);
- info->napi_enabled = false;
- }
- /* FIXME: Drain the iqueue. */
- }
+static int ptp_mpipe_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ int ret = 0;
+ struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
+ mutex_lock(&md->ptp_lock);
+ if (gxio_mpipe_get_timestamp(&md->context, ts))
+ ret = -EBUSY;
+ mutex_unlock(&md->ptp_lock);
+ return ret;
+}
+
+static int ptp_mpipe_settime(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ int ret = 0;
+ struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
+ mutex_lock(&md->ptp_lock);
+ if (gxio_mpipe_set_timestamp(&md->context, ts))
+ ret = -EBUSY;
+ mutex_unlock(&md->ptp_lock);
+ return ret;
+}
+
+static int ptp_mpipe_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *request, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info ptp_mpipe_caps = {
+ .owner = THIS_MODULE,
+ .name = "mPIPE clock",
+ .max_adj = 999999999,
+ .n_ext_ts = 0,
+ .pps = 0,
+ .adjfreq = ptp_mpipe_adjfreq,
+ .adjtime = ptp_mpipe_adjtime,
+ .gettime = ptp_mpipe_gettime,
+ .settime = ptp_mpipe_settime,
+ .enable = ptp_mpipe_enable,
+};
+
+#endif /* CONFIG_PTP_1588_CLOCK_TILEGX */
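
Each PTP callback above follows one template: take ptp_lock, forward to the corresponding gxio hypervisor call, and collapse any failure into a single errno. A stand-alone sketch of that template follows, with a pthread mutex in place of the kernel mutex and a dummy backend in place of gxio; all names and the range check are illustrative, not the real gxio API.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct clock_backend {
	pthread_mutex_t lock;
	long long ppb;			/* current frequency offset */
};

/* Stand-in for gxio_mpipe_adjust_timestamp_freq(): nonzero on failure. */
static int backend_adjust_freq(struct clock_backend *b, long ppb)
{
	if (ppb < -999999999L || ppb > 999999999L)
		return -1;
	b->ppb = ppb;
	return 0;
}

static int clock_adjfreq(struct clock_backend *b, long ppb)
{
	int ret = 0;

	pthread_mutex_lock(&b->lock);
	if (backend_adjust_freq(b, ppb))
		ret = -EINVAL;		/* same mapping as ptp_mpipe_adjfreq() */
	pthread_mutex_unlock(&b->lock);
	return ret;
}

int main(void)
{
	struct clock_backend b = { PTHREAD_MUTEX_INITIALIZER, 0 };

	printf("%d %d\n", clock_adjfreq(&b, 1000),
	       clock_adjfreq(&b, 2000000000L));	/* "0 -22" */
	return 0;
}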
+
+/* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */
+static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
+{
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+ struct timespec ts;
+
+ getnstimeofday(&ts);
+ gxio_mpipe_set_timestamp(&md->context, &ts);
+
+ mutex_init(&md->ptp_lock);
+ md->caps = ptp_mpipe_caps;
+ md->ptp_clock = ptp_clock_register(&md->caps, NULL);
+ if (IS_ERR(md->ptp_clock))
+ netdev_err(dev, "ptp_clock_register failed %ld\n",
+ PTR_ERR(md->ptp_clock));
+#endif
+}
+
+/* Initialize PTP fields in a new device. */
+static void init_ptp_dev(struct tile_net_priv *priv)
+{
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+ priv->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ priv->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
+#endif
+}
+
+/* Helper functions for "tile_net_update()". */
+static void enable_ingress_irq(void *irq)
+{
+ enable_percpu_irq((long)irq, 0);
+}
+
+static void disable_ingress_irq(void *irq)
+{
+ disable_percpu_irq((long)irq);
}
/* Helper function for tile_net_open() and tile_net_stop().
@@ -683,19 +929,22 @@ static int tile_net_update(struct net_device *dev)
{
static gxio_mpipe_rules_t rules; /* too big to fit on the stack */
bool saw_channel = false;
+ int instance = mpipe_instance(dev);
+ struct mpipe_data *md = &mpipe_data[instance];
int channel;
int rc;
int cpu;
- gxio_mpipe_rules_init(&rules, &context);
+ saw_channel = false;
+ gxio_mpipe_rules_init(&rules, &md->context);
for (channel = 0; channel < TILE_NET_CHANNELS; channel++) {
- if (tile_net_devs_for_channel[channel] == NULL)
+ if (md->tile_net_devs_for_channel[channel] == NULL)
continue;
if (!saw_channel) {
saw_channel = true;
- gxio_mpipe_rules_begin(&rules, first_bucket,
- num_buckets, NULL);
+ gxio_mpipe_rules_begin(&rules, md->first_bucket,
+ md->num_buckets, NULL);
gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN);
}
gxio_mpipe_rules_add_channel(&rules, channel);
@@ -706,102 +955,150 @@ static int tile_net_update(struct net_device *dev)
*/
rc = gxio_mpipe_rules_commit(&rules);
if (rc != 0) {
- netdev_warn(dev, "gxio_mpipe_rules_commit failed: %d\n", rc);
+ netdev_warn(dev, "gxio_mpipe_rules_commit: mpipe[%d] %d\n",
+ instance, rc);
return -EIO;
}
- /* Update all cpus, sequentially (to protect "netif_napi_add()"). */
- for_each_online_cpu(cpu)
- smp_call_function_single(cpu, tile_net_update_cpu,
- (saw_channel ? dev : NULL), 1);
+ /* Update all cpus, sequentially (to protect "netif_napi_add()").
+ * We use on_each_cpu to handle the IPI mask or unmask.
+ */
+ if (!saw_channel)
+ on_each_cpu(disable_ingress_irq,
+ (void *)(long)(md->ingress_irq), 1);
+ for_each_online_cpu(cpu) {
+ struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+
+ if (!info->mpipe[instance].has_iqueue)
+ continue;
+ if (saw_channel) {
+ if (!info->mpipe[instance].napi_added) {
+ netif_napi_add(dev, &info->mpipe[instance].napi,
+ tile_net_poll, TILE_NET_WEIGHT);
+ info->mpipe[instance].napi_added = true;
+ }
+ if (!info->mpipe[instance].napi_enabled) {
+ napi_enable(&info->mpipe[instance].napi);
+ info->mpipe[instance].napi_enabled = true;
+ }
+ } else {
+ if (info->mpipe[instance].napi_enabled) {
+ napi_disable(&info->mpipe[instance].napi);
+ info->mpipe[instance].napi_enabled = false;
+ }
+ /* FIXME: Drain the iqueue. */
+ }
+ }
+ if (saw_channel)
+ on_each_cpu(enable_ingress_irq,
+ (void *)(long)(md->ingress_irq), 1);
/* HACK: Allow packets to flow in the simulator. */
if (saw_channel)
- sim_enable_mpipe_links(0, -1);
+ sim_enable_mpipe_links(instance, -1);
return 0;
}
-/* Allocate and initialize mpipe buffer stacks, and register them in
- * the mPIPE TLBs, for both small and large packet sizes.
- * This routine supports tile_net_init_mpipe(), below.
- */
-static int init_buffer_stacks(struct net_device *dev, int num_buffers)
+/* Initialize a buffer stack. */
+static int create_buffer_stack(struct net_device *dev,
+ int kind, size_t num_buffers)
{
pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH);
- int rc;
+ int instance = mpipe_instance(dev);
+ struct mpipe_data *md = &mpipe_data[instance];
+ size_t needed = gxio_mpipe_calc_buffer_stack_bytes(num_buffers);
+ int stack_idx = md->first_buffer_stack + kind;
+ void *va;
+ int i, rc;
- /* Compute stack bytes; we round up to 64KB and then use
- * alloc_pages() so we get the required 64KB alignment as well.
+ /* Round up to 64KB and then use alloc_pages() so we get the
+ * required 64KB alignment.
*/
- buffer_stack_size =
- ALIGN(gxio_mpipe_calc_buffer_stack_bytes(num_buffers),
- 64 * 1024);
+ md->buffer_stack_bytes[kind] =
+ ALIGN(needed, 64 * 1024);
- /* Allocate two buffer stack indices. */
- rc = gxio_mpipe_alloc_buffer_stacks(&context, 2, 0, 0);
- if (rc < 0) {
- netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks failed: %d\n",
- rc);
- return rc;
- }
- small_buffer_stack = rc;
- large_buffer_stack = rc + 1;
-
- /* Allocate the small memory stack. */
- small_buffer_stack_va =
- alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
- if (small_buffer_stack_va == NULL) {
+ va = alloc_pages_exact(md->buffer_stack_bytes[kind], GFP_KERNEL);
+ if (va == NULL) {
netdev_err(dev,
- "Could not alloc %zd bytes for buffer stacks\n",
- buffer_stack_size);
+ "Could not alloc %zd bytes for buffer stack %d\n",
+ md->buffer_stack_bytes[kind], kind);
return -ENOMEM;
}
- rc = gxio_mpipe_init_buffer_stack(&context, small_buffer_stack,
- BUFFER_SIZE_SMALL_ENUM,
- small_buffer_stack_va,
- buffer_stack_size, 0);
+
+ /* Initialize the buffer stack. */
+ rc = gxio_mpipe_init_buffer_stack(&md->context, stack_idx,
+ buffer_size_enums[kind], va,
+ md->buffer_stack_bytes[kind], 0);
if (rc != 0) {
- netdev_err(dev, "gxio_mpipe_init_buffer_stack: %d\n", rc);
+ netdev_err(dev, "gxio_mpipe_init_buffer_stack: mpipe[%d] %d\n",
+ instance, rc);
+ free_pages_exact(va, md->buffer_stack_bytes[kind]);
return rc;
}
- rc = gxio_mpipe_register_client_memory(&context, small_buffer_stack,
+
+ md->buffer_stack_vas[kind] = va;
+
+ rc = gxio_mpipe_register_client_memory(&md->context, stack_idx,
hash_pte, 0);
if (rc != 0) {
netdev_err(dev,
- "gxio_mpipe_register_buffer_memory failed: %d\n",
- rc);
+ "gxio_mpipe_register_client_memory: mpipe[%d] %d\n",
+ instance, rc);
return rc;
}
- /* Allocate the large buffer stack. */
- large_buffer_stack_va =
- alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
- if (large_buffer_stack_va == NULL) {
- netdev_err(dev,
- "Could not alloc %zd bytes for buffer stacks\n",
- buffer_stack_size);
- return -ENOMEM;
- }
- rc = gxio_mpipe_init_buffer_stack(&context, large_buffer_stack,
- BUFFER_SIZE_LARGE_ENUM,
- large_buffer_stack_va,
- buffer_stack_size, 0);
- if (rc != 0) {
- netdev_err(dev, "gxio_mpipe_init_buffer_stack failed: %d\n",
- rc);
- return rc;
+ /* Provide initial buffers. */
+ for (i = 0; i < num_buffers; i++) {
+ if (!tile_net_provide_buffer(instance, kind)) {
+ netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
+ return -ENOMEM;
+ }
}
- rc = gxio_mpipe_register_client_memory(&context, large_buffer_stack,
- hash_pte, 0);
- if (rc != 0) {
+
+ return 0;
+}
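
create_buffer_stack() above rounds the computed stack size up to a 64KB multiple and then relies on alloc_pages_exact() for the 64KB alignment mPIPE requires. The rounding itself is the standard power-of-two ALIGN idiom; a self-contained demonstration with a hypothetical input size:

#include <stdio.h>

/* Round x up to the next multiple of the power-of-two a. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long needed = 130000;	/* hypothetical size from the calc call */
	unsigned long bytes = ALIGN(needed, 64 * 1024);

	printf("%lu -> %lu\n", needed, bytes);	/* prints "130000 -> 131072" */
	return 0;
}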
+
+/* Allocate and initialize mpipe buffer stacks, and register them in
+ * the mPIPE TLBs, for small, large, and (possibly) jumbo packet sizes.
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int init_buffer_stacks(struct net_device *dev,
+ int network_cpus_count)
+{
+ int num_kinds = MAX_KINDS - (jumbo_num == 0);
+ size_t num_buffers;
+ int rc;
+ int instance = mpipe_instance(dev);
+ struct mpipe_data *md = &mpipe_data[instance];
+
+ /* Allocate the buffer stacks. */
+ rc = gxio_mpipe_alloc_buffer_stacks(&md->context, num_kinds, 0, 0);
+ if (rc < 0) {
netdev_err(dev,
- "gxio_mpipe_register_buffer_memory failed: %d\n",
- rc);
+ "gxio_mpipe_alloc_buffer_stacks: mpipe[%d] %d\n",
+ instance, rc);
return rc;
}
+ md->first_buffer_stack = rc;
- return 0;
+ /* Enough small/large buffers to (normally) avoid buffer errors. */
+ num_buffers =
+ network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH);
+
+ /* Allocate the small memory stack. */
+ if (rc >= 0)
+ rc = create_buffer_stack(dev, 0, num_buffers);
+
+ /* Allocate the large buffer stack. */
+ if (rc >= 0)
+ rc = create_buffer_stack(dev, 1, num_buffers);
+
+ /* Allocate the jumbo buffer stack if needed. */
+ if (rc >= 0 && jumbo_num != 0)
+ rc = create_buffer_stack(dev, 2, jumbo_num);
+
+ return rc;
}
/* Allocate per-cpu resources (memory for completions and idescs).
@@ -812,6 +1109,8 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev,
{
struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
int order, i, rc;
+ int instance = mpipe_instance(dev);
+ struct mpipe_data *md = &mpipe_data[instance];
struct page *page;
void *addr;
@@ -826,7 +1125,7 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev,
addr = pfn_to_kaddr(page_to_pfn(page));
memset(addr, 0, COMPS_SIZE);
for (i = 0; i < TILE_NET_CHANNELS; i++)
- info->comps_for_echannel[i] =
+ info->mpipe[instance].comps_for_echannel[i] =
addr + i * sizeof(struct tile_net_comps);
/* If this is a network cpu, create an iqueue. */
@@ -840,14 +1139,15 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev,
return -ENOMEM;
}
addr = pfn_to_kaddr(page_to_pfn(page));
- rc = gxio_mpipe_iqueue_init(&info->iqueue, &context, ring++,
- addr, NOTIF_RING_SIZE, 0);
+ rc = gxio_mpipe_iqueue_init(&info->mpipe[instance].iqueue,
+ &md->context, ring++, addr,
+ NOTIF_RING_SIZE, 0);
if (rc < 0) {
netdev_err(dev,
"gxio_mpipe_iqueue_init failed: %d\n", rc);
return rc;
}
- info->has_iqueue = true;
+ info->mpipe[instance].has_iqueue = true;
}
return ring;
@@ -860,40 +1160,41 @@ static int init_notif_group_and_buckets(struct net_device *dev,
int ring, int network_cpus_count)
{
int group, rc;
+ int instance = mpipe_instance(dev);
+ struct mpipe_data *md = &mpipe_data[instance];
/* Allocate one NotifGroup. */
- rc = gxio_mpipe_alloc_notif_groups(&context, 1, 0, 0);
+ rc = gxio_mpipe_alloc_notif_groups(&md->context, 1, 0, 0);
if (rc < 0) {
- netdev_err(dev, "gxio_mpipe_alloc_notif_groups failed: %d\n",
- rc);
+ netdev_err(dev, "gxio_mpipe_alloc_notif_groups: mpipe[%d] %d\n",
+ instance, rc);
return rc;
}
group = rc;
/* Initialize global num_buckets value. */
if (network_cpus_count > 4)
- num_buckets = 256;
+ md->num_buckets = 256;
else if (network_cpus_count > 1)
- num_buckets = 16;
+ md->num_buckets = 16;
/* Allocate some buckets, and set global first_bucket value. */
- rc = gxio_mpipe_alloc_buckets(&context, num_buckets, 0, 0);
+ rc = gxio_mpipe_alloc_buckets(&md->context, md->num_buckets, 0, 0);
if (rc < 0) {
- netdev_err(dev, "gxio_mpipe_alloc_buckets failed: %d\n", rc);
+ netdev_err(dev, "gxio_mpipe_alloc_buckets: mpipe[%d] %d\n",
+ instance, rc);
return rc;
}
- first_bucket = rc;
+ md->first_bucket = rc;
/* Init group and buckets. */
rc = gxio_mpipe_init_notif_group_and_buckets(
- &context, group, ring, network_cpus_count,
- first_bucket, num_buckets,
+ &md->context, group, ring, network_cpus_count,
+ md->first_bucket, md->num_buckets,
GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY);
if (rc != 0) {
- netdev_err(
- dev,
- "gxio_mpipe_init_notif_group_and_buckets failed: %d\n",
- rc);
+ netdev_err(dev, "gxio_mpipe_init_notif_group_and_buckets: "
+ "mpipe[%d] %d\n", instance, rc);
return rc;
}
@@ -907,30 +1208,39 @@ static int init_notif_group_and_buckets(struct net_device *dev,
*/
static int tile_net_setup_interrupts(struct net_device *dev)
{
- int cpu, rc;
+ int cpu, rc, irq;
+ int instance = mpipe_instance(dev);
+ struct mpipe_data *md = &mpipe_data[instance];
+
+ irq = md->ingress_irq;
+ if (irq < 0) {
+ irq = create_irq();
+ if (irq < 0) {
+ netdev_err(dev,
+ "create_irq failed: mpipe[%d] %d\n",
+ instance, irq);
+ return irq;
+ }
+ tile_irq_activate(irq, TILE_IRQ_PERCPU);
- rc = create_irq();
- if (rc < 0) {
- netdev_err(dev, "create_irq failed: %d\n", rc);
- return rc;
- }
- ingress_irq = rc;
- tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU);
- rc = request_irq(ingress_irq, tile_net_handle_ingress_irq,
- 0, "tile_net", NULL);
- if (rc != 0) {
- netdev_err(dev, "request_irq failed: %d\n", rc);
- destroy_irq(ingress_irq);
- ingress_irq = -1;
- return rc;
+ rc = request_irq(irq, tile_net_handle_ingress_irq,
+ 0, "tile_net", (void *)((uint64_t)instance));
+
+ if (rc != 0) {
+ netdev_err(dev, "request_irq failed: mpipe[%d] %d\n",
+ instance, rc);
+ destroy_irq(irq);
+ return rc;
+ }
+ md->ingress_irq = irq;
}
for_each_online_cpu(cpu) {
struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
- if (info->has_iqueue) {
- gxio_mpipe_request_notif_ring_interrupt(
- &context, cpu_x(cpu), cpu_y(cpu),
- KERNEL_PL, ingress_irq, info->iqueue.ring);
+ if (info->mpipe[instance].has_iqueue) {
+ gxio_mpipe_request_notif_ring_interrupt(&md->context,
+ cpu_x(cpu), cpu_y(cpu), KERNEL_PL, irq,
+ info->mpipe[instance].iqueue.ring);
}
}
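
Note how the instance number travels through request_irq() above: it is cast into the opaque cookie at registration time, and tile_net_handle_ingress_irq() (earlier in this patch) casts it back to index info->mpipe[]. The round-trip, modeled as stand-alone C with illustrative names:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the interrupt handler: recovers the integer instance. */
static void ingress_irq(int irq, void *id)
{
	int instance = (int)(uint64_t)id;

	printf("irq %d -> mpipe[%d]\n", irq, instance);
}

int main(void)
{
	int instance = 1;

	/* Registration side: the integer is smuggled through the void *. */
	ingress_irq(42, (void *)(uint64_t)instance);
	return 0;
}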
@@ -938,39 +1248,45 @@ static int tile_net_setup_interrupts(struct net_device *dev)
}
/* Undo any state set up partially by a failed call to tile_net_init_mpipe. */
-static void tile_net_init_mpipe_fail(void)
+static void tile_net_init_mpipe_fail(int instance)
{
- int cpu;
+ int kind, cpu;
+ struct mpipe_data *md = &mpipe_data[instance];
/* Do cleanups that require the mpipe context first. */
- if (small_buffer_stack >= 0)
- tile_net_pop_all_buffers(small_buffer_stack);
- if (large_buffer_stack >= 0)
- tile_net_pop_all_buffers(large_buffer_stack);
+ for (kind = 0; kind < MAX_KINDS; kind++) {
+ if (md->buffer_stack_vas[kind] != NULL) {
+ tile_net_pop_all_buffers(instance,
+ md->first_buffer_stack +
+ kind);
+ }
+ }
/* Destroy mpipe context so the hardware no longer owns any memory. */
- gxio_mpipe_destroy(&context);
+ gxio_mpipe_destroy(&md->context);
for_each_online_cpu(cpu) {
struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
- free_pages((unsigned long)(info->comps_for_echannel[0]),
- get_order(COMPS_SIZE));
- info->comps_for_echannel[0] = NULL;
- free_pages((unsigned long)(info->iqueue.idescs),
+ free_pages(
+ (unsigned long)(
+ info->mpipe[instance].comps_for_echannel[0]),
+ get_order(COMPS_SIZE));
+ info->mpipe[instance].comps_for_echannel[0] = NULL;
+ free_pages((unsigned long)(info->mpipe[instance].iqueue.idescs),
get_order(NOTIF_RING_SIZE));
- info->iqueue.idescs = NULL;
+ info->mpipe[instance].iqueue.idescs = NULL;
}
- if (small_buffer_stack_va)
- free_pages_exact(small_buffer_stack_va, buffer_stack_size);
- if (large_buffer_stack_va)
- free_pages_exact(large_buffer_stack_va, buffer_stack_size);
+ for (kind = 0; kind < MAX_KINDS; kind++) {
+ if (md->buffer_stack_vas[kind] != NULL) {
+ free_pages_exact(md->buffer_stack_vas[kind],
+ md->buffer_stack_bytes[kind]);
+ md->buffer_stack_vas[kind] = NULL;
+ }
+ }
- small_buffer_stack_va = NULL;
- large_buffer_stack_va = NULL;
- large_buffer_stack = -1;
- small_buffer_stack = -1;
- first_bucket = -1;
+ md->first_buffer_stack = -1;
+ md->first_bucket = -1;
}
/* The first time any tilegx network device is opened, we initialize
@@ -984,9 +1300,11 @@ static void tile_net_init_mpipe_fail(void)
*/
static int tile_net_init_mpipe(struct net_device *dev)
{
- int i, num_buffers, rc;
+ int rc;
int cpu;
int first_ring, ring;
+ int instance = mpipe_instance(dev);
+ struct mpipe_data *md = &mpipe_data[instance];
int network_cpus_count = cpus_weight(network_cpus_map);
if (!hash_default) {
@@ -994,36 +1312,21 @@ static int tile_net_init_mpipe(struct net_device *dev)
return -EIO;
}
- rc = gxio_mpipe_init(&context, 0);
+ rc = gxio_mpipe_init(&md->context, instance);
if (rc != 0) {
- netdev_err(dev, "gxio_mpipe_init failed: %d\n", rc);
+ netdev_err(dev, "gxio_mpipe_init: mpipe[%d] %d\n",
+ instance, rc);
return -EIO;
}
/* Set up the buffer stacks. */
- num_buffers =
- network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH);
- rc = init_buffer_stacks(dev, num_buffers);
+ rc = init_buffer_stacks(dev, network_cpus_count);
if (rc != 0)
goto fail;
- /* Provide initial buffers. */
- rc = -ENOMEM;
- for (i = 0; i < num_buffers; i++) {
- if (!tile_net_provide_buffer(true)) {
- netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
- goto fail;
- }
- }
- for (i = 0; i < num_buffers; i++) {
- if (!tile_net_provide_buffer(false)) {
- netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
- goto fail;
- }
- }
-
/* Allocate one NotifRing for each network cpu. */
- rc = gxio_mpipe_alloc_notif_rings(&context, network_cpus_count, 0, 0);
+ rc = gxio_mpipe_alloc_notif_rings(&md->context,
+ network_cpus_count, 0, 0);
if (rc < 0) {
netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n",
rc);
@@ -1050,10 +1353,13 @@ static int tile_net_init_mpipe(struct net_device *dev)
if (rc != 0)
goto fail;
+ /* Register PTP clock and set mPIPE timestamp, if configured. */
+ register_ptp_clock(dev, md);
+
return 0;
fail:
- tile_net_init_mpipe_fail();
+ tile_net_init_mpipe_fail(instance);
return rc;
}
@@ -1063,17 +1369,19 @@ fail:
*/
static int tile_net_init_egress(struct net_device *dev, int echannel)
{
+ static int ering = -1;
struct page *headers_page, *edescs_page, *equeue_page;
gxio_mpipe_edesc_t *edescs;
gxio_mpipe_equeue_t *equeue;
unsigned char *headers;
int headers_order, edescs_order, equeue_order;
size_t edescs_size;
- int edma;
int rc = -ENOMEM;
+ int instance = mpipe_instance(dev);
+ struct mpipe_data *md = &mpipe_data[instance];
/* Only initialize once. */
- if (egress_for_echannel[echannel].equeue != NULL)
+ if (md->egress_for_echannel[echannel].equeue != NULL)
return 0;
/* Allocate memory for the "headers". */
@@ -1110,28 +1418,41 @@ static int tile_net_init_egress(struct net_device *dev, int echannel)
}
equeue = pfn_to_kaddr(page_to_pfn(equeue_page));
- /* Allocate an edma ring. Note that in practice this can't
- * fail, which is good, because we will leak an edma ring if so.
- */
- rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0);
- if (rc < 0) {
- netdev_warn(dev, "gxio_mpipe_alloc_edma_rings failed: %d\n",
- rc);
- goto fail_equeue;
+ /* Allocate an edma ring (using a one-entry "free list"). */
+ if (ering < 0) {
+ rc = gxio_mpipe_alloc_edma_rings(&md->context, 1, 0, 0);
+ if (rc < 0) {
+ netdev_warn(dev, "gxio_mpipe_alloc_edma_rings: "
+ "mpipe[%d] %d\n", instance, rc);
+ goto fail_equeue;
+ }
+ ering = rc;
}
- edma = rc;
/* Initialize the equeue. */
- rc = gxio_mpipe_equeue_init(equeue, &context, edma, echannel,
+ rc = gxio_mpipe_equeue_init(equeue, &md->context, ering, echannel,
edescs, edescs_size, 0);
if (rc != 0) {
- netdev_err(dev, "gxio_mpipe_equeue_init failed: %d\n", rc);
+ netdev_err(dev, "gxio_mpipe_equeue_init: mpipe[%d] %d\n",
+ instance, rc);
goto fail_equeue;
}
+ /* Don't reuse the ering later. */
+ ering = -1;
+
+ if (jumbo_num != 0) {
+ /* Make sure "jumbo" packets can be egressed safely. */
+ if (gxio_mpipe_equeue_set_snf_size(equeue, 10368) < 0) {
+ /* ISSUE: There is no "gxio_mpipe_equeue_destroy()". */
+ netdev_warn(dev, "Jumbo packets may not be egressed"
+ " properly on channel %d\n", echannel);
+ }
+ }
+
/* Done. */
- egress_for_echannel[echannel].equeue = equeue;
- egress_for_echannel[echannel].headers = headers;
+ md->egress_for_echannel[echannel].equeue = equeue;
+ md->egress_for_echannel[echannel].headers = headers;
return 0;
fail_equeue:
@@ -1151,11 +1472,25 @@ fail:
static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
const char *link_name)
{
- int rc = gxio_mpipe_link_open(link, &context, link_name, 0);
+ int instance = mpipe_instance(dev);
+ struct mpipe_data *md = &mpipe_data[instance];
+ int rc = gxio_mpipe_link_open(link, &md->context, link_name, 0);
if (rc < 0) {
- netdev_err(dev, "Failed to open '%s'\n", link_name);
+ netdev_err(dev, "Failed to open '%s', mpipe[%d], %d\n",
+ link_name, instance, rc);
return rc;
}
+ if (jumbo_num != 0) {
+ u32 attr = GXIO_MPIPE_LINK_RECEIVE_JUMBO;
+ rc = gxio_mpipe_link_set_attr(link, attr, 1);
+ if (rc != 0) {
+ netdev_err(dev,
+ "Cannot receive jumbo packets on '%s'\n",
+ link_name);
+ gxio_mpipe_link_close(link);
+ return rc;
+ }
+ }
rc = gxio_mpipe_link_channel(link);
if (rc < 0 || rc >= TILE_NET_CHANNELS) {
netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc);
@@ -1169,12 +1504,23 @@ static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
static int tile_net_open(struct net_device *dev)
{
struct tile_net_priv *priv = netdev_priv(dev);
- int cpu, rc;
+ int cpu, rc, instance;
mutex_lock(&tile_net_devs_for_channel_mutex);
- /* Do one-time initialization the first time any device is opened. */
- if (ingress_irq < 0) {
+ /* Get the instance info. */
+ rc = gxio_mpipe_link_instance(dev->name);
+ if (rc < 0 || rc >= NR_MPIPE_MAX) {
+ mutex_unlock(&tile_net_devs_for_channel_mutex);
+ return -EIO;
+ }
+
+ priv->instance = rc;
+ instance = rc;
+ if (!mpipe_data[rc].context.mmio_fast_base) {
+ /* Do one-time initialization per instance the first time
+ * any device is opened.
+ */
rc = tile_net_init_mpipe(dev);
if (rc != 0)
goto fail;
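
The mmio_fast_base test above is the "already initialized" sentinel: under tile_net_devs_for_channel_mutex, the first device opened on a given mPIPE instance performs the one-time setup. The guarded lazy-init shape, modeled stand-alone with a pthread mutex (names are illustrative, and a boolean flag stands in for the sentinel field):

#include <pthread.h>
#include <stdbool.h>

#define NR_MPIPE_MAX 2

static pthread_mutex_t open_lock = PTHREAD_MUTEX_INITIALIZER;
static bool initialized[NR_MPIPE_MAX];

/* Stand-in for tile_net_init_mpipe(): 0 on success. */
static int init_instance(int instance)
{
	(void)instance;
	return 0;
}

static int open_instance(int instance)
{
	int rc = 0;

	pthread_mutex_lock(&open_lock);
	if (!initialized[instance]) {
		rc = init_instance(instance);	/* one-time setup */
		if (rc == 0)
			initialized[instance] = true;
	}
	pthread_mutex_unlock(&open_lock);
	return rc;
}

int main(void)
{
	return open_instance(1);
}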
@@ -1205,7 +1551,7 @@ static int tile_net_open(struct net_device *dev)
if (rc != 0)
goto fail;
- tile_net_devs_for_channel[priv->channel] = dev;
+ mpipe_data[instance].tile_net_devs_for_channel[priv->channel] = dev;
rc = tile_net_update(dev);
if (rc != 0)
@@ -1217,7 +1563,7 @@ static int tile_net_open(struct net_device *dev)
for_each_online_cpu(cpu) {
struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
struct tile_net_tx_wake *tx_wake =
- &info->tx_wake[priv->echannel];
+ &info->mpipe[instance].tx_wake[priv->echannel];
hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
@@ -1243,7 +1589,7 @@ fail:
priv->channel = -1;
}
priv->echannel = -1;
- tile_net_devs_for_channel[priv->channel] = NULL;
+ mpipe_data[instance].tile_net_devs_for_channel[priv->channel] = NULL;
mutex_unlock(&tile_net_devs_for_channel_mutex);
/* Don't return raw gxio error codes to generic Linux. */
@@ -1255,18 +1601,20 @@ static int tile_net_stop(struct net_device *dev)
{
struct tile_net_priv *priv = netdev_priv(dev);
int cpu;
+ int instance = priv->instance;
+ struct mpipe_data *md = &mpipe_data[instance];
for_each_online_cpu(cpu) {
struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
struct tile_net_tx_wake *tx_wake =
- &info->tx_wake[priv->echannel];
+ &info->mpipe[instance].tx_wake[priv->echannel];
hrtimer_cancel(&tx_wake->timer);
netif_stop_subqueue(dev, cpu);
}
mutex_lock(&tile_net_devs_for_channel_mutex);
- tile_net_devs_for_channel[priv->channel] = NULL;
+ md->tile_net_devs_for_channel[priv->channel] = NULL;
(void)tile_net_update(dev);
if (priv->loopify_channel >= 0) {
if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
@@ -1374,20 +1722,20 @@ static int tso_count_edescs(struct sk_buff *skb)
return num_edescs;
}
-/* Prepare modified copies of the skbuff headers.
- * FIXME: add support for IPv6.
- */
+/* Prepare modified copies of the skbuff headers. */
static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
s64 slot)
{
struct skb_shared_info *sh = skb_shinfo(skb);
struct iphdr *ih;
+ struct ipv6hdr *ih6;
struct tcphdr *th;
unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
unsigned int data_len = skb->len - sh_len;
unsigned char *data = skb->data;
unsigned int ih_off, th_off, p_len;
unsigned int isum_seed, tsum_seed, id, seq;
+ int is_ipv6;
long f_id = -1; /* id of the current fragment */
long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
long f_used = 0; /* bytes used from the current fragment */
@@ -1395,18 +1743,24 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
int segment;
/* Locate original headers and compute various lengths. */
- ih = ip_hdr(skb);
+ is_ipv6 = skb_is_gso_v6(skb);
+ if (is_ipv6) {
+ ih6 = ipv6_hdr(skb);
+ ih_off = skb_network_offset(skb);
+ } else {
+ ih = ip_hdr(skb);
+ ih_off = skb_network_offset(skb);
+ isum_seed = ((0xFFFF - ih->check) +
+ (0xFFFF - ih->tot_len) +
+ (0xFFFF - ih->id));
+ id = ntohs(ih->id);
+ }
+
th = tcp_hdr(skb);
- ih_off = skb_network_offset(skb);
th_off = skb_transport_offset(skb);
p_len = sh->gso_size;
- /* Set up seed values for IP and TCP csum and initialize id and seq. */
- isum_seed = ((0xFFFF - ih->check) +
- (0xFFFF - ih->tot_len) +
- (0xFFFF - ih->id));
tsum_seed = th->check + (0xFFFF ^ htons(skb->len));
- id = ntohs(ih->id);
seq = ntohl(th->seq);
/* Prepare all the headers. */
@@ -1420,11 +1774,17 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
memcpy(buf, data, sh_len);
/* Update copied ip header. */
- ih = (struct iphdr *)(buf + ih_off);
- ih->tot_len = htons(sh_len + p_len - ih_off);
- ih->id = htons(id);
- ih->check = csum_long(isum_seed + ih->tot_len +
- ih->id) ^ 0xffff;
+ if (is_ipv6) {
+ ih6 = (struct ipv6hdr *)(buf + ih_off);
+ ih6->payload_len = htons(sh_len + p_len - ih_off -
+ sizeof(*ih6));
+ } else {
+ ih = (struct iphdr *)(buf + ih_off);
+ ih->tot_len = htons(sh_len + p_len - ih_off);
+ ih->id = htons(id);
+ ih->check = csum_long(isum_seed + ih->tot_len +
+ ih->id) ^ 0xffff;
+ }
/* Update copied tcp header. */
th = (struct tcphdr *)(buf + th_off);
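
The isum_seed/tsum_seed arithmetic deserves a note: the seeds pre-subtract, in one's-complement form, the old check, tot_len, and id fields, so each generated segment only has to fold its new tot_len and id back in and invert. IPv6 needs none of this because its header carries no checksum, only payload_len. A stand-alone model of the IPv4 fold; the field values are hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Fold a wide sum down to 16 bits, one's-complement style. */
static uint16_t fold(uint64_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	/* Hypothetical original IPv4 header fields, as 16-bit words. */
	uint16_t check = 0x1c46, tot_len = 0x003c, id = 0xa6ec;

	/* Remove their contribution once, up front (the "seed"). */
	uint64_t seed = (0xFFFFu - check) + (0xFFFFu - tot_len) +
			(0xFFFFu - id);

	/* Per segment: fold the new length and id in, then invert. */
	uint16_t new_tot_len = 0x0028, new_id = id + 1;
	uint16_t new_check = fold(seed + new_tot_len + new_id) ^ 0xffff;

	printf("per-segment check = 0x%04x\n", new_check);
	return 0;
}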
@@ -1475,8 +1835,9 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
struct sk_buff *skb, unsigned char *headers, s64 slot)
{
- struct tile_net_priv *priv = netdev_priv(dev);
struct skb_shared_info *sh = skb_shinfo(skb);
+ int instance = mpipe_instance(dev);
+ struct mpipe_data *md = &mpipe_data[instance];
unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
unsigned int data_len = skb->len - sh_len;
unsigned int p_len = sh->gso_size;
@@ -1499,8 +1860,8 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
edesc_head.xfer_size = sh_len;
/* This is only used to specify the TLB. */
- edesc_head.stack_idx = large_buffer_stack;
- edesc_body.stack_idx = large_buffer_stack;
+ edesc_head.stack_idx = md->first_buffer_stack;
+ edesc_body.stack_idx = md->first_buffer_stack;
/* Egress all the edescs. */
for (segment = 0; segment < sh->gso_segs; segment++) {
@@ -1553,8 +1914,8 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
}
/* Update stats. */
- tile_net_stats_add(tx_packets, &priv->stats.tx_packets);
- tile_net_stats_add(tx_bytes, &priv->stats.tx_bytes);
+ tile_net_stats_add(tx_packets, &dev->stats.tx_packets);
+ tile_net_stats_add(tx_bytes, &dev->stats.tx_bytes);
}
/* Do "TSO" handling for egress.
@@ -1575,8 +1936,11 @@ static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
struct tile_net_priv *priv = netdev_priv(dev);
int channel = priv->echannel;
- struct tile_net_egress *egress = &egress_for_echannel[channel];
- struct tile_net_comps *comps = info->comps_for_echannel[channel];
+ int instance = priv->instance;
+ struct mpipe_data *md = &mpipe_data[instance];
+ struct tile_net_egress *egress = &md->egress_for_echannel[channel];
+ struct tile_net_comps *comps =
+ info->mpipe[instance].comps_for_echannel[channel];
gxio_mpipe_equeue_t *equeue = egress->equeue;
unsigned long irqflags;
int num_edescs;
@@ -1640,10 +2004,13 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
{
struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
struct tile_net_priv *priv = netdev_priv(dev);
- struct tile_net_egress *egress = &egress_for_echannel[priv->echannel];
+ int instance = priv->instance;
+ struct mpipe_data *md = &mpipe_data[instance];
+ struct tile_net_egress *egress =
+ &md->egress_for_echannel[priv->echannel];
gxio_mpipe_equeue_t *equeue = egress->equeue;
struct tile_net_comps *comps =
- info->comps_for_echannel[priv->echannel];
+ info->mpipe[instance].comps_for_echannel[priv->echannel];
unsigned int len = skb->len;
unsigned char *data = skb->data;
unsigned int num_edescs;
@@ -1660,7 +2027,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));
/* This is only used to specify the TLB. */
- edesc.stack_idx = large_buffer_stack;
+ edesc.stack_idx = md->first_buffer_stack;
/* Prepare the edescs. */
for (i = 0; i < num_edescs; i++) {
@@ -1693,13 +2060,16 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < num_edescs; i++)
gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++);
+ /* Store TX timestamp if needed. */
+ tile_tx_timestamp(skb, instance);
+
/* Add a completion record. */
add_comp(equeue, comps, slot - 1, skb);
/* NOTE: Use ETH_ZLEN for short packets (e.g. 42 < 60). */
- tile_net_stats_add(1, &priv->stats.tx_packets);
+ tile_net_stats_add(1, &dev->stats.tx_packets);
tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN),
- &priv->stats.tx_bytes);
+ &dev->stats.tx_bytes);
local_irq_restore(irqflags);
@@ -1727,20 +2097,18 @@ static void tile_net_tx_timeout(struct net_device *dev)
/* Ioctl commands. */
static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- return -EOPNOTSUPP;
-}
+ if (cmd == SIOCSHWTSTAMP)
+ return tile_hwtstamp_ioctl(dev, rq, cmd);
-/* Get system network statistics for device. */
-static struct net_device_stats *tile_net_get_stats(struct net_device *dev)
-{
- struct tile_net_priv *priv = netdev_priv(dev);
- return &priv->stats;
+ return -EOPNOTSUPP;
}
/* Change the MTU. */
static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
{
- if ((new_mtu < 68) || (new_mtu > 1500))
+ if (new_mtu < 68)
+ return -EINVAL;
+ if (new_mtu > ((jumbo_num != 0) ? 9000 : 1500))
return -EINVAL;
dev->mtu = new_mtu;
return 0;
@@ -1772,9 +2140,13 @@ static int tile_net_set_mac_address(struct net_device *dev, void *p)
*/
static void tile_net_netpoll(struct net_device *dev)
{
- disable_percpu_irq(ingress_irq);
- tile_net_handle_ingress_irq(ingress_irq, NULL);
- enable_percpu_irq(ingress_irq, 0);
+ int instance = mpipe_instance(dev);
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct mpipe_data *md = &mpipe_data[instance];
+
+ disable_percpu_irq(md->ingress_irq);
+ napi_schedule(&info->mpipe[instance].napi);
+ enable_percpu_irq(md->ingress_irq, 0);
}
#endif
@@ -1784,7 +2156,6 @@ static const struct net_device_ops tile_net_ops = {
.ndo_start_xmit = tile_net_tx,
.ndo_select_queue = tile_net_select_queue,
.ndo_do_ioctl = tile_net_ioctl,
- .ndo_get_stats = tile_net_get_stats,
.ndo_change_mtu = tile_net_change_mtu,
.ndo_tx_timeout = tile_net_tx_timeout,
.ndo_set_mac_address = tile_net_set_mac_address,
@@ -1800,14 +2171,21 @@ static const struct net_device_ops tile_net_ops = {
*/
static void tile_net_setup(struct net_device *dev)
{
+ netdev_features_t features = 0;
+
ether_setup(dev);
dev->netdev_ops = &tile_net_ops;
dev->watchdog_timeo = TILE_NET_TIMEOUT;
- dev->features |= NETIF_F_LLTX;
- dev->features |= NETIF_F_HW_CSUM;
- dev->features |= NETIF_F_SG;
- dev->features |= NETIF_F_TSO;
dev->mtu = 1500;
+
+ features |= NETIF_F_HW_CSUM;
+ features |= NETIF_F_SG;
+ features |= NETIF_F_TSO;
+ features |= NETIF_F_TSO6;
+
+ dev->hw_features |= features;
+ dev->vlan_features |= features;
+ dev->features |= features;
}
/* Allocate the device structure, register the device, and obtain the
@@ -1842,6 +2220,7 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
priv->channel = -1;
priv->loopify_channel = -1;
priv->echannel = -1;
+ init_ptp_dev(priv);
/* Get the MAC address and set it in the device struct; this must
* be done before the device is opened. If the MAC is all zeroes,
@@ -1871,9 +2250,12 @@ static void tile_net_init_module_percpu(void *unused)
{
struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
int my_cpu = smp_processor_id();
+ int instance;
- info->has_iqueue = false;
-
+ for (instance = 0; instance < NR_MPIPE_MAX; instance++) {
+ info->mpipe[instance].has_iqueue = false;
+ info->mpipe[instance].instance = instance;
+ }
info->my_cpu = my_cpu;
/* Initialize the egress timer. */
@@ -1890,6 +2272,8 @@ static int __init tile_net_init_module(void)
pr_info("Tilera Network Driver\n");
+ BUILD_BUG_ON(NR_MPIPE_MAX != 2);
+
mutex_init(&tile_net_devs_for_channel_mutex);
/* Initialize each CPU. */
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 36435499814b..106be47716e7 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -31,6 +31,7 @@
#include <linux/in6.h>
#include <linux/timer.h>
#include <linux/io.h>
+#include <linux/u64_stats_sync.h>
#include <asm/checksum.h>
#include <asm/homecache.h>
@@ -88,13 +89,6 @@
/* ISSUE: This has not been thoroughly tested (except at 1500). */
#define TILE_NET_MTU 1500
-/* HACK: Define to support GSO. */
-/* ISSUE: This may actually hurt performance of the TCP blaster. */
-/* #define TILE_NET_GSO */
-
-/* Define this to collapse "duplicate" acks. */
-/* #define IGNORE_DUP_ACKS */
-
/* HACK: Define this to verify incoming packets. */
/* #define TILE_NET_VERIFY_INGRESS */
@@ -156,10 +150,13 @@ struct tile_netio_queue {
* Statistics counters for a specific cpu and device.
*/
struct tile_net_stats_t {
- u32 rx_packets;
- u32 rx_bytes;
- u32 tx_packets;
- u32 tx_bytes;
+ struct u64_stats_sync syncp;
+ u64 rx_packets; /* total packets received */
+ u64 tx_packets; /* total packets transmitted */
+ u64 rx_bytes; /* total bytes received */
+ u64 tx_bytes; /* total bytes transmitted */
+ u64 rx_errors; /* packets truncated or marked bad by hw */
+ u64 rx_dropped; /* packets not for us or intf not up */
};
@@ -218,8 +215,6 @@ struct tile_net_priv {
int network_cpus_count;
/* Credits per network cpu. */
int network_cpus_credits;
- /* Network stats. */
- struct net_device_stats stats;
/* For NetIO bringup retries. */
struct delayed_work retry_work;
/* Quick access to per cpu data. */
@@ -627,79 +622,6 @@ static void tile_net_handle_egress_timer(unsigned long arg)
}
-#ifdef IGNORE_DUP_ACKS
-
-/*
- * Help detect "duplicate" ACKs. These are sequential packets (for a
- * given flow) which are exactly 66 bytes long, sharing everything but
- * ID=2@0x12, Hsum=2@0x18, Ack=4@0x2a, WinSize=2@0x30, Csum=2@0x32,
- * Tstamps=10@0x38. The ID's are +1, the Hsum's are -1, the Ack's are
- * +N, and the Tstamps are usually identical.
- *
- * NOTE: Apparently truly duplicate acks (with identical "ack" values),
- * should not be collapsed, as they are used for some kind of flow control.
- */
-static bool is_dup_ack(char *s1, char *s2, unsigned int len)
-{
- int i;
-
- unsigned long long ignorable = 0;
-
- /* Identification. */
- ignorable |= (1ULL << 0x12);
- ignorable |= (1ULL << 0x13);
-
- /* Header checksum. */
- ignorable |= (1ULL << 0x18);
- ignorable |= (1ULL << 0x19);
-
- /* ACK. */
- ignorable |= (1ULL << 0x2a);
- ignorable |= (1ULL << 0x2b);
- ignorable |= (1ULL << 0x2c);
- ignorable |= (1ULL << 0x2d);
-
- /* WinSize. */
- ignorable |= (1ULL << 0x30);
- ignorable |= (1ULL << 0x31);
-
- /* Checksum. */
- ignorable |= (1ULL << 0x32);
- ignorable |= (1ULL << 0x33);
-
- for (i = 0; i < len; i++, ignorable >>= 1) {
-
- if ((ignorable & 1) || (s1[i] == s2[i]))
- continue;
-
-#ifdef TILE_NET_DEBUG
- /* HACK: Mention non-timestamp diffs. */
- if (i < 0x38 && i != 0x2f &&
- net_ratelimit())
- pr_info("Diff at 0x%x\n", i);
-#endif
-
- return false;
- }
-
-#ifdef TILE_NET_NO_SUPPRESS_DUP_ACKS
- /* HACK: Do not suppress truly duplicate ACKs. */
- /* ISSUE: Is this actually necessary or helpful? */
- if (s1[0x2a] == s2[0x2a] &&
- s1[0x2b] == s2[0x2b] &&
- s1[0x2c] == s2[0x2c] &&
- s1[0x2d] == s2[0x2d]) {
- return false;
- }
-#endif
-
- return true;
-}
-
-#endif
-
-
-
static void tile_net_discard_aux(struct tile_net_cpu *info, int index)
{
struct tile_netio_queue *queue = &info->queue;
@@ -774,6 +696,7 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index);
netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt);
+ netio_pkt_status_t pkt_status = NETIO_PKT_STATUS_M(metadata, pkt);
/* Extract the packet size. FIXME: Shouldn't the second line */
/* get subtracted? Mostly moot, since it should be "zero". */
@@ -806,40 +729,25 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
#endif /* TILE_NET_DUMP_PACKETS */
#ifdef TILE_NET_VERIFY_INGRESS
- if (!NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt) &&
- NETIO_PKT_L4_CSUM_CALCULATED_M(metadata, pkt)) {
- /* Bug 6624: Includes UDP packets with a "zero" checksum. */
- pr_warning("Bad L4 checksum on %d byte packet.\n", len);
- }
- if (!NETIO_PKT_L3_CSUM_CORRECT_M(metadata, pkt) &&
- NETIO_PKT_L3_CSUM_CALCULATED_M(metadata, pkt)) {
+ if (pkt_status == NETIO_PKT_STATUS_OVERSIZE && len >= 64) {
dump_packet(buf, len, "rx");
- panic("Bad L3 checksum.");
- }
- switch (NETIO_PKT_STATUS_M(metadata, pkt)) {
- case NETIO_PKT_STATUS_OVERSIZE:
- if (len >= 64) {
- dump_packet(buf, len, "rx");
- panic("Unexpected OVERSIZE.");
- }
- break;
- case NETIO_PKT_STATUS_BAD:
- pr_warning("Unexpected BAD %ld byte packet.\n", len);
+ panic("Unexpected OVERSIZE.");
}
#endif
filter = 0;
- /* ISSUE: Filter TCP packets with "bad" checksums? */
-
- if (!(dev->flags & IFF_UP)) {
+ if (pkt_status == NETIO_PKT_STATUS_BAD) {
+ /* Handle CRC error and hardware truncation. */
+ filter = 2;
+ } else if (!(dev->flags & IFF_UP)) {
/* Filter packets received before we're up. */
filter = 1;
- } else if (NETIO_PKT_STATUS_M(metadata, pkt) == NETIO_PKT_STATUS_BAD) {
+ } else if (NETIO_PKT_ETHERTYPE_RECOGNIZED_M(metadata, pkt) &&
+ pkt_status == NETIO_PKT_STATUS_UNDERSIZE) {
/* Filter "truncated" packets. */
- filter = 1;
+ filter = 2;
} else if (!(dev->flags & IFF_PROMISC)) {
- /* FIXME: Implement HW multicast filter. */
if (!is_multicast_ether_addr(buf)) {
/* Filter packets not for our address. */
const u8 *mine = dev->dev_addr;
@@ -847,9 +755,14 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
}
}
- if (filter) {
+ u64_stats_update_begin(&stats->syncp);
- /* ISSUE: Update "drop" statistics? */
+ if (filter != 0) {
+
+ if (filter == 1)
+ stats->rx_dropped++;
+ else
+ stats->rx_errors++;
tile_net_provide_linux_buffer(info, va, small);
@@ -881,6 +794,8 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
stats->rx_bytes += len;
}
+ u64_stats_update_end(&stats->syncp);
+
/* ISSUE: It would be nice to defer this until the packet has */
/* actually been processed. */
tile_net_return_credit(info);
@@ -1907,8 +1822,10 @@ busy:
kfree_skb(olds[i]);
/* Update stats. */
+ u64_stats_update_begin(&stats->syncp);
stats->tx_packets += num_segs;
stats->tx_bytes += (num_segs * sh_len) + d_len;
+ u64_stats_update_end(&stats->syncp);
/* Make sure the egress timer is scheduled. */
tile_net_schedule_egress_timer(info);
@@ -1936,7 +1853,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
unsigned int csum_start = skb_checksum_start_offset(skb);
- lepp_frag_t frags[LEPP_MAX_FRAGS];
+ lepp_frag_t frags[1 + MAX_SKB_FRAGS];
unsigned int num_frags;
@@ -1951,7 +1868,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
unsigned int cmd_head, cmd_tail, cmd_next;
unsigned int comp_tail;
- lepp_cmd_t cmds[LEPP_MAX_FRAGS];
+ lepp_cmd_t cmds[1 + MAX_SKB_FRAGS];
/*
@@ -2089,8 +2006,10 @@ busy:
kfree_skb(olds[i]);
/* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */
+ u64_stats_update_begin(&stats->syncp);
stats->tx_packets++;
stats->tx_bytes += ((len >= ETH_ZLEN) ? len : ETH_ZLEN);
+ u64_stats_update_end(&stats->syncp);
/* Make sure the egress timer is scheduled. */
tile_net_schedule_egress_timer(info);
@@ -2127,30 +2046,51 @@ static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
*
* Returns the address of the device statistics structure.
*/
-static struct net_device_stats *tile_net_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct tile_net_priv *priv = netdev_priv(dev);
- u32 rx_packets = 0;
- u32 tx_packets = 0;
- u32 rx_bytes = 0;
- u32 tx_bytes = 0;
+ u64 rx_packets = 0, tx_packets = 0;
+ u64 rx_bytes = 0, tx_bytes = 0;
+ u64 rx_errors = 0, rx_dropped = 0;
int i;
for_each_online_cpu(i) {
- if (priv->cpu[i]) {
- rx_packets += priv->cpu[i]->stats.rx_packets;
- rx_bytes += priv->cpu[i]->stats.rx_bytes;
- tx_packets += priv->cpu[i]->stats.tx_packets;
- tx_bytes += priv->cpu[i]->stats.tx_bytes;
- }
+ struct tile_net_stats_t *cpu_stats;
+ u64 trx_packets, ttx_packets, trx_bytes, ttx_bytes;
+ u64 trx_errors, trx_dropped;
+ unsigned int start;
+
+ if (priv->cpu[i] == NULL)
+ continue;
+ cpu_stats = &priv->cpu[i]->stats;
+
+ do {
+ start = u64_stats_fetch_begin_bh(&cpu_stats->syncp);
+ trx_packets = cpu_stats->rx_packets;
+ ttx_packets = cpu_stats->tx_packets;
+ trx_bytes = cpu_stats->rx_bytes;
+ ttx_bytes = cpu_stats->tx_bytes;
+ trx_errors = cpu_stats->rx_errors;
+ trx_dropped = cpu_stats->rx_dropped;
+ } while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start));
+
+ rx_packets += trx_packets;
+ tx_packets += ttx_packets;
+ rx_bytes += trx_bytes;
+ tx_bytes += ttx_bytes;
+ rx_errors += trx_errors;
+ rx_dropped += trx_dropped;
}
- priv->stats.rx_packets = rx_packets;
- priv->stats.rx_bytes = rx_bytes;
- priv->stats.tx_packets = tx_packets;
- priv->stats.tx_bytes = tx_bytes;
+ stats->rx_packets = rx_packets;
+ stats->tx_packets = tx_packets;
+ stats->rx_bytes = rx_bytes;
+ stats->tx_bytes = tx_bytes;
+ stats->rx_errors = rx_errors;
+ stats->rx_dropped = rx_dropped;
- return &priv->stats;
+ return stats;
}
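
The fetch/retry loop above is the reader half of a sequence counter: the per-cpu writer bumps the counter around its updates, and the reader retries until it observes a stable, even value. A simplified stand-alone model using C11 atomics; the kernel's u64_stats_sync compiles away entirely on 64-bit and inserts the barriers this sketch glosses over:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct pcpu_stats {
	atomic_uint seq;
	uint64_t rx_packets, rx_bytes;
};

static void writer_update(struct pcpu_stats *s, uint64_t pkts, uint64_t bytes)
{
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release); /* odd */
	s->rx_packets += pkts;
	s->rx_bytes += bytes;
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release); /* even */
}

static void reader_snapshot(struct pcpu_stats *s,
			    uint64_t *pkts, uint64_t *bytes)
{
	unsigned int start;

	do {
		start = atomic_load_explicit(&s->seq, memory_order_acquire);
		*pkts = s->rx_packets;
		*bytes = s->rx_bytes;
	} while ((start & 1) ||
		 start != atomic_load_explicit(&s->seq, memory_order_acquire));
}

int main(void)
{
	struct pcpu_stats s = { 0 };
	uint64_t pkts, bytes;

	writer_update(&s, 1, 60);
	reader_snapshot(&s, &pkts, &bytes);
	printf("%llu packets, %llu bytes\n",
	       (unsigned long long)pkts, (unsigned long long)bytes);
	return 0;
}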
@@ -2287,7 +2227,7 @@ static const struct net_device_ops tile_net_ops = {
.ndo_stop = tile_net_stop,
.ndo_start_xmit = tile_net_tx,
.ndo_do_ioctl = tile_net_ioctl,
- .ndo_get_stats = tile_net_get_stats,
+ .ndo_get_stats64 = tile_net_get_stats64,
.ndo_change_mtu = tile_net_change_mtu,
.ndo_tx_timeout = tile_net_tx_timeout,
.ndo_set_mac_address = tile_net_set_mac_address,
@@ -2305,39 +2245,30 @@ static const struct net_device_ops tile_net_ops = {
*/
static void tile_net_setup(struct net_device *dev)
{
- PDEBUG("tile_net_setup()\n");
+ netdev_features_t features = 0;
ether_setup(dev);
-
dev->netdev_ops = &tile_net_ops;
-
dev->watchdog_timeo = TILE_NET_TIMEOUT;
+ dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN;
+ dev->mtu = TILE_NET_MTU;
- /* We want lockless xmit. */
- dev->features |= NETIF_F_LLTX;
-
- /* We support hardware tx checksums. */
- dev->features |= NETIF_F_HW_CSUM;
-
- /* We support scatter/gather. */
- dev->features |= NETIF_F_SG;
-
- /* We support TSO. */
- dev->features |= NETIF_F_TSO;
+ features |= NETIF_F_HW_CSUM;
+ features |= NETIF_F_SG;
-#ifdef TILE_NET_GSO
- /* We support GSO. */
- dev->features |= NETIF_F_GSO;
-#endif
+ /* We support TSO iff the HV supports sufficient frags. */
+ if (LEPP_MAX_FRAGS >= 1 + MAX_SKB_FRAGS)
+ features |= NETIF_F_TSO;
+ /* We can't support HIGHDMA without hash_default, since we need
+ * to be able to finv() with a VA if we don't have hash_default.
+ */
if (hash_default)
- dev->features |= NETIF_F_HIGHDMA;
-
- /* ISSUE: We should support NETIF_F_UFO. */
+ features |= NETIF_F_HIGHDMA;
- dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN;
-
- dev->mtu = TILE_NET_MTU;
+ dev->hw_features |= features;
+ dev->vlan_features |= features;
+ dev->features |= features;
}
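
The LEPP_MAX_FRAGS >= 1 + MAX_SKB_FRAGS test above encodes the worst case the LEPP command queue must absorb: one slot for the skb's linear header area plus one per page fragment. A stand-alone version of the check; MAX_SKB_FRAGS is typically 17 on a 4KB-page kernel of this era, and the LEPP_MAX_FRAGS value here is hypothetical:

#include <stdio.h>

#define MAX_SKB_FRAGS  17	/* typical with 4KB pages */
#define LEPP_MAX_FRAGS 21	/* hypothetical queue capacity */

int main(void)
{
	/* One command slot for the linear head, plus one per fragment. */
	if (LEPP_MAX_FRAGS >= 1 + MAX_SKB_FRAGS)
		printf("TSO can be advertised\n");
	else
		printf("TSO must stay off\n");
	return 0;
}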
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 01bdc6ca0755..c4dbf981804b 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1308,13 +1308,13 @@ static int tsi108_open(struct net_device *dev)
data->id, dev->irq, dev->name);
}
- data->rxring = dma_alloc_coherent(NULL, rxring_size, &data->rxdma,
- GFP_KERNEL | __GFP_ZERO);
+ data->rxring = dma_zalloc_coherent(NULL, rxring_size, &data->rxdma,
+ GFP_KERNEL);
if (!data->rxring)
return -ENOMEM;
- data->txring = dma_alloc_coherent(NULL, txring_size, &data->txdma,
- GFP_KERNEL | __GFP_ZERO);
+ data->txring = dma_zalloc_coherent(NULL, txring_size, &data->txdma,
+ GFP_KERNEL);
if (!data->txring) {
pci_free_consistent(0, rxring_size, data->rxring, data->rxdma);
return -ENOMEM;
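
These dma_alloc_coherent(... | __GFP_ZERO) to dma_zalloc_coherent() conversions, here and in the ll_temac, axienet, defxx, and irda hunks below, are mechanical: at this point in the kernel's history dma_zalloc_coherent() is a thin inline wrapper that ORs in __GFP_ZERO, essentially (paraphrasing include/linux/dma-mapping.h):

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_coherent(dev, size, dma_handle, flag | __GFP_ZERO);
}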
@@ -1558,7 +1558,7 @@ tsi108_init_one(struct platform_device *pdev)
hw_info *einfo;
int err = 0;
- einfo = pdev->dev.platform_data;
+ einfo = dev_get_platdata(&pdev->dev);
if (NULL == einfo) {
printk(KERN_ERR "tsi-eth %d: Missing additional data!\n",
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index b75eb9e0e867..c8f088ab5fdf 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -2407,7 +2407,7 @@ static struct pci_driver rhine_driver = {
.driver.pm = RHINE_PM_OPS,
};
-static struct dmi_system_id __initdata rhine_dmi_table[] = {
+static struct dmi_system_id rhine_dmi_table[] __initdata = {
{
.ident = "EPIA-M",
.matches = {
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index d01cacf8a7c2..d022bf936572 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2376,6 +2376,23 @@ out_0:
return ret;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * velocity_poll_controller - Velocity Poll controller function
+ * @dev: network device
+ *
+ * Used by NETCONSOLE and other diagnostic tools to allow network I/O
+ * with interrupts disabled.
+ */
+static void velocity_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ velocity_intr(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
/**
* velocity_mii_ioctl - MII ioctl handler
* @dev: network device
@@ -2641,6 +2658,9 @@ static const struct net_device_ops velocity_netdev_ops = {
.ndo_do_ioctl = velocity_ioctl,
.ndo_vlan_rx_add_vid = velocity_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = velocity_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = velocity_poll_controller,
+#endif
};
/**
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 30fed08d1674..0df36c6ec7f4 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -622,7 +622,7 @@ static const struct net_device_ops w5100_netdev_ops = {
static int w5100_hw_probe(struct platform_device *pdev)
{
- struct wiznet_platform_data *data = pdev->dev.platform_data;
+ struct wiznet_platform_data *data = dev_get_platdata(&pdev->dev);
struct net_device *ndev = platform_get_drvdata(pdev);
struct w5100_priv *priv = netdev_priv(ndev);
const char *name = netdev_name(ndev);
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index e92884564e1e..71c27b3292f1 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -542,7 +542,7 @@ static const struct net_device_ops w5300_netdev_ops = {
static int w5300_hw_probe(struct platform_device *pdev)
{
- struct wiznet_platform_data *data = pdev->dev.platform_data;
+ struct wiznet_platform_data *data = dev_get_platdata(&pdev->dev);
struct net_device *ndev = platform_get_drvdata(pdev);
struct w5300_priv *priv = netdev_priv(ndev);
const char *name = netdev_name(ndev);
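
Likewise, the pdev->dev.platform_data to dev_get_platdata(&pdev->dev) changes in the tsi108, w5100, and w5300 hunks above (and ixp4xx_eth below) are pure accessor substitutions; the helper, from include/linux/device.h, is simply:

static inline void *dev_get_platdata(const struct device *dev)
{
	return dev->platform_data;
}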
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 58eb4488beff..b88121f240ca 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -243,15 +243,15 @@ static int temac_dma_bd_init(struct net_device *ndev)
/* allocate the tx and rx ring buffer descriptors. */
/* returns a virtual address and a physical address. */
- lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
- &lp->tx_bd_p, GFP_KERNEL | __GFP_ZERO);
+ lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
+ sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ &lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v)
goto out;
- lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
- &lp->rx_bd_p, GFP_KERNEL | __GFP_ZERO);
+ lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
+ sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ &lp->rx_bd_p, GFP_KERNEL);
if (!lp->rx_bd_v)
goto out;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index fb7d1c28a2ea..b2ff038d6d20 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -201,17 +201,15 @@ static int axienet_dma_bd_init(struct net_device *ndev)
/*
* Allocate the Tx and Rx buffer descriptors.
*/
- lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
- &lp->tx_bd_p,
- GFP_KERNEL | __GFP_ZERO);
+ lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
+ sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ &lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v)
goto out;
- lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
- &lp->rx_bd_p,
- GFP_KERNEL | __GFP_ZERO);
+ lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
+ sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ &lp->rx_bd_p, GFP_KERNEL);
if (!lp->rx_bd_v)
goto out;
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index fd4dbdae5331..4c619ea5189f 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1230,8 +1230,7 @@ error:
*/
static int xemaclite_of_remove(struct platform_device *of_dev)
{
- struct device *dev = &of_dev->dev;
- struct net_device *ndev = dev_get_drvdata(dev);
+ struct net_device *ndev = platform_get_drvdata(of_dev);
struct net_local *lp = netdev_priv(ndev);
@@ -1250,7 +1249,6 @@ static int xemaclite_of_remove(struct platform_device *of_dev)
lp->phy_node = NULL;
xemaclite_remove_ndev(ndev, of_dev);
- dev_set_drvdata(dev, NULL);
return 0;
}
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 3d689fcb7917..e78802e75ea6 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1384,7 +1384,7 @@ static int eth_init_one(struct platform_device *pdev)
{
struct port *port;
struct net_device *dev;
- struct eth_plat_info *plat = pdev->dev.platform_data;
+ struct eth_plat_info *plat = dev_get_platdata(&pdev->dev);
u32 regs_phys;
char phy_id[MII_BUS_ID_SIZE + 3];
int err;
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 4c8ddc944d51..0b40e1c46f07 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -1068,9 +1068,9 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name,
#endif
sizeof(PI_CONSUMER_BLOCK) +
(PI_ALIGN_K_DESC_BLK - 1);
- bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size,
- &bp->kmalloced_dma,
- GFP_ATOMIC | __GFP_ZERO);
+ bp->kmalloced = top_v = dma_zalloc_coherent(bp->bus_dev, alloc_size,
+ &bp->kmalloced_dma,
+ GFP_ATOMIC);
if (top_v == NULL)
return DFX_K_FAILURE;
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 3adb43ce138f..7bbd318bc93e 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -351,16 +351,16 @@ static int ali_ircc_open(int i, chipio_t *info)
/* Allocate memory if needed */
self->rx_buff.head =
- dma_alloc_coherent(NULL, self->rx_buff.truesize,
- &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
+ dma_zalloc_coherent(NULL, self->rx_buff.truesize,
+ &self->rx_buff_dma, GFP_KERNEL);
if (self->rx_buff.head == NULL) {
err = -ENOMEM;
goto err_out2;
}
self->tx_buff.head =
- dma_alloc_coherent(NULL, self->tx_buff.truesize,
- &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
+ dma_zalloc_coherent(NULL, self->tx_buff.truesize,
+ &self->tx_buff_dma, GFP_KERNEL);
if (self->tx_buff.head == NULL) {
err = -ENOMEM;
goto err_out3;
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index 9cf836b57c49..ceeb53737f86 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -430,8 +430,8 @@ static int __init nsc_ircc_open(chipio_t *info)
/* Allocate memory if needed */
self->rx_buff.head =
- dma_alloc_coherent(NULL, self->rx_buff.truesize,
- &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
+ dma_zalloc_coherent(NULL, self->rx_buff.truesize,
+ &self->rx_buff_dma, GFP_KERNEL);
if (self->rx_buff.head == NULL) {
err = -ENOMEM;
goto out2;
@@ -439,8 +439,8 @@ static int __init nsc_ircc_open(chipio_t *info)
}
self->tx_buff.head =
- dma_alloc_coherent(NULL, self->tx_buff.truesize,
- &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
+ dma_zalloc_coherent(NULL, self->tx_buff.truesize,
+ &self->tx_buff_dma, GFP_KERNEL);
if (self->tx_buff.head == NULL) {
err = -ENOMEM;
goto out3;
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 964b116a0ab7..3eeaaf800494 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -915,7 +915,7 @@ static int pxa_irda_probe(struct platform_device *pdev)
err = register_netdev(dev);
if (err == 0)
- dev_set_drvdata(&pdev->dev, dev);
+ platform_set_drvdata(pdev, dev);
if (err) {
if (si->pdata->shutdown)
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index aa05dad75335..0dcdf1592f6b 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -562,14 +562,14 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma,
self->tx_buff.truesize = SMSC_IRCC2_TX_BUFF_TRUESIZE;
self->rx_buff.head =
- dma_alloc_coherent(NULL, self->rx_buff.truesize,
- &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
+ dma_zalloc_coherent(NULL, self->rx_buff.truesize,
+ &self->rx_buff_dma, GFP_KERNEL);
if (self->rx_buff.head == NULL)
goto err_out2;
self->tx_buff.head =
- dma_alloc_coherent(NULL, self->tx_buff.truesize,
- &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
+ dma_zalloc_coherent(NULL, self->tx_buff.truesize,
+ &self->tx_buff_dma, GFP_KERNEL);
if (self->tx_buff.head == NULL)
goto err_out3;
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 2dcc60fb37f1..9abaec27f962 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -361,16 +361,16 @@ static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
/* Allocate memory if needed */
self->rx_buff.head =
- dma_alloc_coherent(&pdev->dev, self->rx_buff.truesize,
- &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
+ dma_zalloc_coherent(&pdev->dev, self->rx_buff.truesize,
+ &self->rx_buff_dma, GFP_KERNEL);
if (self->rx_buff.head == NULL) {
err = -ENOMEM;
goto err_out2;
}
self->tx_buff.head =
- dma_alloc_coherent(&pdev->dev, self->tx_buff.truesize,
- &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
+ dma_zalloc_coherent(&pdev->dev, self->tx_buff.truesize,
+ &self->tx_buff_dma, GFP_KERNEL);
if (self->tx_buff.head == NULL) {
err = -ENOMEM;
goto err_out3;
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index bb8857a158a6..e641bb240362 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -215,16 +215,16 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
/* Allocate memory if needed */
self->rx_buff.head =
- dma_alloc_coherent(NULL, self->rx_buff.truesize,
- &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
+ dma_zalloc_coherent(NULL, self->rx_buff.truesize,
+ &self->rx_buff_dma, GFP_KERNEL);
if (self->rx_buff.head == NULL) {
err = -ENOMEM;
goto err_out1;
}
self->tx_buff.head =
- dma_alloc_coherent(NULL, self->tx_buff.truesize,
- &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
+ dma_zalloc_coherent(NULL, self->tx_buff.truesize,
+ &self->tx_buff_dma, GFP_KERNEL);
if (self->tx_buff.head == NULL) {
err = -ENOMEM;
goto err_out2;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 16b43bf544b7..64dfaa303dcc 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -600,6 +600,9 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
if (!vlan->port->passthru)
return -EOPNOTSUPP;
+ if (flags & NLM_F_REPLACE)
+ return -EOPNOTSUPP;
+
if (is_unicast_ether_addr(addr))
err = dev_uc_add_excl(dev, addr);
else if (is_multicast_ether_addr(addr))
@@ -683,7 +686,7 @@ void macvlan_common_setup(struct net_device *dev)
dev->priv_flags |= IFF_UNICAST_FLT;
dev->netdev_ops = &macvlan_netdev_ops;
dev->destructor = free_netdev;
- dev->header_ops = &macvlan_hard_header_ops,
+ dev->header_ops = &macvlan_hard_header_ops;
dev->ethtool_ops = &macvlan_ethtool_ops;
}
EXPORT_SYMBOL_GPL(macvlan_common_setup);
@@ -820,7 +823,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
if (port->count)
return -EINVAL;
port->passthru = true;
- memcpy(dev->dev_addr, lowerdev->dev_addr, ETH_ALEN);
+ eth_hw_addr_inherit(dev, lowerdev);
}
err = netdev_upper_dev_link(lowerdev, dev);
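Note: eth_hw_addr_inherit() is not just a memcpy() replacement; it also propagates addr_assign_type from the lower device, so userspace can tell the address is inherited rather than permanent. At the time of this merge it read roughly:

static inline void eth_hw_addr_inherit(struct net_device *dst,
				       struct net_device *src)
{
	dst->addr_assign_type = src->addr_assign_type;
	memcpy(dst->dev_addr, src->dev_addr, ETH_ALEN);
}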
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index ea53abb20988..9dccb1edfd2a 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -529,7 +529,7 @@ static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
linear = len;
skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
- err);
+ err, 0);
if (!skb)
return NULL;
@@ -541,86 +541,6 @@ static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
return skb;
}
-/* set skb frags from iovec, this can move to core network code for reuse */
-static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
- int offset, size_t count)
-{
- int len = iov_length(from, count) - offset;
- int copy = skb_headlen(skb);
- int size, offset1 = 0;
- int i = 0;
-
- /* Skip over from offset */
- while (count && (offset >= from->iov_len)) {
- offset -= from->iov_len;
- ++from;
- --count;
- }
-
- /* copy up to skb headlen */
- while (count && (copy > 0)) {
- size = min_t(unsigned int, copy, from->iov_len - offset);
- if (copy_from_user(skb->data + offset1, from->iov_base + offset,
- size))
- return -EFAULT;
- if (copy > size) {
- ++from;
- --count;
- offset = 0;
- } else
- offset += size;
- copy -= size;
- offset1 += size;
- }
-
- if (len == offset1)
- return 0;
-
- while (count--) {
- struct page *page[MAX_SKB_FRAGS];
- int num_pages;
- unsigned long base;
- unsigned long truesize;
-
- len = from->iov_len - offset;
- if (!len) {
- offset = 0;
- ++from;
- continue;
- }
- base = (unsigned long)from->iov_base + offset;
- size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
- if (i + size > MAX_SKB_FRAGS)
- return -EMSGSIZE;
- num_pages = get_user_pages_fast(base, size, 0, &page[i]);
- if (num_pages != size) {
- int j;
-
- for (j = 0; j < num_pages; j++)
- put_page(page[i + j]);
- return -EFAULT;
- }
- truesize = size * PAGE_SIZE;
- skb->data_len += len;
- skb->len += len;
- skb->truesize += truesize;
- atomic_add(truesize, &skb->sk->sk_wmem_alloc);
- while (len) {
- int off = base & ~PAGE_MASK;
- int size = min_t(int, len, PAGE_SIZE - off);
- __skb_fill_page_desc(skb, i, page[i], off, size);
- skb_shinfo(skb)->nr_frags++;
- /* increase sk_wmem_alloc */
- base += size;
- len -= size;
- i++;
- }
- offset = 0;
- ++from;
- }
- return 0;
-}
-
/*
* macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
* be shared with the tun/tap driver.
@@ -703,29 +623,6 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
return 0;
}
-static unsigned long iov_pages(const struct iovec *iv, int offset,
- unsigned long nr_segs)
-{
- unsigned long seg, base;
- int pages = 0, len, size;
-
- while (nr_segs && (offset >= iv->iov_len)) {
- offset -= iv->iov_len;
- ++iv;
- --nr_segs;
- }
-
- for (seg = 0; seg < nr_segs; seg++) {
- base = (unsigned long)iv[seg].iov_base + offset;
- len = iv[seg].iov_len - offset;
- size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
- pages += size;
- offset = 0;
- }
-
- return pages;
-}
-
/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
const struct iovec *iv, unsigned long total_len,
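Note: the new trailing 0 passed to sock_alloc_send_pskb() here (and in the tun.c hunk below) is the max_page_order argument the function grew in this series; 0 keeps the previous order-0 page allocations for the paged data. For reference, the updated signature as of this merge:

struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
				     unsigned long header_len,
				     unsigned long data_len,
				     int noblock, int *errcode,
				     int max_page_order);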
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 4822aafe638b..dcb21347c670 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -102,6 +102,7 @@ struct netconsole_target {
struct config_item item;
#endif
int enabled;
+ struct mutex mutex;
struct netpoll np;
};
@@ -181,6 +182,7 @@ static struct netconsole_target *alloc_param_target(char *target_config)
strlcpy(nt->np.dev_name, "eth0", IFNAMSIZ);
nt->np.local_port = 6665;
nt->np.remote_port = 6666;
+ mutex_init(&nt->mutex);
memset(nt->np.remote_mac, 0xff, ETH_ALEN);
/* Parse parameters and setup netpoll */
@@ -322,6 +324,7 @@ static ssize_t store_enabled(struct netconsole_target *nt,
return -EINVAL;
}
+ mutex_lock(&nt->mutex);
if (enabled) { /* 1 */
/*
@@ -331,8 +334,10 @@ static ssize_t store_enabled(struct netconsole_target *nt,
netpoll_print_options(&nt->np);
err = netpoll_setup(&nt->np);
- if (err)
+ if (err) {
+ mutex_unlock(&nt->mutex);
return err;
+ }
printk(KERN_INFO "netconsole: network logging started\n");
@@ -341,6 +346,7 @@ static ssize_t store_enabled(struct netconsole_target *nt,
}
nt->enabled = enabled;
+ mutex_unlock(&nt->mutex);
return strnlen(buf, count);
}
@@ -597,6 +603,7 @@ static struct config_item *make_netconsole_target(struct config_group *group,
strlcpy(nt->np.dev_name, "eth0", IFNAMSIZ);
nt->np.local_port = 6665;
nt->np.remote_port = 6666;
+ mutex_init(&nt->mutex);
memset(nt->np.remote_mac, 0xff, ETH_ALEN);
/* Initialize the config_item member */
@@ -682,7 +689,11 @@ restart:
* we might sleep in __netpoll_cleanup()
*/
spin_unlock_irqrestore(&target_list_lock, flags);
+
+ mutex_lock(&nt->mutex);
__netpoll_cleanup(&nt->np);
+ mutex_unlock(&nt->mutex);
+
spin_lock_irqsave(&target_list_lock, flags);
dev_put(nt->np.dev);
nt->np.dev = NULL;
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index a47f9236d966..8004acbef2c9 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -191,7 +191,7 @@ static int mdio_gpio_probe(struct platform_device *pdev)
pdata = mdio_gpio_of_get_data(pdev);
bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio");
} else {
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
bus_id = pdev->id;
}
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index e91d7d736ae2..d2dd9e473e2c 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -106,7 +106,7 @@ err:
static int mdio_mux_gpio_remove(struct platform_device *pdev)
{
- struct mdio_mux_gpio_state *s = pdev->dev.platform_data;
+ struct mdio_mux_gpio_state *s = dev_get_platdata(&pdev->dev);
mdio_mux_uninit(s->mux_handle);
return 0;
}
diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c
index 9733bd239a86..f8e305d8da76 100644
--- a/drivers/net/phy/mdio-mux-mmioreg.c
+++ b/drivers/net/phy/mdio-mux-mmioreg.c
@@ -48,7 +48,7 @@ static int mdio_mux_mmioreg_switch_fn(int current_child, int desired_child,
struct mdio_mux_mmioreg_state *s = data;
if (current_child ^ desired_child) {
- void *p = ioremap(s->phys, 1);
+ void __iomem *p = ioremap(s->phys, 1);
uint8_t x, y;
if (!p)
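Note: the __iomem annotation above is a sparse fix, not a functional change. Pointers returned by ioremap() must carry __iomem and be touched only through the MMIO accessors; a minimal sketch of the usage the annotation enforces, reusing s->phys from the driver:

void __iomem *p = ioremap(s->phys, 1);

if (p) {
	u8 v = ioread8(p);	/* dereferencing p directly would trip sparse */
	iowrite8(v, p);
	iounmap(p);
}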
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index b51fa1f469b0..6aee02ed97ac 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -222,7 +222,7 @@ static int octeon_mdiobus_probe(struct platform_device *pdev)
bus->mii_bus->read = octeon_mdiobus_read;
bus->mii_bus->write = octeon_mdiobus_write;
- dev_set_drvdata(&pdev->dev, bus);
+ platform_set_drvdata(pdev, bus);
err = of_mdiobus_register(bus->mii_bus, pdev->dev.of_node);
if (err)
@@ -244,7 +244,7 @@ static int octeon_mdiobus_remove(struct platform_device *pdev)
struct octeon_mdiobus *bus;
union cvmx_smix_en smi_en;
- bus = dev_get_drvdata(&pdev->dev);
+ bus = platform_get_drvdata(pdev);
mdiobus_unregister(bus->mii_bus);
mdiobus_free(bus->mii_bus);
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index 7f25e49ae37f..18969b3ad8bb 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -101,6 +101,7 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct mii_bus *bus;
struct sun4i_mdio_data *data;
+ struct resource *res;
int ret, i;
bus = mdiobus_alloc_size(sizeof(*data));
@@ -114,7 +115,8 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
bus->parent = &pdev->dev;
- bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR,
+ GFP_KERNEL);
if (!bus->irq) {
ret = -ENOMEM;
goto err_out_free_mdiobus;
@@ -124,10 +126,11 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
bus->irq[i] = PHY_POLL;
data = bus->priv;
- data->membase = of_iomap(np, 0);
- if (!data->membase) {
- ret = -ENOMEM;
- goto err_out_free_mdio_irq;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->membase)) {
+ ret = PTR_ERR(data->membase);
+ goto err_out_free_mdiobus;
}
data->regulator = devm_regulator_get(&pdev->dev, "phy");
@@ -139,7 +142,7 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
} else {
ret = regulator_enable(data->regulator);
if (ret)
- goto err_out_free_mdio_irq;
+ goto err_out_free_mdiobus;
}
ret = of_mdiobus_register(bus, np);
@@ -152,8 +155,6 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
err_out_disable_regulator:
regulator_disable(data->regulator);
-err_out_free_mdio_irq:
- kfree(bus->irq);
err_out_free_mdiobus:
mdiobus_free(bus);
return ret;
@@ -164,7 +165,6 @@ static int sun4i_mdio_remove(struct platform_device *pdev)
struct mii_bus *bus = platform_get_drvdata(pdev);
mdiobus_unregister(bus);
- kfree(bus->irq);
mdiobus_free(bus);
return 0;
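Note: the mdio-sun4i changes above switch to managed (devm_*) resources, which is why the kfree() calls and the err_out_free_mdio_irq label disappear. The canonical probe pattern, sketched for a hypothetical driver:

static int foo_probe(struct platform_device *pdev)	/* hypothetical */
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);	/* checks res, logs on error */
	if (IS_ERR(base))
		return PTR_ERR(base);	/* unmapped automatically on unbind */

	return 0;
}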
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 2510435f34ed..c31aad0004cb 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/micrel_phy.h>
+#include <linux/of.h>
/* Operation Mode Strap Override */
#define MII_KSZPHY_OMSO 0x16
@@ -53,6 +54,20 @@
#define KS8737_CTRL_INT_ACTIVE_HIGH (1 << 14)
#define KSZ8051_RMII_50MHZ_CLK (1 << 7)
+/* Write/read to/from extended registers */
+#define MII_KSZPHY_EXTREG 0x0b
+#define KSZPHY_EXTREG_WRITE 0x8000
+
+#define MII_KSZPHY_EXTREG_WRITE 0x0c
+#define MII_KSZPHY_EXTREG_READ 0x0d
+
+/* Extended registers */
+#define MII_KSZPHY_CLK_CONTROL_PAD_SKEW 0x104
+#define MII_KSZPHY_RX_DATA_PAD_SKEW 0x105
+#define MII_KSZPHY_TX_DATA_PAD_SKEW 0x106
+
+#define PS_TO_REG 200
+
static int ksz_config_flags(struct phy_device *phydev)
{
int regval;
@@ -65,6 +80,20 @@ static int ksz_config_flags(struct phy_device *phydev)
return 0;
}
+static int kszphy_extended_write(struct phy_device *phydev,
+ u32 regnum, u16 val)
+{
+ phy_write(phydev, MII_KSZPHY_EXTREG, KSZPHY_EXTREG_WRITE | regnum);
+ return phy_write(phydev, MII_KSZPHY_EXTREG_WRITE, val);
+}
+
+static int kszphy_extended_read(struct phy_device *phydev,
+ u32 regnum)
+{
+ phy_write(phydev, MII_KSZPHY_EXTREG, regnum);
+ return phy_read(phydev, MII_KSZPHY_EXTREG_READ);
+}
+
static int kszphy_ack_interrupt(struct phy_device *phydev)
{
/* bit[7..0] int status, which is a read and clear register. */
@@ -141,10 +170,82 @@ static int ks8051_config_init(struct phy_device *phydev)
return rc < 0 ? rc : 0;
}
+static int ksz9021_load_values_from_of(struct phy_device *phydev,
+ struct device_node *of_node, u16 reg,
+ char *field1, char *field2,
+ char *field3, char *field4)
+{
+ int val1 = -1;
+ int val2 = -2;
+ int val3 = -3;
+ int val4 = -4;
+ int newval;
+ int matches = 0;
+
+ if (!of_property_read_u32(of_node, field1, &val1))
+ matches++;
+
+ if (!of_property_read_u32(of_node, field2, &val2))
+ matches++;
+
+ if (!of_property_read_u32(of_node, field3, &val3))
+ matches++;
+
+ if (!of_property_read_u32(of_node, field4, &val4))
+ matches++;
+
+ if (!matches)
+ return 0;
+
+ if (matches < 4)
+ newval = kszphy_extended_read(phydev, reg);
+ else
+ newval = 0;
+
+ if (val1 != -1)
+ newval = ((newval & 0xfff0) | ((val1 / PS_TO_REG) & 0xf) << 0);
+
+ if (val2 != -1)
+ newval = ((newval & 0xff0f) | ((val2 / PS_TO_REG) & 0xf) << 4);
+
+ if (val3 != -1)
+ newval = ((newval & 0xf0ff) | ((val3 / PS_TO_REG) & 0xf) << 8);
+
+ if (val4 != -1)
+ newval = ((newval & 0x0fff) | ((val4 / PS_TO_REG) & 0xf) << 12);
+
+ return kszphy_extended_write(phydev, reg, newval);
+}
+
+static int ksz9021_config_init(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->dev;
+ struct device_node *of_node = dev->of_node;
+
+ if (!of_node && dev->parent->of_node)
+ of_node = dev->parent->of_node;
+
+ if (of_node) {
+ ksz9021_load_values_from_of(phydev, of_node,
+ MII_KSZPHY_CLK_CONTROL_PAD_SKEW,
+ "txen-skew-ps", "txc-skew-ps",
+ "rxdv-skew-ps", "rxc-skew-ps");
+ ksz9021_load_values_from_of(phydev, of_node,
+ MII_KSZPHY_RX_DATA_PAD_SKEW,
+ "rxd0-skew-ps", "rxd1-skew-ps",
+ "rxd2-skew-ps", "rxd3-skew-ps");
+ ksz9021_load_values_from_of(phydev, of_node,
+ MII_KSZPHY_TX_DATA_PAD_SKEW,
+ "txd0-skew-ps", "txd1-skew-ps",
+ "txd2-skew-ps", "txd3-skew-ps");
+ }
+ return 0;
+}
+
#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06
#define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX (1 << 6)
#define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED (1 << 4)
-int ksz8873mll_read_status(struct phy_device *phydev)
+static int ksz8873mll_read_status(struct phy_device *phydev)
{
int regval;
@@ -281,7 +382,7 @@ static struct phy_driver ksphy_driver[] = {
.name = "Micrel KSZ9021 Gigabit PHY",
.features = (PHY_GBIT_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .config_init = kszphy_config_init,
+ .config_init = ksz9021_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
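Note: each KSZ9021 pad-skew register packs four 4-bit fields, one per signal, in steps of PS_TO_REG (200) picoseconds; ksz9021_load_values_from_of() above fills only the nibbles actually present in the device tree and read-modify-writes the rest. Reduced to a sketch (hypothetical helper, whole-register case only):

static u16 ksz9021_pack_skew_ps(u32 ps0, u32 ps1, u32 ps2, u32 ps3)
{
	return  ((ps0 / PS_TO_REG) & 0xf) |
	       (((ps1 / PS_TO_REG) & 0xf) << 4) |
	       (((ps2 / PS_TO_REG) & 0xf) << 8) |
	       (((ps3 / PS_TO_REG) & 0xf) << 12);
}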
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 162464fe86bf..6fa5ae00039f 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -47,7 +47,7 @@
#define MAX_CALLID 65535
static DECLARE_BITMAP(callid_bitmap, MAX_CALLID + 1);
-static struct pppox_sock **callid_sock;
+static struct pppox_sock __rcu **callid_sock;
static DEFINE_SPINLOCK(chan_lock);
@@ -83,11 +83,11 @@ static const struct proto_ops pptp_ops;
struct pptp_gre_header {
u8 flags;
u8 ver;
- u16 protocol;
- u16 payload_len;
- u16 call_id;
- u32 seq;
- u32 ack;
+ __be16 protocol;
+ __be16 payload_len;
+ __be16 call_id;
+ __be32 seq;
+ __be32 ack;
} __packed;
static struct pppox_sock *lookup_chan(u16 call_id, __be32 s_addr)
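Note: switching struct pptp_gre_header to __be16/__be32 changes nothing on the wire; it lets sparse enforce byte-order conversions at every use. A sketch of what the annotations buy:

static void fill_gre_header(struct pptp_gre_header *hdr, u16 call_id, u32 seq)
{
	hdr->call_id = htons(call_id);	/* host to big-endian on store */
	hdr->seq = htonl(seq);
	/* hdr->seq = seq; would trigger a sparse "incorrect type" warning */
}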
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index bff7e0b0b4e7..50e43e64d51d 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -622,6 +622,86 @@ static int team_change_mode(struct team *team, const char *kind)
}
+/*********************
+ * Peers notification
+ *********************/
+
+static void team_notify_peers_work(struct work_struct *work)
+{
+ struct team *team;
+
+ team = container_of(work, struct team, notify_peers.dw.work);
+
+ if (!rtnl_trylock()) {
+ schedule_delayed_work(&team->notify_peers.dw, 0);
+ return;
+ }
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
+ rtnl_unlock();
+ if (!atomic_dec_and_test(&team->notify_peers.count_pending))
+ schedule_delayed_work(&team->notify_peers.dw,
+ msecs_to_jiffies(team->notify_peers.interval));
+}
+
+static void team_notify_peers(struct team *team)
+{
+ if (!team->notify_peers.count || !netif_running(team->dev))
+ return;
+ atomic_set(&team->notify_peers.count_pending, team->notify_peers.count);
+ schedule_delayed_work(&team->notify_peers.dw, 0);
+}
+
+static void team_notify_peers_init(struct team *team)
+{
+ INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
+}
+
+static void team_notify_peers_fini(struct team *team)
+{
+ cancel_delayed_work_sync(&team->notify_peers.dw);
+}
+
+
+/*******************************
+ * Send multicast group rejoins
+ *******************************/
+
+static void team_mcast_rejoin_work(struct work_struct *work)
+{
+ struct team *team;
+
+ team = container_of(work, struct team, mcast_rejoin.dw.work);
+
+ if (!rtnl_trylock()) {
+ schedule_delayed_work(&team->mcast_rejoin.dw, 0);
+ return;
+ }
+ call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
+ rtnl_unlock();
+ if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending))
+ schedule_delayed_work(&team->mcast_rejoin.dw,
+ msecs_to_jiffies(team->mcast_rejoin.interval));
+}
+
+static void team_mcast_rejoin(struct team *team)
+{
+ if (!team->mcast_rejoin.count || !netif_running(team->dev))
+ return;
+ atomic_set(&team->mcast_rejoin.count_pending, team->mcast_rejoin.count);
+ schedule_delayed_work(&team->mcast_rejoin.dw, 0);
+}
+
+static void team_mcast_rejoin_init(struct team *team)
+{
+ INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
+}
+
+static void team_mcast_rejoin_fini(struct team *team)
+{
+ cancel_delayed_work_sync(&team->mcast_rejoin.dw);
+}
+
+
/************************
* Rx path frame handler
************************/
@@ -846,6 +926,8 @@ static void team_port_enable(struct team *team,
team_queue_override_port_add(team, port);
if (team->ops.port_enabled)
team->ops.port_enabled(team, port);
+ team_notify_peers(team);
+ team_mcast_rejoin(team);
}
static void __reconstruct_port_hlist(struct team *team, int rm_index)
@@ -875,6 +957,8 @@ static void team_port_disable(struct team *team,
team->en_port_count--;
team_queue_override_port_del(team, port);
team_adjust_ops(team);
+ team_notify_peers(team);
+ team_mcast_rejoin(team);
}
#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
@@ -953,6 +1037,9 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port,
struct netpoll *np;
int err;
+ if (!team->dev->npinfo)
+ return 0;
+
np = kzalloc(sizeof(*np), gfp);
if (!np)
return -ENOMEM;
@@ -979,12 +1066,6 @@ static void team_port_disable_netpoll(struct team_port *port)
__netpoll_cleanup(np);
kfree(np);
}
-
-static struct netpoll_info *team_netpoll_info(struct team *team)
-{
- return team->dev->npinfo;
-}
-
#else
static int team_port_enable_netpoll(struct team *team, struct team_port *port,
gfp_t gfp)
@@ -994,10 +1075,6 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port,
static void team_port_disable_netpoll(struct team_port *port)
{
}
-static struct netpoll_info *team_netpoll_info(struct team *team)
-{
- return NULL;
-}
#endif
static void __team_port_change_port_added(struct team_port *port, bool linkup);
@@ -1079,13 +1156,11 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
goto err_vids_add;
}
- if (team_netpoll_info(team)) {
- err = team_port_enable_netpoll(team, port, GFP_KERNEL);
- if (err) {
- netdev_err(dev, "Failed to enable netpoll on device %s\n",
- portname);
- goto err_enable_netpoll;
- }
+ err = team_port_enable_netpoll(team, port, GFP_KERNEL);
+ if (err) {
+ netdev_err(dev, "Failed to enable netpoll on device %s\n",
+ portname);
+ goto err_enable_netpoll;
}
err = netdev_master_upper_dev_link(port_dev, dev);
@@ -1205,6 +1280,62 @@ static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
return team_change_mode(team, ctx->data.str_val);
}
+static int team_notify_peers_count_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ ctx->data.u32_val = team->notify_peers.count;
+ return 0;
+}
+
+static int team_notify_peers_count_set(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ team->notify_peers.count = ctx->data.u32_val;
+ return 0;
+}
+
+static int team_notify_peers_interval_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ ctx->data.u32_val = team->notify_peers.interval;
+ return 0;
+}
+
+static int team_notify_peers_interval_set(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ team->notify_peers.interval = ctx->data.u32_val;
+ return 0;
+}
+
+static int team_mcast_rejoin_count_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ ctx->data.u32_val = team->mcast_rejoin.count;
+ return 0;
+}
+
+static int team_mcast_rejoin_count_set(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ team->mcast_rejoin.count = ctx->data.u32_val;
+ return 0;
+}
+
+static int team_mcast_rejoin_interval_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ ctx->data.u32_val = team->mcast_rejoin.interval;
+ return 0;
+}
+
+static int team_mcast_rejoin_interval_set(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ team->mcast_rejoin.interval = ctx->data.u32_val;
+ return 0;
+}
+
static int team_port_en_option_get(struct team *team,
struct team_gsetter_ctx *ctx)
{
@@ -1317,6 +1448,30 @@ static const struct team_option team_options[] = {
.setter = team_mode_option_set,
},
{
+ .name = "notify_peers_count",
+ .type = TEAM_OPTION_TYPE_U32,
+ .getter = team_notify_peers_count_get,
+ .setter = team_notify_peers_count_set,
+ },
+ {
+ .name = "notify_peers_interval",
+ .type = TEAM_OPTION_TYPE_U32,
+ .getter = team_notify_peers_interval_get,
+ .setter = team_notify_peers_interval_set,
+ },
+ {
+ .name = "mcast_rejoin_count",
+ .type = TEAM_OPTION_TYPE_U32,
+ .getter = team_mcast_rejoin_count_get,
+ .setter = team_mcast_rejoin_count_set,
+ },
+ {
+ .name = "mcast_rejoin_interval",
+ .type = TEAM_OPTION_TYPE_U32,
+ .getter = team_mcast_rejoin_interval_get,
+ .setter = team_mcast_rejoin_interval_set,
+ },
+ {
.name = "enabled",
.type = TEAM_OPTION_TYPE_BOOL,
.per_port = true,
@@ -1396,6 +1551,10 @@ static int team_init(struct net_device *dev)
INIT_LIST_HEAD(&team->option_list);
INIT_LIST_HEAD(&team->option_inst_list);
+
+ team_notify_peers_init(team);
+ team_mcast_rejoin_init(team);
+
err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
if (err)
goto err_options_register;
@@ -1406,6 +1565,8 @@ static int team_init(struct net_device *dev)
return 0;
err_options_register:
+ team_mcast_rejoin_fini(team);
+ team_notify_peers_fini(team);
team_queue_override_fini(team);
err_team_queue_override_init:
free_percpu(team->pcpu_stats);
@@ -1425,6 +1586,8 @@ static void team_uninit(struct net_device *dev)
__team_change_mode(team, NULL); /* cleanup */
__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
+ team_mcast_rejoin_fini(team);
+ team_notify_peers_fini(team);
team_queue_override_fini(team);
mutex_unlock(&team->lock);
}
@@ -1811,7 +1974,7 @@ static void team_setup_by_port(struct net_device *dev,
dev->addr_len = port_dev->addr_len;
dev->mtu = port_dev->mtu;
memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
- memcpy(dev->dev_addr, port_dev->dev_addr, port_dev->addr_len);
+ eth_hw_addr_inherit(dev, port_dev);
}
static int team_dev_type_check_change(struct net_device *dev,
@@ -2698,6 +2861,10 @@ static int team_device_event(struct notifier_block *unused,
case NETDEV_PRE_TYPE_CHANGE:
	/* Forbid changing the type of the underlying device */
return NOTIFY_BAD;
+ case NETDEV_RESEND_IGMP:
+ /* Propagate to master device */
+ call_netdevice_notifiers(event, port->team->dev);
+ break;
}
return NOTIFY_DONE;
}
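Note: both work handlers above use the same deferral idiom: a work item must never block waiting for RTNL, because an RTNL holder may itself be waiting in cancel_delayed_work_sync() for this work, which would deadlock. The idiom reduced to its core (sketch, not a drop-in function):

static void deferred_notify_work(struct delayed_work *dw, struct net_device *dev)
{
	if (!rtnl_trylock()) {
		schedule_delayed_work(dw, 0);	/* re-queue and bail out */
		return;
	}
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	rtnl_unlock();
}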
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 71af122edf2d..a639de8401f8 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -60,6 +60,7 @@
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
+#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
@@ -137,7 +138,10 @@ struct tun_file {
struct fasync_struct *fasync;
	/* only used for fasync */
unsigned int flags;
- u16 queue_index;
+ union {
+ u16 queue_index;
+ unsigned int ifindex;
+ };
struct list_head next;
struct tun_struct *detached;
};
@@ -405,6 +409,12 @@ static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
return tun;
}
+static void tun_queue_purge(struct tun_file *tfile)
+{
+ skb_queue_purge(&tfile->sk.sk_receive_queue);
+ skb_queue_purge(&tfile->sk.sk_error_queue);
+}
+
static void __tun_detach(struct tun_file *tfile, bool clean)
{
struct tun_file *ntfile;
@@ -431,7 +441,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
synchronize_net();
tun_flow_delete_by_queue(tun, tun->numqueues + 1);
/* Drop read queue */
- skb_queue_purge(&tfile->sk.sk_receive_queue);
+ tun_queue_purge(tfile);
tun_set_real_num_queues(tun);
} else if (tfile->detached && clean) {
tun = tun_enable_queue(tfile);
@@ -483,12 +493,12 @@ static void tun_detach_all(struct net_device *dev)
for (i = 0; i < n; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
/* Drop read queue */
- skb_queue_purge(&tfile->sk.sk_receive_queue);
+ tun_queue_purge(tfile);
sock_put(&tfile->sk);
}
list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
tun_enable_queue(tfile);
- skb_queue_purge(&tfile->sk.sk_receive_queue);
+ tun_queue_purge(tfile);
sock_put(&tfile->sk);
}
BUG_ON(tun->numdisabled != 0);
@@ -497,7 +507,7 @@ static void tun_detach_all(struct net_device *dev)
module_put(THIS_MODULE);
}
-static int tun_attach(struct tun_struct *tun, struct file *file)
+static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter)
{
struct tun_file *tfile = file->private_data;
int err;
@@ -522,7 +532,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
err = 0;
	/* Re-attach the filter to the persistent device */
- if (tun->filter_attached == true) {
+ if (!skip_filter && (tun->filter_attached == true)) {
err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
if (!err)
goto out;
@@ -739,10 +749,17 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
>= dev->tx_queue_len / tun->numqueues)
goto drop;
- /* Orphan the skb - required as we might hang on to it
- * for indefinite time. */
if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
goto drop;
+
+ if (skb->sk) {
+ sock_tx_timestamp(skb->sk, &skb_shinfo(skb)->tx_flags);
+ sw_tx_timestamp(skb);
+ }
+
+ /* Orphan the skb - required as we might hang on to it
+	 * for an indefinite time.
+ */
skb_orphan(skb);
nf_reset(skb);
@@ -943,7 +960,7 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
linear = len;
skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
- &err);
+ &err, 0);
if (!skb)
return ERR_PTR(err);
@@ -955,109 +972,6 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
return skb;
}
-/* set skb frags from iovec, this can move to core network code for reuse */
-static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
- int offset, size_t count)
-{
- int len = iov_length(from, count) - offset;
- int copy = skb_headlen(skb);
- int size, offset1 = 0;
- int i = 0;
-
- /* Skip over from offset */
- while (count && (offset >= from->iov_len)) {
- offset -= from->iov_len;
- ++from;
- --count;
- }
-
- /* copy up to skb headlen */
- while (count && (copy > 0)) {
- size = min_t(unsigned int, copy, from->iov_len - offset);
- if (copy_from_user(skb->data + offset1, from->iov_base + offset,
- size))
- return -EFAULT;
- if (copy > size) {
- ++from;
- --count;
- offset = 0;
- } else
- offset += size;
- copy -= size;
- offset1 += size;
- }
-
- if (len == offset1)
- return 0;
-
- while (count--) {
- struct page *page[MAX_SKB_FRAGS];
- int num_pages;
- unsigned long base;
- unsigned long truesize;
-
- len = from->iov_len - offset;
- if (!len) {
- offset = 0;
- ++from;
- continue;
- }
- base = (unsigned long)from->iov_base + offset;
- size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
- if (i + size > MAX_SKB_FRAGS)
- return -EMSGSIZE;
- num_pages = get_user_pages_fast(base, size, 0, &page[i]);
- if (num_pages != size) {
- int j;
-
- for (j = 0; j < num_pages; j++)
- put_page(page[i + j]);
- return -EFAULT;
- }
- truesize = size * PAGE_SIZE;
- skb->data_len += len;
- skb->len += len;
- skb->truesize += truesize;
- atomic_add(truesize, &skb->sk->sk_wmem_alloc);
- while (len) {
- int off = base & ~PAGE_MASK;
- int size = min_t(int, len, PAGE_SIZE - off);
- __skb_fill_page_desc(skb, i, page[i], off, size);
- skb_shinfo(skb)->nr_frags++;
- /* increase sk_wmem_alloc */
- base += size;
- len -= size;
- i++;
- }
- offset = 0;
- ++from;
- }
- return 0;
-}
-
-static unsigned long iov_pages(const struct iovec *iv, int offset,
- unsigned long nr_segs)
-{
- unsigned long seg, base;
- int pages = 0, len, size;
-
- while (nr_segs && (offset >= iv->iov_len)) {
- offset -= iv->iov_len;
- ++iv;
- --nr_segs;
- }
-
- for (seg = 0; seg < nr_segs; seg++) {
- base = (unsigned long)iv[seg].iov_base + offset;
- len = iv[seg].iov_len - offset;
- size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
- pages += size;
- offset = 0;
- }
-
- return pages;
-}
-
/* Get packet from user space buffer */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
void *msg_control, const struct iovec *iv,
@@ -1262,6 +1176,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
{
struct tun_pi pi = { 0, skb->protocol };
ssize_t total = 0;
+ int vlan_offset = 0;
if (!(tun->flags & TUN_NO_PI)) {
if ((len -= sizeof(pi)) < 0)
@@ -1325,11 +1240,40 @@ static ssize_t tun_put_user(struct tun_struct *tun,
total += tun->vnet_hdr_sz;
}
- len = min_t(int, skb->len, len);
+ if (!vlan_tx_tag_present(skb)) {
+ len = min_t(int, skb->len, len);
+ } else {
+ int copy, ret;
+ struct {
+ __be16 h_vlan_proto;
+ __be16 h_vlan_TCI;
+ } veth;
+
+ veth.h_vlan_proto = skb->vlan_proto;
+ veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
+
+ vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
+ len = min_t(int, skb->len + VLAN_HLEN, len);
+
+ copy = min_t(int, vlan_offset, len);
+ ret = skb_copy_datagram_const_iovec(skb, 0, iv, total, copy);
+ len -= copy;
+ total += copy;
+ if (ret || !len)
+ goto done;
+
+ copy = min_t(int, sizeof(veth), len);
+ ret = memcpy_toiovecend(iv, (void *)&veth, total, copy);
+ len -= copy;
+ total += copy;
+ if (ret || !len)
+ goto done;
+ }
- skb_copy_datagram_const_iovec(skb, 0, iv, total, len);
- total += skb->len;
+ skb_copy_datagram_const_iovec(skb, vlan_offset, iv, total, len);
+ total += len;
+done:
tun->dev->stats.tx_packets++;
tun->dev->stats.tx_bytes += len;
@@ -1478,7 +1422,6 @@ static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
return ret;
}
-
static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len,
int flags)
@@ -1490,10 +1433,15 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
if (!tun)
return -EBADFD;
- if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) {
+ if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
ret = -EINVAL;
goto out;
}
+ if (flags & MSG_ERRQUEUE) {
+ ret = sock_recv_errqueue(sock->sk, m, total_len,
+ SOL_PACKET, TUN_TX_TIMESTAMP);
+ goto out;
+ }
ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len,
flags & MSG_DONTWAIT);
if (ret > total_len) {
@@ -1617,7 +1565,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (err < 0)
return err;
- err = tun_attach(tun, file);
+ err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER);
if (err < 0)
return err;
@@ -1664,6 +1612,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
dev_net_set(dev, net);
dev->rtnl_link_ops = &tun_link_ops;
+ dev->ifindex = tfile->ifindex;
tun = netdev_priv(dev);
tun->dev = dev;
@@ -1684,12 +1633,13 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
tun_flow_init(tun);
dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
- TUN_USER_FEATURES;
+ TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
dev->features = dev->hw_features;
dev->vlan_features = dev->features;
INIT_LIST_HEAD(&tun->disabled);
- err = tun_attach(tun, file);
+ err = tun_attach(tun, file, false);
if (err < 0)
goto err_free_dev;
@@ -1853,7 +1803,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
ret = security_tun_dev_attach_queue(tun->security);
if (ret < 0)
goto unlock;
- ret = tun_attach(tun, file);
+ ret = tun_attach(tun, file, false);
} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
tun = rtnl_dereference(tfile->tun);
if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached)
@@ -1879,6 +1829,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
kgid_t group;
int sndbuf;
int vnet_hdr_sz;
+ unsigned int ifindex;
int ret;
if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
@@ -1913,6 +1864,19 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
ret = -EFAULT;
goto unlock;
}
+ if (cmd == TUNSETIFINDEX) {
+ ret = -EPERM;
+ if (tun)
+ goto unlock;
+
+ ret = -EFAULT;
+ if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
+ goto unlock;
+
+ ret = 0;
+ tfile->ifindex = ifindex;
+ goto unlock;
+ }
ret = -EBADFD;
if (!tun)
@@ -1925,6 +1889,11 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
case TUNGETIFF:
tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
+ if (tfile->detached)
+ ifr.ifr_flags |= IFF_DETACH_QUEUE;
+ if (!tfile->socket.sk->sk_filter)
+ ifr.ifr_flags |= IFF_NOFILTER;
+
if (copy_to_user(argp, &ifr, ifreq_len))
ret = -EFAULT;
break;
@@ -2081,6 +2050,16 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
tun_detach_filter(tun, tun->numqueues);
break;
+ case TUNGETFILTER:
+ ret = -EINVAL;
+ if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
+ break;
+ ret = -EFAULT;
+ if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
+ break;
+ ret = 0;
+ break;
+
default:
ret = -EINVAL;
break;
@@ -2161,6 +2140,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
rcu_assign_pointer(tfile->tun, NULL);
tfile->net = get_net(current->nsproxy->net_ns);
tfile->flags = 0;
+ tfile->ifindex = 0;
rcu_assign_pointer(tfile->socket.wq, &tfile->wq);
init_waitqueue_head(&tfile->wq.wait);
@@ -2276,6 +2256,7 @@ static const struct ethtool_ops tun_ethtool_ops = {
.get_msglevel = tun_get_msglevel,
.set_msglevel = tun_set_msglevel,
.get_link = ethtool_op_get_link,
+ .get_ts_info = ethtool_op_get_ts_info,
};
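Note: the new TUNSETIFINDEX ioctl above must be issued before TUNSETIFF (it fails with -EPERM once a device is attached); it lets userspace recreate a tun/tap device with a known ifindex, e.g. for checkpoint/restore. A hypothetical userspace sketch:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int open_tap_with_ifindex(unsigned int ifindex)	/* hypothetical */
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, TUNSETIFINDEX, &ifindex) < 0)	/* before TUNSETIFF */
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
	if (ioctl(fd, TUNSETIFF, &ifr) < 0)
		return -1;
	return fd;
}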
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index d84bfd4109a4..40db31233313 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -268,6 +268,14 @@ config USB_NET_DM9601
This option adds support for Davicom DM9601 based USB 1.1
10/100 Ethernet adapters.
+config USB_NET_SR9700
+ tristate "CoreChip-sz SR9700 based USB 1.1 10/100 ethernet devices"
+ depends on USB_USBNET
+ select CRC32
+ help
+ This option adds support for CoreChip-sz SR9700 based USB 1.1
+ 10/100 Ethernet adapters.
+
config USB_NET_SMSC75XX
tristate "SMSC LAN75XX based USB 2.0 gigabit ethernet devices"
depends on USB_USBNET
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index e8171784529d..8b342cf992fd 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_USB_NET_AX88179_178A) += ax88179_178a.o
obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o r815x.o
obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o
obj-$(CONFIG_USB_NET_DM9601) += dm9601.o
+obj-$(CONFIG_USB_NET_SR9700) += sr9700.o
obj-$(CONFIG_USB_NET_SMSC75XX) += smsc75xx.o
obj-$(CONFIG_USB_NET_SMSC95XX) += smsc95xx.o
obj-$(CONFIG_USB_NET_GL620A) += gl620a.o
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index 346c032aa795..bdaa12d07a12 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -178,6 +178,8 @@ struct asix_common_private {
struct asix_rx_fixup_info rx_fixup_info;
};
+extern const struct driver_info ax88172a_info;
+
/* ASIX specific flags */
#define FLAG_EEPROM_MAC (1UL << 0) /* init device MAC from eeprom */
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index ad5d1e4384db..386a3df53678 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -778,6 +778,9 @@ static int ax88178_change_mtu(struct net_device *net, int new_mtu)
dev->hard_mtu = net->mtu + net->hard_header_len;
ax88178_set_mfb(dev);
+	/* max qlen depends on hard_mtu and rx_urb_size */
+ usbnet_update_max_qlen(dev);
+
return 0;
}
@@ -943,8 +946,6 @@ static const struct driver_info hg20f9_info = {
.data = FLAG_EEPROM_MAC,
};
-extern const struct driver_info ax88172a_info;
-
static const struct usb_device_id products [] = {
{
// Linksys USB200M
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index d012203b0f29..723b3879ecc2 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -161,7 +161,8 @@ static const struct net_device_ops ax88172a_netdev_ops = {
.ndo_set_rx_mode = asix_set_multicast,
};
-int ax88172a_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
+static int ax88172a_get_settings(struct net_device *net,
+ struct ethtool_cmd *cmd)
{
if (!net->phydev)
return -ENODEV;
@@ -169,7 +170,8 @@ int ax88172a_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
return phy_ethtool_gset(net->phydev, cmd);
}
-int ax88172a_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
+static int ax88172a_set_settings(struct net_device *net,
+ struct ethtool_cmd *cmd)
{
if (!net->phydev)
return -ENODEV;
@@ -177,7 +179,7 @@ int ax88172a_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
return phy_ethtool_sset(net->phydev, cmd);
}
-int ax88172a_nway_reset(struct net_device *net)
+static int ax88172a_nway_reset(struct net_device *net)
{
if (!net->phydev)
return -ENODEV;
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 4233c05a3e32..3569293df872 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -688,6 +688,9 @@ static int ax88179_change_mtu(struct net_device *net, int new_mtu)
2, 2, &tmp16);
}
+	/* max qlen depends on hard_mtu and rx_urb_size */
+ usbnet_update_max_qlen(dev);
+
return 0;
}
@@ -1174,31 +1177,18 @@ ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
int frame_size = dev->maxpacket;
int mss = skb_shinfo(skb)->gso_size;
int headroom;
- int tailroom;
tx_hdr1 = skb->len;
tx_hdr2 = mss;
if (((skb->len + 8) % frame_size) == 0)
tx_hdr2 |= 0x80008000; /* Enable padding */
- headroom = skb_headroom(skb);
- tailroom = skb_tailroom(skb);
-
- if (!skb_header_cloned(skb) &&
- !skb_cloned(skb) &&
- (headroom + tailroom) >= 8) {
- if (headroom < 8) {
- skb->data = memmove(skb->head + 8, skb->data, skb->len);
- skb_set_tail_pointer(skb, skb->len);
- }
- } else {
- struct sk_buff *skb2;
+ headroom = skb_headroom(skb) - 8;
- skb2 = skb_copy_expand(skb, 8, 0, flags);
+ if ((skb_header_cloned(skb) || headroom < 0) &&
+ pskb_expand_head(skb, headroom < 0 ? 8 : 0, 0, GFP_ATOMIC)) {
dev_kfree_skb_any(skb);
- skb = skb2;
- if (!skb)
- return NULL;
+ return NULL;
}
skb_push(skb, 4);
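Note: the rewritten tx_fixup above replaces the memmove()/skb_copy_expand() dance with a single pskb_expand_head() call, which reallocates the skb head in place: pskb_expand_head(skb, nhead, ntail, gfp) adds nhead bytes of headroom and ntail of tailroom. A simplified sketch of the guarantee it establishes (8 bytes for the two 4-byte tx headers; error path hypothetical):

if (skb_header_cloned(skb) || skb_headroom(skb) < 8) {
	if (pskb_expand_head(skb, 8, 0, GFP_ATOMIC))
		return NULL;	/* caller-side drop accounting assumed */
}
/* pushing the two 4-byte headers is now safe */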
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 606eba2872bd..3a8131582e75 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -323,7 +323,7 @@ next_desc:
/* Never use the same address on both ends of the link, even
* if the buggy firmware told us to.
*/
- if (!compare_ether_addr(dev->net->dev_addr, default_modem_addr))
+ if (ether_addr_equal(dev->net->dev_addr, default_modem_addr))
eth_hw_addr_random(dev->net);
/* make MAC addr easily distinguishable from an IP header */
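Note: ether_addr_equal() replaces the old !compare_ether_addr() idiom; mind the inverted polarity, since compare_ether_addr() returned nonzero on mismatch, memcmp-style. Semantically it amounts to:

static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
{
	return memcmp(addr1, addr2, ETH_ALEN) == 0;
}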
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 11c51f275366..f3fce412c0c1 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -19,9 +19,12 @@
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
+#include <linux/list.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
/* Version Information */
-#define DRIVER_VERSION "v1.0.0 (2013/05/03)"
+#define DRIVER_VERSION "v1.01.0 (2013/08/12)"
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
#define DRIVER_DESC "Realtek RTL8152 Based USB 2.0 Ethernet Adapters"
#define MODULENAME "r8152"
@@ -267,6 +270,12 @@ enum rtl_register_content {
FULL_DUP = 0x01,
};
+#define RTL8152_MAX_TX 10
+#define RTL8152_MAX_RX 10
+#define INTBUFSIZE 2
+
+#define INTR_LINK 0x0004
+
#define RTL8152_REQT_READ 0xc0
#define RTL8152_REQT_WRITE 0x40
#define RTL8152_REQ_GET_REGS 0x05
@@ -285,9 +294,9 @@ enum rtl_register_content {
/* rtl8152 flags */
enum rtl8152_flags {
RTL8152_UNPLUG = 0,
- RX_URB_FAIL,
RTL8152_SET_RX_MODE,
- WORK_ENABLE
+ WORK_ENABLE,
+ RTL8152_LINK_CHG,
};
/* Define these values to match your device */
@@ -311,21 +320,53 @@ struct tx_desc {
u32 opts1;
#define TX_FS (1 << 31) /* First segment of a packet */
#define TX_LS (1 << 30) /* Final segment of a packet */
-#define TX_LEN_MASK 0xffff
+#define TX_LEN_MASK 0x3ffff
+
u32 opts2;
+#define UDP_CS (1 << 31) /* Calculate UDP/IP checksum */
+#define TCP_CS (1 << 30) /* Calculate TCP/IP checksum */
+#define IPV4_CS (1 << 29) /* Calculate IPv4 checksum */
+#define IPV6_CS (1 << 28) /* Calculate IPv6 checksum */
+};
+
+struct r8152;
+
+struct rx_agg {
+ struct list_head list;
+ struct urb *urb;
+ struct r8152 *context;
+ void *buffer;
+ void *head;
+};
+
+struct tx_agg {
+ struct list_head list;
+ struct urb *urb;
+ struct r8152 *context;
+ void *buffer;
+ void *head;
+ u32 skb_num;
+ u32 skb_len;
};
struct r8152 {
unsigned long flags;
struct usb_device *udev;
struct tasklet_struct tl;
+ struct usb_interface *intf;
struct net_device *netdev;
- struct urb *rx_urb, *tx_urb;
- struct sk_buff *tx_skb, *rx_skb;
+ struct urb *intr_urb;
+ struct tx_agg tx_info[RTL8152_MAX_TX];
+ struct rx_agg rx_info[RTL8152_MAX_RX];
+ struct list_head rx_done, tx_free;
+ struct sk_buff_head tx_queue;
+ spinlock_t rx_lock, tx_lock;
struct delayed_work schedule;
struct mii_if_info mii;
+ int intr_interval;
u32 msg_enable;
u16 ocp_base;
+ u8 *intr_buff;
u8 version;
u8 speed;
};
@@ -340,6 +381,7 @@ enum rtl_version {
* The RTL chips use a 64 element hash table based on the Ethernet CRC.
*/
static const int multicast_filter_limit = 32;
+static unsigned int rx_buf_sz = 16384;
static
int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
@@ -686,6 +728,9 @@ static void ocp_reg_write(struct r8152 *tp, u16 addr, u16 data)
ocp_write_word(tp, MCU_TYPE_PLA, ocp_index, data);
}
+static
+int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags);
+
static inline void set_ethernet_addr(struct r8152 *tp)
{
struct net_device *dev = tp->netdev;
@@ -716,26 +761,6 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
return 0;
}
-static int alloc_all_urbs(struct r8152 *tp)
-{
- tp->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!tp->rx_urb)
- return 0;
- tp->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!tp->tx_urb) {
- usb_free_urb(tp->rx_urb);
- return 0;
- }
-
- return 1;
-}
-
-static void free_all_urbs(struct r8152 *tp)
-{
- usb_free_urb(tp->rx_urb);
- usb_free_urb(tp->tx_urb);
-}
-
static struct net_device_stats *rtl8152_get_stats(struct net_device *dev)
{
return &dev->stats;
@@ -743,137 +768,574 @@ static struct net_device_stats *rtl8152_get_stats(struct net_device *dev)
static void read_bulk_callback(struct urb *urb)
{
- struct r8152 *tp;
- unsigned pkt_len;
- struct sk_buff *skb;
struct net_device *netdev;
- struct net_device_stats *stats;
+ unsigned long flags;
int status = urb->status;
+ struct rx_agg *agg;
+ struct r8152 *tp;
int result;
- struct rx_desc *rx_desc;
- tp = urb->context;
+ agg = urb->context;
+ if (!agg)
+ return;
+
+ tp = agg->context;
if (!tp)
return;
+
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
+
+ if (!test_bit(WORK_ENABLE, &tp->flags))
+ return;
+
netdev = tp->netdev;
- if (!netif_device_present(netdev))
+
+	/* When the link is down, the driver cancels all bulk URBs. */
+	/* This avoids re-submitting a bulk URB here. */
+ if (!netif_carrier_ok(netdev))
return;
- stats = rtl8152_get_stats(netdev);
switch (status) {
case 0:
- break;
+ if (urb->actual_length < ETH_ZLEN)
+ break;
+
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ list_add_tail(&agg->list, &tp->rx_done);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+ tasklet_schedule(&tp->tl);
+ return;
case -ESHUTDOWN:
set_bit(RTL8152_UNPLUG, &tp->flags);
netif_device_detach(tp->netdev);
+ return;
case -ENOENT:
return; /* the urb is in unlink state */
case -ETIME:
pr_warn_ratelimited("may be reset is needed?..\n");
- goto goon;
+ break;
default:
pr_warn_ratelimited("Rx status %d\n", status);
- goto goon;
+ break;
}
- /* protect against short packets (tell me why we got some?!?) */
- if (urb->actual_length < sizeof(*rx_desc))
- goto goon;
-
-
- rx_desc = (struct rx_desc *)urb->transfer_buffer;
- pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
- if (urb->actual_length < sizeof(struct rx_desc) + pkt_len)
- goto goon;
-
- skb = netdev_alloc_skb_ip_align(netdev, pkt_len);
- if (!skb)
- goto goon;
-
- memcpy(skb->data, tp->rx_skb->data + sizeof(struct rx_desc), pkt_len);
- skb_put(skb, pkt_len);
- skb->protocol = eth_type_trans(skb, netdev);
- netif_rx(skb);
- stats->rx_packets++;
- stats->rx_bytes += pkt_len;
-goon:
- usb_fill_bulk_urb(tp->rx_urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
- tp->rx_skb->data, RTL8152_RMS + sizeof(struct rx_desc),
- (usb_complete_t)read_bulk_callback, tp);
- result = usb_submit_urb(tp->rx_urb, GFP_ATOMIC);
+ result = r8152_submit_rx(tp, agg, GFP_ATOMIC);
if (result == -ENODEV) {
netif_device_detach(tp->netdev);
} else if (result) {
- set_bit(RX_URB_FAIL, &tp->flags);
- goto resched;
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ list_add_tail(&agg->list, &tp->rx_done);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+ tasklet_schedule(&tp->tl);
+ }
+}
+
+static void write_bulk_callback(struct urb *urb)
+{
+ struct net_device_stats *stats;
+ unsigned long flags;
+ struct tx_agg *agg;
+ struct r8152 *tp;
+ int status = urb->status;
+
+ agg = urb->context;
+ if (!agg)
+ return;
+
+ tp = agg->context;
+ if (!tp)
+ return;
+
+ stats = rtl8152_get_stats(tp->netdev);
+ if (status) {
+ pr_warn_ratelimited("Tx status %d\n", status);
+ stats->tx_errors += agg->skb_num;
} else {
- clear_bit(RX_URB_FAIL, &tp->flags);
+ stats->tx_packets += agg->skb_num;
+ stats->tx_bytes += agg->skb_len;
}
- return;
-resched:
- tasklet_schedule(&tp->tl);
+ spin_lock_irqsave(&tp->tx_lock, flags);
+ list_add_tail(&agg->list, &tp->tx_free);
+ spin_unlock_irqrestore(&tp->tx_lock, flags);
+
+ if (!netif_carrier_ok(tp->netdev))
+ return;
+
+ if (!test_bit(WORK_ENABLE, &tp->flags))
+ return;
+
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return;
+
+ if (!skb_queue_empty(&tp->tx_queue))
+ tasklet_schedule(&tp->tl);
}
-static void rx_fixup(unsigned long data)
+static void intr_callback(struct urb *urb)
{
struct r8152 *tp;
- int status;
+ __u16 *d;
+ int status = urb->status;
+ int res;
+
+ tp = urb->context;
+ if (!tp)
+ return;
- tp = (struct r8152 *)data;
if (!test_bit(WORK_ENABLE, &tp->flags))
return;
- status = usb_submit_urb(tp->rx_urb, GFP_ATOMIC);
- if (status == -ENODEV) {
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return;
+
+ switch (status) {
+ case 0: /* success */
+ break;
+ case -ECONNRESET: /* unlink */
+ case -ESHUTDOWN:
netif_device_detach(tp->netdev);
- } else if (status) {
- set_bit(RX_URB_FAIL, &tp->flags);
- goto tlsched;
+ case -ENOENT:
+ return;
+ case -EOVERFLOW:
+ netif_info(tp, intr, tp->netdev, "intr status -EOVERFLOW\n");
+ goto resubmit;
+ /* -EPIPE: should clear the halt */
+ default:
+ netif_info(tp, intr, tp->netdev, "intr status %d\n", status);
+ goto resubmit;
+ }
+
+ d = urb->transfer_buffer;
+ if (INTR_LINK & __le16_to_cpu(d[0])) {
+ if (!(tp->speed & LINK_STATUS)) {
+ set_bit(RTL8152_LINK_CHG, &tp->flags);
+ schedule_delayed_work(&tp->schedule, 0);
+ }
} else {
- clear_bit(RX_URB_FAIL, &tp->flags);
+ if (tp->speed & LINK_STATUS) {
+ set_bit(RTL8152_LINK_CHG, &tp->flags);
+ schedule_delayed_work(&tp->schedule, 0);
+ }
}
- return;
-tlsched:
- tasklet_schedule(&tp->tl);
+resubmit:
+ res = usb_submit_urb(urb, GFP_ATOMIC);
+ if (res == -ENODEV)
+ netif_device_detach(tp->netdev);
+ else if (res)
+ netif_err(tp, intr, tp->netdev,
+ "can't resubmit intr, status %d\n", res);
}
-static void write_bulk_callback(struct urb *urb)
+static inline void *rx_agg_align(void *data)
+{
+ return (void *)ALIGN((uintptr_t)data, 8);
+}
+
+static inline void *tx_agg_align(void *data)
+{
+ return (void *)ALIGN((uintptr_t)data, 4);
+}
+
+static void free_all_mem(struct r8152 *tp)
+{
+ int i;
+
+ for (i = 0; i < RTL8152_MAX_RX; i++) {
+ if (tp->rx_info[i].urb) {
+ usb_free_urb(tp->rx_info[i].urb);
+ tp->rx_info[i].urb = NULL;
+ }
+
+ if (tp->rx_info[i].buffer) {
+ kfree(tp->rx_info[i].buffer);
+ tp->rx_info[i].buffer = NULL;
+ tp->rx_info[i].head = NULL;
+ }
+ }
+
+ for (i = 0; i < RTL8152_MAX_TX; i++) {
+ if (tp->tx_info[i].urb) {
+ usb_free_urb(tp->tx_info[i].urb);
+ tp->tx_info[i].urb = NULL;
+ }
+
+ if (tp->tx_info[i].buffer) {
+ kfree(tp->tx_info[i].buffer);
+ tp->tx_info[i].buffer = NULL;
+ tp->tx_info[i].head = NULL;
+ }
+ }
+
+ if (tp->intr_urb) {
+ usb_free_urb(tp->intr_urb);
+ tp->intr_urb = NULL;
+ }
+
+ if (tp->intr_buff) {
+ kfree(tp->intr_buff);
+ tp->intr_buff = NULL;
+ }
+}
+
+static int alloc_all_mem(struct r8152 *tp)
+{
+ struct net_device *netdev = tp->netdev;
+ struct usb_interface *intf = tp->intf;
+ struct usb_host_interface *alt = intf->cur_altsetting;
+ struct usb_host_endpoint *ep_intr = alt->endpoint + 2;
+ struct urb *urb;
+ int node, i;
+ u8 *buf;
+
+ node = netdev->dev.parent ? dev_to_node(netdev->dev.parent) : -1;
+
+ spin_lock_init(&tp->rx_lock);
+ spin_lock_init(&tp->tx_lock);
+ INIT_LIST_HEAD(&tp->rx_done);
+ INIT_LIST_HEAD(&tp->tx_free);
+ skb_queue_head_init(&tp->tx_queue);
+
+ for (i = 0; i < RTL8152_MAX_RX; i++) {
+ buf = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
+ if (!buf)
+ goto err1;
+
+ if (buf != rx_agg_align(buf)) {
+ kfree(buf);
+ buf = kmalloc_node(rx_buf_sz + 8, GFP_KERNEL, node);
+ if (!buf)
+ goto err1;
+ }
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ kfree(buf);
+ goto err1;
+ }
+
+ INIT_LIST_HEAD(&tp->rx_info[i].list);
+ tp->rx_info[i].context = tp;
+ tp->rx_info[i].urb = urb;
+ tp->rx_info[i].buffer = buf;
+ tp->rx_info[i].head = rx_agg_align(buf);
+ }
+
+ for (i = 0; i < RTL8152_MAX_TX; i++) {
+ buf = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
+ if (!buf)
+ goto err1;
+
+ if (buf != tx_agg_align(buf)) {
+ kfree(buf);
+ buf = kmalloc_node(rx_buf_sz + 4, GFP_KERNEL, node);
+ if (!buf)
+ goto err1;
+ }
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ kfree(buf);
+ goto err1;
+ }
+
+ INIT_LIST_HEAD(&tp->tx_info[i].list);
+ tp->tx_info[i].context = tp;
+ tp->tx_info[i].urb = urb;
+ tp->tx_info[i].buffer = buf;
+ tp->tx_info[i].head = tx_agg_align(buf);
+
+ list_add_tail(&tp->tx_info[i].list, &tp->tx_free);
+ }
+
+ tp->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!tp->intr_urb)
+ goto err1;
+
+ tp->intr_buff = kmalloc(INTBUFSIZE, GFP_KERNEL);
+ if (!tp->intr_buff)
+ goto err1;
+
+ tp->intr_interval = (int)ep_intr->desc.bInterval;
+ usb_fill_int_urb(tp->intr_urb, tp->udev, usb_rcvintpipe(tp->udev, 3),
+ tp->intr_buff, INTBUFSIZE, intr_callback,
+ tp, tp->intr_interval);
+
+ return 0;
+
+err1:
+ free_all_mem(tp);
+ return -ENOMEM;
+}
+
+static struct tx_agg *r8152_get_tx_agg(struct r8152 *tp)
+{
+ struct tx_agg *agg = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tp->tx_lock, flags);
+ if (!list_empty(&tp->tx_free)) {
+ struct list_head *cursor;
+
+ cursor = tp->tx_free.next;
+ list_del_init(cursor);
+ agg = list_entry(cursor, struct tx_agg, list);
+ }
+ spin_unlock_irqrestore(&tp->tx_lock, flags);
+
+ return agg;
+}
+
+static void
+r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb)
+{
+ memset(desc, 0, sizeof(*desc));
+
+ desc->opts1 = cpu_to_le32((skb->len & TX_LEN_MASK) | TX_FS | TX_LS);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ __be16 protocol;
+ u8 ip_protocol;
+ u32 opts2 = 0;
+
+ if (skb->protocol == htons(ETH_P_8021Q))
+ protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
+ else
+ protocol = skb->protocol;
+
+ switch (protocol) {
+ case htons(ETH_P_IP):
+ opts2 |= IPV4_CS;
+ ip_protocol = ip_hdr(skb)->protocol;
+ break;
+
+ case htons(ETH_P_IPV6):
+ opts2 |= IPV6_CS;
+ ip_protocol = ipv6_hdr(skb)->nexthdr;
+ break;
+
+ default:
+ ip_protocol = IPPROTO_RAW;
+ break;
+ }
+
+ if (ip_protocol == IPPROTO_TCP) {
+ opts2 |= TCP_CS;
+ opts2 |= (skb_transport_offset(skb) & 0x7fff) << 17;
+ } else if (ip_protocol == IPPROTO_UDP) {
+ opts2 |= UDP_CS;
+ } else {
+ WARN_ON_ONCE(1);
+ }
+
+ desc->opts2 = cpu_to_le32(opts2);
+ }
+}
+
+static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
+{
+ u32 remain;
+ u8 *tx_data;
+
+ tx_data = agg->head;
+ agg->skb_num = agg->skb_len = 0;
+ remain = rx_buf_sz - sizeof(struct tx_desc);
+
+ while (remain >= ETH_ZLEN) {
+ struct tx_desc *tx_desc;
+ struct sk_buff *skb;
+ unsigned int len;
+
+ skb = skb_dequeue(&tp->tx_queue);
+ if (!skb)
+ break;
+
+ len = skb->len;
+ if (remain < len) {
+ skb_queue_head(&tp->tx_queue, skb);
+ break;
+ }
+
+ tx_desc = (struct tx_desc *)tx_data;
+ tx_data += sizeof(*tx_desc);
+
+ r8152_tx_csum(tp, tx_desc, skb);
+ memcpy(tx_data, skb->data, len);
+ agg->skb_num++;
+ agg->skb_len += len;
+ dev_kfree_skb_any(skb);
+
+ tx_data = tx_agg_align(tx_data + len);
+ remain = rx_buf_sz - sizeof(*tx_desc) -
+ (u32)((void *)tx_data - agg->head);
+ }
+
+ usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2),
+ agg->head, (int)(tx_data - (u8 *)agg->head),
+ (usb_complete_t)write_bulk_callback, agg);
+
+ return usb_submit_urb(agg->urb, GFP_ATOMIC);
+}
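
r8152_tx_agg_fill() packs as many queued skbs as fit into one bulk-out buffer, each prefixed by its own tx_desc and with the next descriptor realigned by tx_agg_align(). A rough standalone model of the length accounting; the 8-byte descriptor size and 4-byte alignment are assumptions for illustration, not values taken from this patch:

	#include <stdio.h>

	#define DESC_SIZE 8u			/* assumed sizeof(struct tx_desc) */
	#define AGG_ALIGN(x) (((x) + 3u) & ~3u)	/* assumed 4-byte alignment */

	int main(void)
	{
		unsigned lens[] = { 60, 1514 };
		unsigned off = 0;

		for (int i = 0; i < 2; i++) {
			off += DESC_SIZE;	/* per-packet descriptor */
			off += lens[i];		/* payload copy */
			off = AGG_ALIGN(off);	/* next descriptor is aligned */
		}
		printf("urb transfer length = %u bytes\n", off);
		return 0;
	}
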
+
+static void rx_bottom(struct r8152 *tp)
+{
+ unsigned long flags;
+ struct list_head *cursor, *next;
+
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ list_for_each_safe(cursor, next, &tp->rx_done) {
+ struct rx_desc *rx_desc;
+ struct rx_agg *agg;
+ unsigned pkt_len;
+ int len_used = 0;
+ struct urb *urb;
+ u8 *rx_data;
+ int ret;
+
+ list_del_init(cursor);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+
+ agg = list_entry(cursor, struct rx_agg, list);
+ urb = agg->urb;
+ if (urb->actual_length < ETH_ZLEN)
+ goto submit;
+
+ rx_desc = agg->head;
+ rx_data = agg->head;
+ pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
+ len_used += sizeof(struct rx_desc) + pkt_len;
+
+ while (urb->actual_length >= len_used) {
+ struct net_device *netdev = tp->netdev;
+ struct net_device_stats *stats;
+ struct sk_buff *skb;
+
+ if (pkt_len < ETH_ZLEN)
+ break;
+
+ stats = rtl8152_get_stats(netdev);
+
+ pkt_len -= 4; /* CRC */
+ rx_data += sizeof(struct rx_desc);
+
+ skb = netdev_alloc_skb_ip_align(netdev, pkt_len);
+ if (!skb) {
+ stats->rx_dropped++;
+ break;
+ }
+ memcpy(skb->data, rx_data, pkt_len);
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, netdev);
+ netif_rx(skb);
+ stats->rx_packets++;
+ stats->rx_bytes += pkt_len;
+
+ rx_data = rx_agg_align(rx_data + pkt_len + 4);
+ rx_desc = (struct rx_desc *)rx_data;
+ pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
+ len_used = (int)(rx_data - (u8 *)agg->head);
+ len_used += sizeof(struct rx_desc) + pkt_len;
+ }
+
+submit:
+ ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ if (ret && ret != -ENODEV) {
+ list_add_tail(&agg->list, next);
+ tasklet_schedule(&tp->tl);
+ }
+ }
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+}
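
rx_bottom() walks one aggregated URB that may hold several frames: each frame is an rx_desc, then the payload including a 4-byte CRC, with the next descriptor realigned by rx_agg_align(). The `urb->actual_length >= len_used` guard stops the walk at a truncated transfer. A small standalone model of that accounting over a synthetic two-frame buffer (descriptor size and alignment are illustrative assumptions):

	#include <stdio.h>

	#define DESC_SIZE 8u			/* assumed sizeof(struct rx_desc) */
	#define AGG_ALIGN(x) (((x) + 7u) & ~7u)	/* assumed 8-byte alignment */

	int main(void)
	{
		unsigned pkt_lens[] = { 64, 1518 };	/* lengths incl. 4-byte CRC */
		unsigned actual_length = 0, used = 0;

		for (int i = 0; i < 2; i++)
			actual_length = AGG_ALIGN(actual_length) +
					DESC_SIZE + pkt_lens[i];

		for (int i = 0; i < 2; i++) {
			used = AGG_ALIGN(used) + DESC_SIZE + pkt_lens[i];
			if (actual_length < used)
				break;		/* truncated URB: stop */
			printf("frame %d: %u bytes after CRC strip\n",
			       i, pkt_lens[i] - 4);
		}
		return 0;
	}
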
+
+static void tx_bottom(struct r8152 *tp)
+{
+ int res;
+
+ do {
+ struct tx_agg *agg;
+
+ if (skb_queue_empty(&tp->tx_queue))
+ break;
+
+ agg = r8152_get_tx_agg(tp);
+ if (!agg)
+ break;
+
+ res = r8152_tx_agg_fill(tp, agg);
+ if (res) {
+ struct net_device_stats *stats;
+ struct net_device *netdev;
+ unsigned long flags;
+
+ netdev = tp->netdev;
+ stats = rtl8152_get_stats(netdev);
+
+ if (res == -ENODEV) {
+ netif_device_detach(netdev);
+ } else {
+ netif_warn(tp, tx_err, netdev,
+ "failed tx_urb %d\n", res);
+ stats->tx_dropped += agg->skb_num;
+ spin_lock_irqsave(&tp->tx_lock, flags);
+ list_add_tail(&agg->list, &tp->tx_free);
+ spin_unlock_irqrestore(&tp->tx_lock, flags);
+ }
+ }
+ } while (res == 0);
+}
+
+static void bottom_half(unsigned long data)
{
struct r8152 *tp;
- int status = urb->status;
- tp = urb->context;
- if (!tp)
+ tp = (struct r8152 *)data;
+
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
- dev_kfree_skb_irq(tp->tx_skb);
- if (!netif_device_present(tp->netdev))
+
+ if (!test_bit(WORK_ENABLE, &tp->flags))
return;
- if (status)
- dev_info(&urb->dev->dev, "%s: Tx status %d\n",
- tp->netdev->name, status);
- tp->netdev->trans_start = jiffies;
- netif_wake_queue(tp->netdev);
+
+ /* When the link is down, the driver cancels all bulk transfers. */
+ /* This avoids re-submitting the bulk URBs. */
+ if (!netif_carrier_ok(tp->netdev))
+ return;
+
+ rx_bottom(tp);
+ tx_bottom(tp);
+}
+
+static
+int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
+{
+ usb_fill_bulk_urb(agg->urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
+ agg->head, rx_buf_sz,
+ (usb_complete_t)read_bulk_callback, agg);
+
+ return usb_submit_urb(agg->urb, mem_flags);
}
static void rtl8152_tx_timeout(struct net_device *netdev)
{
struct r8152 *tp = netdev_priv(netdev);
- struct net_device_stats *stats = rtl8152_get_stats(netdev);
+ int i;
+
netif_warn(tp, tx_err, netdev, "Tx timeout.\n");
- usb_unlink_urb(tp->tx_urb);
- stats->tx_errors++;
+ for (i = 0; i < RTL8152_MAX_TX; i++)
+ usb_unlink_urb(tp->tx_info[i].urb);
}
static void rtl8152_set_rx_mode(struct net_device *netdev)
{
struct r8152 *tp = netdev_priv(netdev);
- if (tp->speed & LINK_STATUS)
+ if (tp->speed & LINK_STATUS) {
set_bit(RTL8152_SET_RX_MODE, &tp->flags);
+ schedule_delayed_work(&tp->schedule, 0);
+ }
}
static void _rtl8152_set_rx_mode(struct net_device *netdev)
@@ -923,33 +1385,39 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
{
struct r8152 *tp = netdev_priv(netdev);
struct net_device_stats *stats = rtl8152_get_stats(netdev);
+ unsigned long flags;
+ struct tx_agg *agg = NULL;
struct tx_desc *tx_desc;
unsigned int len;
+ u8 *tx_data;
int res;
- netif_stop_queue(netdev);
- len = skb->len;
- if (skb_header_cloned(skb) || skb_headroom(skb) < sizeof(*tx_desc)) {
- struct sk_buff *tx_skb;
+ skb_tx_timestamp(skb);
- tx_skb = skb_copy_expand(skb, sizeof(*tx_desc), 0, GFP_ATOMIC);
- dev_kfree_skb_any(skb);
- if (!tx_skb) {
- stats->tx_dropped++;
- netif_wake_queue(netdev);
- return NETDEV_TX_OK;
- }
- skb = tx_skb;
+ /* If tx_queue is not empty, at least one previous packet is */
+ /* still waiting to be sent. Don't send the current one before it. */
+ if (skb_queue_empty(&tp->tx_queue))
+ agg = r8152_get_tx_agg(tp);
+
+ if (!agg) {
+ skb_queue_tail(&tp->tx_queue, skb);
+ return NETDEV_TX_OK;
}
- tx_desc = (struct tx_desc *)skb_push(skb, sizeof(*tx_desc));
- memset(tx_desc, 0, sizeof(*tx_desc));
- tx_desc->opts1 = cpu_to_le32((len & TX_LEN_MASK) | TX_FS | TX_LS);
- tp->tx_skb = skb;
- skb_tx_timestamp(skb);
- usb_fill_bulk_urb(tp->tx_urb, tp->udev, usb_sndbulkpipe(tp->udev, 2),
- skb->data, skb->len,
- (usb_complete_t)write_bulk_callback, tp);
- res = usb_submit_urb(tp->tx_urb, GFP_ATOMIC);
+
+ tx_desc = (struct tx_desc *)agg->head;
+ tx_data = agg->head + sizeof(*tx_desc);
+ agg->skb_num = agg->skb_len = 0;
+
+ len = skb->len;
+ r8152_tx_csum(tp, tx_desc, skb);
+ memcpy(tx_data, skb->data, len);
+ dev_kfree_skb_any(skb);
+ agg->skb_num++;
+ agg->skb_len += len;
+ usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2),
+ agg->head, len + sizeof(*tx_desc),
+ (usb_complete_t)write_bulk_callback, agg);
+ res = usb_submit_urb(agg->urb, GFP_ATOMIC);
if (res) {
/* Can we get/handle EPIPE here? */
if (res == -ENODEV) {
@@ -957,12 +1425,11 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
} else {
netif_warn(tp, tx_err, netdev,
"failed tx_urb %d\n", res);
- stats->tx_errors++;
- netif_start_queue(netdev);
+ stats->tx_dropped++;
+ spin_lock_irqsave(&tp->tx_lock, flags);
+ list_add_tail(&agg->list, &tp->tx_free);
+ spin_unlock_irqrestore(&tp->tx_lock, flags);
}
- } else {
- stats->tx_packets++;
- stats->tx_bytes += skb->len;
}
return NETDEV_TX_OK;
@@ -999,17 +1466,18 @@ static inline u8 rtl8152_get_speed(struct r8152 *tp)
static int rtl8152_enable(struct r8152 *tp)
{
- u32 ocp_data;
+ u32 ocp_data;
+ int i, ret;
u8 speed;
speed = rtl8152_get_speed(tp);
- if (speed & _100bps) {
+ if (speed & _10bps) {
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
- ocp_data &= ~EEEP_CR_EEEP_TX;
+ ocp_data |= EEEP_CR_EEEP_TX;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data);
} else {
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
- ocp_data |= EEEP_CR_EEEP_TX;
+ ocp_data &= ~EEEP_CR_EEEP_TX;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data);
}
@@ -1023,23 +1491,34 @@ static int rtl8152_enable(struct r8152 *tp)
ocp_data &= ~RXDY_GATED_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
- usb_fill_bulk_urb(tp->rx_urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
- tp->rx_skb->data, RTL8152_RMS + sizeof(struct rx_desc),
- (usb_complete_t)read_bulk_callback, tp);
+ INIT_LIST_HEAD(&tp->rx_done);
+ ret = 0;
+ for (i = 0; i < RTL8152_MAX_RX; i++) {
+ INIT_LIST_HEAD(&tp->rx_info[i].list);
+ ret |= r8152_submit_rx(tp, &tp->rx_info[i], GFP_KERNEL);
+ }
- return usb_submit_urb(tp->rx_urb, GFP_KERNEL);
+ return ret;
}
static void rtl8152_disable(struct r8152 *tp)
{
- u32 ocp_data;
- int i;
+ struct net_device_stats *stats = rtl8152_get_stats(tp->netdev);
+ struct sk_buff *skb;
+ u32 ocp_data;
+ int i;
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
ocp_data &= ~RCR_ACPT_ALL;
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
- usb_kill_urb(tp->tx_urb);
+ while ((skb = skb_dequeue(&tp->tx_queue))) {
+ dev_kfree_skb(skb);
+ stats->tx_dropped++;
+ }
+
+ for (i = 0; i < RTL8152_MAX_TX; i++)
+ usb_kill_urb(tp->tx_info[i].urb);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
ocp_data |= RXDY_GATED_EN;
@@ -1058,7 +1537,8 @@ static void rtl8152_disable(struct r8152 *tp)
mdelay(1);
}
- usb_kill_urb(tp->rx_urb);
+ for (i = 0; i < RTL8152_MAX_RX; i++)
+ usb_kill_urb(tp->rx_info[i].urb);
rtl8152_nic_reset(tp);
}
@@ -1269,7 +1749,6 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
r8152_mdio_write(tp, MII_BMCR, bmcr);
out:
- schedule_delayed_work(&tp->schedule, 5 * HZ);
return ret;
}
@@ -1292,6 +1771,7 @@ static void set_carrier(struct r8152 *tp)
struct net_device *netdev = tp->netdev;
u8 speed;
+ clear_bit(RTL8152_LINK_CHG, &tp->flags);
speed = rtl8152_get_speed(tp);
if (speed & LINK_STATUS) {
@@ -1303,7 +1783,9 @@ static void set_carrier(struct r8152 *tp)
} else {
if (tp->speed & LINK_STATUS) {
netif_carrier_off(netdev);
+ tasklet_disable(&tp->tl);
rtl8152_disable(tp);
+ tasklet_enable(&tp->tl);
}
}
tp->speed = speed;
@@ -1319,13 +1801,12 @@ static void rtl_work_func_t(struct work_struct *work)
if (test_bit(RTL8152_UNPLUG, &tp->flags))
goto out1;
- set_carrier(tp);
+ if (test_bit(RTL8152_LINK_CHG, &tp->flags))
+ set_carrier(tp);
if (test_bit(RTL8152_SET_RX_MODE, &tp->flags))
_rtl8152_set_rx_mode(tp->netdev);
- schedule_delayed_work(&tp->schedule, HZ);
-
out1:
return;
}
@@ -1335,28 +1816,20 @@ static int rtl8152_open(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
- tp->speed = rtl8152_get_speed(tp);
- if (tp->speed & LINK_STATUS) {
- res = rtl8152_enable(tp);
- if (res) {
- if (res == -ENODEV)
- netif_device_detach(tp->netdev);
-
- netif_err(tp, ifup, netdev,
- "rtl8152_open failed: %d\n", res);
- return res;
- }
-
- netif_carrier_on(netdev);
- } else {
- netif_stop_queue(netdev);
- netif_carrier_off(netdev);
+ res = usb_submit_urb(tp->intr_urb, GFP_KERNEL);
+ if (res) {
+ if (res == -ENODEV)
+ netif_device_detach(tp->netdev);
+ netif_warn(tp, ifup, netdev,
+ "intr_urb submit failed: %d\n", res);
+ return res;
}
rtl8152_set_speed(tp, AUTONEG_ENABLE, SPEED_100, DUPLEX_FULL);
+ tp->speed = 0;
+ netif_carrier_off(netdev);
netif_start_queue(netdev);
set_bit(WORK_ENABLE, &tp->flags);
- schedule_delayed_work(&tp->schedule, 0);
return res;
}
@@ -1366,10 +1839,13 @@ static int rtl8152_close(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
+ usb_kill_urb(tp->intr_urb);
clear_bit(WORK_ENABLE, &tp->flags);
cancel_delayed_work_sync(&tp->schedule);
netif_stop_queue(netdev);
+ tasklet_disable(&tp->tl);
rtl8152_disable(tp);
+ tasklet_enable(&tp->tl);
return res;
}
@@ -1429,8 +1905,8 @@ static void r8152b_hw_phy_cfg(struct r8152 *tp)
static void r8152b_init(struct r8152 *tp)
{
- u32 ocp_data;
- int i;
+ u32 ocp_data;
+ int i;
rtl_clear_bp(tp);
@@ -1475,9 +1951,9 @@ static void r8152b_init(struct r8152 *tp)
break;
}
- /* disable rx aggregation */
+ /* enable rx aggregation */
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
- ocp_data |= RX_AGG_DISABLE;
+ ocp_data &= ~RX_AGG_DISABLE;
ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
}
@@ -1489,7 +1965,9 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
if (netif_running(tp->netdev)) {
clear_bit(WORK_ENABLE, &tp->flags);
+ usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
+ tasklet_disable(&tp->tl);
}
rtl8152_down(tp);
@@ -1504,10 +1982,12 @@ static int rtl8152_resume(struct usb_interface *intf)
r8152b_init(tp);
netif_device_attach(tp->netdev);
if (netif_running(tp->netdev)) {
- rtl8152_enable(tp);
+ rtl8152_set_speed(tp, AUTONEG_ENABLE, SPEED_100, DUPLEX_FULL);
+ tp->speed = 0;
+ netif_carrier_off(tp->netdev);
set_bit(WORK_ENABLE, &tp->flags);
- set_bit(RTL8152_SET_RX_MODE, &tp->flags);
- schedule_delayed_work(&tp->schedule, 0);
+ usb_submit_urb(tp->intr_urb, GFP_KERNEL);
+ tasklet_enable(&tp->tl);
}
return 0;
@@ -1619,6 +2099,7 @@ static int rtl8152_probe(struct usb_interface *intf,
struct usb_device *udev = interface_to_usbdev(intf);
struct r8152 *tp;
struct net_device *netdev;
+ int ret;
if (udev->actconfig->desc.bConfigurationValue != 1) {
usb_driver_set_configuration(udev, 1);
@@ -1631,19 +2112,22 @@ static int rtl8152_probe(struct usb_interface *intf,
return -ENOMEM;
}
+ SET_NETDEV_DEV(netdev, &intf->dev);
tp = netdev_priv(netdev);
tp->msg_enable = 0x7FFF;
- tasklet_init(&tp->tl, rx_fixup, (unsigned long)tp);
+ tasklet_init(&tp->tl, bottom_half, (unsigned long)tp);
INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
tp->udev = udev;
tp->netdev = netdev;
+ tp->intf = intf;
netdev->netdev_ops = &rtl8152_netdev_ops;
netdev->watchdog_timeo = RTL8152_TX_TIMEOUT;
- netdev->features &= ~NETIF_F_IP_CSUM;
+
+ netdev->features |= NETIF_F_IP_CSUM;
+ netdev->hw_features = NETIF_F_IP_CSUM;
SET_ETHTOOL_OPS(netdev, &ops);
- tp->speed = 0;
tp->mii.dev = netdev;
tp->mii.mdio_read = read_mii_word;
@@ -1657,37 +2141,27 @@ static int rtl8152_probe(struct usb_interface *intf,
r8152b_init(tp);
set_ethernet_addr(tp);
- if (!alloc_all_urbs(tp)) {
- netif_err(tp, probe, netdev, "out of memory");
+ ret = alloc_all_mem(tp);
+ if (ret)
goto out;
- }
-
- tp->rx_skb = netdev_alloc_skb(netdev,
- RTL8152_RMS + sizeof(struct rx_desc));
- if (!tp->rx_skb)
- goto out1;
usb_set_intfdata(intf, tp);
- SET_NETDEV_DEV(netdev, &intf->dev);
-
- if (register_netdev(netdev) != 0) {
+ ret = register_netdev(netdev);
+ if (ret != 0) {
netif_err(tp, probe, netdev, "couldn't register the device");
- goto out2;
+ goto out1;
}
netif_info(tp, probe, netdev, "%s", DRIVER_VERSION);
return 0;
-out2:
- usb_set_intfdata(intf, NULL);
- dev_kfree_skb(tp->rx_skb);
out1:
- free_all_urbs(tp);
+ usb_set_intfdata(intf, NULL);
out:
free_netdev(netdev);
- return -EIO;
+ return ret;
}
static void rtl8152_unload(struct r8152 *tp)
@@ -1715,9 +2189,7 @@ static void rtl8152_disconnect(struct usb_interface *intf)
tasklet_kill(&tp->tl);
unregister_netdev(tp->netdev);
rtl8152_unload(tp);
- free_all_urbs(tp);
- if (tp->rx_skb)
- dev_kfree_skb(tp->rx_skb);
+ free_all_mem(tp);
free_netdev(tp->netdev);
}
}
@@ -1732,11 +2204,12 @@ MODULE_DEVICE_TABLE(usb, rtl8152_table);
static struct usb_driver rtl8152_driver = {
.name = MODULENAME,
+ .id_table = rtl8152_table,
.probe = rtl8152_probe,
.disconnect = rtl8152_disconnect,
- .id_table = rtl8152_table,
.suspend = rtl8152_suspend,
- .resume = rtl8152_resume
+ .resume = rtl8152_resume,
+ .reset_resume = rtl8152_resume,
};
module_usb_driver(rtl8152_driver);
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
new file mode 100644
index 000000000000..7ec3e0ee0783
--- /dev/null
+++ b/drivers/net/usb/sr9700.c
@@ -0,0 +1,560 @@
+/*
+ * CoreChip-sz SR9700 one chip USB 1.1 Ethernet Devices
+ *
+ * Author : Liu Junliang <liujunliang_ljl@163.com>
+ *
+ * Based on dm9601.c
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/stddef.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/usb/usbnet.h>
+
+#include "sr9700.h"
+
+static int sr_read(struct usbnet *dev, u8 reg, u16 length, void *data)
+{
+ int err;
+
+ err = usbnet_read_cmd(dev, SR_RD_REGS, SR_REQ_RD_REG, 0, reg, data,
+ length);
+ if ((err != length) && (err >= 0))
+ err = -EINVAL;
+ return err;
+}
+
+static int sr_write(struct usbnet *dev, u8 reg, u16 length, void *data)
+{
+ int err;
+
+ err = usbnet_write_cmd(dev, SR_WR_REGS, SR_REQ_WR_REG, 0, reg, data,
+ length);
+ if ((err >= 0) && (err < length))
+ err = -EINVAL;
+ return err;
+}
+
+static int sr_read_reg(struct usbnet *dev, u8 reg, u8 *value)
+{
+ return sr_read(dev, reg, 1, value);
+}
+
+static int sr_write_reg(struct usbnet *dev, u8 reg, u8 value)
+{
+ return usbnet_write_cmd(dev, SR_WR_REGS, SR_REQ_WR_REG,
+ value, reg, NULL, 0);
+}
+
+static void sr_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)
+{
+ usbnet_write_cmd_async(dev, SR_WR_REGS, SR_REQ_WR_REG,
+ 0, reg, data, length);
+}
+
+static void sr_write_reg_async(struct usbnet *dev, u8 reg, u8 value)
+{
+ usbnet_write_cmd_async(dev, SR_WR_REGS, SR_REQ_WR_REG,
+ value, reg, NULL, 0);
+}
+
+static int wait_phy_eeprom_ready(struct usbnet *dev, int phy)
+{
+ int i;
+
+ for (i = 0; i < SR_SHARE_TIMEOUT; i++) {
+ u8 tmp = 0;
+ int ret;
+
+ udelay(1);
+ ret = sr_read_reg(dev, EPCR, &tmp);
+ if (ret < 0)
+ return ret;
+
+ /* ready */
+ if (!(tmp & EPCR_ERRE))
+ return 0;
+ }
+
+ netdev_err(dev->net, "%s write timed out!\n", phy ? "phy" : "eeprom");
+
+ return -EIO;
+}
+
+static int sr_share_read_word(struct usbnet *dev, int phy, u8 reg,
+ __le16 *value)
+{
+ int ret;
+
+ mutex_lock(&dev->phy_mutex);
+
+ sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
+ sr_write_reg(dev, EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR);
+
+ ret = wait_phy_eeprom_ready(dev, phy);
+ if (ret < 0)
+ goto out_unlock;
+
+ sr_write_reg(dev, EPCR, 0x0);
+ ret = sr_read(dev, EPDR, 2, value);
+
+ netdev_dbg(dev->net, "read shared %d 0x%02x returned 0x%04x, %d\n",
+ phy, reg, *value, ret);
+
+out_unlock:
+ mutex_unlock(&dev->phy_mutex);
+ return ret;
+}
+
+static int sr_share_write_word(struct usbnet *dev, int phy, u8 reg,
+ __le16 value)
+{
+ int ret;
+
+ mutex_lock(&dev->phy_mutex);
+
+ ret = sr_write(dev, EPDR, 2, &value);
+ if (ret < 0)
+ goto out_unlock;
+
+ sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
+ sr_write_reg(dev, EPCR, phy ? (EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) :
+ (EPCR_WEP | EPCR_ERPRW));
+
+ ret = wait_phy_eeprom_ready(dev, phy);
+ if (ret < 0)
+ goto out_unlock;
+
+ sr_write_reg(dev, EPCR, 0x0);
+
+out_unlock:
+ mutex_unlock(&dev->phy_mutex);
+ return ret;
+}
+
+static int sr_read_eeprom_word(struct usbnet *dev, u8 offset, void *value)
+{
+ return sr_share_read_word(dev, 0, offset, value);
+}
+
+static int sr9700_get_eeprom_len(struct net_device *netdev)
+{
+ return SR_EEPROM_LEN;
+}
+
+static int sr9700_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ __le16 *buf = (__le16 *)data;
+ int ret = 0;
+ int i;
+
+ /* access is 16bit */
+ if ((eeprom->offset & 0x01) || (eeprom->len & 0x01))
+ return -EINVAL;
+
+ for (i = 0; i < eeprom->len / 2; i++) {
+ ret = sr_read_eeprom_word(dev, eeprom->offset / 2 + i, buf + i);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ __le16 res;
+ int rc = 0;
+
+ if (phy_id) {
+ netdev_dbg(netdev, "Only internal phy supported\n");
+ return 0;
+ }
+
+ /* Access NSR_LINKST bit for link status instead of MII_BMSR */
+ if (loc == MII_BMSR) {
+ u8 value;
+
+ sr_read_reg(dev, NSR, &value);
+ if (value & NSR_LINKST)
+ rc = 1;
+ }
+ sr_share_read_word(dev, 1, loc, &res);
+ if (rc == 1)
+ res = le16_to_cpu(res) | BMSR_LSTATUS;
+ else
+ res = le16_to_cpu(res) & ~BMSR_LSTATUS;
+
+ netdev_dbg(netdev, "sr_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
+ phy_id, loc, res);
+
+ return res;
+}
+
+static void sr_mdio_write(struct net_device *netdev, int phy_id, int loc,
+ int val)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ __le16 res = cpu_to_le16(val);
+
+ if (phy_id) {
+ netdev_dbg(netdev, "Only internal phy supported\n");
+ return;
+ }
+
+ netdev_dbg(netdev, "sr_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
+ phy_id, loc, val);
+
+ sr_share_write_word(dev, 1, loc, res);
+}
+
+static u32 sr9700_get_link(struct net_device *netdev)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ u8 value = 0;
+ int rc = 0;
+
+ /* Get the Link Status directly */
+ sr_read_reg(dev, NSR, &value);
+ if (value & NSR_LINKST)
+ rc = 1;
+
+ return rc;
+}
+
+static int sr9700_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+
+ return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
+}
+
+static const struct ethtool_ops sr9700_ethtool_ops = {
+ .get_drvinfo = usbnet_get_drvinfo,
+ .get_link = sr9700_get_link,
+ .get_msglevel = usbnet_get_msglevel,
+ .set_msglevel = usbnet_set_msglevel,
+ .get_eeprom_len = sr9700_get_eeprom_len,
+ .get_eeprom = sr9700_get_eeprom,
+ .get_settings = usbnet_get_settings,
+ .set_settings = usbnet_set_settings,
+ .nway_reset = usbnet_nway_reset,
+};
+
+static void sr9700_set_multicast(struct net_device *netdev)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ /* We use the 20-byte dev->data for our 8-byte filter buffer
+ * to avoid allocating memory that is tricky to free later
+ */
+ u8 *hashes = (u8 *)&dev->data;
+ /* rx_ctl setting : enable, disable_long, disable_crc */
+ u8 rx_ctl = RCR_RXEN | RCR_DIS_CRC | RCR_DIS_LONG;
+
+ memset(hashes, 0x00, SR_MCAST_SIZE);
+ /* broadcast address */
+ hashes[SR_MCAST_SIZE - 1] |= SR_MCAST_ADDR_FLAG;
+ if (netdev->flags & IFF_PROMISC) {
+ rx_ctl |= RCR_PRMSC;
+ } else if (netdev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(netdev) > SR_MCAST_MAX) {
+ rx_ctl |= RCR_RUNT;
+ } else if (!netdev_mc_empty(netdev)) {
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_mc_addr(ha, netdev) {
+ u32 crc = ether_crc(ETH_ALEN, ha->addr) >> 26;
+ hashes[crc >> 3] |= 1 << (crc & 0x7);
+ }
+ }
+
+ sr_write_async(dev, MAR, SR_MCAST_SIZE, hashes);
+ sr_write_reg_async(dev, RCR, rx_ctl);
+}
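
The multicast filter above is the classic 64-bit hash: the top six bits of the Ethernet CRC of each address select one bit in the 8-byte table (byte crc >> 3, bit crc & 7), with the broadcast bit always forced on. A standalone sketch of the bit placement, using a made-up 6-bit CRC value rather than a real ether_crc() computation:

	#include <stdio.h>
	#include <string.h>

	#define MCAST_SIZE_DEMO 8

	int main(void)
	{
		unsigned char hashes[MCAST_SIZE_DEMO];
		unsigned crc6 = 42;	/* pretend ether_crc(addr) >> 26 */

		memset(hashes, 0, sizeof(hashes));
		hashes[MCAST_SIZE_DEMO - 1] |= 0x80;	/* broadcast flag */
		hashes[crc6 >> 3] |= 1 << (crc6 & 0x7);	/* byte 5, bit 2 */
		printf("byte5 = 0x%02x, byte7 = 0x%02x\n",
		       hashes[5], hashes[7]);		/* 0x04, 0x80 */
		return 0;
	}
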
+
+static int sr9700_set_mac_address(struct net_device *netdev, void *p)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data)) {
+ netdev_err(netdev, "not setting invalid mac address %pM\n",
+ addr->sa_data);
+ return -EINVAL;
+ }
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ sr_write_async(dev, PAR, 6, netdev->dev_addr);
+
+ return 0;
+}
+
+static const struct net_device_ops sr9700_netdev_ops = {
+ .ndo_open = usbnet_open,
+ .ndo_stop = usbnet_stop,
+ .ndo_start_xmit = usbnet_start_xmit,
+ .ndo_tx_timeout = usbnet_tx_timeout,
+ .ndo_change_mtu = usbnet_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = sr9700_ioctl,
+ .ndo_set_rx_mode = sr9700_set_multicast,
+ .ndo_set_mac_address = sr9700_set_mac_address,
+};
+
+static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ struct net_device *netdev;
+ struct mii_if_info *mii;
+ int ret;
+
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret)
+ goto out;
+
+ netdev = dev->net;
+
+ netdev->netdev_ops = &sr9700_netdev_ops;
+ netdev->ethtool_ops = &sr9700_ethtool_ops;
+ netdev->hard_header_len += SR_TX_OVERHEAD;
+ dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
+ /* the bulk-in buffer should preferably be no less than 3K */
+ dev->rx_urb_size = 3072;
+
+ mii = &dev->mii;
+ mii->dev = netdev;
+ mii->mdio_read = sr_mdio_read;
+ mii->mdio_write = sr_mdio_write;
+ mii->phy_id_mask = 0x1f;
+ mii->reg_num_mask = 0x1f;
+
+ sr_write_reg(dev, NCR, NCR_RST);
+ udelay(20);
+
+ /* read MAC
+ * After chip power-on, the chip reloads the MAC from the EEPROM
+ * into PAR automatically. If there is no external EEPROM, a default
+ * MAC address is stored in PAR so the chip still works properly.
+ */
+ if (sr_read(dev, PAR, ETH_ALEN, netdev->dev_addr) < 0) {
+ netdev_err(netdev, "Error reading MAC address\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* power up and reset phy */
+ sr_write_reg(dev, PRR, PRR_PHY_RST);
+ /* at least 10ms, here 20ms to be safe */
+ mdelay(20);
+ sr_write_reg(dev, PRR, 0);
+ /* at least 1ms, here 2ms so the register reads back correctly */
+ udelay(2 * 1000);
+
+ /* receive broadcast packets */
+ sr9700_set_multicast(netdev);
+
+ sr_mdio_write(netdev, mii->phy_id, MII_BMCR, BMCR_RESET);
+ sr_mdio_write(netdev, mii->phy_id, MII_ADVERTISE, ADVERTISE_ALL |
+ ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
+ mii_nway_restart(mii);
+
+out:
+ return ret;
+}
+
+static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+ struct sk_buff *sr_skb;
+ int len;
+
+ /* skb content (packets) format :
+ * p0 p1 p2 ...... pm
+ * / \
+ * / \
+ * / \
+ * / \
+ * p0b0 p0b1 p0b2 p0b3 ...... p0b(n-4) p0b(n-3)...p0bn
+ *
+ * p0 : packet 0
+ * p0b0 : packet 0 byte 0
+ *
+ * b0: rx status
+ * b1: packet length (incl crc) low
+ * b2: packet length (incl crc) high
+ * b3..n-4: packet data
+ * bn-3..bn: ethernet packet crc
+ */
+ if (unlikely(skb->len < SR_RX_OVERHEAD)) {
+ netdev_err(dev->net, "unexpected tiny rx frame\n");
+ return 0;
+ }
+
+ /* one skb may contain multiple packets */
+ while (skb->len > SR_RX_OVERHEAD) {
+ if (skb->data[0] != 0x40)
+ return 0;
+
+ /* ignore the CRC length */
+ len = (skb->data[1] | (skb->data[2] << 8)) - 4;
+
+ if (len > ETH_FRAME_LEN)
+ return 0;
+
+ /* the last packet of current skb */
+ if (skb->len == (len + SR_RX_OVERHEAD)) {
+ skb_pull(skb, 3);
+ skb->len = len;
+ skb_set_tail_pointer(skb, len);
+ skb->truesize = len + sizeof(struct sk_buff);
+ return 2;
+ }
+
+ /* skb_clone is used for address alignment */
+ sr_skb = skb_clone(skb, GFP_ATOMIC);
+ if (!sr_skb)
+ return 0;
+
+ sr_skb->len = len;
+ sr_skb->data = skb->data + 3;
+ skb_set_tail_pointer(sr_skb, len);
+ sr_skb->truesize = len + sizeof(struct sk_buff);
+ usbnet_skb_return(dev, sr_skb);
+
+ skb_pull(skb, len + SR_RX_OVERHEAD);
+ }
+
+ return 0;
+}
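
Per the framing comment in sr9700_rx_fixup(), each packet starts with a 3-byte header: a status byte that must be 0x40, then a little-endian length that includes the 4-byte CRC. A standalone parse of one synthetic header:

	#include <stdio.h>

	int main(void)
	{
		/* status 0x40, length 0x05ea = 1514 incl. CRC */
		unsigned char hdr[3] = { 0x40, 0xea, 0x05 };
		int len;

		if (hdr[0] != 0x40)
			return 1;			/* bad status: drop URB */
		len = (hdr[1] | (hdr[2] << 8)) - 4;	/* strip the CRC */
		printf("payload length = %d\n", len);	/* prints 1510 */
		return 0;
	}
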
+
+static struct sk_buff *sr9700_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ gfp_t flags)
+{
+ int len;
+
+ /* SR9700 can only send out one ethernet packet at a time.
+ *
+ * b0 b1 b2 ...... bn
+ *
+ * b0: packet length low
+ * b1: packet length high
+ * b2..bn: packet data
+ */
+
+ len = skb->len;
+
+ if (skb_headroom(skb) < SR_TX_OVERHEAD) {
+ struct sk_buff *skb2;
+
+ skb2 = skb_copy_expand(skb, SR_TX_OVERHEAD, 0, flags);
+ dev_kfree_skb_any(skb);
+ skb = skb2;
+ if (!skb)
+ return NULL;
+ }
+
+ __skb_push(skb, SR_TX_OVERHEAD);
+
+ /* usbnet adds a pad byte if the length is a multiple of the USB
+ * packet size; if so, adjust the length value in the header
+ */
+ if ((skb->len % dev->maxpacket) == 0)
+ len++;
+
+ skb->data[0] = len;
+ skb->data[1] = len >> 8;
+
+ return skb;
+}
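
This is the inverse of the rx parse: sr9700_tx_fixup() pushes a 2-byte little-endian length in front of the frame, and bumps that length by one when the resulting transfer is an exact multiple of the endpoint packet size, since usbnet will then append a pad byte. A standalone sketch of the packing; the 64-byte maxpacket is an assumption for the example:

	#include <stdio.h>

	int main(void)
	{
		unsigned maxpacket = 64;	/* assumed bulk-out packet size */
		unsigned skb_len = 126;		/* payload before the header */
		unsigned total = skb_len + 2;	/* after the 2-byte push */
		unsigned len = skb_len;

		if (total % maxpacket == 0)	/* 128 % 64 == 0: pad coming */
			len++;

		unsigned char hdr[2] = { len & 0xff, len >> 8 };
		printf("header = %02x %02x\n", hdr[0], hdr[1]);	/* 7f 00 */
		return 0;
	}
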
+
+static void sr9700_status(struct usbnet *dev, struct urb *urb)
+{
+ int link;
+ u8 *buf;
+
+ /* format:
+ * b0: net status
+ * b1: tx status 1
+ * b2: tx status 2
+ * b3: rx status
+ * b4: rx overflow
+ * b5: rx count
+ * b6: tx count
+ * b7: gpr
+ */
+
+ if (urb->actual_length < 8)
+ return;
+
+ buf = urb->transfer_buffer;
+
+ link = !!(buf[0] & 0x40);
+ if (netif_carrier_ok(dev->net) != link) {
+ usbnet_link_change(dev, link, 1);
+ netdev_dbg(dev->net, "Link Status is: %d\n", link);
+ }
+}
+
+static int sr9700_link_reset(struct usbnet *dev)
+{
+ struct ethtool_cmd ecmd;
+
+ mii_check_media(&dev->mii, 1, 1);
+ mii_ethtool_gset(&dev->mii, &ecmd);
+
+ netdev_dbg(dev->net, "link_reset() speed: %d duplex: %d\n",
+ ecmd.speed, ecmd.duplex);
+
+ return 0;
+}
+
+static const struct driver_info sr9700_driver_info = {
+ .description = "CoreChip SR9700 USB Ethernet",
+ .flags = FLAG_ETHER,
+ .bind = sr9700_bind,
+ .rx_fixup = sr9700_rx_fixup,
+ .tx_fixup = sr9700_tx_fixup,
+ .status = sr9700_status,
+ .link_reset = sr9700_link_reset,
+ .reset = sr9700_link_reset,
+};
+
+static const struct usb_device_id products[] = {
+ {
+ USB_DEVICE(0x0fe6, 0x9700), /* SR9700 device */
+ .driver_info = (unsigned long)&sr9700_driver_info,
+ },
+ {}, /* END */
+};
+
+MODULE_DEVICE_TABLE(usb, products);
+
+static struct usb_driver sr9700_usb_driver = {
+ .name = "sr9700",
+ .id_table = products,
+ .probe = usbnet_probe,
+ .disconnect = usbnet_disconnect,
+ .suspend = usbnet_suspend,
+ .resume = usbnet_resume,
+ .disable_hub_initiated_lpm = 1,
+};
+
+module_usb_driver(sr9700_usb_driver);
+
+MODULE_AUTHOR("liujl <liujunliang_ljl@163.com>");
+MODULE_DESCRIPTION("SR9700 one chip USB 1.1 USB to Ethernet device from http://www.corechip-sz.com/");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/sr9700.h b/drivers/net/usb/sr9700.h
new file mode 100644
index 000000000000..fd687c575e74
--- /dev/null
+++ b/drivers/net/usb/sr9700.h
@@ -0,0 +1,173 @@
+/*
+ * CoreChip-sz SR9700 one chip USB 1.1 Ethernet Devices
+ *
+ * Author : Liu Junliang <liujunliang_ljl@163.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#ifndef _SR9700_H
+#define _SR9700_H
+
+/* sr9700 spec. register table on Linux platform */
+
+/* Network Control Reg */
+#define NCR 0x00
+#define NCR_RST (1 << 0)
+#define NCR_LBK (3 << 1)
+#define NCR_FDX (1 << 3)
+#define NCR_WAKEEN (1 << 6)
+/* Network Status Reg */
+#define NSR 0x01
+#define NSR_RXRDY (1 << 0)
+#define NSR_RXOV (1 << 1)
+#define NSR_TX1END (1 << 2)
+#define NSR_TX2END (1 << 3)
+#define NSR_TXFULL (1 << 4)
+#define NSR_WAKEST (1 << 5)
+#define NSR_LINKST (1 << 6)
+#define NSR_SPEED (1 << 7)
+/* Tx Control Reg */
+#define TCR 0x02
+#define TCR_CRC_DIS (1 << 1)
+#define TCR_PAD_DIS (1 << 2)
+#define TCR_LC_CARE (1 << 3)
+#define TCR_CRS_CARE (1 << 4)
+#define TCR_EXCECM (1 << 5)
+#define TCR_LF_EN (1 << 6)
+/* Tx Status Reg for Packet Index 1 */
+#define TSR1 0x03
+#define TSR1_EC (1 << 2)
+#define TSR1_COL (1 << 3)
+#define TSR1_LC (1 << 4)
+#define TSR1_NC (1 << 5)
+#define TSR1_LOC (1 << 6)
+#define TSR1_TLF (1 << 7)
+/* Tx Status Reg for Packet Index 2 */
+#define TSR2 0x04
+#define TSR2_EC (1 << 2)
+#define TSR2_COL (1 << 3)
+#define TSR2_LC (1 << 4)
+#define TSR2_NC (1 << 5)
+#define TSR2_LOC (1 << 6)
+#define TSR2_TLF (1 << 7)
+/* Rx Control Reg*/
+#define RCR 0x05
+#define RCR_RXEN (1 << 0)
+#define RCR_PRMSC (1 << 1)
+#define RCR_RUNT (1 << 2)
+#define RCR_ALL (1 << 3)
+#define RCR_DIS_CRC (1 << 4)
+#define RCR_DIS_LONG (1 << 5)
+/* Rx Status Reg */
+#define RSR 0x06
+#define RSR_AE (1 << 2)
+#define RSR_MF (1 << 6)
+#define RSR_RF (1 << 7)
+/* Rx Overflow Counter Reg */
+#define ROCR 0x07
+#define ROCR_ROC (0x7F << 0)
+#define ROCR_RXFU (1 << 7)
+/* Back Pressure Threshold Reg */
+#define BPTR 0x08
+#define BPTR_JPT (0x0F << 0)
+#define BPTR_BPHW (0x0F << 4)
+/* Flow Control Threshold Reg */
+#define FCTR 0x09
+#define FCTR_LWOT (0x0F << 0)
+#define FCTR_HWOT (0x0F << 4)
+/* rx/tx Flow Control Reg */
+#define FCR 0x0A
+#define FCR_FLCE (1 << 0)
+#define FCR_BKPA (1 << 4)
+#define FCR_TXPEN (1 << 5)
+#define FCR_TXPF (1 << 6)
+#define FCR_TXP0 (1 << 7)
+/* Eeprom & Phy Control Reg */
+#define EPCR 0x0B
+#define EPCR_ERRE (1 << 0)
+#define EPCR_ERPRW (1 << 1)
+#define EPCR_ERPRR (1 << 2)
+#define EPCR_EPOS (1 << 3)
+#define EPCR_WEP (1 << 4)
+/* Eeprom & Phy Address Reg */
+#define EPAR 0x0C
+#define EPAR_EROA (0x3F << 0)
+#define EPAR_PHY_ADR_MASK (0x03 << 6)
+#define EPAR_PHY_ADR (0x01 << 6)
+/* Eeprom & Phy Data Reg */
+#define EPDR 0x0D /* 0x0D ~ 0x0E for Data Reg Low & High */
+/* Wakeup Control Reg */
+#define WCR 0x0F
+#define WCR_MAGICST (1 << 0)
+#define WCR_LINKST (1 << 2)
+#define WCR_MAGICEN (1 << 3)
+#define WCR_LINKEN (1 << 5)
+/* Physical Address Reg */
+#define PAR 0x10 /* 0x10 ~ 0x15 6 bytes for PAR */
+/* Multicast Address Reg */
+#define MAR 0x16 /* 0x16 ~ 0x1D 8 bytes for MAR */
+/* 0x1e unused */
+/* Phy Reset Reg */
+#define PRR 0x1F
+#define PRR_PHY_RST (1 << 0)
+/* Tx sdram Write Pointer Address Low */
+#define TWPAL 0x20
+/* Tx sdram Write Pointer Address High */
+#define TWPAH 0x21
+/* Tx sdram Read Pointer Address Low */
+#define TRPAL 0x22
+/* Tx sdram Read Pointer Address High */
+#define TRPAH 0x23
+/* Rx sdram Write Pointer Address Low */
+#define RWPAL 0x24
+/* Rx sdram Write Pointer Address High */
+#define RWPAH 0x25
+/* Rx sdram Read Pointer Address Low */
+#define RRPAL 0x26
+/* Rx sdram Read Pointer Address High */
+#define RRPAH 0x27
+/* Vendor ID register */
+#define VID 0x28 /* 0x28 ~ 0x29 2 bytes for VID */
+/* Product ID register */
+#define PID 0x2A /* 0x2A ~ 0x2B 2 bytes for PID */
+/* CHIP Revision register */
+#define CHIPR 0x2C
+/* 0x2D --> 0xEF unused */
+/* USB Device Address */
+#define USBDA 0xF0
+#define USBDA_USBFA (0x7F << 0)
+/* RX packet Counter Reg */
+#define RXC 0xF1
+/* Tx packet Counter & USB Status Reg */
+#define TXC_USBS 0xF2
+#define TXC_USBS_TXC0 (1 << 0)
+#define TXC_USBS_TXC1 (1 << 1)
+#define TXC_USBS_TXC2 (1 << 2)
+#define TXC_USBS_EP1RDY (1 << 5)
+#define TXC_USBS_SUSFLAG (1 << 6)
+#define TXC_USBS_RXFAULT (1 << 7)
+/* USB Control register */
+#define USBC 0xF4
+#define USBC_EP3NAK (1 << 4)
+#define USBC_EP3ACK (1 << 5)
+
+/* Register access commands and flags */
+#define SR_RD_REGS 0x00
+#define SR_WR_REGS 0x01
+#define SR_WR_REG 0x03
+#define SR_REQ_RD_REG (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
+#define SR_REQ_WR_REG (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
+
+/* parameters */
+#define SR_SHARE_TIMEOUT 1000
+#define SR_EEPROM_LEN 256
+#define SR_MCAST_SIZE 8
+#define SR_MCAST_ADDR_FLAG 0x80
+#define SR_MCAST_MAX 64
+#define SR_TX_OVERHEAD 2 /* 2bytes header */
+#define SR_RX_OVERHEAD 7 /* 3bytes header + 4crc tail */
+
+#endif /* _SR9700_H */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 27a00b006033..7b331e613e02 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -59,15 +59,13 @@
* For high speed, each frame comfortably fits almost 36 max size
* Ethernet packets (so queues should be bigger).
*
- * REVISIT qlens should be members of 'struct usbnet'; the goal is to
- * let the USB host controller be busy for 5msec or more before an irq
- * is required, under load. Jumbograms change the equation.
+ * The goal is to let the USB host controller be busy for 5msec or
+ * more before an irq is required, under load. Jumbograms change
+ * the equation.
*/
-#define RX_MAX_QUEUE_MEMORY (60 * 1518)
-#define RX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
- (RX_MAX_QUEUE_MEMORY/(dev)->rx_urb_size) : 4)
-#define TX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
- (RX_MAX_QUEUE_MEMORY/(dev)->hard_mtu) : 4)
+#define MAX_QUEUE_MEMORY (60 * 1518)
+#define RX_QLEN(dev) ((dev)->rx_qlen)
+#define TX_QLEN(dev) ((dev)->tx_qlen)
// reawaken network queue this soon after stopping; else watchdog barks
#define TX_TIMEOUT_JIFFIES (5*HZ)
@@ -347,6 +345,31 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(usbnet_skb_return);
+/* must be called if hard_mtu or rx_urb_size changed */
+void usbnet_update_max_qlen(struct usbnet *dev)
+{
+ enum usb_device_speed speed = dev->udev->speed;
+
+ switch (speed) {
+ case USB_SPEED_HIGH:
+ dev->rx_qlen = MAX_QUEUE_MEMORY / dev->rx_urb_size;
+ dev->tx_qlen = MAX_QUEUE_MEMORY / dev->hard_mtu;
+ break;
+ case USB_SPEED_SUPER:
+ /*
+ * Don't take the default 5ms qlen for a super speed HC, to
+ * save memory; iperf tests show that a 2.5ms qlen works well.
+ */
+ dev->rx_qlen = 5 * MAX_QUEUE_MEMORY / dev->rx_urb_size;
+ dev->tx_qlen = 5 * MAX_QUEUE_MEMORY / dev->hard_mtu;
+ break;
+ default:
+ dev->rx_qlen = dev->tx_qlen = 4;
+ }
+}
+EXPORT_SYMBOL_GPL(usbnet_update_max_qlen);
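
Plugging numbers into usbnet_update_max_qlen(): with the common 1518-byte rx_urb_size, a high-speed device queues 60 * 1518 / 1518 = 60 rx URBs, and a super-speed device five times that. A standalone check of the arithmetic:

	#include <stdio.h>

	#define MAX_QUEUE_MEMORY (60 * 1518)

	int main(void)
	{
		unsigned rx_urb_size = 1518;

		printf("high speed rx_qlen  = %u\n",
		       MAX_QUEUE_MEMORY / rx_urb_size);		/* 60 */
		printf("super speed rx_qlen = %u\n",
		       5 * MAX_QUEUE_MEMORY / rx_urb_size);	/* 300 */
		return 0;
	}
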
+
/*-------------------------------------------------------------------------
*
@@ -375,6 +398,9 @@ int usbnet_change_mtu (struct net_device *net, int new_mtu)
usbnet_unlink_rx_urbs(dev);
}
+ /* max qlen depends on hard_mtu and rx_urb_size */
+ usbnet_update_max_qlen(dev);
+
return 0;
}
EXPORT_SYMBOL_GPL(usbnet_change_mtu);
@@ -843,6 +869,9 @@ int usbnet_open (struct net_device *net)
goto done;
}
+ /* hard_mtu or rx_urb_size may change in reset() */
+ usbnet_update_max_qlen(dev);
+
// insist peer be connected
if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
netif_dbg(dev, ifup, dev->net, "can't open; %d\n", retval);
@@ -927,6 +956,9 @@ int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd)
if (dev->driver_info->link_reset)
dev->driver_info->link_reset(dev);
+ /* hard_mtu or rx_urb_size may change in link_reset() */
+ usbnet_update_max_qlen(dev);
+
return retval;
}
@@ -1020,6 +1052,9 @@ static void __handle_link_change(struct usbnet *dev)
tasklet_schedule(&dev->bh);
}
+ /* hard_mtu or rx_urb_size may change during link change */
+ usbnet_update_max_qlen(dev);
+
clear_bit(EVENT_LINK_CHANGE, &dev->flags);
}
@@ -1632,11 +1667,18 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
dev->rx_urb_size = dev->hard_mtu;
dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
+ /* let userspace know we have a random address */
+ if (ether_addr_equal(net->dev_addr, node_id))
+ net->addr_assign_type = NET_ADDR_RANDOM;
+
if ((dev->driver_info->flags & FLAG_WLAN) != 0)
SET_NETDEV_DEVTYPE(net, &wlan_type);
if ((dev->driver_info->flags & FLAG_WWAN) != 0)
SET_NETDEV_DEVTYPE(net, &wwan_type);
+ /* initialize max rx_qlen and tx_qlen */
+ usbnet_update_max_qlen(dev);
+
status = register_netdev (net);
if (status)
goto out4;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 3d2a90a62649..defec2b3c5a4 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -106,6 +106,9 @@ struct virtnet_info {
/* Has control virtqueue */
bool has_cvq;
+ /* Host can handle any s/g split between our header and packet data */
+ bool any_header_sg;
+
/* enable config space updates */
bool config_enable;
@@ -669,12 +672,28 @@ static void free_old_xmit_skbs(struct send_queue *sq)
static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
{
- struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
+ struct skb_vnet_hdr *hdr;
const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
struct virtnet_info *vi = sq->vq->vdev->priv;
unsigned num_sg;
+ unsigned hdr_len;
+ bool can_push;
pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
+ if (vi->mergeable_rx_bufs)
+ hdr_len = sizeof hdr->mhdr;
+ else
+ hdr_len = sizeof hdr->hdr;
+
+ can_push = vi->any_header_sg &&
+ !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
+ !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
+ /* Even if we can, don't push here yet as this would skew
+ * csum_start offset below. */
+ if (can_push)
+ hdr = (struct skb_vnet_hdr *)(skb->data - hdr_len);
+ else
+ hdr = skb_vnet_hdr(skb);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
@@ -703,15 +722,18 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
}
- hdr->mhdr.num_buffers = 0;
-
- /* Encode metadata header at front. */
if (vi->mergeable_rx_bufs)
- sg_set_buf(sq->sg, &hdr->mhdr, sizeof hdr->mhdr);
- else
- sg_set_buf(sq->sg, &hdr->hdr, sizeof hdr->hdr);
+ hdr->mhdr.num_buffers = 0;
- num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
+ if (can_push) {
+ __skb_push(skb, hdr_len);
+ num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
+ /* Pull header back to avoid skew in tx bytes calculations. */
+ __skb_pull(skb, hdr_len);
+ } else {
+ sg_set_buf(sq->sg, hdr, hdr_len);
+ num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
+ }
return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
}
@@ -1516,6 +1538,8 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
/* (!csum && gso) case will be fixed by register_netdev() */
}
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
+ dev->features |= NETIF_F_RXCSUM;
dev->vlan_features = dev->features;
@@ -1552,6 +1576,9 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
vi->mergeable_rx_bufs = true;
+ if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT))
+ vi->any_header_sg = true;
+
if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
vi->has_cvq = true;
@@ -1727,6 +1754,7 @@ static unsigned int features[] = {
VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
VIRTIO_NET_F_CTRL_MAC_ADDR,
+ VIRTIO_F_ANY_LAYOUT,
};
static struct virtio_driver virtio_net_driver = {
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 55a62cae2cb4..7e2788c488ed 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -313,10 +313,10 @@ vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
struct pci_dev *pdev)
{
if (tbi->map_type == VMXNET3_MAP_SINGLE)
- pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
+ dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
PCI_DMA_TODEVICE);
else if (tbi->map_type == VMXNET3_MAP_PAGE)
- pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
+ dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
PCI_DMA_TODEVICE);
else
BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);
@@ -429,25 +429,29 @@ vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter)
{
if (tq->tx_ring.base) {
- pci_free_consistent(adapter->pdev, tq->tx_ring.size *
- sizeof(struct Vmxnet3_TxDesc),
- tq->tx_ring.base, tq->tx_ring.basePA);
+ dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
+ sizeof(struct Vmxnet3_TxDesc),
+ tq->tx_ring.base, tq->tx_ring.basePA);
tq->tx_ring.base = NULL;
}
if (tq->data_ring.base) {
- pci_free_consistent(adapter->pdev, tq->data_ring.size *
- sizeof(struct Vmxnet3_TxDataDesc),
- tq->data_ring.base, tq->data_ring.basePA);
+ dma_free_coherent(&adapter->pdev->dev, tq->data_ring.size *
+ sizeof(struct Vmxnet3_TxDataDesc),
+ tq->data_ring.base, tq->data_ring.basePA);
tq->data_ring.base = NULL;
}
if (tq->comp_ring.base) {
- pci_free_consistent(adapter->pdev, tq->comp_ring.size *
- sizeof(struct Vmxnet3_TxCompDesc),
- tq->comp_ring.base, tq->comp_ring.basePA);
+ dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
+ sizeof(struct Vmxnet3_TxCompDesc),
+ tq->comp_ring.base, tq->comp_ring.basePA);
tq->comp_ring.base = NULL;
}
- kfree(tq->buf_info);
- tq->buf_info = NULL;
+ if (tq->buf_info) {
+ dma_free_coherent(&adapter->pdev->dev,
+ tq->tx_ring.size * sizeof(tq->buf_info[0]),
+ tq->buf_info, tq->buf_info_pa);
+ tq->buf_info = NULL;
+ }
}
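
The vmxnet3 changes in this patch are a mechanical migration from the legacy PCI DMA-mapping wrappers to the generic DMA API, plus switching metadata addresses from virt_to_phys() to properly mapped DMA addresses. The correspondence used throughout, for reference:

	/* pci_alloc_consistent(pdev, sz, &pa)
	 *     -> dma_alloc_coherent(&pdev->dev, sz, &pa, GFP_KERNEL)
	 * pci_free_consistent(pdev, sz, va, pa)
	 *     -> dma_free_coherent(&pdev->dev, sz, va, pa)
	 * pci_map_single(pdev, va, len, dir) / pci_unmap_single(...)
	 *     -> dma_map_single(&pdev->dev, va, len, dir) / dma_unmap_single(...)
	 * pci_map_page(...) / pci_unmap_page(...)
	 *     -> dma_map_page(&pdev->dev, ...) / dma_unmap_page(...)
	 */
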
@@ -496,37 +500,38 @@ static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter)
{
+ size_t sz;
+
BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
tq->comp_ring.base || tq->buf_info);
- tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
- * sizeof(struct Vmxnet3_TxDesc),
- &tq->tx_ring.basePA);
+ tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
+ tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
+ &tq->tx_ring.basePA, GFP_KERNEL);
if (!tq->tx_ring.base) {
netdev_err(adapter->netdev, "failed to allocate tx ring\n");
goto err;
}
- tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
- tq->data_ring.size *
- sizeof(struct Vmxnet3_TxDataDesc),
- &tq->data_ring.basePA);
+ tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
+ tq->data_ring.size * sizeof(struct Vmxnet3_TxDataDesc),
+ &tq->data_ring.basePA, GFP_KERNEL);
if (!tq->data_ring.base) {
netdev_err(adapter->netdev, "failed to allocate data ring\n");
goto err;
}
- tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
- tq->comp_ring.size *
- sizeof(struct Vmxnet3_TxCompDesc),
- &tq->comp_ring.basePA);
+ tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
+ tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
+ &tq->comp_ring.basePA, GFP_KERNEL);
if (!tq->comp_ring.base) {
netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
goto err;
}
- tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
- GFP_KERNEL);
+ sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
+ tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz,
+ &tq->buf_info_pa, GFP_KERNEL);
if (!tq->buf_info)
goto err;
@@ -578,7 +583,8 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
break;
}
- rbi->dma_addr = pci_map_single(adapter->pdev,
+ rbi->dma_addr = dma_map_single(
+ &adapter->pdev->dev,
rbi->skb->data, rbi->len,
PCI_DMA_FROMDEVICE);
} else {
@@ -595,7 +601,8 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
rq->stats.rx_buf_alloc_failure++;
break;
}
- rbi->dma_addr = pci_map_page(adapter->pdev,
+ rbi->dma_addr = dma_map_page(
+ &adapter->pdev->dev,
rbi->page, 0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
} else {
@@ -705,7 +712,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
tbi = tq->buf_info + tq->tx_ring.next2fill;
tbi->map_type = VMXNET3_MAP_SINGLE;
- tbi->dma_addr = pci_map_single(adapter->pdev,
+ tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
skb->data + buf_offset, buf_size,
PCI_DMA_TODEVICE);
@@ -1221,7 +1228,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
goto rcd_done;
}
- pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
+ dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr,
+ rbi->len,
PCI_DMA_FROMDEVICE);
#ifdef VMXNET3_RSS
@@ -1233,7 +1241,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
/* Immediate refill */
rbi->skb = new_skb;
- rbi->dma_addr = pci_map_single(adapter->pdev,
+ rbi->dma_addr = dma_map_single(&adapter->pdev->dev,
rbi->skb->data, rbi->len,
PCI_DMA_FROMDEVICE);
rxd->addr = cpu_to_le64(rbi->dma_addr);
@@ -1267,7 +1275,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
}
if (rcd->len) {
- pci_unmap_page(adapter->pdev,
+ dma_unmap_page(&adapter->pdev->dev,
rbi->dma_addr, rbi->len,
PCI_DMA_FROMDEVICE);
@@ -1276,7 +1284,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
/* Immediate refill */
rbi->page = new_page;
- rbi->dma_addr = pci_map_page(adapter->pdev, rbi->page,
+ rbi->dma_addr = dma_map_page(&adapter->pdev->dev,
+ rbi->page,
0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
rxd->addr = cpu_to_le64(rbi->dma_addr);
@@ -1352,13 +1361,13 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
rq->buf_info[ring_idx][i].skb) {
- pci_unmap_single(adapter->pdev, rxd->addr,
+ dma_unmap_single(&adapter->pdev->dev, rxd->addr,
rxd->len, PCI_DMA_FROMDEVICE);
dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
rq->buf_info[ring_idx][i].skb = NULL;
} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
rq->buf_info[ring_idx][i].page) {
- pci_unmap_page(adapter->pdev, rxd->addr,
+ dma_unmap_page(&adapter->pdev->dev, rxd->addr,
rxd->len, PCI_DMA_FROMDEVICE);
put_page(rq->buf_info[ring_idx][i].page);
rq->buf_info[ring_idx][i].page = NULL;
@@ -1400,25 +1409,31 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
}
- kfree(rq->buf_info[0]);
-
for (i = 0; i < 2; i++) {
if (rq->rx_ring[i].base) {
- pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
- * sizeof(struct Vmxnet3_RxDesc),
- rq->rx_ring[i].base,
- rq->rx_ring[i].basePA);
+ dma_free_coherent(&adapter->pdev->dev,
+ rq->rx_ring[i].size
+ * sizeof(struct Vmxnet3_RxDesc),
+ rq->rx_ring[i].base,
+ rq->rx_ring[i].basePA);
rq->rx_ring[i].base = NULL;
}
rq->buf_info[i] = NULL;
}
if (rq->comp_ring.base) {
- pci_free_consistent(adapter->pdev, rq->comp_ring.size *
- sizeof(struct Vmxnet3_RxCompDesc),
- rq->comp_ring.base, rq->comp_ring.basePA);
+ dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
+ * sizeof(struct Vmxnet3_RxCompDesc),
+ rq->comp_ring.base, rq->comp_ring.basePA);
rq->comp_ring.base = NULL;
}
+
+ if (rq->buf_info[0]) {
+ size_t sz = sizeof(struct vmxnet3_rx_buf_info) *
+ (rq->rx_ring[0].size + rq->rx_ring[1].size);
+ dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
+ rq->buf_info_pa);
+ }
}
@@ -1503,8 +1518,10 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
for (i = 0; i < 2; i++) {
sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
- rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
- &rq->rx_ring[i].basePA);
+ rq->rx_ring[i].base = dma_alloc_coherent(
+ &adapter->pdev->dev, sz,
+ &rq->rx_ring[i].basePA,
+ GFP_KERNEL);
if (!rq->rx_ring[i].base) {
netdev_err(adapter->netdev,
"failed to allocate rx ring %d\n", i);
@@ -1513,8 +1530,9 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
}
sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
- rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
- &rq->comp_ring.basePA);
+ rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
+ &rq->comp_ring.basePA,
+ GFP_KERNEL);
if (!rq->comp_ring.base) {
netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
goto err;
@@ -1522,7 +1540,8 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
rq->rx_ring[1].size);
- bi = kzalloc(sz, GFP_KERNEL);
+ bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
+ GFP_KERNEL);
if (!bi)
goto err;
@@ -2005,6 +2024,7 @@ vmxnet3_set_mc(struct net_device *netdev)
struct Vmxnet3_RxFilterConf *rxConf =
&adapter->shared->devRead.rxFilterConf;
u8 *new_table = NULL;
+ dma_addr_t new_table_pa = 0;
u32 new_mode = VMXNET3_RXM_UCAST;
if (netdev->flags & IFF_PROMISC) {
@@ -2028,8 +2048,12 @@ vmxnet3_set_mc(struct net_device *netdev)
new_mode |= VMXNET3_RXM_MCAST;
rxConf->mfTableLen = cpu_to_le16(
netdev_mc_count(netdev) * ETH_ALEN);
- rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
- new_table));
+ new_table_pa = dma_map_single(
+ &adapter->pdev->dev,
+ new_table,
+ rxConf->mfTableLen,
+ PCI_DMA_TODEVICE);
+ rxConf->mfTablePA = cpu_to_le64(new_table_pa);
} else {
netdev_info(netdev, "failed to copy mcast list"
", setting ALL_MULTI\n");
@@ -2056,7 +2080,11 @@ vmxnet3_set_mc(struct net_device *netdev)
VMXNET3_CMD_UPDATE_MAC_FILTERS);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
- kfree(new_table);
+ if (new_table) {
+ dma_unmap_single(&adapter->pdev->dev, new_table_pa,
+ rxConf->mfTableLen, PCI_DMA_TODEVICE);
+ kfree(new_table);
+ }
}
void
@@ -2096,7 +2124,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
- devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter));
+ devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
/* set up feature flags */
@@ -2125,7 +2153,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
- tqc->ddPA = cpu_to_le64(virt_to_phys(tq->buf_info));
+ tqc->ddPA = cpu_to_le64(tq->buf_info_pa);
tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
@@ -2143,8 +2171,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
- rqc->ddPA = cpu_to_le64(virt_to_phys(
- rq->buf_info));
+ rqc->ddPA = cpu_to_le64(rq->buf_info_pa);
rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
@@ -2184,8 +2211,9 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
i, adapter->num_rx_queues);
devRead->rssConfDesc.confVer = 1;
- devRead->rssConfDesc.confLen = sizeof(*rssConf);
- devRead->rssConfDesc.confPA = virt_to_phys(rssConf);
+ devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
+ devRead->rssConfDesc.confPA =
+ cpu_to_le64(adapter->rss_conf_pa);
}
#endif /* VMXNET3_RSS */
@@ -2948,9 +2976,13 @@ vmxnet3_probe_device(struct pci_dev *pdev,
adapter->pdev = pdev;
spin_lock_init(&adapter->cmd_lock);
- adapter->shared = pci_alloc_consistent(adapter->pdev,
- sizeof(struct Vmxnet3_DriverShared),
- &adapter->shared_pa);
+ adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
+ sizeof(struct vmxnet3_adapter),
+ PCI_DMA_TODEVICE);
+ adapter->shared = dma_alloc_coherent(
+ &adapter->pdev->dev,
+ sizeof(struct Vmxnet3_DriverShared),
+ &adapter->shared_pa, GFP_KERNEL);
if (!adapter->shared) {
dev_err(&pdev->dev, "Failed to allocate memory\n");
err = -ENOMEM;
@@ -2963,8 +2995,9 @@ vmxnet3_probe_device(struct pci_dev *pdev,
size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
- adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size,
- &adapter->queue_desc_pa);
+ adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
+ &adapter->queue_desc_pa,
+ GFP_KERNEL);
if (!adapter->tqd_start) {
dev_err(&pdev->dev, "Failed to allocate memory\n");
@@ -2974,7 +3007,10 @@ vmxnet3_probe_device(struct pci_dev *pdev,
adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
adapter->num_tx_queues);
- adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
+ adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
+ sizeof(struct Vmxnet3_PMConf),
+ &adapter->pm_conf_pa,
+ GFP_KERNEL);
if (adapter->pm_conf == NULL) {
err = -ENOMEM;
goto err_alloc_pm;
@@ -2982,7 +3018,10 @@ vmxnet3_probe_device(struct pci_dev *pdev,
#ifdef VMXNET3_RSS
- adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL);
+ adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
+ sizeof(struct UPT1_RSSConf),
+ &adapter->rss_conf_pa,
+ GFP_KERNEL);
if (adapter->rss_conf == NULL) {
err = -ENOMEM;
goto err_alloc_rss;
@@ -3077,17 +3116,22 @@ err_ver:
vmxnet3_free_pci_resources(adapter);
err_alloc_pci:
#ifdef VMXNET3_RSS
- kfree(adapter->rss_conf);
+ dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
+ adapter->rss_conf, adapter->rss_conf_pa);
err_alloc_rss:
#endif
- kfree(adapter->pm_conf);
+ dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
+ adapter->pm_conf, adapter->pm_conf_pa);
err_alloc_pm:
- pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
- adapter->queue_desc_pa);
+ dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
+ adapter->queue_desc_pa);
err_alloc_queue_desc:
- pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
- adapter->shared, adapter->shared_pa);
+ dma_free_coherent(&adapter->pdev->dev,
+ sizeof(struct Vmxnet3_DriverShared),
+ adapter->shared, adapter->shared_pa);
err_alloc_shared:
+ dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
+ sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
return err;
@@ -3118,16 +3162,21 @@ vmxnet3_remove_device(struct pci_dev *pdev)
vmxnet3_free_intr_resources(adapter);
vmxnet3_free_pci_resources(adapter);
#ifdef VMXNET3_RSS
- kfree(adapter->rss_conf);
+ dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
+ adapter->rss_conf, adapter->rss_conf_pa);
#endif
- kfree(adapter->pm_conf);
+ dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
+ adapter->pm_conf, adapter->pm_conf_pa);
size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
- pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
- adapter->queue_desc_pa);
- pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
- adapter->shared, adapter->shared_pa);
+ dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
+ adapter->queue_desc_pa);
+ dma_free_coherent(&adapter->pdev->dev,
+ sizeof(struct Vmxnet3_DriverShared),
+ adapter->shared, adapter->shared_pa);
+ dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
+ sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
free_netdev(netdev);
}
@@ -3227,8 +3276,8 @@ skip_arp:
adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
*pmConf));
- adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
- pmConf));
+ adapter->shared->devRead.pmConfDesc.confPA =
+ cpu_to_le64(adapter->pm_conf_pa);
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
@@ -3265,8 +3314,8 @@ vmxnet3_resume(struct device *device)
adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
*pmConf));
- adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
- pmConf));
+ adapter->shared->devRead.pmConfDesc.confPA =
+ cpu_to_le64(adapter->pm_conf_pa);
netif_device_attach(netdev);
pci_set_power_state(pdev, PCI_D0);
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 35418146fa17..a03f358fd58b 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -70,10 +70,10 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.1.30.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.2.0.0-k"
/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01011E00
+#define VMXNET3_DRIVER_VERSION_NUM 0x01020000
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
@@ -229,6 +229,7 @@ struct vmxnet3_tx_queue {
spinlock_t tx_lock;
struct vmxnet3_cmd_ring tx_ring;
struct vmxnet3_tx_buf_info *buf_info;
+ dma_addr_t buf_info_pa;
struct vmxnet3_tx_data_ring data_ring;
struct vmxnet3_comp_ring comp_ring;
struct Vmxnet3_TxQueueCtrl *shared;
@@ -277,6 +278,7 @@ struct vmxnet3_rx_queue {
u32 qid; /* rqID in RCD for buffer from 1st ring */
u32 qid2; /* rqID in RCD for buffer from 2nd ring */
struct vmxnet3_rx_buf_info *buf_info[2];
+ dma_addr_t buf_info_pa;
struct Vmxnet3_RxQueueCtrl *shared;
struct vmxnet3_rq_driver_stats stats;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
@@ -353,6 +355,10 @@ struct vmxnet3_adapter {
unsigned long state; /* VMXNET3_STATE_BIT_xxx */
int share_intr;
+
+ dma_addr_t adapter_pa;
+ dma_addr_t pm_conf_pa;
+ dma_addr_t rss_conf_pa;
};
#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 767f7af3bd40..bf64b4191dcc 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -6,9 +6,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * TODO
- * - IPv6 (not in RFC)
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -27,6 +24,7 @@
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
#include <linux/hash.h>
#include <linux/ethtool.h>
#include <net/arp.h>
@@ -41,6 +39,13 @@
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#include <net/vxlan.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ipv6.h>
+#include <net/addrconf.h>
+#include <net/ip6_tunnel.h>
+#include <net/ip6_checksum.h>
+#endif
#define VXLAN_VERSION "0.1"
@@ -57,6 +62,9 @@
#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
/* IP header + UDP + VXLAN + Ethernet header */
#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
+/* IPv6 header + UDP + VXLAN + Ethernet header */
+#define VXLAN6_HEADROOM (40 + 8 + 8 + 14)
+#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
@@ -82,16 +90,6 @@ static int vxlan_net_id;
static const u8 all_zeros_mac[ETH_ALEN];
-/* per UDP socket information */
-struct vxlan_sock {
- struct hlist_node hlist;
- struct rcu_head rcu;
- struct work_struct del_work;
- atomic_t refcnt;
- struct socket *sock;
- struct hlist_head vni_list[VNI_HASH_SIZE];
-};
-
/* per-network namespace private data for this module */
struct vxlan_net {
struct list_head vxlan_list;
@@ -99,8 +97,14 @@ struct vxlan_net {
spinlock_t sock_lock;
};
+union vxlan_addr {
+ struct sockaddr_in sin;
+ struct sockaddr_in6 sin6;
+ struct sockaddr sa;
+};
+
struct vxlan_rdst {
- __be32 remote_ip;
+ union vxlan_addr remote_ip;
__be16 remote_port;
u32 remote_vni;
u32 remote_ifindex;
@@ -127,7 +131,7 @@ struct vxlan_dev {
struct vxlan_sock *vn_sock; /* listening socket */
struct net_device *dev;
struct vxlan_rdst default_dst; /* default destination */
- __be32 saddr; /* source address */
+ union vxlan_addr saddr; /* source address */
__be16 dst_port;
__u16 port_min; /* source port range */
__u16 port_max;
@@ -153,6 +157,7 @@ struct vxlan_dev {
#define VXLAN_F_RSC 0x04
#define VXLAN_F_L2MISS 0x08
#define VXLAN_F_L3MISS 0x10
+#define VXLAN_F_IPV6 0x20 /* internal flag */
/* salt for hash table */
static u32 vxlan_salt __read_mostly;
@@ -160,6 +165,96 @@ static struct workqueue_struct *vxlan_wq;
static void vxlan_sock_work(struct work_struct *work);
+#if IS_ENABLED(CONFIG_IPV6)
+static inline
+bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
+{
+ if (a->sa.sa_family != b->sa.sa_family)
+ return false;
+ if (a->sa.sa_family == AF_INET6)
+ return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
+ else
+ return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
+}
+
+static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
+{
+ if (ipa->sa.sa_family == AF_INET6)
+ return ipv6_addr_any(&ipa->sin6.sin6_addr);
+ else
+ return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
+}
+
+static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
+{
+ if (ipa->sa.sa_family == AF_INET6)
+ return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
+ else
+ return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
+}
+
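+/* The address family is inferred from the netlink attribute length: an
+ * attribute at least sizeof(struct in6_addr) long is taken as IPv6, a
+ * __be32-sized one as IPv4.
+ */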
+static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
+{
+ if (nla_len(nla) >= sizeof(struct in6_addr)) {
+ nla_memcpy(&ip->sin6.sin6_addr, nla, sizeof(struct in6_addr));
+ ip->sa.sa_family = AF_INET6;
+ return 0;
+ } else if (nla_len(nla) >= sizeof(__be32)) {
+ ip->sin.sin_addr.s_addr = nla_get_be32(nla);
+ ip->sa.sa_family = AF_INET;
+ return 0;
+ } else {
+ return -EAFNOSUPPORT;
+ }
+}
+
+static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
+ const union vxlan_addr *ip)
+{
+ if (ip->sa.sa_family == AF_INET6)
+ return nla_put(skb, attr, sizeof(struct in6_addr), &ip->sin6.sin6_addr);
+ else
+ return nla_put_be32(skb, attr, ip->sin.sin_addr.s_addr);
+}
+
+#else /* !CONFIG_IPV6 */
+
+static inline
+bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
+{
+ return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
+}
+
+static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
+{
+ return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
+}
+
+static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
+{
+ return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
+}
+
+static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
+{
+ if (nla_len(nla) >= sizeof(struct in6_addr)) {
+ return -EAFNOSUPPORT;
+ } else if (nla_len(nla) >= sizeof(__be32)) {
+ ip->sin.sin_addr.s_addr = nla_get_be32(nla);
+ ip->sa.sa_family = AF_INET;
+ return 0;
+ } else {
+ return -EAFNOSUPPORT;
+ }
+}
+
+static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
+ const union vxlan_addr *ip)
+{
+ return nla_put_be32(skb, attr, ip->sin.sin_addr.s_addr);
+}
+#endif
+
/* Virtual Network hash table head */
static inline struct hlist_head *vni_head(struct vxlan_sock *vs, u32 id)
{
@@ -177,13 +272,18 @@ static inline struct hlist_head *vs_head(struct net *net, __be16 port)
/* First remote destination for a forwarding entry.
* Guaranteed to be non-NULL because remotes are never deleted.
*/
-static inline struct vxlan_rdst *first_remote(struct vxlan_fdb *fdb)
+static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
+{
+ return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
+}
+
+static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
{
- return list_first_or_null_rcu(&fdb->remotes, struct vxlan_rdst, list);
+ return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
}
/* Find VXLAN socket based on network namespace and UDP port */
-static struct vxlan_sock *vxlan_find_port(struct net *net, __be16 port)
+static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port)
{
struct vxlan_sock *vs;
@@ -194,16 +294,10 @@ static struct vxlan_sock *vxlan_find_port(struct net *net, __be16 port)
return NULL;
}
-/* Look up VNI in a per net namespace table */
-static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
+static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
{
- struct vxlan_sock *vs;
struct vxlan_dev *vxlan;
- vs = vxlan_find_port(net, port);
- if (!vs)
- return NULL;
-
hlist_for_each_entry_rcu(vxlan, vni_head(vs, id), hlist) {
if (vxlan->default_dst.remote_vni == id)
return vxlan;
@@ -212,6 +306,18 @@ static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
return NULL;
}
+/* Look up VNI in a per net namespace table */
+static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
+{
+ struct vxlan_sock *vs;
+
+ vs = vxlan_find_sock(net, port);
+ if (!vs)
+ return NULL;
+
+ return vxlan_vs_find_vni(vs, id);
+}
+
/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
const struct vxlan_fdb *fdb,
@@ -235,7 +341,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
if (type == RTM_GETNEIGH) {
ndm->ndm_family = AF_INET;
- send_ip = rdst->remote_ip != htonl(INADDR_ANY);
+ send_ip = !vxlan_addr_any(&rdst->remote_ip);
send_eth = !is_zero_ether_addr(fdb->eth_addr);
} else
ndm->ndm_family = AF_BRIDGE;
@@ -247,7 +353,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
goto nla_put_failure;
- if (send_ip && nla_put_be32(skb, NDA_DST, rdst->remote_ip))
+ if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
goto nla_put_failure;
if (rdst->remote_port && rdst->remote_port != vxlan->dst_port &&
@@ -279,7 +385,7 @@ static inline size_t vxlan_nlmsg_size(void)
{
return NLMSG_ALIGN(sizeof(struct ndmsg))
+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
- + nla_total_size(sizeof(__be32)) /* NDA_DST */
+ + nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */
+ nla_total_size(sizeof(__be16)) /* NDA_PORT */
+ nla_total_size(sizeof(__be32)) /* NDA_VNI */
+ nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
@@ -297,7 +403,8 @@ static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
if (skb == NULL)
goto errout;
- err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, first_remote(fdb));
+ err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0,
+ first_remote_rtnl(fdb));
if (err < 0) {
/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
@@ -312,14 +419,14 @@ errout:
rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
-static void vxlan_ip_miss(struct net_device *dev, __be32 ipa)
+static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb f = {
.state = NUD_STALE,
};
struct vxlan_rdst remote = {
- .remote_ip = ipa, /* goes to NDA_DST */
+ .remote_ip = *ipa, /* goes to NDA_DST */
.remote_vni = VXLAN_N_VID,
};
@@ -371,7 +478,7 @@ static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
struct vxlan_fdb *f;
hlist_for_each_entry_rcu(f, head, hlist) {
- if (compare_ether_addr(mac, f->eth_addr) == 0)
+ if (ether_addr_equal(mac, f->eth_addr))
return f;
}
@@ -392,13 +499,13 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
/* caller should hold vxlan->hash_lock */
static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
- __be32 ip, __be16 port,
+ union vxlan_addr *ip, __be16 port,
__u32 vni, __u32 ifindex)
{
struct vxlan_rdst *rd;
list_for_each_entry(rd, &f->remotes, list) {
- if (rd->remote_ip == ip &&
+ if (vxlan_addr_equal(&rd->remote_ip, ip) &&
rd->remote_port == port &&
rd->remote_vni == vni &&
rd->remote_ifindex == ifindex)
@@ -408,9 +515,29 @@ static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
return NULL;
}
+/* Replace destination of unicast mac */
+static int vxlan_fdb_replace(struct vxlan_fdb *f,
+ union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex)
+{
+ struct vxlan_rdst *rd;
+
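+	/* If the requested destination is already present, nothing changes. */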
+ rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
+ if (rd)
+ return 0;
+
+ rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
+ if (!rd)
+ return 0;
+ rd->remote_ip = *ip;
+ rd->remote_port = port;
+ rd->remote_vni = vni;
+ rd->remote_ifindex = ifindex;
+ return 1;
+}
+
/* Add/update destinations for multicast */
static int vxlan_fdb_append(struct vxlan_fdb *f,
- __be32 ip, __be16 port, __u32 vni, __u32 ifindex)
+ union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex)
{
struct vxlan_rdst *rd;
@@ -421,7 +548,7 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
if (rd == NULL)
return -ENOBUFS;
- rd->remote_ip = ip;
+ rd->remote_ip = *ip;
rd->remote_port = port;
rd->remote_vni = vni;
rd->remote_ifindex = ifindex;
@@ -431,9 +558,43 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
return 1;
}
+/* Notify netdevs that a UDP port has started listening */
+static void vxlan_notify_add_rx_port(struct sock *sk)
+{
+ struct net_device *dev;
+ struct net *net = sock_net(sk);
+ sa_family_t sa_family = sk->sk_family;
+ u16 port = htons(inet_sk(sk)->inet_sport);
+
+ rcu_read_lock();
+ for_each_netdev_rcu(net, dev) {
+ if (dev->netdev_ops->ndo_add_vxlan_port)
+ dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
+ port);
+ }
+ rcu_read_unlock();
+}
+
+/* Notify netdevs that a UDP port is no longer listening */
+static void vxlan_notify_del_rx_port(struct sock *sk)
+{
+ struct net_device *dev;
+ struct net *net = sock_net(sk);
+ sa_family_t sa_family = sk->sk_family;
+ u16 port = htons(inet_sk(sk)->inet_sport);
+
+ rcu_read_lock();
+ for_each_netdev_rcu(net, dev) {
+ if (dev->netdev_ops->ndo_del_vxlan_port)
+ dev->netdev_ops->ndo_del_vxlan_port(dev, sa_family,
+ port);
+ }
+ rcu_read_unlock();
+}
+
/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
- const u8 *mac, __be32 ip,
+ const u8 *mac, union vxlan_addr *ip,
__u16 state, __u16 flags,
__be16 port, __u32 vni, __u32 ifindex,
__u8 ndm_flags)
@@ -458,6 +619,19 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
f->updated = jiffies;
notify = 1;
}
+ if ((flags & NLM_F_REPLACE)) {
+ /* Only change unicasts */
+ if (!(is_multicast_ether_addr(f->eth_addr) ||
+ is_zero_ether_addr(f->eth_addr))) {
+ int rc = vxlan_fdb_replace(f, ip, port, vni,
+ ifindex);
+
+ if (rc < 0)
+ return rc;
+ notify |= rc;
+ } else
+ return -EOPNOTSUPP;
+ }
if ((flags & NLM_F_APPEND) &&
(is_multicast_ether_addr(f->eth_addr) ||
is_zero_ether_addr(f->eth_addr))) {
@@ -474,7 +648,12 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
return -ENOSPC;
- netdev_dbg(vxlan->dev, "add %pM -> %pI4\n", mac, &ip);
+ /* Disallow replace to add a multicast entry */
+ if ((flags & NLM_F_REPLACE) &&
+ (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
+ return -EOPNOTSUPP;
+
+ netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
f = kmalloc(sizeof(*f), GFP_ATOMIC);
if (!f)
return -ENOMEM;
@@ -499,12 +678,6 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
return 0;
}
-static void vxlan_fdb_free_rdst(struct rcu_head *head)
-{
- struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);
- kfree(rd);
-}
-
static void vxlan_fdb_free(struct rcu_head *head)
{
struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
@@ -528,17 +701,26 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
}
static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
- __be32 *ip, __be16 *port, u32 *vni, u32 *ifindex)
+ union vxlan_addr *ip, __be16 *port, u32 *vni, u32 *ifindex)
{
struct net *net = dev_net(vxlan->dev);
+ int err;
if (tb[NDA_DST]) {
- if (nla_len(tb[NDA_DST]) != sizeof(__be32))
- return -EAFNOSUPPORT;
-
- *ip = nla_get_be32(tb[NDA_DST]);
+ err = vxlan_nla_get_addr(ip, tb[NDA_DST]);
+ if (err)
+ return err;
} else {
- *ip = htonl(INADDR_ANY);
+ union vxlan_addr *remote = &vxlan->default_dst.remote_ip;
+ if (remote->sa.sa_family == AF_INET) {
+ ip->sin.sin_addr.s_addr = htonl(INADDR_ANY);
+ ip->sa.sa_family = AF_INET;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ ip->sin6.sin6_addr = in6addr_any;
+ ip->sa.sa_family = AF_INET6;
+#endif
+ }
}
if (tb[NDA_PORT]) {
@@ -581,7 +763,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
{
struct vxlan_dev *vxlan = netdev_priv(dev);
/* struct net *net = dev_net(vxlan->dev); */
- __be32 ip;
+ union vxlan_addr ip;
__be16 port;
u32 vni, ifindex;
int err;
@@ -600,7 +782,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return err;
spin_lock_bh(&vxlan->hash_lock);
- err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags,
+ err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
port, vni, ifindex, ndm->ndm_flags);
spin_unlock_bh(&vxlan->hash_lock);
@@ -615,7 +797,7 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb *f;
struct vxlan_rdst *rd = NULL;
- __be32 ip;
+ union vxlan_addr ip;
__be16 port;
u32 vni, ifindex;
int err;
@@ -631,8 +813,8 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
if (!f)
goto out;
- if (ip != htonl(INADDR_ANY)) {
- rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
+ if (!vxlan_addr_any(&ip)) {
+ rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex);
if (!rd)
goto out;
}
@@ -644,7 +826,7 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
*/
if (rd && !list_is_singular(&f->remotes)) {
list_del_rcu(&rd->list);
- call_rcu(&rd->rcu, vxlan_fdb_free_rdst);
+ kfree_rcu(rd, rcu);
goto out;
}
@@ -695,16 +877,16 @@ out:
 * Return true if packet is bogus and should be dropped.
*/
static bool vxlan_snoop(struct net_device *dev,
- __be32 src_ip, const u8 *src_mac)
+ union vxlan_addr *src_ip, const u8 *src_mac)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb *f;
f = vxlan_find_mac(vxlan, src_mac);
if (likely(f)) {
- struct vxlan_rdst *rdst = first_remote(f);
+ struct vxlan_rdst *rdst = first_remote_rcu(f);
- if (likely(rdst->remote_ip == src_ip))
+ if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip)))
return false;
/* Don't migrate static entries, drop packets */
@@ -713,10 +895,10 @@ static bool vxlan_snoop(struct net_device *dev,
if (net_ratelimit())
netdev_info(dev,
- "%pM migrated from %pI4 to %pI4\n",
+ "%pM migrated from %pIS to %pIS\n",
src_mac, &rdst->remote_ip, &src_ip);
- rdst->remote_ip = src_ip;
+ rdst->remote_ip = *src_ip;
f->updated = jiffies;
vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);
} else {
@@ -738,7 +920,7 @@ static bool vxlan_snoop(struct net_device *dev,
}
/* See if multicast group is already in use by other ID */
-static bool vxlan_group_used(struct vxlan_net *vn, __be32 remote_ip)
+static bool vxlan_group_used(struct vxlan_net *vn, union vxlan_addr *remote_ip)
{
struct vxlan_dev *vxlan;
@@ -746,7 +928,8 @@ static bool vxlan_group_used(struct vxlan_net *vn, __be32 remote_ip)
if (!netif_running(vxlan->dev))
continue;
- if (vxlan->default_dst.remote_ip == remote_ip)
+ if (vxlan_addr_equal(&vxlan->default_dst.remote_ip,
+ remote_ip))
return true;
}
@@ -758,17 +941,25 @@ static void vxlan_sock_hold(struct vxlan_sock *vs)
atomic_inc(&vs->refcnt);
}
-static void vxlan_sock_release(struct vxlan_net *vn, struct vxlan_sock *vs)
+void vxlan_sock_release(struct vxlan_sock *vs)
{
+ struct sock *sk = vs->sock->sk;
+ struct net *net = sock_net(sk);
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+
if (!atomic_dec_and_test(&vs->refcnt))
return;
spin_lock(&vn->sock_lock);
hlist_del_rcu(&vs->hlist);
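+	/* Order the removal above before clearing sk_user_data below;
+	 * presumably pairs with smp_read_barrier_depends() in
+	 * vxlan_udp_encap_recv().
+	 */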
+ smp_wmb();
+ vs->sock->sk->sk_user_data = NULL;
+ vxlan_notify_del_rx_port(sk);
spin_unlock(&vn->sock_lock);
queue_work(vxlan_wq, &vs->del_work);
}
+EXPORT_SYMBOL_GPL(vxlan_sock_release);
/* Callback to update multicast group membership when first VNI on
 * multicast address is brought up
@@ -777,19 +968,28 @@ static void vxlan_sock_release(struct vxlan_net *vn, struct vxlan_sock *vs)
static void vxlan_igmp_join(struct work_struct *work)
{
struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_join);
- struct vxlan_net *vn = net_generic(dev_net(vxlan->dev), vxlan_net_id);
struct vxlan_sock *vs = vxlan->vn_sock;
struct sock *sk = vs->sock->sk;
- struct ip_mreqn mreq = {
- .imr_multiaddr.s_addr = vxlan->default_dst.remote_ip,
- .imr_ifindex = vxlan->default_dst.remote_ifindex,
- };
+ union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
+ int ifindex = vxlan->default_dst.remote_ifindex;
lock_sock(sk);
- ip_mc_join_group(sk, &mreq);
+ if (ip->sa.sa_family == AF_INET) {
+ struct ip_mreqn mreq = {
+ .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
+ .imr_ifindex = ifindex,
+ };
+
+ ip_mc_join_group(sk, &mreq);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
+ &ip->sin6.sin6_addr);
+#endif
+ }
release_sock(sk);
- vxlan_sock_release(vn, vs);
+ vxlan_sock_release(vs);
dev_put(vxlan->dev);
}
@@ -797,42 +997,45 @@ static void vxlan_igmp_join(struct work_struct *work)
static void vxlan_igmp_leave(struct work_struct *work)
{
struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_leave);
- struct vxlan_net *vn = net_generic(dev_net(vxlan->dev), vxlan_net_id);
struct vxlan_sock *vs = vxlan->vn_sock;
struct sock *sk = vs->sock->sk;
- struct ip_mreqn mreq = {
- .imr_multiaddr.s_addr = vxlan->default_dst.remote_ip,
- .imr_ifindex = vxlan->default_dst.remote_ifindex,
- };
+ union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
+ int ifindex = vxlan->default_dst.remote_ifindex;
lock_sock(sk);
- ip_mc_leave_group(sk, &mreq);
+ if (ip->sa.sa_family == AF_INET) {
+ struct ip_mreqn mreq = {
+ .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
+ .imr_ifindex = ifindex,
+ };
+
+ ip_mc_leave_group(sk, &mreq);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
+ &ip->sin6.sin6_addr);
+#endif
+ }
+
release_sock(sk);
- vxlan_sock_release(vn, vs);
+ vxlan_sock_release(vs);
dev_put(vxlan->dev);
}
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
- struct iphdr *oip;
+ struct vxlan_sock *vs;
struct vxlanhdr *vxh;
- struct vxlan_dev *vxlan;
- struct pcpu_tstats *stats;
__be16 port;
- __u32 vni;
- int err;
-
- /* pop off outer UDP header */
- __skb_pull(skb, sizeof(struct udphdr));
/* Need Vxlan and inner Ethernet header to be present */
- if (!pskb_may_pull(skb, sizeof(struct vxlanhdr)))
+ if (!pskb_may_pull(skb, VXLAN_HLEN))
goto error;
- /* Drop packets with reserved bits set */
- vxh = (struct vxlanhdr *) skb->data;
+ /* Return packets with reserved bits set */
+ vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
(vxh->vx_vni & htonl(0xff))) {
netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
@@ -840,40 +1043,72 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
goto error;
}
- __skb_pull(skb, sizeof(struct vxlanhdr));
+ if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
+ goto drop;
- /* Is this VNI defined? */
- vni = ntohl(vxh->vx_vni) >> 8;
port = inet_sk(sk)->inet_sport;
- vxlan = vxlan_find_vni(sock_net(sk), vni, port);
- if (!vxlan) {
- netdev_dbg(skb->dev, "unknown vni %d port %u\n",
- vni, ntohs(port));
+
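+	/* sk_user_data carries the vxlan_sock published by
+	 * vxlan_socket_create(); the barrier below orders that read.
+	 */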
+ smp_read_barrier_depends();
+ vs = (struct vxlan_sock *)sk->sk_user_data;
+ if (!vs)
goto drop;
- }
- if (!pskb_may_pull(skb, ETH_HLEN)) {
- vxlan->dev->stats.rx_length_errors++;
- vxlan->dev->stats.rx_errors++;
+ vs->rcv(vs, skb, vxh->vx_vni);
+ return 0;
+
+drop:
+ /* Consume bad packet */
+ kfree_skb(skb);
+ return 0;
+
+error:
+	/* Return non-VXLAN packet to the UDP stack */
+ return 1;
+}
+
+static void vxlan_rcv(struct vxlan_sock *vs,
+ struct sk_buff *skb, __be32 vx_vni)
+{
+ struct iphdr *oip = NULL;
+ struct ipv6hdr *oip6 = NULL;
+ struct vxlan_dev *vxlan;
+ struct pcpu_tstats *stats;
+ union vxlan_addr saddr;
+ __u32 vni;
+ int err = 0;
+ union vxlan_addr *remote_ip;
+
+ vni = ntohl(vx_vni) >> 8;
+ /* Is this VNI defined? */
+ vxlan = vxlan_vs_find_vni(vs, vni);
+ if (!vxlan)
goto drop;
- }
+ remote_ip = &vxlan->default_dst.remote_ip;
skb_reset_mac_header(skb);
-
- /* Re-examine inner Ethernet packet */
- oip = ip_hdr(skb);
skb->protocol = eth_type_trans(skb, vxlan->dev);
/* Ignore packet loops (and multicast echo) */
- if (compare_ether_addr(eth_hdr(skb)->h_source,
- vxlan->dev->dev_addr) == 0)
+ if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
goto drop;
+ /* Re-examine inner Ethernet packet */
+ if (remote_ip->sa.sa_family == AF_INET) {
+ oip = ip_hdr(skb);
+ saddr.sin.sin_addr.s_addr = oip->saddr;
+ saddr.sa.sa_family = AF_INET;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ oip6 = ipv6_hdr(skb);
+ saddr.sin6.sin6_addr = oip6->saddr;
+ saddr.sa.sa_family = AF_INET6;
+#endif
+ }
+
if ((vxlan->flags & VXLAN_F_LEARN) &&
- vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source))
+ vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
goto drop;
- __skb_tunnel_rx(skb, vxlan->dev);
skb_reset_network_header(skb);
/* If the NIC driver gave us an encapsulated packet with
@@ -887,11 +1122,20 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
skb->encapsulation = 0;
- err = IP_ECN_decapsulate(oip, skb);
+ if (oip6)
+ err = IP6_ECN_decapsulate(oip6, skb);
+ if (oip)
+ err = IP_ECN_decapsulate(oip, skb);
+
if (unlikely(err)) {
- if (log_ecn_error)
- net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
- &oip->saddr, oip->tos);
+ if (log_ecn_error) {
+ if (oip6)
+ net_info_ratelimited("non-ECT from %pI6\n",
+ &oip6->saddr);
+ if (oip)
+ net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
+ &oip->saddr, oip->tos);
+ }
if (err > 1) {
++vxlan->dev->stats.rx_frame_errors;
++vxlan->dev->stats.rx_errors;
@@ -907,16 +1151,10 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
netif_rx(skb);
- return 0;
-error:
- /* Put UDP header back */
- __skb_push(skb, sizeof(struct udphdr));
-
- return 1;
+ return;
drop:
/* Consume bad packet */
kfree_skb(skb);
- return 0;
}
static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
@@ -967,7 +1205,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
}
f = vxlan_find_mac(vxlan, n->ha);
- if (f && first_remote(f)->remote_ip == htonl(INADDR_ANY)) {
+ if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
/* bridge-local neighbor */
neigh_release(n);
goto out;
@@ -985,18 +1223,87 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
if (netif_rx_ni(reply) == NET_RX_DROP)
dev->stats.rx_dropped++;
- } else if (vxlan->flags & VXLAN_F_L3MISS)
- vxlan_ip_miss(dev, tip);
+ } else if (vxlan->flags & VXLAN_F_L3MISS) {
+ union vxlan_addr ipa = {
+ .sin.sin_addr.s_addr = tip,
+ .sa.sa_family = AF_INET,
+ };
+
+ vxlan_ip_miss(dev, &ipa);
+ }
out:
consume_skb(skb);
return NETDEV_TX_OK;
}
+#if IS_ENABLED(CONFIG_IPV6)
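+/* IPv6 counterpart of arp_reduce(): answer neighbour solicitations locally
+ * when the target is a known, connected neighbour (proxy ND).
+ */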
+static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct neighbour *n;
+ union vxlan_addr ipa;
+ const struct ipv6hdr *iphdr;
+ const struct in6_addr *saddr, *daddr;
+ struct nd_msg *msg;
+ struct inet6_dev *in6_dev = NULL;
+
+ in6_dev = __in6_dev_get(dev);
+ if (!in6_dev)
+ goto out;
+
+ if (!pskb_may_pull(skb, skb->len))
+ goto out;
+
+ iphdr = ipv6_hdr(skb);
+ saddr = &iphdr->saddr;
+ daddr = &iphdr->daddr;
+
+ if (ipv6_addr_loopback(daddr) ||
+ ipv6_addr_is_multicast(daddr))
+ goto out;
+
+ msg = (struct nd_msg *)skb_transport_header(skb);
+ if (msg->icmph.icmp6_code != 0 ||
+ msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
+ goto out;
+
+ n = neigh_lookup(ipv6_stub->nd_tbl, daddr, dev);
+
+ if (n) {
+ struct vxlan_fdb *f;
+
+ if (!(n->nud_state & NUD_CONNECTED)) {
+ neigh_release(n);
+ goto out;
+ }
+
+ f = vxlan_find_mac(vxlan, n->ha);
+ if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
+ /* bridge-local neighbor */
+ neigh_release(n);
+ goto out;
+ }
+
+ ipv6_stub->ndisc_send_na(dev, n, saddr, &msg->target,
+ !!in6_dev->cnf.forwarding,
+ true, false, false);
+ neigh_release(n);
+ } else if (vxlan->flags & VXLAN_F_L3MISS) {
+ ipa.sin6.sin6_addr = *daddr;
+ ipa.sa.sa_family = AF_INET6;
+ vxlan_ip_miss(dev, &ipa);
+ }
+
+out:
+ consume_skb(skb);
+ return NETDEV_TX_OK;
+}
+#endif
+
static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct neighbour *n;
- struct iphdr *pip;
if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
return false;
@@ -1004,11 +1311,47 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
n = NULL;
switch (ntohs(eth_hdr(skb)->h_proto)) {
case ETH_P_IP:
+ {
+ struct iphdr *pip;
+
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
return false;
pip = ip_hdr(skb);
n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
+ if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
+ union vxlan_addr ipa = {
+ .sin.sin_addr.s_addr = pip->daddr,
+ .sa.sa_family = AF_INET,
+ };
+
+ vxlan_ip_miss(dev, &ipa);
+ return false;
+ }
+
break;
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ case ETH_P_IPV6:
+ {
+ struct ipv6hdr *pip6;
+
+ if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
+ return false;
+ pip6 = ipv6_hdr(skb);
+ n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev);
+ if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
+ union vxlan_addr ipa = {
+ .sin6.sin6_addr = pip6->daddr,
+ .sa.sa_family = AF_INET6,
+ };
+
+ vxlan_ip_miss(dev, &ipa);
+ return false;
+ }
+
+ break;
+ }
+#endif
default:
return false;
}
@@ -1016,7 +1359,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
if (n) {
bool diff;
- diff = compare_ether_addr(eth_hdr(skb)->h_dest, n->ha) != 0;
+ diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha);
if (diff) {
memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
dev->addr_len);
@@ -1024,8 +1367,8 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
}
neigh_release(n);
return diff;
- } else if (vxlan->flags & VXLAN_F_L3MISS)
- vxlan_ip_miss(dev, pip->daddr);
+ }
+
return false;
}
@@ -1035,11 +1378,8 @@ static void vxlan_sock_put(struct sk_buff *skb)
}
/* On transmit, associate with the tunnel socket */
-static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb)
+static void vxlan_set_owner(struct sock *sk, struct sk_buff *skb)
{
- struct vxlan_dev *vxlan = netdev_priv(dev);
- struct sock *sk = vxlan->vn_sock->sock->sk;
-
skb_orphan(skb);
sock_hold(sk);
skb->sk = sk;
@@ -1051,9 +1391,9 @@ static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb)
* better and maybe available from hardware
* secondary choice is to use jhash on the Ethernet header
*/
-static __be16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb)
+__be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb)
{
- unsigned int range = (vxlan->port_max - vxlan->port_min) + 1;
+ unsigned int range = (port_max - port_min) + 1;
u32 hash;
hash = skb_get_rxhash(skb);
@@ -1061,8 +1401,9 @@ static __be16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb)
hash = jhash(skb->data, 2 * ETH_ALEN,
(__force u32) skb->protocol);
- return htons((((u64) hash * range) >> 32) + vxlan->port_min);
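+	/* Multiply-shift maps the 32-bit hash uniformly onto
+	 * [port_min, port_max] without a division. */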
+ return htons((((u64) hash * range) >> 32) + port_min);
}
+EXPORT_SYMBOL_GPL(vxlan_src_port);
static int handle_offloads(struct sk_buff *skb)
{
@@ -1078,21 +1419,187 @@ static int handle_offloads(struct sk_buff *skb)
return 0;
}
+#if IS_ENABLED(CONFIG_IPV6)
+static int vxlan6_xmit_skb(struct vxlan_sock *vs,
+ struct dst_entry *dst, struct sk_buff *skb,
+ struct net_device *dev, struct in6_addr *saddr,
+ struct in6_addr *daddr, __u8 prio, __u8 ttl,
+ __be16 src_port, __be16 dst_port, __be32 vni)
+{
+ struct ipv6hdr *ip6h;
+ struct vxlanhdr *vxh;
+ struct udphdr *uh;
+ int min_headroom;
+ int err;
+
+ if (!skb->encapsulation) {
+ skb_reset_inner_headers(skb);
+ skb->encapsulation = 1;
+ }
+
+ skb_scrub_packet(skb, false);
+
+ min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
+ + VXLAN_HLEN + sizeof(struct ipv6hdr)
+ + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+
+ /* Need space for new headers (invalidates iph ptr) */
+ err = skb_cow_head(skb, min_headroom);
+ if (unlikely(err))
+ return err;
+
+ if (vlan_tx_tag_present(skb)) {
+ if (WARN_ON(!__vlan_put_tag(skb,
+ skb->vlan_proto,
+ vlan_tx_tag_get(skb))))
+ return -ENOMEM;
+
+ skb->vlan_tci = 0;
+ }
+
+ vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
+ vxh->vx_flags = htonl(VXLAN_FLAGS);
+ vxh->vx_vni = vni;
+
+ __skb_push(skb, sizeof(*uh));
+ skb_reset_transport_header(skb);
+ uh = udp_hdr(skb);
+
+ uh->dest = dst_port;
+ uh->source = src_port;
+
+ uh->len = htons(skb->len);
+ uh->check = 0;
+
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
+ IPSKB_REROUTED);
+ skb_dst_set(skb, dst);
+
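+	/* UDP checksums are mandatory over IPv6: compute one in software
+	 * when the device cannot offload it, otherwise defer via
+	 * CHECKSUM_PARTIAL.
+	 */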
+ if (!skb_is_gso(skb) && !(dst->dev->features & NETIF_F_IPV6_CSUM)) {
+ __wsum csum = skb_checksum(skb, 0, skb->len, 0);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ uh->check = csum_ipv6_magic(saddr, daddr, skb->len,
+ IPPROTO_UDP, csum);
+ if (uh->check == 0)
+ uh->check = CSUM_MANGLED_0;
+ } else {
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_offset = offsetof(struct udphdr, check);
+ uh->check = ~csum_ipv6_magic(saddr, daddr,
+ skb->len, IPPROTO_UDP, 0);
+ }
+
+ __skb_push(skb, sizeof(*ip6h));
+ skb_reset_network_header(skb);
+ ip6h = ipv6_hdr(skb);
+ ip6h->version = 6;
+ ip6h->priority = prio;
+ ip6h->flow_lbl[0] = 0;
+ ip6h->flow_lbl[1] = 0;
+ ip6h->flow_lbl[2] = 0;
+ ip6h->payload_len = htons(skb->len);
+ ip6h->nexthdr = IPPROTO_UDP;
+ ip6h->hop_limit = ttl;
+ ip6h->daddr = *daddr;
+ ip6h->saddr = *saddr;
+
+ vxlan_set_owner(vs->sock->sk, skb);
+
+ err = handle_offloads(skb);
+ if (err)
+ return err;
+
+ ip6tunnel_xmit(skb, dev);
+ return 0;
+}
+#endif
+
+int vxlan_xmit_skb(struct vxlan_sock *vs,
+ struct rtable *rt, struct sk_buff *skb,
+ __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
+ __be16 src_port, __be16 dst_port, __be32 vni)
+{
+ struct vxlanhdr *vxh;
+ struct udphdr *uh;
+ int min_headroom;
+ int err;
+
+ if (!skb->encapsulation) {
+ skb_reset_inner_headers(skb);
+ skb->encapsulation = 1;
+ }
+
+ min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ + VXLAN_HLEN + sizeof(struct iphdr)
+ + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+
+ /* Need space for new headers (invalidates iph ptr) */
+ err = skb_cow_head(skb, min_headroom);
+ if (unlikely(err))
+ return err;
+
+ if (vlan_tx_tag_present(skb)) {
+ if (WARN_ON(!__vlan_put_tag(skb,
+ skb->vlan_proto,
+ vlan_tx_tag_get(skb))))
+ return -ENOMEM;
+
+ skb->vlan_tci = 0;
+ }
+
+ vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
+ vxh->vx_flags = htonl(VXLAN_FLAGS);
+ vxh->vx_vni = vni;
+
+ __skb_push(skb, sizeof(*uh));
+ skb_reset_transport_header(skb);
+ uh = udp_hdr(skb);
+
+ uh->dest = dst_port;
+ uh->source = src_port;
+
+ uh->len = htons(skb->len);
+ uh->check = 0;
+
+ vxlan_set_owner(vs->sock->sk, skb);
+
+ err = handle_offloads(skb);
+ if (err)
+ return err;
+
+ return iptunnel_xmit(rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df,
+ false);
+}
+EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
+
/* Bypass encapsulation if the destination is local */
static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
struct vxlan_dev *dst_vxlan)
{
struct pcpu_tstats *tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
struct pcpu_tstats *rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
+ union vxlan_addr loopback;
+ union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
skb->pkt_type = PACKET_HOST;
skb->encapsulation = 0;
skb->dev = dst_vxlan->dev;
__skb_pull(skb, skb_network_offset(skb));
+ if (remote_ip->sa.sa_family == AF_INET) {
+ loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ loopback.sa.sa_family = AF_INET;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ loopback.sin6.sin6_addr = in6addr_loopback;
+ loopback.sa.sa_family = AF_INET6;
+#endif
+ }
+
if (dst_vxlan->flags & VXLAN_F_LEARN)
- vxlan_snoop(skb->dev, htonl(INADDR_LOOPBACK),
- eth_hdr(skb)->h_source);
+ vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source);
u64_stats_update_begin(&tx_stats->syncp);
tx_stats->tx_packets++;
@@ -1113,13 +1620,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct vxlan_rdst *rdst, bool did_rsc)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- struct rtable *rt;
+ struct rtable *rt = NULL;
const struct iphdr *old_iph;
- struct vxlanhdr *vxh;
- struct udphdr *uh;
struct flowi4 fl4;
- __be32 dst;
- __be16 src_port, dst_port;
+ union vxlan_addr *dst;
+ __be16 src_port = 0, dst_port;
u32 vni;
__be16 df = 0;
__u8 tos, ttl;
@@ -1127,9 +1632,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port;
vni = rdst->remote_vni;
- dst = rdst->remote_ip;
+ dst = &rdst->remote_ip;
- if (!dst) {
+ if (vxlan_addr_any(dst)) {
if (did_rsc) {
/* short-circuited back to local bridge */
vxlan_encap_bypass(skb, vxlan, vxlan);
@@ -1138,84 +1643,113 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
goto drop;
}
- if (!skb->encapsulation) {
- skb_reset_inner_headers(skb);
- skb->encapsulation = 1;
- }
-
- /* Need space for new headers (invalidates iph ptr) */
- if (skb_cow_head(skb, VXLAN_HEADROOM))
- goto drop;
-
old_iph = ip_hdr(skb);
ttl = vxlan->ttl;
- if (!ttl && IN_MULTICAST(ntohl(dst)))
+ if (!ttl && vxlan_addr_multicast(dst))
ttl = 1;
tos = vxlan->tos;
if (tos == 1)
tos = ip_tunnel_get_dsfield(old_iph, skb);
- src_port = vxlan_src_port(vxlan, skb);
+ src_port = vxlan_src_port(vxlan->port_min, vxlan->port_max, skb);
- memset(&fl4, 0, sizeof(fl4));
- fl4.flowi4_oif = rdst->remote_ifindex;
- fl4.flowi4_tos = RT_TOS(tos);
- fl4.daddr = dst;
- fl4.saddr = vxlan->saddr;
+ if (dst->sa.sa_family == AF_INET) {
+ memset(&fl4, 0, sizeof(fl4));
+ fl4.flowi4_oif = rdst->remote_ifindex;
+ fl4.flowi4_tos = RT_TOS(tos);
+ fl4.daddr = dst->sin.sin_addr.s_addr;
+ fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr;
- rt = ip_route_output_key(dev_net(dev), &fl4);
- if (IS_ERR(rt)) {
- netdev_dbg(dev, "no route to %pI4\n", &dst);
- dev->stats.tx_carrier_errors++;
- goto tx_error;
- }
+ rt = ip_route_output_key(dev_net(dev), &fl4);
+ if (IS_ERR(rt)) {
+ netdev_dbg(dev, "no route to %pI4\n",
+ &dst->sin.sin_addr.s_addr);
+ dev->stats.tx_carrier_errors++;
+ goto tx_error;
+ }
- if (rt->dst.dev == dev) {
- netdev_dbg(dev, "circular route to %pI4\n", &dst);
- ip_rt_put(rt);
- dev->stats.collisions++;
- goto tx_error;
- }
+ if (rt->dst.dev == dev) {
+ netdev_dbg(dev, "circular route to %pI4\n",
+ &dst->sin.sin_addr.s_addr);
+ dev->stats.collisions++;
+ goto tx_error;
+ }
- /* Bypass encapsulation if the destination is local */
- if (rt->rt_flags & RTCF_LOCAL &&
- !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
- struct vxlan_dev *dst_vxlan;
+ /* Bypass encapsulation if the destination is local */
+ if (rt->rt_flags & RTCF_LOCAL &&
+ !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
+ struct vxlan_dev *dst_vxlan;
- ip_rt_put(rt);
- dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
- if (!dst_vxlan)
- goto tx_error;
- vxlan_encap_bypass(skb, vxlan, dst_vxlan);
- return;
- }
- vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
- vxh->vx_flags = htonl(VXLAN_FLAGS);
- vxh->vx_vni = htonl(vni << 8);
+ ip_rt_put(rt);
+ dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
+ if (!dst_vxlan)
+ goto tx_error;
+ vxlan_encap_bypass(skb, vxlan, dst_vxlan);
+ return;
+ }
- __skb_push(skb, sizeof(*uh));
- skb_reset_transport_header(skb);
- uh = udp_hdr(skb);
+ tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
+ ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
- uh->dest = dst_port;
- uh->source = src_port;
+ err = vxlan_xmit_skb(vxlan->vn_sock, rt, skb,
+ fl4.saddr, dst->sin.sin_addr.s_addr,
+ tos, ttl, df, src_port, dst_port,
+ htonl(vni << 8));
- uh->len = htons(skb->len);
- uh->check = 0;
+ if (err < 0)
+ goto rt_tx_error;
+ iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ struct sock *sk = vxlan->vn_sock->sock->sk;
+ struct dst_entry *ndst;
+ struct flowi6 fl6;
+ u32 flags;
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_oif = rdst->remote_ifindex;
+ fl6.daddr = dst->sin6.sin6_addr;
+ fl6.saddr = vxlan->saddr.sin6.sin6_addr;
+ fl6.flowi6_proto = IPPROTO_UDP;
+
+ if (ipv6_stub->ipv6_dst_lookup(sk, &ndst, &fl6)) {
+ netdev_dbg(dev, "no route to %pI6\n",
+ &dst->sin6.sin6_addr);
+ dev->stats.tx_carrier_errors++;
+ goto tx_error;
+ }
- vxlan_set_owner(dev, skb);
+ if (ndst->dev == dev) {
+ netdev_dbg(dev, "circular route to %pI6\n",
+ &dst->sin6.sin6_addr);
+ dst_release(ndst);
+ dev->stats.collisions++;
+ goto tx_error;
+ }
- if (handle_offloads(skb))
- goto drop;
+ /* Bypass encapsulation if the destination is local */
+ flags = ((struct rt6_info *)ndst)->rt6i_flags;
+ if (flags & RTF_LOCAL &&
+ !(flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
+ struct vxlan_dev *dst_vxlan;
+
+ dst_release(ndst);
+ dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
+ if (!dst_vxlan)
+ goto tx_error;
+ vxlan_encap_bypass(skb, vxlan, dst_vxlan);
+ return;
+ }
- tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
- ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
+ ttl = ttl ? : ip6_dst_hoplimit(ndst);
- err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, dst,
- IPPROTO_UDP, tos, ttl, df);
- iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
+ err = vxlan6_xmit_skb(vxlan->vn_sock, ndst, skb,
+ dev, &fl6.saddr, &fl6.daddr, 0, ttl,
+ src_port, dst_port, htonl(vni << 8));
+#endif
+ }
return;
@@ -1223,6 +1757,8 @@ drop:
dev->stats.tx_dropped++;
goto tx_free;
+rt_tx_error:
+ ip_rt_put(rt);
tx_error:
dev->stats.tx_errors++;
tx_free:
@@ -1246,14 +1782,29 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
skb_reset_mac_header(skb);
eth = eth_hdr(skb);
- if ((vxlan->flags & VXLAN_F_PROXY) && ntohs(eth->h_proto) == ETH_P_ARP)
- return arp_reduce(dev, skb);
+ if ((vxlan->flags & VXLAN_F_PROXY)) {
+ if (ntohs(eth->h_proto) == ETH_P_ARP)
+ return arp_reduce(dev, skb);
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
+ skb->len >= sizeof(struct ipv6hdr) + sizeof(struct nd_msg) &&
+ ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
+ struct nd_msg *msg;
+
+ msg = (struct nd_msg *)skb_transport_header(skb);
+ if (msg->icmph.icmp6_code == 0 &&
+ msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
+ return neigh_reduce(dev, skb);
+ }
+#endif
+ }
f = vxlan_find_mac(vxlan, eth->h_dest);
did_rsc = false;
if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) &&
- ntohs(eth->h_proto) == ETH_P_IP) {
+ (ntohs(eth->h_proto) == ETH_P_IP ||
+ ntohs(eth->h_proto) == ETH_P_IPV6)) {
did_rsc = route_shortcircuit(dev, skb);
if (did_rsc)
f = vxlan_find_mac(vxlan, eth->h_dest);
@@ -1321,25 +1872,31 @@ static void vxlan_cleanup(unsigned long arg)
mod_timer(&vxlan->age_timer, next_timer);
}
+static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
+{
+ __u32 vni = vxlan->default_dst.remote_vni;
+
+ vxlan->vn_sock = vs;
+ hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
+}
+
/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct vxlan_sock *vs;
- __u32 vni = vxlan->default_dst.remote_vni;
dev->tstats = alloc_percpu(struct pcpu_tstats);
if (!dev->tstats)
return -ENOMEM;
spin_lock(&vn->sock_lock);
- vs = vxlan_find_port(dev_net(dev), vxlan->dst_port);
+ vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port);
if (vs) {
/* If we have a socket with same port already, reuse it */
atomic_inc(&vs->refcnt);
- vxlan->vn_sock = vs;
- hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
+ vxlan_vs_add_dev(vs, vxlan);
} else {
/* otherwise make new socket outside of RTNL */
dev_hold(dev);
@@ -1364,13 +1921,12 @@ static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan)
static void vxlan_uninit(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct vxlan_sock *vs = vxlan->vn_sock;
vxlan_fdb_delete_default(vxlan);
if (vs)
- vxlan_sock_release(vn, vs);
+ vxlan_sock_release(vs);
free_percpu(dev->tstats);
}
@@ -1385,8 +1941,8 @@ static int vxlan_open(struct net_device *dev)
if (!vs)
return -ENOTCONN;
- if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) &&
- vxlan_group_used(vn, vxlan->default_dst.remote_ip)) {
+ if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
+ vxlan_group_used(vn, &vxlan->default_dst.remote_ip)) {
vxlan_sock_hold(vs);
dev_hold(dev);
queue_work(vxlan_wq, &vxlan->igmp_join);
@@ -1424,8 +1980,8 @@ static int vxlan_stop(struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_sock *vs = vxlan->vn_sock;
- if (vs && IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) &&
- ! vxlan_group_used(vn, vxlan->default_dst.remote_ip)) {
+ if (vs && vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
+ ! vxlan_group_used(vn, &vxlan->default_dst.remote_ip)) {
vxlan_sock_hold(vs);
dev_hold(dev);
queue_work(vxlan_wq, &vxlan->igmp_leave);
@@ -1464,6 +2020,34 @@ static struct device_type vxlan_type = {
.name = "vxlan",
};
+/* Call the ndo_add_vxlan_port callback of the given device to
+ * supply it with the listening VXLAN UDP ports.
+ */
+void vxlan_get_rx_port(struct net_device *dev)
+{
+ struct vxlan_sock *vs;
+ struct net *net = dev_net(dev);
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ sa_family_t sa_family;
+ u16 port;
+ int i;
+
+ if (!dev || !dev->netdev_ops || !dev->netdev_ops->ndo_add_vxlan_port)
+ return;
+
+ spin_lock(&vn->sock_lock);
+ for (i = 0; i < PORT_HASH_SIZE; ++i) {
+ hlist_for_each_entry_rcu(vs, vs_head(net, i), hlist) {
+ port = htons(inet_sk(vs->sock->sk)->inet_sport);
+ sa_family = vs->sock->sk->sk_family;
+ dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
+ port);
+ }
+ }
+ spin_unlock(&vn->sock_lock);
+}
+EXPORT_SYMBOL_GPL(vxlan_get_rx_port);
+
/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
{
@@ -1473,7 +2057,10 @@ static void vxlan_setup(struct net_device *dev)
eth_hw_addr_random(dev);
ether_setup(dev);
- dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;
+ if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6)
+ dev->hard_header_len = ETH_HLEN + VXLAN6_HEADROOM;
+ else
+ dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;
dev->netdev_ops = &vxlan_netdev_ops;
dev->destructor = free_netdev;
@@ -1486,8 +2073,11 @@ static void vxlan_setup(struct net_device *dev)
dev->features |= NETIF_F_RXCSUM;
dev->features |= NETIF_F_GSO_SOFTWARE;
+ dev->vlan_features = dev->features;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
@@ -1515,8 +2105,10 @@ static void vxlan_setup(struct net_device *dev)
static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_ID] = { .type = NLA_U32 },
[IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
+ [IFLA_VXLAN_GROUP6] = { .len = sizeof(struct in6_addr) },
[IFLA_VXLAN_LINK] = { .type = NLA_U32 },
[IFLA_VXLAN_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
+ [IFLA_VXLAN_LOCAL6] = { .len = sizeof(struct in6_addr) },
[IFLA_VXLAN_TOS] = { .type = NLA_U8 },
[IFLA_VXLAN_TTL] = { .type = NLA_U8 },
[IFLA_VXLAN_LEARNING] = { .type = NLA_U8 },
@@ -1587,99 +2179,199 @@ static void vxlan_del_work(struct work_struct *work)
kfree_rcu(vs, rcu);
}
-static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port)
+#if IS_ENABLED(CONFIG_IPV6)
+/* Create UDP socket for encapsulation receive. An AF_INET6 socket
+ * could in principle serve both IPv4 and IPv6, but users may set
+ * bindv6only=1, so a dedicated v6-only socket is used instead.
+ */
+static int create_v6_sock(struct net *net, __be16 port, struct socket **psock)
+{
+ struct sock *sk;
+ struct socket *sock;
+ struct sockaddr_in6 vxlan_addr = {
+ .sin6_family = AF_INET6,
+ .sin6_port = port,
+ };
+ int rc, val = 1;
+
+ rc = sock_create_kern(AF_INET6, SOCK_DGRAM, IPPROTO_UDP, &sock);
+ if (rc < 0) {
+ pr_debug("UDPv6 socket create failed\n");
+ return rc;
+ }
+
+ /* Put in proper namespace */
+ sk = sock->sk;
+ sk_change_net(sk, net);
+
+ kernel_setsockopt(sock, SOL_IPV6, IPV6_V6ONLY,
+ (char *)&val, sizeof(val));
+ rc = kernel_bind(sock, (struct sockaddr *)&vxlan_addr,
+ sizeof(struct sockaddr_in6));
+ if (rc < 0) {
+ pr_debug("bind for UDPv6 socket %pI6:%u (%d)\n",
+ &vxlan_addr.sin6_addr, ntohs(vxlan_addr.sin6_port), rc);
+ sk_release_kernel(sk);
+ return rc;
+ }
+ /* At this point, IPv6 module should have been loaded in
+ * sock_create_kern().
+ */
+ BUG_ON(!ipv6_stub);
+
+ *psock = sock;
+ /* Disable multicast loopback */
+ inet_sk(sk)->mc_loop = 0;
+ return 0;
+}
+
+#else
+
+static int create_v6_sock(struct net *net, __be16 port, struct socket **psock)
+{
+ return -EPFNOSUPPORT;
+}
+#endif
+
+static int create_v4_sock(struct net *net, __be16 port, struct socket **psock)
{
- struct vxlan_sock *vs;
struct sock *sk;
+ struct socket *sock;
struct sockaddr_in vxlan_addr = {
.sin_family = AF_INET,
.sin_addr.s_addr = htonl(INADDR_ANY),
.sin_port = port,
};
int rc;
- unsigned int h;
-
- vs = kmalloc(sizeof(*vs), GFP_KERNEL);
- if (!vs)
- return ERR_PTR(-ENOMEM);
-
- for (h = 0; h < VNI_HASH_SIZE; ++h)
- INIT_HLIST_HEAD(&vs->vni_list[h]);
-
- INIT_WORK(&vs->del_work, vxlan_del_work);
/* Create UDP socket for encapsulation receive. */
- rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vs->sock);
+ rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
if (rc < 0) {
pr_debug("UDP socket create failed\n");
- kfree(vs);
- return ERR_PTR(rc);
+ return rc;
}
/* Put in proper namespace */
- sk = vs->sock->sk;
+ sk = sock->sk;
sk_change_net(sk, net);
- rc = kernel_bind(vs->sock, (struct sockaddr *) &vxlan_addr,
+ rc = kernel_bind(sock, (struct sockaddr *) &vxlan_addr,
sizeof(vxlan_addr));
if (rc < 0) {
pr_debug("bind for UDP socket %pI4:%u (%d)\n",
&vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
sk_release_kernel(sk);
- kfree(vs);
- return ERR_PTR(rc);
+ return rc;
}
+ *psock = sock;
/* Disable multicast loopback */
inet_sk(sk)->mc_loop = 0;
+ return 0;
+}
+
+/* Create new listen socket if needed */
+static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
+ vxlan_rcv_t *rcv, void *data, bool ipv6)
+{
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ struct vxlan_sock *vs;
+ struct socket *sock;
+ struct sock *sk;
+ int rc = 0;
+ unsigned int h;
+
+ vs = kmalloc(sizeof(*vs), GFP_KERNEL);
+ if (!vs)
+ return ERR_PTR(-ENOMEM);
+
+ for (h = 0; h < VNI_HASH_SIZE; ++h)
+ INIT_HLIST_HEAD(&vs->vni_list[h]);
+
+ INIT_WORK(&vs->del_work, vxlan_del_work);
+
+ if (ipv6)
+ rc = create_v6_sock(net, port, &sock);
+ else
+ rc = create_v4_sock(net, port, &sock);
+ if (rc < 0) {
+ kfree(vs);
+ return ERR_PTR(rc);
+ }
+
+ vs->sock = sock;
+ sk = sock->sk;
+ atomic_set(&vs->refcnt, 1);
+ vs->rcv = rcv;
+ vs->data = data;
+ smp_wmb();
+ vs->sock->sk->sk_user_data = vs;
+
+ spin_lock(&vn->sock_lock);
+ hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
+ vxlan_notify_add_rx_port(sk);
+ spin_unlock(&vn->sock_lock);
/* Mark socket as an encapsulation socket. */
udp_sk(sk)->encap_type = 1;
udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
- udp_encap_enable();
- atomic_set(&vs->refcnt, 1);
+#if IS_ENABLED(CONFIG_IPV6)
+ if (ipv6)
+ ipv6_stub->udpv6_encap_enable();
+ else
+#endif
+ udp_encap_enable();
return vs;
}
-/* Scheduled at device creation to bind to a socket */
-static void vxlan_sock_work(struct work_struct *work)
+struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
+ vxlan_rcv_t *rcv, void *data,
+ bool no_share, bool ipv6)
{
- struct vxlan_dev *vxlan
- = container_of(work, struct vxlan_dev, sock_work);
- struct net_device *dev = vxlan->dev;
- struct net *net = dev_net(dev);
- __u32 vni = vxlan->default_dst.remote_vni;
- __be16 port = vxlan->dst_port;
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
- struct vxlan_sock *nvs, *ovs;
+ struct vxlan_sock *vs;
- nvs = vxlan_socket_create(net, port);
- if (IS_ERR(nvs)) {
- netdev_err(vxlan->dev, "Can not create UDP socket, %ld\n",
- PTR_ERR(nvs));
- goto out;
- }
+ vs = vxlan_socket_create(net, port, rcv, data, ipv6);
+ if (!IS_ERR(vs))
+ return vs;
+
+ if (no_share) /* Return error if sharing is not allowed. */
+ return vs;
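+	/* Socket creation failed, typically because the port is already
+	 * bound: fall back to sharing an existing socket with a matching
+	 * rcv handler.
+	 */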
spin_lock(&vn->sock_lock);
- /* Look again to see if can reuse socket */
- ovs = vxlan_find_port(net, port);
- if (ovs) {
- atomic_inc(&ovs->refcnt);
- vxlan->vn_sock = ovs;
- hlist_add_head_rcu(&vxlan->hlist, vni_head(ovs, vni));
- spin_unlock(&vn->sock_lock);
-
- sk_release_kernel(nvs->sock->sk);
- kfree(nvs);
- } else {
- vxlan->vn_sock = nvs;
- hlist_add_head_rcu(&nvs->hlist, vs_head(net, port));
- hlist_add_head_rcu(&vxlan->hlist, vni_head(nvs, vni));
- spin_unlock(&vn->sock_lock);
+ vs = vxlan_find_sock(net, port);
+ if (vs) {
+ if (vs->rcv == rcv)
+ atomic_inc(&vs->refcnt);
+ else
+ vs = ERR_PTR(-EBUSY);
}
-out:
- dev_put(dev);
+ spin_unlock(&vn->sock_lock);
+
+ if (!vs)
+ vs = ERR_PTR(-EINVAL);
+
+ return vs;
+}
+EXPORT_SYMBOL_GPL(vxlan_sock_add);
+
+/* Scheduled at device creation to bind to a socket */
+static void vxlan_sock_work(struct work_struct *work)
+{
+ struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, sock_work);
+ struct net *net = dev_net(vxlan->dev);
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ __be16 port = vxlan->dst_port;
+ struct vxlan_sock *nvs;
+
+ nvs = vxlan_sock_add(net, port, vxlan_rcv, NULL, false, vxlan->flags & VXLAN_F_IPV6);
+ spin_lock(&vn->sock_lock);
+ if (!IS_ERR(nvs))
+ vxlan_vs_add_dev(nvs, vxlan);
+ spin_unlock(&vn->sock_lock);
+
+ dev_put(vxlan->dev);
}
static int vxlan_newlink(struct net *net, struct net_device *dev,
@@ -1690,6 +2382,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
struct vxlan_rdst *dst = &vxlan->default_dst;
__u32 vni;
int err;
+ bool use_ipv6 = false;
if (!data[IFLA_VXLAN_ID])
return -EINVAL;
@@ -1697,11 +2390,32 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
vni = nla_get_u32(data[IFLA_VXLAN_ID]);
dst->remote_vni = vni;
- if (data[IFLA_VXLAN_GROUP])
- dst->remote_ip = nla_get_be32(data[IFLA_VXLAN_GROUP]);
+ if (data[IFLA_VXLAN_GROUP]) {
+ dst->remote_ip.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
+ dst->remote_ip.sa.sa_family = AF_INET;
+ } else if (data[IFLA_VXLAN_GROUP6]) {
+ if (!IS_ENABLED(CONFIG_IPV6))
+ return -EPFNOSUPPORT;
+
+ nla_memcpy(&dst->remote_ip.sin6.sin6_addr, data[IFLA_VXLAN_GROUP6],
+ sizeof(struct in6_addr));
+ dst->remote_ip.sa.sa_family = AF_INET6;
+ use_ipv6 = true;
+ }
- if (data[IFLA_VXLAN_LOCAL])
- vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);
+ if (data[IFLA_VXLAN_LOCAL]) {
+ vxlan->saddr.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);
+ vxlan->saddr.sa.sa_family = AF_INET;
+ } else if (data[IFLA_VXLAN_LOCAL6]) {
+ if (!IS_ENABLED(CONFIG_IPV6))
+ return -EPFNOSUPPORT;
+
+ /* TODO: respect scope id */
+ nla_memcpy(&vxlan->saddr.sin6.sin6_addr, data[IFLA_VXLAN_LOCAL6],
+ sizeof(struct in6_addr));
+ vxlan->saddr.sa.sa_family = AF_INET6;
+ use_ipv6 = true;
+ }
if (data[IFLA_VXLAN_LINK] &&
(dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
@@ -1713,12 +2427,23 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
return -ENODEV;
}
+#if IS_ENABLED(CONFIG_IPV6)
+ if (use_ipv6) {
+ struct inet6_dev *idev = __in6_dev_get(lowerdev);
+ if (idev && idev->cnf.disable_ipv6) {
+ pr_info("IPv6 is disabled via sysctl\n");
+ return -EPERM;
+ }
+ vxlan->flags |= VXLAN_F_IPV6;
+ }
+#endif
+
if (!tb[IFLA_MTU])
- dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;
+ dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
/* update header length based on lower device */
dev->hard_header_len = lowerdev->hard_header_len +
- VXLAN_HEADROOM;
+ (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
}
if (data[IFLA_VXLAN_TOS])
@@ -1769,7 +2494,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
/* create an fdb entry for default destination */
err = vxlan_fdb_create(vxlan, all_zeros_mac,
- vxlan->default_dst.remote_ip,
+ &vxlan->default_dst.remote_ip,
NUD_REACHABLE|NUD_PERMANENT,
NLM_F_EXCL|NLM_F_CREATE,
vxlan->dst_port, vxlan->default_dst.remote_vni,
@@ -1794,7 +2519,8 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
struct vxlan_dev *vxlan = netdev_priv(dev);
spin_lock(&vn->sock_lock);
- hlist_del_rcu(&vxlan->hlist);
+ if (!hlist_unhashed(&vxlan->hlist))
+ hlist_del_rcu(&vxlan->hlist);
spin_unlock(&vn->sock_lock);
list_del(&vxlan->next);
@@ -1805,9 +2531,9 @@ static size_t vxlan_get_size(const struct net_device *dev)
{
return nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_ID */
- nla_total_size(sizeof(__be32)) +/* IFLA_VXLAN_GROUP */
+ nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */
nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */
- nla_total_size(sizeof(__be32))+ /* IFLA_VXLAN_LOCAL */
+ nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
@@ -1834,14 +2560,36 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni))
goto nla_put_failure;
- if (dst->remote_ip && nla_put_be32(skb, IFLA_VXLAN_GROUP, dst->remote_ip))
- goto nla_put_failure;
+ if (!vxlan_addr_any(&dst->remote_ip)) {
+ if (dst->remote_ip.sa.sa_family == AF_INET) {
+ if (nla_put_be32(skb, IFLA_VXLAN_GROUP,
+ dst->remote_ip.sin.sin_addr.s_addr))
+ goto nla_put_failure;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ if (nla_put(skb, IFLA_VXLAN_GROUP6, sizeof(struct in6_addr),
+ &dst->remote_ip.sin6.sin6_addr))
+ goto nla_put_failure;
+#endif
+ }
+ }
if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
goto nla_put_failure;
- if (vxlan->saddr && nla_put_be32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr))
- goto nla_put_failure;
+ if (!vxlan_addr_any(&vxlan->saddr)) {
+ if (vxlan->saddr.sa.sa_family == AF_INET) {
+ if (nla_put_be32(skb, IFLA_VXLAN_LOCAL,
+ vxlan->saddr.sin.sin_addr.s_addr))
+ goto nla_put_failure;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ if (nla_put(skb, IFLA_VXLAN_LOCAL6, sizeof(struct in6_addr),
+ &vxlan->saddr.sin6.sin6_addr))
+ goto nla_put_failure;
+#endif
+ }
+ }
if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
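
The vxlan conversion above rests on one pattern: a union of sockaddr variants keyed by sa.sa_family, so the same fields can hold either an IPv4 or an IPv6 endpoint. A minimal sketch of that pattern (the union and helper mirror names used in the patch, but this is an illustration, not the exact driver code):

#include <linux/in.h>
#include <linux/in6.h>
#include <net/ipv6.h>

union vxlan_addr {
	struct sockaddr_in sin;
	struct sockaddr_in6 sin6;
	struct sockaddr sa;	/* sa.sa_family selects the variant */
};

/* Illustrative helper: true if the address is unset for its family. */
static bool vxlan_addr_any(const union vxlan_addr *ipa)
{
	if (ipa->sa.sa_family == AF_INET6)
		return ipv6_addr_any(&ipa->sin6.sin6_addr);
	return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
}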
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index d43f4efd3e07..5bbcb5e3ee0c 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -176,7 +176,7 @@ static u32 mac[ SBNI_MAX_NUM_CARDS ] __initdata;
#ifndef MODULE
typedef u32 iarr[];
-static iarr __initdata *dest[5] = { &io, &irq, &baud, &rxl, &mac };
+static iarr *dest[5] __initdata = { &io, &irq, &baud, &rxl, &mac };
#endif
/* A zero-terminated list of I/O addresses to be probed on ISA bus */
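
The sbni hunk is purely an attribute-placement fix: a section attribute such as __initdata belongs after the declarator; placed between the storage class and a typedef'd array type, gcc treats it as qualifying the type and warns (presumably the motivation here). A hypothetical illustration of the accepted form:

typedef u32 iarr[];
static iarr *dest_ok[5] __initdata;	/* attribute follows the declarator */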
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index d0adbaf86186..7fe19648f10e 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -2693,7 +2693,7 @@ static struct net_device *init_wifidev(struct airo_info *ai,
dev->base_addr = ethdev->base_addr;
dev->wireless_data = ethdev->wireless_data;
SET_NETDEV_DEV(dev, ethdev->dev.parent);
- memcpy(dev->dev_addr, ethdev->dev_addr, dev->addr_len);
+ eth_hw_addr_inherit(dev, ethdev);
err = register_netdev(dev);
if (err<0) {
free_netdev(dev);
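
eth_hw_addr_inherit() is an etherdevice.h helper that supersedes the open-coded memcpy(): besides the address bytes it also copies addr_assign_type, so a cloned device reports where its MAC address really came from. A usage sketch with hypothetical device names:

#include <linux/etherdevice.h>

static void demo_clone_hw_addr(struct net_device *wifidev,
			       struct net_device *ethdev)
{
	/* copies both dev_addr and addr_assign_type from ethdev */
	eth_hw_addr_inherit(wifidev, ethdev);
}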
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index daeafeff186b..e0ba7cd14252 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -159,7 +159,7 @@ struct ath_common {
bool btcoex_enabled;
bool disable_ani;
- bool antenna_diversity;
+ bool bt_ant_diversity;
};
struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index 1a2ef51b69d9..744da6d1c405 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -20,6 +20,12 @@
#include "debug.h"
#include "htc.h"
+void ath10k_bmi_start(struct ath10k *ar)
+{
+ ath10k_dbg(ATH10K_DBG_CORE, "BMI started\n");
+ ar->bmi.done_sent = false;
+}
+
int ath10k_bmi_done(struct ath10k *ar)
{
struct bmi_cmd cmd;
@@ -105,7 +111,8 @@ int ath10k_bmi_read_memory(struct ath10k *ar,
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
&resp, &rxlen);
if (ret) {
- ath10k_warn("unable to read from the device\n");
+ ath10k_warn("unable to read from the device (%d)\n",
+ ret);
return ret;
}
@@ -149,7 +156,8 @@ int ath10k_bmi_write_memory(struct ath10k *ar,
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
NULL, NULL);
if (ret) {
- ath10k_warn("unable to write to the device\n");
+ ath10k_warn("unable to write to the device (%d)\n",
+ ret);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
index 32c56aa33a5e..8d81ce1cec21 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.h
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -184,6 +184,7 @@ struct bmi_target_info {
#define BMI_CE_NUM_TO_TARG 0
#define BMI_CE_NUM_TO_HOST 1
+void ath10k_bmi_start(struct ath10k *ar);
int ath10k_bmi_done(struct ath10k *ar);
int ath10k_bmi_get_target_info(struct ath10k *ar,
struct bmi_target_info *target_info);
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 61a8ac70d3ca..f8b969f518f8 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -79,7 +79,7 @@ static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
void __iomem *indicator_addr;
- if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features)) {
+ if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
return;
}
@@ -637,6 +637,7 @@ static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
ath10k_pci_wake(ar);
src_ring->hw_index =
ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+ src_ring->hw_index &= nentries_mask;
ath10k_pci_sleep(ar);
}
read_index = src_ring->hw_index;
@@ -950,10 +951,12 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
ath10k_pci_wake(ar);
src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+ src_ring->sw_index &= src_ring->nentries_mask;
src_ring->hw_index = src_ring->sw_index;
src_ring->write_index =
ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
+ src_ring->write_index &= src_ring->nentries_mask;
ath10k_pci_sleep(ar);
src_ring->per_transfer_context = (void **)ptr;
@@ -1035,8 +1038,10 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
ath10k_pci_wake(ar);
dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
+ dest_ring->sw_index &= dest_ring->nentries_mask;
dest_ring->write_index =
ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
+ dest_ring->write_index &= dest_ring->nentries_mask;
ath10k_pci_sleep(ar);
dest_ring->per_transfer_context = (void **)ptr;
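
The ce.c changes apply one defensive fix throughout: every index read back from hardware is masked to the ring size before use, so stale high bits cannot index past a power-of-two ring. A minimal sketch of the masking idiom (names are illustrative):

/* Valid only when the ring size is a power of two, as these rings are;
 * nentries_mask == nentries - 1. */
static inline unsigned int ring_sanitize_index(unsigned int hw_index,
					       unsigned int nentries_mask)
{
	return hw_index & nentries_mask;
}

/* usage sketch: src_ring->sw_index = ring_sanitize_index(hw_value, mask); */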
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 2b3426b1ff3f..7226c23b9569 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -100,7 +100,7 @@ static int ath10k_init_connect_htc(struct ath10k *ar)
goto conn_fail;
/* Start HTC */
- status = ath10k_htc_start(ar->htc);
+ status = ath10k_htc_start(&ar->htc);
if (status)
goto conn_fail;
@@ -116,7 +116,7 @@ static int ath10k_init_connect_htc(struct ath10k *ar)
return 0;
timeout:
- ath10k_htc_stop(ar->htc);
+ ath10k_htc_stop(&ar->htc);
conn_fail:
return status;
}
@@ -247,19 +247,11 @@ static int ath10k_push_board_ext_data(struct ath10k *ar,
static int ath10k_download_board_data(struct ath10k *ar)
{
+ const struct firmware *fw = ar->board_data;
u32 board_data_size = QCA988X_BOARD_DATA_SZ;
u32 address;
- const struct firmware *fw;
int ret;
- fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
- ar->hw_params.fw.board);
- if (IS_ERR(fw)) {
- ath10k_err("could not fetch board data fw file (%ld)\n",
- PTR_ERR(fw));
- return PTR_ERR(fw);
- }
-
ret = ath10k_push_board_ext_data(ar, fw);
if (ret) {
ath10k_err("could not push board ext data (%d)\n", ret);
@@ -286,32 +278,20 @@ static int ath10k_download_board_data(struct ath10k *ar)
}
exit:
- release_firmware(fw);
return ret;
}
static int ath10k_download_and_run_otp(struct ath10k *ar)
{
- const struct firmware *fw;
- u32 address;
+ const struct firmware *fw = ar->otp;
+ u32 address = ar->hw_params.patch_load_addr;
u32 exec_param;
int ret;
/* OTP is optional */
- if (ar->hw_params.fw.otp == NULL) {
- ath10k_info("otp file not defined\n");
- return 0;
- }
-
- address = ar->hw_params.patch_load_addr;
-
- fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
- ar->hw_params.fw.otp);
- if (IS_ERR(fw)) {
- ath10k_warn("could not fetch otp (%ld)\n", PTR_ERR(fw));
+ if (!ar->otp)
return 0;
- }
ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
if (ret) {
@@ -327,28 +307,17 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
}
exit:
- release_firmware(fw);
return ret;
}
static int ath10k_download_fw(struct ath10k *ar)
{
- const struct firmware *fw;
+ const struct firmware *fw = ar->firmware;
u32 address;
int ret;
- if (ar->hw_params.fw.fw == NULL)
- return -EINVAL;
-
address = ar->hw_params.patch_load_addr;
- fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
- ar->hw_params.fw.fw);
- if (IS_ERR(fw)) {
- ath10k_err("could not fetch fw (%ld)\n", PTR_ERR(fw));
- return PTR_ERR(fw);
- }
-
ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
if (ret) {
ath10k_err("could not write fw (%d)\n", ret);
@@ -356,7 +325,74 @@ static int ath10k_download_fw(struct ath10k *ar)
}
exit:
- release_firmware(fw);
+ return ret;
+}
+
+static void ath10k_core_free_firmware_files(struct ath10k *ar)
+{
+ if (ar->board_data && !IS_ERR(ar->board_data))
+ release_firmware(ar->board_data);
+
+ if (ar->otp && !IS_ERR(ar->otp))
+ release_firmware(ar->otp);
+
+ if (ar->firmware && !IS_ERR(ar->firmware))
+ release_firmware(ar->firmware);
+
+ ar->board_data = NULL;
+ ar->otp = NULL;
+ ar->firmware = NULL;
+}
+
+static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
+{
+ int ret = 0;
+
+ if (ar->hw_params.fw.fw == NULL) {
+ ath10k_err("firmware file not defined\n");
+ return -EINVAL;
+ }
+
+ if (ar->hw_params.fw.board == NULL) {
+ ath10k_err("board data file not defined");
+ return -EINVAL;
+ }
+
+ ar->board_data = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ ar->hw_params.fw.board);
+ if (IS_ERR(ar->board_data)) {
+ ret = PTR_ERR(ar->board_data);
+ ath10k_err("could not fetch board data (%d)\n", ret);
+ goto err;
+ }
+
+ ar->firmware = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ ar->hw_params.fw.fw);
+ if (IS_ERR(ar->firmware)) {
+ ret = PTR_ERR(ar->firmware);
+ ath10k_err("could not fetch firmware (%d)\n", ret);
+ goto err;
+ }
+
+ /* OTP may be undefined. If so, don't fetch it at all */
+ if (ar->hw_params.fw.otp == NULL)
+ return 0;
+
+ ar->otp = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ ar->hw_params.fw.otp);
+ if (IS_ERR(ar->otp)) {
+ ret = PTR_ERR(ar->otp);
+ ath10k_err("could not fetch otp (%d)\n", ret);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ ath10k_core_free_firmware_files(ar);
return ret;
}
@@ -440,8 +476,35 @@ static int ath10k_init_hw_params(struct ath10k *ar)
return 0;
}
+static void ath10k_core_restart(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k, restart_work);
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ar->state) {
+ case ATH10K_STATE_ON:
+ ath10k_halt(ar);
+ ar->state = ATH10K_STATE_RESTARTING;
+ ieee80211_restart_hw(ar->hw);
+ break;
+ case ATH10K_STATE_OFF:
+ /* this can happen if driver is being unloaded */
+ ath10k_warn("cannot restart a device that hasn't been started\n");
+ break;
+ case ATH10K_STATE_RESTARTING:
+ case ATH10K_STATE_RESTARTED:
+ ar->state = ATH10K_STATE_WEDGED;
+ /* fall through */
+ case ATH10K_STATE_WEDGED:
+ ath10k_warn("device is wedged, will not restart\n");
+ break;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
- enum ath10k_bus bus,
const struct ath10k_hif_ops *hif_ops)
{
struct ath10k *ar;
@@ -458,9 +521,6 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
ar->hif.priv = hif_priv;
ar->hif.ops = hif_ops;
- ar->hif.bus = bus;
-
- ar->free_vdev_map = 0xFF; /* 8 vdevs */
init_completion(&ar->scan.started);
init_completion(&ar->scan.completed);
@@ -487,6 +547,8 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
init_waitqueue_head(&ar->event_queue);
+ INIT_WORK(&ar->restart_work, ath10k_core_restart);
+
return ar;
err_wq:
@@ -504,24 +566,11 @@ void ath10k_core_destroy(struct ath10k *ar)
}
EXPORT_SYMBOL(ath10k_core_destroy);
-
-int ath10k_core_register(struct ath10k *ar)
+int ath10k_core_start(struct ath10k *ar)
{
- struct ath10k_htc_ops htc_ops;
- struct bmi_target_info target_info;
int status;
- memset(&target_info, 0, sizeof(target_info));
- status = ath10k_bmi_get_target_info(ar, &target_info);
- if (status)
- goto err;
-
- ar->target_version = target_info.version;
- ar->hw->wiphy->hw_version = target_info.version;
-
- status = ath10k_init_hw_params(ar);
- if (status)
- goto err;
+ ath10k_bmi_start(ar);
if (ath10k_init_configure_target(ar)) {
status = -EINVAL;
@@ -536,32 +585,32 @@ int ath10k_core_register(struct ath10k *ar)
if (status)
goto err;
- htc_ops.target_send_suspend_complete = ath10k_send_suspend_complete;
+ ar->htc.htc_ops.target_send_suspend_complete =
+ ath10k_send_suspend_complete;
- ar->htc = ath10k_htc_create(ar, &htc_ops);
- if (IS_ERR(ar->htc)) {
- status = PTR_ERR(ar->htc);
- ath10k_err("could not create HTC (%d)\n", status);
+ status = ath10k_htc_init(ar);
+ if (status) {
+ ath10k_err("could not init HTC (%d)\n", status);
goto err;
}
status = ath10k_bmi_done(ar);
if (status)
- goto err_htc_destroy;
+ goto err;
status = ath10k_wmi_attach(ar);
if (status) {
ath10k_err("WMI attach failed: %d\n", status);
- goto err_htc_destroy;
+ goto err;
}
- status = ath10k_htc_wait_target(ar->htc);
+ status = ath10k_htc_wait_target(&ar->htc);
if (status)
goto err_wmi_detach;
- ar->htt = ath10k_htt_attach(ar);
- if (!ar->htt) {
- status = -ENOMEM;
+ status = ath10k_htt_attach(ar);
+ if (status) {
+ ath10k_err("could not attach htt (%d)\n", status);
goto err_wmi_detach;
}
@@ -588,77 +637,127 @@ int ath10k_core_register(struct ath10k *ar)
goto err_disconnect_htc;
}
- status = ath10k_htt_attach_target(ar->htt);
- if (status)
- goto err_disconnect_htc;
-
- status = ath10k_mac_register(ar);
+ status = ath10k_htt_attach_target(&ar->htt);
if (status)
goto err_disconnect_htc;
- status = ath10k_debug_create(ar);
- if (status) {
- ath10k_err("unable to initialize debugfs\n");
- goto err_unregister_mac;
- }
+ ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
return 0;
-err_unregister_mac:
- ath10k_mac_unregister(ar);
err_disconnect_htc:
- ath10k_htc_stop(ar->htc);
+ ath10k_htc_stop(&ar->htc);
err_htt_detach:
- ath10k_htt_detach(ar->htt);
+ ath10k_htt_detach(&ar->htt);
err_wmi_detach:
ath10k_wmi_detach(ar);
-err_htc_destroy:
- ath10k_htc_destroy(ar->htc);
err:
return status;
}
-EXPORT_SYMBOL(ath10k_core_register);
+EXPORT_SYMBOL(ath10k_core_start);
-void ath10k_core_unregister(struct ath10k *ar)
+void ath10k_core_stop(struct ath10k *ar)
{
- /* We must unregister from mac80211 before we stop HTC and HIF.
- * Otherwise we will fail to submit commands to FW and mac80211 will be
- * unhappy about callback failures. */
- ath10k_mac_unregister(ar);
- ath10k_htc_stop(ar->htc);
- ath10k_htt_detach(ar->htt);
+ ath10k_htc_stop(&ar->htc);
+ ath10k_htt_detach(&ar->htt);
ath10k_wmi_detach(ar);
- ath10k_htc_destroy(ar->htc);
}
-EXPORT_SYMBOL(ath10k_core_unregister);
+EXPORT_SYMBOL(ath10k_core_stop);
-int ath10k_core_target_suspend(struct ath10k *ar)
+/* mac80211 manages fw/hw initialization through start/stop hooks. However, in
+ * order to know what hw capabilities should be advertised to mac80211 it is
+ * necessary to load the firmware (and tear it down immediately, since the
+ * start hook will try to init it again) before registering */
+static int ath10k_core_probe_fw(struct ath10k *ar)
{
- int ret;
+ struct bmi_target_info target_info;
+ int ret = 0;
+
+ ret = ath10k_hif_power_up(ar);
+ if (ret) {
+ ath10k_err("could not start pci hif (%d)\n", ret);
+ return ret;
+ }
- ath10k_dbg(ATH10K_DBG_CORE, "%s: called", __func__);
+ memset(&target_info, 0, sizeof(target_info));
+ ret = ath10k_bmi_get_target_info(ar, &target_info);
+ if (ret) {
+ ath10k_err("could not get target info (%d)\n", ret);
+ ath10k_hif_power_down(ar);
+ return ret;
+ }
- ret = ath10k_wmi_pdev_suspend_target(ar);
- if (ret)
- ath10k_warn("could not suspend target (%d)\n", ret);
+ ar->target_version = target_info.version;
+ ar->hw->wiphy->hw_version = target_info.version;
- return ret;
+ ret = ath10k_init_hw_params(ar);
+ if (ret) {
+ ath10k_err("could not get hw params (%d)\n", ret);
+ ath10k_hif_power_down(ar);
+ return ret;
+ }
+
+ ret = ath10k_core_fetch_firmware_files(ar);
+ if (ret) {
+ ath10k_err("could not fetch firmware files (%d)\n", ret);
+ ath10k_hif_power_down(ar);
+ return ret;
+ }
+
+ ret = ath10k_core_start(ar);
+ if (ret) {
+ ath10k_err("could not init core (%d)\n", ret);
+ ath10k_core_free_firmware_files(ar);
+ ath10k_hif_power_down(ar);
+ return ret;
+ }
+
+ ath10k_core_stop(ar);
+ ath10k_hif_power_down(ar);
+ return 0;
}
-EXPORT_SYMBOL(ath10k_core_target_suspend);
-int ath10k_core_target_resume(struct ath10k *ar)
+int ath10k_core_register(struct ath10k *ar)
{
- int ret;
+ int status;
- ath10k_dbg(ATH10K_DBG_CORE, "%s: called", __func__);
+ status = ath10k_core_probe_fw(ar);
+ if (status) {
+ ath10k_err("could not probe fw (%d)\n", status);
+ return status;
+ }
- ret = ath10k_wmi_pdev_resume_target(ar);
- if (ret)
- ath10k_warn("could not resume target (%d)\n", ret);
+ status = ath10k_mac_register(ar);
+ if (status) {
+ ath10k_err("could not register to mac80211 (%d)\n", status);
+ goto err_release_fw;
+ }
- return ret;
+ status = ath10k_debug_create(ar);
+ if (status) {
+ ath10k_err("unable to initialize debugfs\n");
+ goto err_unregister_mac;
+ }
+
+ return 0;
+
+err_unregister_mac:
+ ath10k_mac_unregister(ar);
+err_release_fw:
+ ath10k_core_free_firmware_files(ar);
+ return status;
+}
+EXPORT_SYMBOL(ath10k_core_register);
+
+void ath10k_core_unregister(struct ath10k *ar)
+{
+ /* We must unregister from mac80211 before we stop HTC and HIF.
+ * Otherwise we will fail to submit commands to FW and mac80211 will be
+ * unhappy about callback failures. */
+ ath10k_mac_unregister(ar);
+ ath10k_core_free_firmware_files(ar);
}
-EXPORT_SYMBOL(ath10k_core_target_resume);
+EXPORT_SYMBOL(ath10k_core_unregister);
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Core module for QCA988X PCIe devices.");
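
The core.c refactor moves from fetch-on-demand to fetch-once: all firmware blobs are requested up front in ath10k_core_fetch_firmware_files(), cached in struct ath10k, and released in a single helper. A self-contained sketch of that fetch/free pairing under hypothetical names and file names:

#include <linux/firmware.h>

struct demo_fw_cache {
	const struct firmware *board;
	const struct firmware *code;
};

static void demo_fw_free(struct demo_fw_cache *c)
{
	release_firmware(c->board);	/* release_firmware(NULL) is a no-op */
	release_firmware(c->code);
	c->board = NULL;
	c->code = NULL;
}

static int demo_fw_fetch(struct demo_fw_cache *c, struct device *dev)
{
	int ret;

	ret = request_firmware(&c->board, "board.bin", dev);
	if (ret)
		return ret;

	ret = request_firmware(&c->code, "firmware.bin", dev);
	if (ret) {
		demo_fw_free(c);	/* unwind whatever was fetched */
		return ret;
	}

	return 0;
}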
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 539336d1be4b..e4bba563ed42 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/pci.h>
+#include "htt.h"
#include "htc.h"
#include "hw.h"
#include "targaddrs.h"
@@ -37,16 +38,13 @@
#define ATH10K_SCAN_ID 0
#define WMI_READY_TIMEOUT (5 * HZ)
#define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ)
+#define ATH10K_NUM_CHANS 38
/* Antenna noise floor */
#define ATH10K_DEFAULT_NOISE_FLOOR -95
struct ath10k;
-enum ath10k_bus {
- ATH10K_BUS_PCI,
-};
-
struct ath10k_skb_cb {
dma_addr_t paddr;
bool is_mapped;
@@ -250,6 +248,28 @@ struct ath10k_debug {
struct completion event_stats_compl;
};
+enum ath10k_state {
+ ATH10K_STATE_OFF = 0,
+ ATH10K_STATE_ON,
+
+ /* When doing firmware recovery the device is first powered down.
+ * mac80211 is supposed to call into the start() hook later on. It is
+ * however possible that driver unloading and firmware crash overlap.
+ * mac80211 can wait on conf_mutex in stop() while the device is
+ * stopped in ath10k_core_restart() work holding conf_mutex. The state
+ * RESTARTED means that the device is up and mac80211 has started hw
+ * reconfiguration. Once mac80211 is done with the reconfiguration we
+ * set the state to STATE_ON in restart_complete(). */
+ ATH10K_STATE_RESTARTING,
+ ATH10K_STATE_RESTARTED,
+
+ /* The device has crashed while restarting hw. This state is like ON
+ * but commands are blocked in HTC and -ECOMM response is given. This
+ * prevents completion timeouts and makes the driver more responsive to
+ * userspace commands. This also prevents recursive recovery. */
+ ATH10K_STATE_WEDGED,
+};
+
struct ath10k {
struct ath_common ath_common;
struct ieee80211_hw *hw;
@@ -266,6 +286,7 @@ struct ath10k {
u32 hw_max_tx_power;
u32 ht_cap_info;
u32 vht_cap_info;
+ u32 num_rf_chains;
struct targetdef *targetdef;
struct hostdef *hostdef;
@@ -274,19 +295,16 @@ struct ath10k {
struct {
void *priv;
- enum ath10k_bus bus;
const struct ath10k_hif_ops *ops;
} hif;
- struct ath10k_wmi wmi;
-
wait_queue_head_t event_queue;
bool is_target_paused;
struct ath10k_bmi bmi;
-
- struct ath10k_htc *htc;
- struct ath10k_htt *htt;
+ struct ath10k_wmi wmi;
+ struct ath10k_htc htc;
+ struct ath10k_htt htt;
struct ath10k_hw_params {
u32 id;
@@ -301,6 +319,10 @@ struct ath10k {
} fw;
} hw_params;
+ const struct firmware *board_data;
+ const struct firmware *otp;
+ const struct firmware *firmware;
+
struct {
struct completion started;
struct completion completed;
@@ -350,20 +372,28 @@ struct ath10k {
struct completion offchan_tx_completed;
struct sk_buff *offchan_tx_skb;
+ enum ath10k_state state;
+
+ struct work_struct restart_work;
+
+ /* cycle count is reported twice for each visited channel during scan.
+ * access protected by data_lock */
+ u32 survey_last_rx_clear_count;
+ u32 survey_last_cycle_count;
+ struct survey_info survey[ATH10K_NUM_CHANS];
+
#ifdef CONFIG_ATH10K_DEBUGFS
struct ath10k_debug debug;
#endif
};
struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
- enum ath10k_bus bus,
const struct ath10k_hif_ops *hif_ops);
void ath10k_core_destroy(struct ath10k *ar);
+int ath10k_core_start(struct ath10k *ar);
+void ath10k_core_stop(struct ath10k *ar);
int ath10k_core_register(struct ath10k *ar);
void ath10k_core_unregister(struct ath10k *ar);
-int ath10k_core_target_suspend(struct ath10k *ar);
-int ath10k_core_target_resume(struct ath10k *ar);
-
#endif /* _CORE_H_ */
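
The recovery machinery added to core.c/core.h is the standard deferred-work pattern: whatever detects the crash only schedules ar->restart_work, and the worker recovers its ath10k with container_of() before deciding, under conf_mutex, whether to restart or mark the device WEDGED. A minimal sketch of the pattern with hypothetical names:

#include <linux/workqueue.h>

struct demo_dev {
	struct work_struct restart_work;
};

static void demo_restart(struct work_struct *work)
{
	struct demo_dev *dev = container_of(work, struct demo_dev,
					    restart_work);

	/* take locks, inspect device state, restart or mark wedged */
	(void)dev;
}

/* during init: INIT_WORK(&dev->restart_work, demo_restart); */

static void demo_report_crash(struct demo_dev *dev)
{
	schedule_work(&dev->restart_work);	/* safe from atomic context */
}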
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 499034b873d1..3d65594fa098 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -161,7 +161,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
struct wmi_pdev_stats *ps;
int i;
- mutex_lock(&ar->conf_mutex);
+ spin_lock_bh(&ar->data_lock);
stats = &ar->debug.target_stats;
@@ -259,6 +259,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
}
}
+ spin_unlock_bh(&ar->data_lock);
mutex_unlock(&ar->conf_mutex);
complete(&ar->debug.event_stats_compl);
}
@@ -268,35 +269,35 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
{
struct ath10k *ar = file->private_data;
struct ath10k_target_stats *fw_stats;
- char *buf;
+ char *buf = NULL;
unsigned int len = 0, buf_len = 2500;
- ssize_t ret_cnt;
+ ssize_t ret_cnt = 0;
long left;
int i;
int ret;
fw_stats = &ar->debug.target_stats;
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON)
+ goto exit;
+
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
- return -ENOMEM;
+ goto exit;
ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT);
if (ret) {
ath10k_warn("could not request stats (%d)\n", ret);
- kfree(buf);
- return -EIO;
+ goto exit;
}
left = wait_for_completion_timeout(&ar->debug.event_stats_compl, 1*HZ);
+ if (left <= 0)
+ goto exit;
- if (left <= 0) {
- kfree(buf);
- return -ETIMEDOUT;
- }
-
- mutex_lock(&ar->conf_mutex);
-
+ spin_lock_bh(&ar->data_lock);
len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%30s\n",
"ath10k PDEV stats");
@@ -424,14 +425,15 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
fw_stats->peer_stat[i].peer_tx_rate);
len += scnprintf(buf + len, buf_len - len, "\n");
}
+ spin_unlock_bh(&ar->data_lock);
if (len > buf_len)
len = buf_len;
ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+exit:
mutex_unlock(&ar->conf_mutex);
-
kfree(buf);
return ret_cnt;
}
@@ -443,6 +445,60 @@ static const struct file_operations fops_fw_stats = {
.llseek = default_llseek,
};
+static ssize_t ath10k_read_simulate_fw_crash(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char buf[] = "To simulate a firmware crash, write the keyword"
+ " `crash` to this file.\nThis will force the firmware"
+ " to report a crash to the host system.\n";
+ return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[32] = {};
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+ if (strcmp(buf, "crash") && strcmp(buf, "crash\n")) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_RESTARTED) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ ath10k_info("simulating firmware crash\n");
+
+ ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
+ if (ret)
+ ath10k_warn("failed to force fw hang (%d)\n", ret);
+
+ if (ret == 0)
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_simulate_fw_crash = {
+ .read = ath10k_read_simulate_fw_crash,
+ .write = ath10k_write_simulate_fw_crash,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
int ath10k_debug_create(struct ath10k *ar)
{
ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
@@ -459,6 +515,9 @@ int ath10k_debug_create(struct ath10k *ar)
debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar,
&fops_wmi_services);
+ debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy,
+ ar, &fops_simulate_fw_crash);
+
return 0;
}
#endif /* CONFIG_ATH10K_DEBUGFS */
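
The new simulate_fw_crash file follows the usual debugfs command-file convention: the read handler returns fixed help text, while the write handler parses a keyword and returns a negative errno on failure or the consumed byte count on success. A condensed sketch of the write side (hypothetical names, locking omitted):

#include <linux/fs.h>

static ssize_t demo_cmd_write(struct file *file, const char __user *ubuf,
			      size_t count, loff_t *ppos)
{
	char buf[32] = {};
	ssize_t ret;

	ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
	if (ret < 0)
		return ret;

	if (strcmp(buf, "crash") && strcmp(buf, "crash\n"))
		return -EINVAL;		/* unknown keyword */

	/* ... trigger the action here ... */

	return count;			/* report all input consumed */
}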
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
index 73a24d44d1b4..dcdea68bcc0a 100644
--- a/drivers/net/wireless/ath/ath10k/hif.h
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -46,8 +46,11 @@ struct ath10k_hif_ops {
void *request, u32 request_len,
void *response, u32 *response_len);
+ /* Post BMI phase, after FW is loaded. Starts regular operation */
int (*start)(struct ath10k *ar);
+ /* Clean up what start() did. This does not revert to BMI phase. If
+ * that is desired, call power_down() and then power_up() */
void (*stop)(struct ath10k *ar);
int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id,
@@ -66,10 +69,20 @@ struct ath10k_hif_ops {
*/
void (*send_complete_check)(struct ath10k *ar, u8 pipe_id, int force);
- void (*init)(struct ath10k *ar,
- struct ath10k_hif_cb *callbacks);
+ void (*set_callbacks)(struct ath10k *ar,
+ struct ath10k_hif_cb *callbacks);
u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id);
+
+ /* Power up the device and enter BMI transfer mode for FW download */
+ int (*power_up)(struct ath10k *ar);
+
+ /* Power down the device and free up resources. stop() must be called
+ * before this if start() was called earlier */
+ void (*power_down)(struct ath10k *ar);
+
+ int (*suspend)(struct ath10k *ar);
+ int (*resume)(struct ath10k *ar);
};
@@ -122,10 +135,10 @@ static inline void ath10k_hif_send_complete_check(struct ath10k *ar,
ar->hif.ops->send_complete_check(ar, pipe_id, force);
}
-static inline void ath10k_hif_init(struct ath10k *ar,
- struct ath10k_hif_cb *callbacks)
+static inline void ath10k_hif_set_callbacks(struct ath10k *ar,
+ struct ath10k_hif_cb *callbacks)
{
- ar->hif.ops->init(ar, callbacks);
+ ar->hif.ops->set_callbacks(ar, callbacks);
}
static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,
@@ -134,4 +147,30 @@ static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,
return ar->hif.ops->get_free_queue_number(ar, pipe_id);
}
+static inline int ath10k_hif_power_up(struct ath10k *ar)
+{
+ return ar->hif.ops->power_up(ar);
+}
+
+static inline void ath10k_hif_power_down(struct ath10k *ar)
+{
+ ar->hif.ops->power_down(ar);
+}
+
+static inline int ath10k_hif_suspend(struct ath10k *ar)
+{
+ if (!ar->hif.ops->suspend)
+ return -EOPNOTSUPP;
+
+ return ar->hif.ops->suspend(ar);
+}
+
+static inline int ath10k_hif_resume(struct ath10k *ar)
+{
+ if (!ar->hif.ops->resume)
+ return -EOPNOTSUPP;
+
+ return ar->hif.ops->resume(ar);
+}
+
#endif /* _HIF_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 74363c949392..ef3329ef52f3 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -246,15 +246,22 @@ int ath10k_htc_send(struct ath10k_htc *htc,
{
struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+ if (htc->ar->state == ATH10K_STATE_WEDGED)
+ return -ECOMM;
+
if (eid >= ATH10K_HTC_EP_COUNT) {
ath10k_warn("Invalid endpoint id: %d\n", eid);
return -ENOENT;
}
- skb_push(skb, sizeof(struct ath10k_htc_hdr));
-
spin_lock_bh(&htc->tx_lock);
+ if (htc->stopped) {
+ spin_unlock_bh(&htc->tx_lock);
+ return -ESHUTDOWN;
+ }
+
__skb_queue_tail(&ep->tx_queue, skb);
+ skb_push(skb, sizeof(struct ath10k_htc_hdr));
spin_unlock_bh(&htc->tx_lock);
queue_work(htc->ar->workqueue, &ep->send_work);
@@ -265,25 +272,19 @@ static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
struct sk_buff *skb,
unsigned int eid)
{
- struct ath10k_htc *htc = ar->htc;
+ struct ath10k_htc *htc = &ar->htc;
struct ath10k_htc_ep *ep = &htc->endpoint[eid];
- bool stopping;
ath10k_htc_notify_tx_completion(ep, skb);
/* the skb now belongs to the completion handler */
+ /* note: when using TX credit flow, the re-checking of queues happens
+ * when credits flow back from the target. in the non-TX credit case,
+ * we recheck after the packet completes */
spin_lock_bh(&htc->tx_lock);
- stopping = htc->stopping;
- spin_unlock_bh(&htc->tx_lock);
-
- if (!ep->tx_credit_flow_enabled && !stopping)
- /*
- * note: when using TX credit flow, the re-checking of
- * queues happens when credits flow back from the target.
- * in the non-TX credit case, we recheck after the packet
- * completes
- */
+ if (!ep->tx_credit_flow_enabled && !htc->stopped)
queue_work(ar->workqueue, &ep->send_work);
+ spin_unlock_bh(&htc->tx_lock);
return 0;
}
@@ -414,7 +415,7 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
u8 pipe_id)
{
int status = 0;
- struct ath10k_htc *htc = ar->htc;
+ struct ath10k_htc *htc = &ar->htc;
struct ath10k_htc_hdr *hdr;
struct ath10k_htc_ep *ep;
u16 payload_len;
@@ -751,8 +752,9 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
tx_alloc = ath10k_htc_get_credit_allocation(htc,
conn_req->service_id);
if (!tx_alloc)
- ath10k_warn("HTC Service %s does not allocate target credits\n",
- htc_service_name(conn_req->service_id));
+ ath10k_dbg(ATH10K_DBG_HTC,
+ "HTC Service %s does not allocate target credits\n",
+ htc_service_name(conn_req->service_id));
skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
if (!skb) {
@@ -947,7 +949,7 @@ void ath10k_htc_stop(struct ath10k_htc *htc)
struct ath10k_htc_ep *ep;
spin_lock_bh(&htc->tx_lock);
- htc->stopping = true;
+ htc->stopped = true;
spin_unlock_bh(&htc->tx_lock);
for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
@@ -956,26 +958,18 @@ void ath10k_htc_stop(struct ath10k_htc *htc)
}
ath10k_hif_stop(htc->ar);
- ath10k_htc_reset_endpoint_states(htc);
}
/* registered target arrival callback from the HIF layer */
-struct ath10k_htc *ath10k_htc_create(struct ath10k *ar,
- struct ath10k_htc_ops *htc_ops)
+int ath10k_htc_init(struct ath10k *ar)
{
struct ath10k_hif_cb htc_callbacks;
struct ath10k_htc_ep *ep = NULL;
- struct ath10k_htc *htc = NULL;
-
- /* FIXME: use struct ath10k instead */
- htc = kzalloc(sizeof(struct ath10k_htc), GFP_KERNEL);
- if (!htc)
- return ERR_PTR(-ENOMEM);
+ struct ath10k_htc *htc = &ar->htc;
spin_lock_init(&htc->tx_lock);
- memcpy(&htc->htc_ops, htc_ops, sizeof(struct ath10k_htc_ops));
-
+ htc->stopped = false;
ath10k_htc_reset_endpoint_states(htc);
/* setup HIF layer callbacks */
@@ -986,15 +980,10 @@ struct ath10k_htc *ath10k_htc_create(struct ath10k *ar,
/* Get HIF default pipe for HTC message exchange */
ep = &htc->endpoint[ATH10K_HTC_EP_0];
- ath10k_hif_init(ar, &htc_callbacks);
+ ath10k_hif_set_callbacks(ar, &htc_callbacks);
ath10k_hif_get_default_pipe(ar, &ep->ul_pipe_id, &ep->dl_pipe_id);
init_completion(&htc->ctl_resp);
- return htc;
-}
-
-void ath10k_htc_destroy(struct ath10k_htc *htc)
-{
- kfree(htc);
+ return 0;
}
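
The htc.c rework replaces the racy stopping snapshot with a stopped flag checked under the same tx_lock that protects the queue: a sender either enqueues before stop() runs or fails cleanly with -ESHUTDOWN. A minimal sketch of that handshake (illustrative struct, not the driver's):

#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct demo_htc {
	spinlock_t tx_lock;		/* protects tx_queue and stopped */
	struct sk_buff_head tx_queue;
	bool stopped;
};

static int demo_htc_send(struct demo_htc *htc, struct sk_buff *skb)
{
	spin_lock_bh(&htc->tx_lock);
	if (htc->stopped) {
		spin_unlock_bh(&htc->tx_lock);
		return -ESHUTDOWN;	/* no new work after stop() */
	}
	__skb_queue_tail(&htc->tx_queue, skb);
	spin_unlock_bh(&htc->tx_lock);

	return 0;
}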
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index fa45844b59fb..e1dd8c761853 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -335,7 +335,7 @@ struct ath10k_htc {
struct ath10k *ar;
struct ath10k_htc_ep endpoint[ATH10K_HTC_EP_COUNT];
- /* protects endpoint and stopping fields */
+ /* protects endpoint and stopped fields */
spinlock_t tx_lock;
struct ath10k_htc_ops htc_ops;
@@ -349,11 +349,10 @@ struct ath10k_htc {
struct ath10k_htc_svc_tx_credits service_tx_alloc[ATH10K_HTC_EP_COUNT];
int target_credit_size;
- bool stopping;
+ bool stopped;
};
-struct ath10k_htc *ath10k_htc_create(struct ath10k *ar,
- struct ath10k_htc_ops *htc_ops);
+int ath10k_htc_init(struct ath10k *ar);
int ath10k_htc_wait_target(struct ath10k_htc *htc);
int ath10k_htc_start(struct ath10k_htc *htc);
int ath10k_htc_connect_service(struct ath10k_htc *htc,
@@ -362,7 +361,6 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
struct sk_buff *packet);
void ath10k_htc_stop(struct ath10k_htc *htc);
-void ath10k_htc_destroy(struct ath10k_htc *htc);
struct sk_buff *ath10k_htc_alloc_skb(int size);
#endif
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 185a5468a2f2..39342c5cfcb2 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -16,6 +16,7 @@
*/
#include <linux/slab.h>
+#include <linux/if_ether.h>
#include "htt.h"
#include "core.h"
@@ -36,7 +37,7 @@ static int ath10k_htt_htc_attach(struct ath10k_htt *htt)
/* connect to control service */
conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG;
- status = ath10k_htc_connect_service(htt->ar->htc, &conn_req,
+ status = ath10k_htc_connect_service(&htt->ar->htc, &conn_req,
&conn_resp);
if (status)
@@ -47,15 +48,11 @@ static int ath10k_htt_htc_attach(struct ath10k_htt *htt)
return 0;
}
-struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar)
+int ath10k_htt_attach(struct ath10k *ar)
{
- struct ath10k_htt *htt;
+ struct ath10k_htt *htt = &ar->htt;
int ret;
- htt = kzalloc(sizeof(*htt), GFP_KERNEL);
- if (!htt)
- return NULL;
-
htt->ar = ar;
htt->max_throughput_mbps = 800;
@@ -65,8 +62,11 @@ struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar)
* since ath10k_htt_rx_attach involves sending a rx ring configure
* message to the target.
*/
- if (ath10k_htt_htc_attach(htt))
+ ret = ath10k_htt_htc_attach(htt);
+ if (ret) {
+ ath10k_err("could not attach htt htc (%d)\n", ret);
goto err_htc_attach;
+ }
ret = ath10k_htt_tx_attach(htt);
if (ret) {
@@ -74,8 +74,11 @@ struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar)
goto err_htc_attach;
}
- if (ath10k_htt_rx_attach(htt))
+ ret = ath10k_htt_rx_attach(htt);
+ if (ret) {
+ ath10k_err("could not attach htt rx (%d)\n", ret);
goto err_rx_attach;
+ }
/*
* Prefetch enough data to satisfy target
@@ -89,13 +92,12 @@ struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar)
8 + /* llc snap */
2; /* ip4 dscp or ip6 priority */
- return htt;
+ return 0;
err_rx_attach:
ath10k_htt_tx_detach(htt);
err_htc_attach:
- kfree(htt);
- return NULL;
+ return ret;
}
#define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ)
@@ -148,5 +150,4 @@ void ath10k_htt_detach(struct ath10k_htt *htt)
{
ath10k_htt_rx_detach(htt);
ath10k_htt_tx_detach(htt);
- kfree(htt);
}
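
htt.c mirrors the htc.c conversion: the separately kzalloc'd ath10k_htt becomes a member embedded in struct ath10k, so attach() returns an errno instead of a pointer and detach() no longer frees anything. A schematic of the before/after with hypothetical types:

struct parent;

struct child {
	struct parent *owner;
};

struct parent {
	struct child child;	/* was: struct child *child (kzalloc'd) */
};

static int child_attach(struct parent *p)
{
	struct child *c = &p->child;	/* in place: no allocation to fail */

	c->owner = p;
	return 0;			/* detach() needs no kfree() */
}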
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index a7a7aa040536..318be4629cde 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -20,7 +20,6 @@
#include <linux/bug.h>
-#include "core.h"
#include "htc.h"
#include "rx_desc.h"
@@ -1317,7 +1316,7 @@ struct htt_rx_desc {
#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */
#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
-struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar);
+int ath10k_htt_attach(struct ath10k *ar);
int ath10k_htt_attach_target(struct ath10k_htt *htt);
void ath10k_htt_detach(struct ath10k_htt *htt);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index de058d7adca8..e784c40b904b 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -15,6 +15,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
@@ -803,6 +804,37 @@ static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
return false;
}
+static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
+{
+ struct htt_rx_desc *rxd;
+ u32 flags, info;
+ bool is_ip4, is_ip6;
+ bool is_tcp, is_udp;
+ bool ip_csum_ok, tcpudp_csum_ok;
+
+ rxd = (void *)skb->data - sizeof(*rxd);
+ flags = __le32_to_cpu(rxd->attention.flags);
+ info = __le32_to_cpu(rxd->msdu_start.info1);
+
+ is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
+ is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
+ is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
+ is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
+ ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
+ tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);
+
+ if (!is_ip4 && !is_ip6)
+ return CHECKSUM_NONE;
+ if (!is_tcp && !is_udp)
+ return CHECKSUM_NONE;
+ if (!ip_csum_ok)
+ return CHECKSUM_NONE;
+ if (!tcpudp_csum_ok)
+ return CHECKSUM_NONE;
+
+ return CHECKSUM_UNNECESSARY;
+}
+
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
struct htt_rx_indication *rx)
{
@@ -814,6 +846,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
u8 *fw_desc;
int i, j;
int ret;
+ int ip_summed;
memset(&info, 0, sizeof(info));
@@ -888,6 +921,11 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
continue;
}
+ /* The skb is not yet processed and it may be
+ * reallocated. Since the offload info is in the original
+ * skb, extract the checksum state now and assign it later */
+ ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
+
info.skb = msdu_head;
info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
@@ -913,6 +951,8 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data))
ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n");
+ info.skb->ip_summed = ip_summed;
+
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ",
info.skb->data, info.skb->len);
ath10k_process_rx(htt->ar, &info);
@@ -979,6 +1019,7 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
info.status = HTT_RX_IND_MPDU_STATUS_OK;
info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+ info.skb->ip_summed = ath10k_htt_rx_get_csum_state(info.skb);
if (tkip_mic_err) {
ath10k_warn("tkip mic error\n");
@@ -1036,7 +1077,7 @@ end:
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
- struct ath10k_htt *htt = ar->htt;
+ struct ath10k_htt *htt = &ar->htt;
struct htt_resp *resp = (struct htt_resp *)skb->data;
/* confirm alignment */
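
The rx path now derives skb->ip_summed from the hardware descriptor before the buffer can be reallocated, promoting verified IPv4/IPv6 TCP/UDP frames to CHECKSUM_UNNECESSARY and leaving everything else to software verification via CHECKSUM_NONE. The new decision logic, condensed into a sketch:

#include <linux/skbuff.h>

/* All four conditions must hold for the stack to skip verification. */
static int demo_rx_csum_state(bool is_ip4_or_ip6, bool is_tcp_or_udp,
			      bool ip_csum_ok, bool l4_csum_ok)
{
	if (is_ip4_or_ip6 && is_tcp_or_udp && ip_csum_ok && l4_csum_ok)
		return CHECKSUM_UNNECESSARY;
	return CHECKSUM_NONE;
}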
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index ef79106db247..656c2546b294 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -92,7 +92,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
/* At the beginning free queue number should hint us the maximum
* queue length */
- pipe = htt->ar->htc->endpoint[htt->eid].ul_pipe_id;
+ pipe = htt->ar->htc.endpoint[htt->eid].ul_pipe_id;
htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar,
pipe);
@@ -153,7 +153,7 @@ void ath10k_htt_tx_detach(struct ath10k_htt *htt)
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
- struct ath10k_htt *htt = ar->htt;
+ struct ath10k_htt *htt = &ar->htt;
if (skb_cb->htt.is_conf) {
dev_kfree_skb_any(skb);
@@ -194,7 +194,7 @@ int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
ATH10K_SKB_CB(skb)->htt.is_conf = true;
- ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb);
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
@@ -281,7 +281,7 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
ATH10K_SKB_CB(skb)->htt.is_conf = true;
- ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb);
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
@@ -346,7 +346,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
skb_cb->htt.refcount = 2;
skb_cb->htt.msdu = msdu;
- res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc);
+ res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
if (res)
goto err;
@@ -465,6 +465,8 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
flags1 = 0;
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
frags_paddr = ATH10K_SKB_CB(txfrag)->paddr;
@@ -486,7 +488,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
skb_cb->htt.txfrag = txfrag;
skb_cb->htt.msdu = msdu;
- res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc);
+ res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
if (res)
goto err;
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index da5c333d0d4b..cf2ba4d850c9 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -20,6 +20,7 @@
#include <net/mac80211.h>
#include <linux/etherdevice.h>
+#include "hif.h"
#include "core.h"
#include "debug.h"
#include "wmi.h"
@@ -43,6 +44,8 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
.macaddr = macaddr,
};
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
arg.key_flags = WMI_KEY_PAIRWISE;
else
@@ -87,6 +90,8 @@ static int ath10k_install_key(struct ath10k_vif *arvif,
struct ath10k *ar = arvif->ar;
int ret;
+ lockdep_assert_held(&ar->conf_mutex);
+
INIT_COMPLETION(ar->install_key_done);
ret = ath10k_send_key(arvif, key, cmd, macaddr);
@@ -327,6 +332,29 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
return 0;
}
+static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
+{
+ if (value != 0xFFFFFFFF)
+ value = min_t(u32, arvif->ar->hw->wiphy->rts_threshold,
+ ATH10K_RTS_MAX);
+
+ return ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_RTS_THRESHOLD,
+ value);
+}
+
+static int ath10k_mac_set_frag(struct ath10k_vif *arvif, u32 value)
+{
+ if (value != 0xFFFFFFFF)
+ value = clamp_t(u32, arvif->ar->hw->wiphy->frag_threshold,
+ ATH10K_FRAGMT_THRESHOLD_MIN,
+ ATH10K_FRAGMT_THRESHOLD_MAX);
+
+ return ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ value);
+}
+
static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
{
int ret;
@@ -364,6 +392,20 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
spin_unlock_bh(&ar->data_lock);
}
+static void ath10k_peer_cleanup_all(struct ath10k *ar)
+{
+ struct ath10k_peer *peer, *tmp;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
+ list_del(&peer->list);
+ kfree(peer);
+ }
+ spin_unlock_bh(&ar->data_lock);
+}
+
/************************/
/* Interface management */
/************************/
@@ -372,6 +414,8 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
{
int ret;
+ lockdep_assert_held(&ar->conf_mutex);
+
ret = wait_for_completion_timeout(&ar->vdev_setup_done,
ATH10K_VDEV_SETUP_TIMEOUT_HZ);
if (ret == 0)
@@ -605,6 +649,8 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
{
int ret = 0;
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
if (!info->enable_beacon) {
ath10k_vdev_stop(arvif);
return;
@@ -631,6 +677,8 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
{
int ret = 0;
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
if (!info->ibss_joined) {
ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer);
if (ret)
@@ -680,6 +728,8 @@ static void ath10k_ps_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
enum wmi_sta_ps_mode psmode;
int ret;
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
if (vif->type != NL80211_IFTYPE_STATION)
return;
@@ -722,6 +772,8 @@ static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
struct ieee80211_bss_conf *bss_conf,
struct wmi_peer_assoc_complete_arg *arg)
{
+ lockdep_assert_held(&ar->conf_mutex);
+
memcpy(arg->addr, sta->addr, ETH_ALEN);
arg->vdev_id = arvif->vdev_id;
arg->peer_aid = sta->aid;
@@ -764,6 +816,8 @@ static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
const u8 *rsnie = NULL;
const u8 *wpaie = NULL;
+ lockdep_assert_held(&ar->conf_mutex);
+
bss = cfg80211_get_bss(ar->hw->wiphy, ar->hw->conf.chandef.chan,
info->bssid, NULL, 0, 0, 0);
if (bss) {
@@ -804,6 +858,8 @@ static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
u32 ratemask;
int i;
+ lockdep_assert_held(&ar->conf_mutex);
+
sband = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band];
ratemask = sta->supp_rates[ar->hw->conf.chandef.chan->band];
rates = sband->bitrates;
@@ -827,6 +883,8 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
int smps;
int i, n;
+ lockdep_assert_held(&ar->conf_mutex);
+
if (!ht_cap->ht_supported)
return;
@@ -905,6 +963,8 @@ static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar,
u32 uapsd = 0;
u32 max_sp = 0;
+ lockdep_assert_held(&ar->conf_mutex);
+
if (sta->wme)
arg->peer_flags |= WMI_PEER_QOS;
@@ -1056,6 +1116,8 @@ static int ath10k_peer_assoc(struct ath10k *ar,
{
struct wmi_peer_assoc_complete_arg arg;
+ lockdep_assert_held(&ar->conf_mutex);
+
memset(&arg, 0, sizeof(struct wmi_peer_assoc_complete_arg));
ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, &arg);
@@ -1079,6 +1141,8 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
struct ieee80211_sta *ap_sta;
int ret;
+ lockdep_assert_held(&ar->conf_mutex);
+
rcu_read_lock();
ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
@@ -1119,6 +1183,8 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
int ret;
+ lockdep_assert_held(&ar->conf_mutex);
+
/*
* For some reason, calling VDEV-DOWN before VDEV-STOP
* makes the FW to send frames via HTT after disassociation.
@@ -1152,6 +1218,8 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
{
int ret = 0;
+ lockdep_assert_held(&ar->conf_mutex);
+
ret = ath10k_peer_assoc(ar, arvif, sta, NULL);
if (ret) {
ath10k_warn("WMI peer assoc failed for %pM\n", sta->addr);
@@ -1172,6 +1240,8 @@ static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif,
{
int ret = 0;
+ lockdep_assert_held(&ar->conf_mutex);
+
ret = ath10k_clear_peer_keys(arvif, sta->addr);
if (ret) {
ath10k_warn("could not clear all peer wep keys (%d)\n", ret);
@@ -1198,6 +1268,8 @@ static int ath10k_update_channel_list(struct ath10k *ar)
int ret;
int i;
+ lockdep_assert_held(&ar->conf_mutex);
+
bands = hw->wiphy->bands;
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
if (!bands[band])
@@ -1276,21 +1348,19 @@ static int ath10k_update_channel_list(struct ath10k *ar)
return ret;
}
-static void ath10k_reg_notifier(struct wiphy *wiphy,
- struct regulatory_request *request)
+static void ath10k_regd_update(struct ath10k *ar)
{
- struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct reg_dmn_pair_mapping *regpair;
- struct ath10k *ar = hw->priv;
int ret;
- ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
+ lockdep_assert_held(&ar->conf_mutex);
ret = ath10k_update_channel_list(ar);
if (ret)
ath10k_warn("could not update channel list (%d)\n", ret);
regpair = ar->ath_common.regulatory.regpair;
+
/* Target allows setting up per-band regdomain but ath_common provides
* a combined one only */
ret = ath10k_wmi_pdev_set_regdomain(ar,
@@ -1303,6 +1373,20 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
ath10k_warn("could not set pdev regdomain (%d)\n", ret);
}
+static void ath10k_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct ath10k *ar = hw->priv;
+
+ ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state == ATH10K_STATE_ON)
+ ath10k_regd_update(ar);
+ mutex_unlock(&ar->conf_mutex);
+}
+
/***************/
/* TX handlers */
/***************/
@@ -1322,9 +1406,9 @@ static void ath10k_tx_h_qos_workaround(struct ieee80211_hw *hw,
return;
qos_ctl = ieee80211_get_qos_ctl(hdr);
- memmove(qos_ctl, qos_ctl + IEEE80211_QOS_CTL_LEN,
- skb->len - ieee80211_hdrlen(hdr->frame_control));
- skb_trim(skb, skb->len - IEEE80211_QOS_CTL_LEN);
+ memmove(skb->data + IEEE80211_QOS_CTL_LEN,
+ skb->data, (void *)qos_ctl - (void *)skb->data);
+ skb_pull(skb, IEEE80211_QOS_CTL_LEN);
}
static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
@@ -1397,15 +1481,15 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
int ret;
if (ieee80211_is_mgmt(hdr->frame_control))
- ret = ath10k_htt_mgmt_tx(ar->htt, skb);
+ ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
else if (ieee80211_is_nullfunc(hdr->frame_control))
/* FW does not report tx status properly for NullFunc frames
* unless they are sent through mgmt tx path. mac80211 sends
* those frames when it detects link/beacon loss and depends on
* the tx status to be correct. */
- ret = ath10k_htt_mgmt_tx(ar->htt, skb);
+ ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
else
- ret = ath10k_htt_tx(ar->htt, skb);
+ ret = ath10k_htt_tx(&ar->htt, skb);
if (ret) {
ath10k_warn("tx failed (%d). dropping packet.\n", ret);
@@ -1552,6 +1636,10 @@ static int ath10k_abort_scan(struct ath10k *ar)
ret = ath10k_wmi_stop_scan(ar, &arg);
if (ret) {
ath10k_warn("could not submit wmi stop scan (%d)\n", ret);
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.in_progress = false;
+ ath10k_offchan_tx_purge(ar);
+ spin_unlock_bh(&ar->data_lock);
return -EIO;
}
@@ -1645,10 +1733,14 @@ static void ath10k_tx(struct ieee80211_hw *hw,
tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
}
- ath10k_tx_h_qos_workaround(hw, control, skb);
- ath10k_tx_h_update_wep_key(skb);
- ath10k_tx_h_add_p2p_noa_ie(ar, skb);
- ath10k_tx_h_seq_no(skb);
+ /* it makes no sense to process injected frames like that */
+ if (info->control.vif &&
+ info->control.vif->type != NL80211_IFTYPE_MONITOR) {
+ ath10k_tx_h_qos_workaround(hw, control, skb);
+ ath10k_tx_h_update_wep_key(skb);
+ ath10k_tx_h_add_p2p_noa_ie(ar, skb);
+ ath10k_tx_h_seq_no(skb);
+ }
memset(ATH10K_SKB_CB(skb), 0, sizeof(*ATH10K_SKB_CB(skb)));
ATH10K_SKB_CB(skb)->htt.vdev_id = vdev_id;
@@ -1673,10 +1765,57 @@ static void ath10k_tx(struct ieee80211_hw *hw,
/*
* Initialize various parameters with default vaules.
*/
+void ath10k_halt(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ del_timer_sync(&ar->scan.timeout);
+ ath10k_offchan_tx_purge(ar);
+ ath10k_peer_cleanup_all(ar);
+ ath10k_core_stop(ar);
+ ath10k_hif_power_down(ar);
+
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.in_progress) {
+ del_timer(&ar->scan.timeout);
+ ar->scan.in_progress = false;
+ ieee80211_scan_completed(ar->hw, true);
+ }
+ spin_unlock_bh(&ar->data_lock);
+}
+
static int ath10k_start(struct ieee80211_hw *hw)
{
struct ath10k *ar = hw->priv;
- int ret;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_OFF &&
+ ar->state != ATH10K_STATE_RESTARTING) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = ath10k_hif_power_up(ar);
+ if (ret) {
+ ath10k_err("could not init hif (%d)\n", ret);
+ ar->state = ATH10K_STATE_OFF;
+ goto exit;
+ }
+
+ ret = ath10k_core_start(ar);
+ if (ret) {
+ ath10k_err("could not init core (%d)\n", ret);
+ ath10k_hif_power_down(ar);
+ ar->state = ATH10K_STATE_OFF;
+ goto exit;
+ }
+
+ if (ar->state == ATH10K_STATE_OFF)
+ ar->state = ATH10K_STATE_ON;
+ else if (ar->state == ATH10K_STATE_RESTARTING)
+ ar->state = ATH10K_STATE_RESTARTED;
ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS, 1);
if (ret)
@@ -1688,6 +1827,10 @@ static int ath10k_start(struct ieee80211_hw *hw)
ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n",
ret);
+ ath10k_regd_update(ar);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
return 0;
}
@@ -1695,18 +1838,48 @@ static void ath10k_stop(struct ieee80211_hw *hw)
{
struct ath10k *ar = hw->priv;
- /* avoid leaks in case FW never confirms scan for offchannel */
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state == ATH10K_STATE_ON ||
+ ar->state == ATH10K_STATE_RESTARTED ||
+ ar->state == ATH10K_STATE_WEDGED)
+ ath10k_halt(ar);
+
+ ar->state = ATH10K_STATE_OFF;
+ mutex_unlock(&ar->conf_mutex);
+
cancel_work_sync(&ar->offchan_tx_work);
- ath10k_offchan_tx_purge(ar);
+ cancel_work_sync(&ar->restart_work);
}
-static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
+static void ath10k_config_ps(struct ath10k *ar)
{
struct ath10k_generic_iter ar_iter;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* During HW reconfiguration mac80211 reports all interfaces that were
+ * running until reconfiguration was started. Since FW doesn't have any
+ * vdevs at this point we must not iterate over this interface list.
+ * This setting will be updated upon add_interface(). */
+ if (ar->state == ATH10K_STATE_RESTARTED)
+ return;
+
+ memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
+ ar_iter.ar = ar;
+
+ ieee80211_iterate_active_interfaces_atomic(
+ ar->hw, IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_ps_iter, &ar_iter);
+
+ if (ar_iter.ret)
+ ath10k_warn("failed to set ps config (%d)\n", ar_iter.ret);
+}
+
+static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
+{
struct ath10k *ar = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
int ret = 0;
- u32 flags;
mutex_lock(&ar->conf_mutex);
@@ -1718,18 +1891,8 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
spin_unlock_bh(&ar->data_lock);
}
- if (changed & IEEE80211_CONF_CHANGE_PS) {
- memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
- ar_iter.ar = ar;
- flags = IEEE80211_IFACE_ITER_RESUME_ALL;
-
- ieee80211_iterate_active_interfaces_atomic(hw,
- flags,
- ath10k_ps_iter,
- &ar_iter);
-
- ret = ar_iter.ret;
- }
+ if (changed & IEEE80211_CONF_CHANGE_PS)
+ ath10k_config_ps(ar);
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
if (conf->flags & IEEE80211_CONF_MONITOR)
@@ -1738,6 +1901,7 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
ret = ath10k_monitor_destroy(ar);
}
+ ath10k_wmi_flush_tx(ar);
mutex_unlock(&ar->conf_mutex);
return ret;
}
@@ -1761,6 +1925,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
+ memset(arvif, 0, sizeof(*arvif));
+
arvif->ar = ar;
arvif->vif = vif;
@@ -1859,6 +2025,16 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ath10k_warn("Failed to set PSPOLL count: %d\n", ret);
}
+ ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
+ if (ret)
+ ath10k_warn("failed to set rts threshold for vdev %d (%d)\n",
+ arvif->vdev_id, ret);
+
+ ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
+ if (ret)
+ ath10k_warn("failed to set frag threshold for vdev %d (%d)\n",
+ arvif->vdev_id, ret);
+
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
ar->monitor_present = true;
@@ -2164,6 +2340,8 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
arg.ssids[i].len = req->ssids[i].ssid_len;
arg.ssids[i].ssid = req->ssids[i].ssid;
}
+ } else {
+ arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
}
if (req->n_channels) {
@@ -2363,6 +2541,8 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
u32 value = 0;
int ret = 0;
+ lockdep_assert_held(&ar->conf_mutex);
+
if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
return 0;
@@ -2558,11 +2738,16 @@ static void ath10k_set_rts_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
u32 rts = ar_iter->ar->hw->wiphy->rts_threshold;
- rts = min_t(u32, rts, ATH10K_RTS_MAX);
+ lockdep_assert_held(&arvif->ar->conf_mutex);
- ar_iter->ret = ath10k_wmi_vdev_set_param(ar_iter->ar, arvif->vdev_id,
- WMI_VDEV_PARAM_RTS_THRESHOLD,
- rts);
+ /* During HW reconfiguration mac80211 reports all interfaces that were
+ * running until reconfiguration was started. Since FW doesn't have any
+ * vdevs at this point we must not iterate over this interface list.
+ * This setting will be updated upon add_interface(). */
+ if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
+ return;
+
+ ar_iter->ret = ath10k_mac_set_rts(arvif, rts);
if (ar_iter->ret)
ath10k_warn("Failed to set RTS threshold for VDEV: %d\n",
arvif->vdev_id);
@@ -2581,8 +2766,9 @@ static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
ar_iter.ar = ar;
mutex_lock(&ar->conf_mutex);
- ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
- ath10k_set_rts_iter, &ar_iter);
+ ieee80211_iterate_active_interfaces_atomic(
+ hw, IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_set_rts_iter, &ar_iter);
mutex_unlock(&ar->conf_mutex);
return ar_iter.ret;
@@ -2593,17 +2779,17 @@ static void ath10k_set_frag_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
struct ath10k_generic_iter *ar_iter = data;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
u32 frag = ar_iter->ar->hw->wiphy->frag_threshold;
- int ret;
- frag = clamp_t(u32, frag,
- ATH10K_FRAGMT_THRESHOLD_MIN,
- ATH10K_FRAGMT_THRESHOLD_MAX);
+ lockdep_assert_held(&arvif->ar->conf_mutex);
- ret = ath10k_wmi_vdev_set_param(ar_iter->ar, arvif->vdev_id,
- WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
- frag);
+ /* During HW reconfiguration mac80211 reports all interfaces that were
+ * running until reconfiguration was started. Since FW doesn't have any
+ * vdevs at this point we must not iterate over this interface list.
+ * This setting will be updated upon add_interface(). */
+ if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
+ return;
- ar_iter->ret = ret;
+ ar_iter->ret = ath10k_mac_set_frag(arvif, frag);
if (ar_iter->ret)
ath10k_warn("Failed to set frag threshold for VDEV: %d\n",
arvif->vdev_id);
@@ -2622,8 +2808,9 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
ar_iter.ar = ar;
mutex_lock(&ar->conf_mutex);
- ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
- ath10k_set_frag_iter, &ar_iter);
+ ieee80211_iterate_active_interfaces_atomic(
+ hw, IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_set_frag_iter, &ar_iter);
mutex_unlock(&ar->conf_mutex);
return ar_iter.ret;
@@ -2632,6 +2819,7 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
{
struct ath10k *ar = hw->priv;
+ bool skip;
int ret;
/* mac80211 doesn't care if we really xmit queued frames or not
@@ -2639,16 +2827,29 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
if (drop)
return;
- ret = wait_event_timeout(ar->htt->empty_tx_wq, ({
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state == ATH10K_STATE_WEDGED)
+ goto skip;
+
+ ret = wait_event_timeout(ar->htt.empty_tx_wq, ({
bool empty;
- spin_lock_bh(&ar->htt->tx_lock);
- empty = bitmap_empty(ar->htt->used_msdu_ids,
- ar->htt->max_num_pending_tx);
- spin_unlock_bh(&ar->htt->tx_lock);
- (empty);
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ empty = bitmap_empty(ar->htt.used_msdu_ids,
+ ar->htt.max_num_pending_tx);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ skip = (ar->state == ATH10K_STATE_WEDGED);
+
+ (empty || skip);
}), ATH10K_FLUSH_TIMEOUT_HZ);
- if (ret <= 0)
+
+ if (ret <= 0 || skip)
ath10k_warn("tx not flushed\n");
+
+skip:
+ mutex_unlock(&ar->conf_mutex);
}
/* TODO: Implement this function properly
@@ -2660,6 +2861,118 @@ static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
return 1;
}
+#ifdef CONFIG_PM
+static int ath10k_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath10k *ar = hw->priv;
+ int ret;
+
+ ar->is_target_paused = false;
+
+ ret = ath10k_wmi_pdev_suspend_target(ar);
+ if (ret) {
+ ath10k_warn("could not suspend target (%d)\n", ret);
+ return 1;
+ }
+
+ ret = wait_event_interruptible_timeout(ar->event_queue,
+ ar->is_target_paused == true,
+ 1 * HZ);
+ if (ret < 0) {
+ ath10k_warn("suspend interrupted (%d)\n", ret);
+ goto resume;
+ } else if (ret == 0) {
+ ath10k_warn("suspend timed out - target pause event never came\n");
+ goto resume;
+ }
+
+ ret = ath10k_hif_suspend(ar);
+ if (ret) {
+ ath10k_warn("could not suspend hif (%d)\n", ret);
+ goto resume;
+ }
+
+ return 0;
+resume:
+ ret = ath10k_wmi_pdev_resume_target(ar);
+ if (ret)
+ ath10k_warn("could not resume target (%d)\n", ret);
+ return 1;
+}
+
+static int ath10k_resume(struct ieee80211_hw *hw)
+{
+ struct ath10k *ar = hw->priv;
+ int ret;
+
+ ret = ath10k_hif_resume(ar);
+ if (ret) {
+ ath10k_warn("could not resume hif (%d)\n", ret);
+ return 1;
+ }
+
+ ret = ath10k_wmi_pdev_resume_target(ar);
+ if (ret) {
+ ath10k_warn("could not resume target (%d)\n", ret);
+ return 1;
+ }
+
+ return 0;
+}
+#endif
+
+static void ath10k_restart_complete(struct ieee80211_hw *hw)
+{
+ struct ath10k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ /* If device failed to restart it will be in a different state, e.g.
+ * ATH10K_STATE_WEDGED */
+ if (ar->state == ATH10K_STATE_RESTARTED) {
+ ath10k_info("device successfully recovered\n");
+ ar->state = ATH10K_STATE_ON;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+}
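
For context, the recovery flow implied here: a firmware crash queues ar->restart_work (see the hif_dump_area hunk in pci.c below), mac80211 restarts the hardware, and restart_complete() confirms success. A hedged sketch of the resulting state machine; the state names mirror this patch, but the transition helper itself is hypothetical:

enum state { STATE_ON, STATE_RESTARTED, STATE_WEDGED };

static enum state on_restart_complete(enum state s)
{
	/* only a successful restart returns to ON; a wedged device
	 * stays wedged until something re-powers it */
	return (s == STATE_RESTARTED) ? STATE_ON : s;
}

int main(void)
{
	return on_restart_complete(STATE_RESTARTED) == STATE_ON ? 0 : 1;
}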
+
+static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct ath10k *ar = hw->priv;
+ struct ieee80211_supported_band *sband;
+ struct survey_info *ar_survey = &ar->survey[idx];
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+ if (sband && idx >= sband->n_channels) {
+ idx -= sband->n_channels;
+ sband = NULL;
+ }
+
+ if (!sband)
+ sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
+
+ if (!sband || idx >= sband->n_channels) {
+ ret = -ENOENT;
+ goto exit;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ memcpy(survey, ar_survey, sizeof(*survey));
+ spin_unlock_bh(&ar->data_lock);
+
+ survey->channel = &sband->channels[idx];
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
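
get_survey() consumes a flat channel index that runs through the 2 GHz band first and then 5 GHz; the freq_to_idx() helper added in wmi.c below builds the same flat index from a frequency. A standalone sketch of the convention, with illustrative channel counts:

#include <stdio.h>

int main(void)
{
	int n_2ghz = 14, n_5ghz = 25;	/* illustrative counts only */
	int idx = 20;			/* flat index from mac80211 */

	if (idx < n_2ghz)
		printf("2 GHz, channel %d\n", idx);
	else if (idx - n_2ghz < n_5ghz)
		printf("5 GHz, channel %d\n", idx - n_2ghz);
	else
		printf("no such channel (-ENOENT)\n");
	return 0;
}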
+
static const struct ieee80211_ops ath10k_ops = {
.tx = ath10k_tx,
.start = ath10k_start,
@@ -2680,6 +2993,12 @@ static const struct ieee80211_ops ath10k_ops = {
.set_frag_threshold = ath10k_set_frag_threshold,
.flush = ath10k_flush,
.tx_last_beacon = ath10k_tx_last_beacon,
+ .restart_complete = ath10k_restart_complete,
+ .get_survey = ath10k_get_survey,
+#ifdef CONFIG_PM
+ .suspend = ath10k_suspend,
+ .resume = ath10k_resume,
+#endif
};
#define RATETAB_ENT(_rate, _rateid, _flags) { \
@@ -2797,9 +3116,15 @@ static const struct ieee80211_iface_limit ath10k_if_limits[] = {
.max = 8,
.types = BIT(NL80211_IFTYPE_STATION)
| BIT(NL80211_IFTYPE_P2P_CLIENT)
- | BIT(NL80211_IFTYPE_P2P_GO)
- | BIT(NL80211_IFTYPE_AP)
- }
+ },
+ {
+ .max = 3,
+ .types = BIT(NL80211_IFTYPE_P2P_GO)
+ },
+ {
+ .max = 7,
+ .types = BIT(NL80211_IFTYPE_AP)
+ },
};
static const struct ieee80211_iface_combination ath10k_if_comb = {
@@ -2814,19 +3139,18 @@ static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
{
struct ieee80211_sta_vht_cap vht_cap = {0};
u16 mcs_map;
+ int i;
vht_cap.vht_supported = 1;
vht_cap.cap = ar->vht_cap_info;
- /* FIXME: check dynamically how many streams board supports */
- mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
- IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
- IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
+ mcs_map = 0;
+ for (i = 0; i < 8; i++) {
+ if (i < ar->num_rf_chains)
+ mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i*2);
+ else
+ mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i*2);
+ }
vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
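
As a worked example of the loop above: the VHT MCS map packs one 2-bit field per spatial stream. A standalone sketch, assuming the 802.11ac field encoding used by mac80211 (2 = MCS 0-9 supported, 3 = not supported), shows what two RF chains produce:

#include <stdio.h>
#include <stdint.h>

#define MCS_SUPPORT_0_9		2	/* 2-bit per-stream field values */
#define MCS_NOT_SUPPORTED	3

int main(void)
{
	uint16_t mcs_map = 0;
	int num_rf_chains = 2, i;

	for (i = 0; i < 8; i++)
		mcs_map |= (i < num_rf_chains ? MCS_SUPPORT_0_9
					      : MCS_NOT_SUPPORTED) << (i * 2);

	/* prints 0xfffa: streams 1-2 support MCS 0-9, streams 3-8 do not */
	printf("mcs_map = 0x%04x\n", mcs_map);
	return 0;
}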
@@ -2889,7 +3213,7 @@ static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
- for (i = 0; i < WMI_MAX_SPATIAL_STREAM; i++)
+ for (i = 0; i < ar->num_rf_chains; i++)
ht_cap.mcs.rx_mask[i] = 0xFF;
ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
@@ -2948,8 +3272,10 @@ int ath10k_mac_register(struct ath10k *ar)
channels = kmemdup(ath10k_2ghz_channels,
sizeof(ath10k_2ghz_channels),
GFP_KERNEL);
- if (!channels)
- return -ENOMEM;
+ if (!channels) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
@@ -2968,11 +3294,8 @@ int ath10k_mac_register(struct ath10k *ar)
sizeof(ath10k_5ghz_channels),
GFP_KERNEL);
if (!channels) {
- if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
- band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
- kfree(band->channels);
- }
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_free;
}
band = &ar->mac.sbands[IEEE80211_BAND_5GHZ];
@@ -3032,29 +3355,36 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->iface_combinations = &ath10k_if_comb;
ar->hw->wiphy->n_iface_combinations = 1;
+ ar->hw->netdev_features = NETIF_F_HW_CSUM;
+
ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
ath10k_reg_notifier);
if (ret) {
ath10k_err("Regulatory initialization failed\n");
- return ret;
+ goto err_free;
}
ret = ieee80211_register_hw(ar->hw);
if (ret) {
ath10k_err("ieee80211 registration failed: %d\n", ret);
- return ret;
+ goto err_free;
}
if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
ret = regulatory_hint(ar->hw->wiphy,
ar->ath_common.regulatory.alpha2);
if (ret)
- goto exit;
+ goto err_unregister;
}
return 0;
-exit:
+
+err_unregister:
ieee80211_unregister_hw(ar->hw);
+err_free:
+ kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
+
return ret;
}
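
The error-path rework above follows the usual centralized-unwind shape: every failure jumps to a label that frees whatever was allocated so far, relying on kfree(NULL) being a no-op. A standalone sketch of the pattern (allocation sizes and names are illustrative, and free() stands in for kfree()):

#include <stdlib.h>

static int register_sketch(void)
{
	void *chan2ghz = NULL, *chan5ghz = NULL;
	int ret;

	chan2ghz = malloc(64);
	if (!chan2ghz) {
		ret = -1;	/* -ENOMEM in the driver */
		goto err_free;
	}

	chan5ghz = malloc(64);
	if (!chan5ghz) {
		ret = -1;
		goto err_free;
	}

	return 0;

err_free:
	/* free(NULL) is a no-op, so unallocated pointers are safe here */
	free(chan2ghz);
	free(chan5ghz);
	return ret;
}

int main(void)
{
	return register_sketch() ? 1 : 0;
}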
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index 27fc92e58829..6fce9bfb19a5 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -34,6 +34,7 @@ struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id);
void ath10k_reset_scan(unsigned long ptr);
void ath10k_offchan_tx_purge(struct ath10k *ar);
void ath10k_offchan_tx_work(struct work_struct *work);
+void ath10k_halt(struct ath10k *ar);
static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
{
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 33af4672c909..e2f9ef50b1bd 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -32,7 +32,7 @@
#include "ce.h"
#include "pci.h"
-unsigned int ath10k_target_ps;
+static unsigned int ath10k_target_ps;
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
@@ -54,6 +54,10 @@ static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
int num);
static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
+static void ath10k_pci_device_reset(struct ath10k *ar);
+static int ath10k_pci_reset_target(struct ath10k *ar);
+static int ath10k_pci_start_intr(struct ath10k *ar);
+static void ath10k_pci_stop_intr(struct ath10k *ar);
static const struct ce_attr host_ce_config_wlan[] = {
/* host->target HTC control and raw streams */
@@ -718,6 +722,8 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
reg_dump_values[i + 1],
reg_dump_values[i + 2],
reg_dump_values[i + 3]);
+
+ ieee80211_queue_work(ar->hw, &ar->restart_work);
}
static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
@@ -744,8 +750,8 @@ static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
ath10k_ce_per_engine_service(ar, pipe);
}
-static void ath10k_pci_hif_post_init(struct ath10k *ar,
- struct ath10k_hif_cb *callbacks)
+static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
+ struct ath10k_hif_cb *callbacks)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -1250,10 +1256,25 @@ static void ath10k_pci_ce_deinit(struct ath10k *ar)
}
}
+static void ath10k_pci_disable_irqs(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int i;
+
+ for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
+ disable_irq(ar_pci->pdev->irq + i);
+}
+
static void ath10k_pci_hif_stop(struct ath10k *ar)
{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+ /* Irqs are never explicitly re-enabled. They are implicitly re-enabled
+ * by ath10k_pci_start_intr(). */
+ ath10k_pci_disable_irqs(ar);
+
ath10k_pci_stop_ce(ar);
/* At this point, asynchronous threads are stopped, the target should
@@ -1263,7 +1284,8 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
ath10k_pci_process_ce(ar);
ath10k_pci_cleanup_ce(ar);
ath10k_pci_buffer_cleanup(ar);
- ath10k_pci_ce_deinit(ar);
+
+ ar_pci->started = 0;
}
static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
@@ -1735,6 +1757,124 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
ath10k_pci_sleep(ar);
}
+static int ath10k_pci_hif_power_up(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ ret = ath10k_pci_start_intr(ar);
+ if (ret) {
+ ath10k_err("could not start interrupt handling (%d)\n", ret);
+ goto err;
+ }
+
+ /*
+ * Bring the target up cleanly.
+ *
+ * The target may be in an undefined state with an AUX-powered Target
+ * and a Host in WoW mode. If the Host crashes, loses power, or is
+ * restarted (without unloading the driver) then the Target is left
+ * (aux) powered and running. On a subsequent driver load, the Target
+ * is in an unexpected state. We try to catch that here in order to
+ * reset the Target and retry the probe.
+ */
+ ath10k_pci_device_reset(ar);
+
+ ret = ath10k_pci_reset_target(ar);
+ if (ret)
+ goto err_irq;
+
+ if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
+ /* Force AWAKE forever */
+ ath10k_do_pci_wake(ar);
+
+ ret = ath10k_pci_ce_init(ar);
+ if (ret)
+ goto err_ps;
+
+ ret = ath10k_pci_init_config(ar);
+ if (ret)
+ goto err_ce;
+
+ ret = ath10k_pci_wake_target_cpu(ar);
+ if (ret) {
+ ath10k_err("could not wake up target CPU (%d)\n", ret);
+ goto err_ce;
+ }
+
+ return 0;
+
+err_ce:
+ ath10k_pci_ce_deinit(ar);
+err_ps:
+ if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
+ ath10k_do_pci_sleep(ar);
+err_irq:
+ ath10k_pci_stop_intr(ar);
+err:
+ return ret;
+}
+
+static void ath10k_pci_hif_power_down(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ ath10k_pci_stop_intr(ar);
+
+ ath10k_pci_ce_deinit(ar);
+ if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
+ ath10k_do_pci_sleep(ar);
+}
+
+#ifdef CONFIG_PM
+
+#define ATH10K_PCI_PM_CONTROL 0x44
+
+static int ath10k_pci_hif_suspend(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct pci_dev *pdev = ar_pci->pdev;
+ u32 val;
+
+ pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
+
+ if ((val & 0x000000ff) != 0x3) {
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
+ (val & 0xffffff00) | 0x03);
+ }
+
+ return 0;
+}
+
+static int ath10k_pci_hif_resume(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct pci_dev *pdev = ar_pci->pdev;
+ u32 val;
+
+ pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
+
+ if ((val & 0x000000ff) != 0) {
+ pci_restore_state(pdev);
+ pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
+ val & 0xffffff00);
+ /*
+ * Suspend/Resume resets the PCI configuration space,
+ * so we have to re-disable the RETRY_TIMEOUT register (0x41)
+ * to keep PCI Tx retries from interfering with C3 CPU state
+ */
+ pci_read_config_dword(pdev, 0x40, &val);
+
+ if ((val & 0x0000ff00) != 0)
+ pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+ }
+
+ return 0;
+}
+#endif
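
The suspend/resume helpers above toggle the device power state through the dword at config offset 0x44 (ATH10K_PCI_PM_CONTROL in the patch): the low byte holds the state, with 0x03 written on suspend and 0x00 on resume. Reading these as D3hot and D0 is an interpretation; the patch only checks the raw values. A standalone sketch of just the bit manipulation, with the register modeled as a plain variable:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t pm_control = 0x00000000;	/* stand-in for offset 0x44 */

	/* suspend: set the low-power state unless already there */
	if ((pm_control & 0x000000ff) != 0x3)
		pm_control = (pm_control & 0xffffff00) | 0x03;
	printf("suspended: 0x%08x\n", pm_control);

	/* resume: any nonzero low byte means we must return to full power */
	if ((pm_control & 0x000000ff) != 0)
		pm_control &= 0xffffff00;
	printf("resumed:   0x%08x\n", pm_control);

	return 0;
}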
+
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
.send_head = ath10k_pci_hif_send_head,
.exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
@@ -1743,8 +1883,14 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
.map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
.get_default_pipe = ath10k_pci_hif_get_default_pipe,
.send_complete_check = ath10k_pci_hif_send_complete_check,
- .init = ath10k_pci_hif_post_init,
+ .set_callbacks = ath10k_pci_hif_set_callbacks,
.get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
+ .power_up = ath10k_pci_hif_power_up,
+ .power_down = ath10k_pci_hif_power_down,
+#ifdef CONFIG_PM
+ .suspend = ath10k_pci_hif_suspend,
+ .resume = ath10k_pci_hif_resume,
+#endif
};
static void ath10k_pci_ce_tasklet(unsigned long ptr)
@@ -1872,8 +2018,13 @@ static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
ath10k_pci_msi_fw_handler,
IRQF_SHARED, "ath10k_pci", ar);
- if (ret)
+ if (ret) {
+ ath10k_warn("request_irq(%d) failed %d\n",
+ ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
+
+ pci_disable_msi(ar_pci->pdev);
return ret;
+ }
for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
ret = request_irq(ar_pci->pdev->irq + i,
@@ -2059,9 +2210,9 @@ static int ath10k_pci_reset_target(struct ath10k *ar)
return 0;
}
-static void ath10k_pci_device_reset(struct ath10k_pci *ar_pci)
+static void ath10k_pci_device_reset(struct ath10k *ar)
{
- struct ath10k *ar = ar_pci->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
void __iomem *mem = ar_pci->mem;
int i;
u32 val;
@@ -2118,9 +2269,12 @@ static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
case ATH10K_PCI_FEATURE_MSI_X:
ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
break;
- case ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND:
+ case ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND:
ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
break;
+ case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
+ ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n");
+ break;
}
}
}
@@ -2145,7 +2299,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
switch (pci_dev->device) {
case QCA988X_1_0_DEVICE_ID:
- set_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features);
+ set_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features);
break;
case QCA988X_2_0_DEVICE_ID:
set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
@@ -2156,10 +2310,12 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_ar_pci;
}
+ if (ath10k_target_ps)
+ set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
+
ath10k_pci_dump_features(ar_pci);
- ar = ath10k_core_create(ar_pci, ar_pci->dev, ATH10K_BUS_PCI,
- &ath10k_pci_hif_ops);
+ ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
if (!ar) {
ath10k_err("ath10k_core_create failed!\n");
ret = -EINVAL;
@@ -2167,7 +2323,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
}
/* Enable QCA988X_1.0 HW workarounds */
- if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features))
+ if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features))
spin_lock_init(&ar_pci->hw_v1_workaround_lock);
ar_pci->ar = ar;
@@ -2241,62 +2397,14 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ar_pci->cacheline_sz = dma_get_cache_alignment();
- ret = ath10k_pci_start_intr(ar);
- if (ret) {
- ath10k_err("could not start interrupt handling (%d)\n", ret);
- goto err_iomap;
- }
-
- /*
- * Bring the target up cleanly.
- *
- * The target may be in an undefined state with an AUX-powered Target
- * and a Host in WoW mode. If the Host crashes, loses power, or is
- * restarted (without unloading the driver) then the Target is left
- * (aux) powered and running. On a subsequent driver load, the Target
- * is in an unexpected state. We try to catch that here in order to
- * reset the Target and retry the probe.
- */
- ath10k_pci_device_reset(ar_pci);
-
- ret = ath10k_pci_reset_target(ar);
- if (ret)
- goto err_intr;
-
- if (ath10k_target_ps) {
- ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save enabled\n");
- } else {
- /* Force AWAKE forever */
- ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save disabled\n");
- ath10k_do_pci_wake(ar);
- }
-
- ret = ath10k_pci_ce_init(ar);
- if (ret)
- goto err_intr;
-
- ret = ath10k_pci_init_config(ar);
- if (ret)
- goto err_ce;
-
- ret = ath10k_pci_wake_target_cpu(ar);
- if (ret) {
- ath10k_err("could not wake up target CPU (%d)\n", ret);
- goto err_ce;
- }
-
ret = ath10k_core_register(ar);
if (ret) {
ath10k_err("could not register driver core (%d)\n", ret);
- goto err_ce;
+ goto err_iomap;
}
return 0;
-err_ce:
- ath10k_pci_ce_deinit(ar);
-err_intr:
- ath10k_pci_stop_intr(ar);
err_iomap:
pci_iounmap(pdev, mem);
err_master:
@@ -2333,7 +2441,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
tasklet_kill(&ar_pci->msi_fw_err);
ath10k_core_unregister(ar);
- ath10k_pci_stop_intr(ar);
pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, ar_pci->mem);
@@ -2345,128 +2452,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
kfree(ar_pci);
}
-#if defined(CONFIG_PM_SLEEP)
-
-#define ATH10K_PCI_PM_CONTROL 0x44
-
-static int ath10k_pci_suspend(struct device *device)
-{
- struct pci_dev *pdev = to_pci_dev(device);
- struct ath10k *ar = pci_get_drvdata(pdev);
- struct ath10k_pci *ar_pci;
- u32 val;
- int ret, retval;
-
- ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
-
- if (!ar)
- return -ENODEV;
-
- ar_pci = ath10k_pci_priv(ar);
- if (!ar_pci)
- return -ENODEV;
-
- if (ath10k_core_target_suspend(ar))
- return -EBUSY;
-
- ret = wait_event_interruptible_timeout(ar->event_queue,
- ar->is_target_paused == true,
- 1 * HZ);
- if (ret < 0) {
- ath10k_warn("suspend interrupted (%d)\n", ret);
- retval = ret;
- goto resume;
- } else if (ret == 0) {
- ath10k_warn("suspend timed out - target pause event never came\n");
- retval = EIO;
- goto resume;
- }
-
- /*
- * reset is_target_paused and host can check that in next time,
- * or it will always be TRUE and host just skip the waiting
- * condition, it causes target assert due to host already
- * suspend
- */
- ar->is_target_paused = false;
-
- pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
-
- if ((val & 0x000000ff) != 0x3) {
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
- (val & 0xffffff00) | 0x03);
- }
-
- return 0;
-resume:
- ret = ath10k_core_target_resume(ar);
- if (ret)
- ath10k_warn("could not resume (%d)\n", ret);
-
- return retval;
-}
-
-static int ath10k_pci_resume(struct device *device)
-{
- struct pci_dev *pdev = to_pci_dev(device);
- struct ath10k *ar = pci_get_drvdata(pdev);
- struct ath10k_pci *ar_pci;
- int ret;
- u32 val;
-
- ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
-
- if (!ar)
- return -ENODEV;
- ar_pci = ath10k_pci_priv(ar);
-
- if (!ar_pci)
- return -ENODEV;
-
- ret = pci_enable_device(pdev);
- if (ret) {
- ath10k_warn("cannot enable PCI device: %d\n", ret);
- return ret;
- }
-
- pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
-
- if ((val & 0x000000ff) != 0) {
- pci_restore_state(pdev);
- pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
- val & 0xffffff00);
- /*
- * Suspend/Resume resets the PCI configuration space,
- * so we have to re-disable the RETRY_TIMEOUT register (0x41)
- * to keep PCI Tx retries from interfering with C3 CPU state
- */
- pci_read_config_dword(pdev, 0x40, &val);
-
- if ((val & 0x0000ff00) != 0)
- pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
- }
-
- ret = ath10k_core_target_resume(ar);
- if (ret)
- ath10k_warn("target resume failed: %d\n", ret);
-
- return ret;
-}
-
-static SIMPLE_DEV_PM_OPS(ath10k_dev_pm_ops,
- ath10k_pci_suspend,
- ath10k_pci_resume);
-
-#define ATH10K_PCI_PM_OPS (&ath10k_dev_pm_ops)
-
-#else
-
-#define ATH10K_PCI_PM_OPS NULL
-
-#endif /* CONFIG_PM_SLEEP */
-
MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
static struct pci_driver ath10k_pci_driver = {
@@ -2474,7 +2459,6 @@ static struct pci_driver ath10k_pci_driver = {
.id_table = ath10k_pci_id_table,
.probe = ath10k_pci_probe,
.remove = ath10k_pci_remove,
- .driver.pm = ATH10K_PCI_PM_OPS,
};
static int __init ath10k_pci_init(void)
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index d2a055a07dc6..871bb339d56d 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -152,7 +152,8 @@ struct service_to_pipe {
enum ath10k_pci_features {
ATH10K_PCI_FEATURE_MSI_X = 0,
- ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND = 1,
+ ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND = 1,
+ ATH10K_PCI_FEATURE_SOC_POWER_SAVE = 2,
/* keep last */
ATH10K_PCI_FEATURE_COUNT
@@ -311,7 +312,7 @@ static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
void __iomem *addr = ar_pci->mem;
- if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features)) {
+ if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
unsigned long irq_flags;
spin_lock_irqsave(&ar_pci->hw_v1_workaround_lock, irq_flags);
@@ -335,20 +336,22 @@ static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
return ioread32(ar_pci->mem + offset);
}
-extern unsigned int ath10k_target_ps;
-
void ath10k_do_pci_wake(struct ath10k *ar);
void ath10k_do_pci_sleep(struct ath10k *ar);
static inline void ath10k_pci_wake(struct ath10k *ar)
{
- if (ath10k_target_ps)
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
ath10k_do_pci_wake(ar);
}
static inline void ath10k_pci_sleep(struct ath10k *ar)
{
- if (ath10k_target_ps)
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
ath10k_do_pci_sleep(ar);
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 7d4b7987422d..55f90c761868 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -27,6 +27,13 @@ void ath10k_wmi_flush_tx(struct ath10k *ar)
{
int ret;
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->state == ATH10K_STATE_WEDGED) {
+ ath10k_warn("wmi flush skipped - device is wedged anyway\n");
+ return;
+ }
+
ret = wait_event_timeout(ar->wmi.wq,
atomic_read(&ar->wmi.pending_tx_count) == 0,
5*HZ);
@@ -111,7 +118,7 @@ static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len);
- status = ath10k_htc_send(ar->htc, ar->wmi.eid, skb);
+ status = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
if (status) {
dev_kfree_skb_any(skb);
atomic_dec(&ar->wmi.pending_tx_count);
@@ -383,9 +390,82 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
return 0;
}
+static int freq_to_idx(struct ath10k *ar, int freq)
+{
+ struct ieee80211_supported_band *sband;
+ int band, ch, idx = 0;
+
+ for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
+ sband = ar->hw->wiphy->bands[band];
+ if (!sband)
+ continue;
+
+ for (ch = 0; ch < sband->n_channels; ch++, idx++)
+ if (sband->channels[ch].center_freq == freq)
+ goto exit;
+ }
+
+exit:
+ return idx;
+}
+
static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_CHAN_INFO_EVENTID\n");
+ struct wmi_chan_info_event *ev;
+ struct survey_info *survey;
+ u32 err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count;
+ int idx;
+
+ ev = (struct wmi_chan_info_event *)skb->data;
+
+ err_code = __le32_to_cpu(ev->err_code);
+ freq = __le32_to_cpu(ev->freq);
+ cmd_flags = __le32_to_cpu(ev->cmd_flags);
+ noise_floor = __le32_to_cpu(ev->noise_floor);
+ rx_clear_count = __le32_to_cpu(ev->rx_clear_count);
+ cycle_count = __le32_to_cpu(ev->cycle_count);
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
+ err_code, freq, cmd_flags, noise_floor, rx_clear_count,
+ cycle_count);
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (!ar->scan.in_progress) {
+ ath10k_warn("chan info event without a scan request?\n");
+ goto exit;
+ }
+
+ idx = freq_to_idx(ar, freq);
+ if (idx >= ARRAY_SIZE(ar->survey)) {
+ ath10k_warn("chan info: invalid frequency %d (idx %d out of bounds)\n",
+ freq, idx);
+ goto exit;
+ }
+
+ if (cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
+ /* During scanning chan info is reported twice for each
+ * visited channel. The reported cycle count is global
+	 * and the per-channel cycle count must be calculated */
+
+ cycle_count -= ar->survey_last_cycle_count;
+ rx_clear_count -= ar->survey_last_rx_clear_count;
+
+ survey = &ar->survey[idx];
+ survey->channel_time = WMI_CHAN_INFO_MSEC(cycle_count);
+ survey->channel_time_rx = WMI_CHAN_INFO_MSEC(rx_clear_count);
+ survey->noise = noise_floor;
+ survey->filled = SURVEY_INFO_CHANNEL_TIME |
+ SURVEY_INFO_CHANNEL_TIME_RX |
+ SURVEY_INFO_NOISE_DBM;
+ }
+
+ ar->survey_last_rx_clear_count = rx_clear_count;
+ ar->survey_last_cycle_count = cycle_count;
+
+exit:
+ spin_unlock_bh(&ar->data_lock);
}
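
The delta arithmetic above can be checked with a small standalone sketch. The 76595 divisor is the one the patch itself marks as empirically extrapolated in wmi.h; treating it as ticks-per-millisecond is an assumption of this example:

#include <stdio.h>
#include <stdint.h>

#define CHAN_INFO_MSEC(x)	((x) / 76595)

int main(void)
{
	/* firmware counters accumulate globally across the scan ... */
	uint32_t last_cycle = 1000000, cycle = 8659500;
	uint32_t last_clear = 500000, clear = 4329750;

	/* ... so per-channel time is the delta since the last report */
	printf("channel_time %u ms, channel_time_rx %u ms\n",
	       CHAN_INFO_MSEC(cycle - last_cycle),	/* 100 ms */
	       CHAN_INFO_MSEC(clear - last_clear));	/*  50 ms */
	return 0;
}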
static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
@@ -501,8 +581,8 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
(u8 *)skb_tail_pointer(bcn) - ies);
if (!ie) {
- /* highly unlikely for mac80211 */
- ath10k_warn("no tim ie found;\n");
+ if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+ ath10k_warn("no tim ie found;\n");
return;
}
@@ -861,6 +941,13 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
(__le32_to_cpu(ev->sw_version_1) & 0xffff0000) >> 16;
ar->fw_version_build = (__le32_to_cpu(ev->sw_version_1) & 0x0000ffff);
ar->phy_capability = __le32_to_cpu(ev->phy_capability);
+ ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
+
+ if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
+ ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",
+ ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
+ ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
+ }
ar->ath_common.regulatory.current_rd =
__le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
@@ -885,7 +972,7 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
}
ath10k_dbg(ATH10K_DBG_WMI,
- "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u\n",
+ "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
__le32_to_cpu(ev->sw_version),
__le32_to_cpu(ev->sw_version_1),
__le32_to_cpu(ev->abi_version),
@@ -894,7 +981,8 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
__le32_to_cpu(ev->vht_cap_info),
__le32_to_cpu(ev->vht_supp_mcs),
__le32_to_cpu(ev->sys_cap_info),
- __le32_to_cpu(ev->num_mem_reqs));
+ __le32_to_cpu(ev->num_mem_reqs),
+ __le32_to_cpu(ev->num_rf_chains));
complete(&ar->wmi.service_ready);
}
@@ -1114,7 +1202,7 @@ int ath10k_wmi_connect_htc_service(struct ath10k *ar)
/* connect to control service */
conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
- status = ath10k_htc_connect_service(ar->htc, &conn_req, &conn_resp);
+ status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
if (status) {
ath10k_warn("failed to connect to WMI CONTROL service status: %d\n",
status);
@@ -1748,6 +1836,9 @@ int ath10k_wmi_vdev_install_key(struct ath10k *ar,
if (arg->key_data)
memcpy(cmd->key_data, arg->key_data, arg->key_len);
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi vdev install key idx %d cipher %d len %d\n",
+ arg->key_idx, arg->key_cipher, arg->key_len);
return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_INSTALL_KEY_CMDID);
}
@@ -2011,6 +2102,9 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
cmd->peer_vht_rates.tx_mcs_set =
__cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi peer assoc vdev %d addr %pM\n",
+ arg->vdev_id, arg->addr);
return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID);
}
@@ -2079,3 +2173,22 @@ int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
return ath10k_wmi_cmd_send(ar, skb, WMI_REQUEST_STATS_CMDID);
}
+
+int ath10k_wmi_force_fw_hang(struct ath10k *ar,
+ enum wmi_force_fw_hang_type type, u32 delay_ms)
+{
+ struct wmi_force_fw_hang_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
+ cmd->type = __cpu_to_le32(type);
+ cmd->delay_ms = __cpu_to_le32(delay_ms);
+
+ ath10k_dbg(ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
+ type, delay_ms);
+ return ath10k_wmi_cmd_send(ar, skb, WMI_FORCE_FW_HANG_CMDID);
+}
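
A plausible use of the new command, sketched below: forcing an immediate firmware assert to exercise the restart_work/restart_complete recovery path added elsewhere in this patch. The helper and the WMI_FORCE_FW_HANG_ASSERT type are from the patch; wrapping them in a function like this (e.g. behind a debugfs hook) is an assumed context, not part of the diff:

/* delay_ms of 0: ask firmware to assert right away */
static int simulate_fw_crash(struct ath10k *ar)
{
	return ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
}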
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 9555f5a0e041..2c5a4f8daf2e 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -416,6 +416,7 @@ enum wmi_cmd_id {
WMI_PDEV_FTM_INTG_CMDID,
WMI_VDEV_SET_KEEPALIVE_CMDID,
WMI_VDEV_GET_KEEPALIVE_CMDID,
+ WMI_FORCE_FW_HANG_CMDID,
/* GPIO Configuration */
WMI_GPIO_CONFIG_CMDID = WMI_CMD_GRP(WMI_GRP_GPIO),
@@ -2930,6 +2931,11 @@ struct wmi_chan_info_event {
__le32 cycle_count;
} __packed;
+#define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0)
+
+/* FIXME: empirically extrapolated */
+#define WMI_CHAN_INFO_MSEC(x) ((x) / 76595)
+
/* Beacon filter wmi command info */
#define BCN_FLT_MAX_SUPPORTED_IES 256
#define BCN_FLT_MAX_ELEMS_IE_LIST (BCN_FLT_MAX_SUPPORTED_IES / 32)
@@ -2972,6 +2978,22 @@ struct wmi_sta_keepalive_cmd {
struct wmi_sta_keepalive_arp_resp arp_resp;
} __packed;
+enum wmi_force_fw_hang_type {
+ WMI_FORCE_FW_HANG_ASSERT = 1,
+ WMI_FORCE_FW_HANG_NO_DETECT,
+ WMI_FORCE_FW_HANG_CTRL_EP_FULL,
+ WMI_FORCE_FW_HANG_EMPTY_POINT,
+ WMI_FORCE_FW_HANG_STACK_OVERFLOW,
+ WMI_FORCE_FW_HANG_INFINITE_LOOP,
+};
+
+#define WMI_FORCE_FW_HANG_RANDOM_TIME 0xFFFFFFFF
+
+struct wmi_force_fw_hang_cmd {
+ __le32 type;
+ __le32 delay_ms;
+} __packed;
+
#define ATH10K_RTS_MAX 2347
#define ATH10K_FRAGMT_THRESHOLD_MIN 540
#define ATH10K_FRAGMT_THRESHOLD_MAX 2346
@@ -3048,5 +3070,7 @@ int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg);
int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
const struct wmi_pdev_set_wmm_params_arg *arg);
int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
+int ath10k_wmi_force_fw_hang(struct ath10k *ar,
+ enum wmi_force_fw_hang_type type, u32 delay_ms);
#endif /* _WMI_H_ */
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 2d691b8b95b9..74bd54d6aceb 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -29,6 +29,7 @@
#include <linux/average.h>
#include <linux/leds.h>
#include <net/mac80211.h>
+#include <net/cfg80211.h>
/* RX/TX descriptor hw structs
* TODO: Driver part should only see sw structs */
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index ce67ab791eae..48161edec8de 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -56,6 +56,7 @@
#include <linux/etherdevice.h>
#include <linux/nl80211.h>
+#include <net/cfg80211.h>
#include <net/ieee80211_radiotap.h>
#include <asm/unaligned.h>
@@ -165,28 +166,36 @@ static const struct ieee80211_rate ath5k_rates[] = {
.flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 60,
.hw_value = ATH5K_RATE_CODE_6M,
- .flags = 0 },
+ .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ },
{ .bitrate = 90,
.hw_value = ATH5K_RATE_CODE_9M,
- .flags = 0 },
+ .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ },
{ .bitrate = 120,
.hw_value = ATH5K_RATE_CODE_12M,
- .flags = 0 },
+ .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ },
{ .bitrate = 180,
.hw_value = ATH5K_RATE_CODE_18M,
- .flags = 0 },
+ .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ },
{ .bitrate = 240,
.hw_value = ATH5K_RATE_CODE_24M,
- .flags = 0 },
+ .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ },
{ .bitrate = 360,
.hw_value = ATH5K_RATE_CODE_36M,
- .flags = 0 },
+ .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ },
{ .bitrate = 480,
.hw_value = ATH5K_RATE_CODE_48M,
- .flags = 0 },
+ .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ },
{ .bitrate = 540,
.hw_value = ATH5K_RATE_CODE_54M,
- .flags = 0 },
+ .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ },
};
static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
@@ -435,11 +444,27 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
* Called with ah->lock.
*/
int
-ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan)
+ath5k_chan_set(struct ath5k_hw *ah, struct cfg80211_chan_def *chandef)
{
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"channel set, resetting (%u -> %u MHz)\n",
- ah->curchan->center_freq, chan->center_freq);
+ ah->curchan->center_freq, chandef->chan->center_freq);
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20:
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ ah->ah_bwmode = AR5K_BWMODE_DEFAULT;
+ break;
+ case NL80211_CHAN_WIDTH_5:
+ ah->ah_bwmode = AR5K_BWMODE_5MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_10:
+ ah->ah_bwmode = AR5K_BWMODE_10MHZ;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
/*
* To switch channels clear any pending DMA operations;
@@ -447,7 +472,7 @@ ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan)
* hardware at the new frequency, and then re-enable
* the relevant bits of the h/w.
*/
- return ath5k_reset(ah, chan, true);
+ return ath5k_reset(ah, chandef->chan, true);
}
void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
@@ -1400,6 +1425,16 @@ ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
rxs->rate_idx = ath5k_hw_to_driver_rix(ah, rs->rs_rate);
rxs->flag |= ath5k_rx_decrypted(ah, skb, rs);
+ switch (ah->ah_bwmode) {
+ case AR5K_BWMODE_5MHZ:
+ rxs->flag |= RX_FLAG_5MHZ;
+ break;
+ case AR5K_BWMODE_10MHZ:
+ rxs->flag |= RX_FLAG_10MHZ;
+ break;
+ default:
+ break;
+ }
if (rxs->rate_idx >= 0 && rs->rs_rate ==
ah->sbands[ah->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
@@ -2507,6 +2542,8 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
/* SW support for IBSS_RSN is provided by mac80211 */
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_5_10_MHZ;
+
/* both antennas can be configured as RX or TX */
hw->wiphy->available_antennas_tx = 0x3;
hw->wiphy->available_antennas_rx = 0x3;
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index ca9a83ceeee1..97469d0fbad7 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -101,7 +101,7 @@ void ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable);
void ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
struct ieee80211_vif *vif);
-int ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan);
+int ath5k_chan_set(struct ath5k_hw *ah, struct cfg80211_chan_def *chandef);
void ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
void ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 9d00dab666a8..b8d031ae63c2 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -245,9 +245,11 @@ static ssize_t write_file_beacon(struct file *file,
struct ath5k_hw *ah = file->private_data;
char buf[20];
- if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
+ count = min_t(size_t, count, sizeof(buf) - 1);
+ if (copy_from_user(buf, userbuf, count))
return -EFAULT;
+ buf[count] = '\0';
if (strncmp(buf, "disable", 7) == 0) {
AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE);
pr_info("debugfs disable beacons\n");
@@ -345,9 +347,11 @@ static ssize_t write_file_debug(struct file *file,
unsigned int i;
char buf[20];
- if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
+ count = min_t(size_t, count, sizeof(buf) - 1);
+ if (copy_from_user(buf, userbuf, count))
return -EFAULT;
+ buf[count] = '\0';
for (i = 0; i < ARRAY_SIZE(dbg_info); i++) {
if (strncmp(buf, dbg_info[i].name,
strlen(dbg_info[i].name)) == 0) {
@@ -448,9 +452,11 @@ static ssize_t write_file_antenna(struct file *file,
unsigned int i;
char buf[20];
- if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
+ count = min_t(size_t, count, sizeof(buf) - 1);
+ if (copy_from_user(buf, userbuf, count))
return -EFAULT;
+ buf[count] = '\0';
if (strncmp(buf, "diversity", 9) == 0) {
ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT);
pr_info("debug: enable diversity\n");
@@ -619,9 +625,11 @@ static ssize_t write_file_frameerrors(struct file *file,
struct ath5k_statistics *st = &ah->stats;
char buf[20];
- if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
+ count = min_t(size_t, count, sizeof(buf) - 1);
+ if (copy_from_user(buf, userbuf, count))
return -EFAULT;
+ buf[count] = '\0';
if (strncmp(buf, "clear", 5) == 0) {
st->rxerr_crc = 0;
st->rxerr_phy = 0;
@@ -766,9 +774,11 @@ static ssize_t write_file_ani(struct file *file,
struct ath5k_hw *ah = file->private_data;
char buf[20];
- if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
+ count = min_t(size_t, count, sizeof(buf) - 1);
+ if (copy_from_user(buf, userbuf, count))
return -EFAULT;
+ buf[count] = '\0';
if (strncmp(buf, "sens-low", 8) == 0) {
ath5k_ani_init(ah, ATH5K_ANI_MODE_MANUAL_HIGH);
} else if (strncmp(buf, "sens-high", 9) == 0) {
@@ -862,9 +872,11 @@ static ssize_t write_file_queue(struct file *file,
struct ath5k_hw *ah = file->private_data;
char buf[20];
- if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
+ count = min_t(size_t, count, sizeof(buf) - 1);
+ if (copy_from_user(buf, userbuf, count))
return -EFAULT;
+ buf[count] = '\0';
if (strncmp(buf, "start", 5) == 0)
ieee80211_wake_queues(ah->hw);
else if (strncmp(buf, "stop", 4) == 0)
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 40825d43322e..4ee01f654235 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -202,7 +202,7 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&ah->lock);
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ret = ath5k_chan_set(ah, conf->chandef.chan);
+ ret = ath5k_chan_set(ah, &conf->chandef);
if (ret < 0)
goto unlock;
}
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index 1f16b4227d8f..c60d36aa13e2 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -144,11 +144,13 @@ ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band,
sifs = AR5K_INIT_SIFS_HALF_RATE;
preamble *= 2;
sym_time *= 2;
+ bitrate = DIV_ROUND_UP(bitrate, 2);
break;
case AR5K_BWMODE_5MHZ:
sifs = AR5K_INIT_SIFS_QUARTER_RATE;
preamble *= 4;
sym_time *= 4;
+ bitrate = DIV_ROUND_UP(bitrate, 4);
break;
default:
sifs = AR5K_INIT_SIFS_DEFAULT_BG;
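
The two added lines scale the rate down in step with the slower clock: rates in these tables are in 100 kb/s units, so nominal 54 Mb/s (540) becomes 270 at half rate (10 MHz) and 135 at quarter rate (5 MHz), while preamble and symbol time stretch by the same factor. A standalone sketch of the arithmetic; the 16 us preamble and 4 us symbol time are illustrative OFDM figures, not values from this hunk:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int bitrate = 540;			/* 54 Mb/s in 100 kb/s units */
	int preamble = 16, sym_time = 4;	/* microseconds */

	printf("10 MHz: rate %d, preamble %d us, symbol %d us\n",
	       DIV_ROUND_UP(bitrate, 2), preamble * 2, sym_time * 2);
	printf(" 5 MHz: rate %d, preamble %d us, symbol %d us\n",
	       DIV_ROUND_UP(bitrate, 4), preamble * 4, sym_time * 4);
	return 0;
}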
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 65fe929529a8..0583c69d26db 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -566,9 +566,11 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
enum ieee80211_band band;
+ struct ieee80211_supported_band *sband;
struct ieee80211_rate *rate;
u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
+ u32 rate_flags, i;
if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
return -EINVAL;
@@ -605,7 +607,28 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
else
band = IEEE80211_BAND_2GHZ;
- rate = &ah->sbands[band].bitrates[0];
+ switch (ah->ah_bwmode) {
+ case AR5K_BWMODE_5MHZ:
+ rate_flags = IEEE80211_RATE_SUPPORTS_5MHZ;
+ break;
+ case AR5K_BWMODE_10MHZ:
+ rate_flags = IEEE80211_RATE_SUPPORTS_10MHZ;
+ break;
+ default:
+ rate_flags = 0;
+ break;
+ }
+ sband = &ah->sbands[band];
+ rate = NULL;
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ continue;
+ rate = &sband->bitrates[i];
+ break;
+ }
+ if (WARN_ON(!rate))
+ return -EINVAL;
+
ack_tx_time = ath5k_hw_get_frame_duration(ah, band, 10, rate, false);
/* ack_tx_time includes an SIFS already */
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 6a67881f94d6..4f316bdcbab5 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -1836,6 +1836,9 @@ void ath6kl_stop_txrx(struct ath6kl *ar)
clear_bit(WMI_READY, &ar->flag);
+ if (ar->fw_recovery.enable)
+ del_timer_sync(&ar->fw_recovery.hb_timer);
+
/*
* After wmi_shudown all WMI events will be dropped. We
* need to cleanup the buffers allocated in AP mode and
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index d4fcfcad57d0..5839fc23bdc7 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -29,6 +29,9 @@ struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr)
struct ath6kl_sta *conn = NULL;
u8 i, max_conn;
+ if (is_zero_ether_addr(node_addr))
+ return NULL;
+
max_conn = (vif->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0;
for (i = 0; i < max_conn; i++) {
diff --git a/drivers/net/wireless/ath/ath6kl/testmode.c b/drivers/net/wireless/ath/ath6kl/testmode.c
index acc9aa832f76..d67170ea1038 100644
--- a/drivers/net/wireless/ath/ath6kl/testmode.c
+++ b/drivers/net/wireless/ath/ath6kl/testmode.c
@@ -66,7 +66,8 @@ nla_put_failure:
ath6kl_warn("nla_put failed on testmode rx skb!\n");
}
-int ath6kl_tm_cmd(struct wiphy *wiphy, void *data, int len)
+int ath6kl_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
+ void *data, int len)
{
struct ath6kl *ar = wiphy_priv(wiphy);
struct nlattr *tb[ATH6KL_TM_ATTR_MAX + 1];
diff --git a/drivers/net/wireless/ath/ath6kl/testmode.h b/drivers/net/wireless/ath/ath6kl/testmode.h
index fe651d6707df..9fbcdec3e208 100644
--- a/drivers/net/wireless/ath/ath6kl/testmode.h
+++ b/drivers/net/wireless/ath/ath6kl/testmode.h
@@ -20,7 +20,8 @@
#ifdef CONFIG_NL80211_TESTMODE
void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf, size_t buf_len);
-int ath6kl_tm_cmd(struct wiphy *wiphy, void *data, int len);
+int ath6kl_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
+ void *data, int len);
#else
@@ -29,7 +30,9 @@ static inline void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf,
{
}
-static inline int ath6kl_tm_cmd(struct wiphy *wiphy, void *data, int len)
+static inline int ath6kl_tm_cmd(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ void *data, int len)
{
return 0;
}
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 87aefb4c4c23..546d5da0b894 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -568,8 +568,8 @@ static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len,
dlen, freq, vif->probe_req_report);
if (vif->probe_req_report || vif->nw_type == AP_NETWORK)
- cfg80211_rx_mgmt(&vif->wdev, freq, 0,
- ev->data, dlen, GFP_ATOMIC);
+ cfg80211_rx_mgmt(&vif->wdev, freq, 0, ev->data, dlen, 0,
+ GFP_ATOMIC);
return 0;
}
@@ -608,8 +608,7 @@ static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len,
return -EINVAL;
}
ath6kl_dbg(ATH6KL_DBG_WMI, "rx_action: len=%u freq=%u\n", dlen, freq);
- cfg80211_rx_mgmt(&vif->wdev, freq, 0,
- ev->data, dlen, GFP_ATOMIC);
+ cfg80211_rx_mgmt(&vif->wdev, freq, 0, ev->data, dlen, 0, GFP_ATOMIC);
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index d491a3178986..7944c25c9a43 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -56,7 +56,7 @@ config ATH9K_AHB
config ATH9K_DEBUGFS
bool "Atheros ath9k debugging"
- depends on ATH9K
+ depends on ATH9K && DEBUG_FS
select MAC80211_DEBUGFS
select RELAY
---help---
@@ -96,6 +96,16 @@ config ATH9K_LEGACY_RATE_CONTROL
has to be passed to mac80211 using the module parameter,
ieee80211_default_rc_algo.
+config ATH9K_RFKILL
+ bool "Atheros ath9k rfkill support" if EXPERT
+ depends on ATH9K
+ depends on RFKILL=y || RFKILL=ATH9K
+ default y
+ help
+ Say Y to have ath9k poll the RF-Kill GPIO every couple of
+ seconds. Turn off to save power, but enable it if you have
+ a platform that can toggle the RF-Kill GPIO.
+
config ATH9K_HTC
tristate "Atheros HTC based wireless cards support"
depends on USB && MAC80211
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 4994bea809eb..be466b0ef7a7 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -319,9 +319,6 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
ah->ani_function = 0;
}
- /* always allow mode (on/off) to be controlled */
- ah->ani_function |= ATH9K_ANI_MODE;
-
ofdm_nil = max_t(int, ATH9K_ANI_OFDM_DEF_LEVEL,
aniState->ofdmNoiseImmunityLevel);
cck_nil = max_t(int, ATH9K_ANI_CCK_DEF_LEVEL,
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index b54a3fb01883..21e7b83c3f6a 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -48,15 +48,10 @@
/* values here are relative to the INI */
enum ath9k_ani_cmd {
- ATH9K_ANI_PRESENT = 0x1,
- ATH9K_ANI_NOISE_IMMUNITY_LEVEL = 0x2,
- ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION = 0x4,
- ATH9K_ANI_CCK_WEAK_SIGNAL_THR = 0x8,
- ATH9K_ANI_FIRSTEP_LEVEL = 0x10,
- ATH9K_ANI_SPUR_IMMUNITY_LEVEL = 0x20,
- ATH9K_ANI_MODE = 0x40,
- ATH9K_ANI_PHYERR_RESET = 0x80,
- ATH9K_ANI_MRC_CCK = 0x100,
+ ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION = 0x1,
+ ATH9K_ANI_FIRSTEP_LEVEL = 0x2,
+ ATH9K_ANI_SPUR_IMMUNITY_LEVEL = 0x4,
+ ATH9K_ANI_MRC_CCK = 0x8,
ATH9K_ANI_ALL = 0xfff
};
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
index 664844c5d3d5..dd1cc73d7946 100644
--- a/drivers/net/wireless/ath/ath9k/antenna.c
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -16,37 +16,119 @@
#include "ath9k.h"
-static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
+/*
+ * AR9285
+ * ======
+ *
+ * EEPROM has 2 4-bit fields containing the card configuration.
+ *
+ * antdiv_ctl1:
+ * ------------
+ * bb_enable_ant_div_lnadiv : 1
+ * bb_ant_div_alt_gaintb : 1
+ * bb_ant_div_main_gaintb : 1
+ * bb_enable_ant_fast_div : 1
+ *
+ * antdiv_ctl2:
+ * -----------
+ * bb_ant_div_alt_lnaconf : 2
+ * bb_ant_div_main_lnaconf : 2
+ *
+ * The EEPROM bits are used as follows:
+ * ------------------------------------
+ *
+ * bb_enable_ant_div_lnadiv - Enable LNA path rx antenna diversity/combining.
+ * Set in AR_PHY_MULTICHAIN_GAIN_CTL.
+ *
+ * bb_ant_div_[alt/main]_gaintb - 0 -> Antenna config Alt/Main uses gaintable 0
+ * 1 -> Antenna config Alt/Main uses gaintable 1
+ * Set in AR_PHY_MULTICHAIN_GAIN_CTL.
+ *
+ * bb_enable_ant_fast_div - Enable fast antenna diversity.
+ * Set in AR_PHY_CCK_DETECT.
+ *
+ * bb_ant_div_[alt/main]_lnaconf - Alt/Main LNA diversity/combining input config.
+ * Set in AR_PHY_MULTICHAIN_GAIN_CTL.
+ * 10=LNA1
+ * 01=LNA2
+ * 11=LNA1+LNA2
+ * 00=LNA1-LNA2
+ *
+ * AR9485 / AR9565 / AR9331
+ * ========================
+ *
+ * The same bits are present in the EEPROM, but the location in the
+ * EEPROM is different (ant_div_control in ar9300_BaseExtension_1).
+ *
+ * ant_div_alt_lnaconf ==> bit 0~1
+ * ant_div_main_lnaconf ==> bit 2~3
+ * ant_div_alt_gaintb ==> bit 4
+ * ant_div_main_gaintb ==> bit 5
+ * enable_ant_div_lnadiv ==> bit 6
+ * enable_ant_fast_div ==> bit 7
+ */
+
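
The AR9485/AR9565/AR9331 layout at the end of the comment packs all six fields into one byte; a standalone sketch decoding it exactly as listed (the struct and helper are illustrative, not driver API):

#include <stdio.h>
#include <stdint.h>

struct ant_div_cfg {
	uint8_t alt_lnaconf;		/* bits 0-1 */
	uint8_t main_lnaconf;		/* bits 2-3 */
	uint8_t alt_gaintb;		/* bit 4 */
	uint8_t main_gaintb;		/* bit 5 */
	uint8_t lnadiv_enable;		/* bit 6 */
	uint8_t fast_div_enable;	/* bit 7 */
};

static struct ant_div_cfg decode_ant_div_control(uint8_t v)
{
	struct ant_div_cfg c = {
		.alt_lnaconf	 = v & 0x3,
		.main_lnaconf	 = (v >> 2) & 0x3,
		.alt_gaintb	 = (v >> 4) & 0x1,
		.main_gaintb	 = (v >> 5) & 0x1,
		.lnadiv_enable	 = (v >> 6) & 0x1,
		.fast_div_enable = (v >> 7) & 0x1,
	};
	return c;
}

int main(void)
{
	/* 0xc6: fast div + lnadiv enabled, main LNA2 (01), alt LNA1 (10) */
	struct ant_div_cfg c = decode_ant_div_control(0xc6);

	printf("main %u alt %u lnadiv %u fastdiv %u\n",
	       c.main_lnaconf, c.alt_lnaconf,
	       c.lnadiv_enable, c.fast_div_enable);
	return 0;
}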
+static inline bool ath_is_alt_ant_ratio_better(struct ath_ant_comb *antcomb,
+ int alt_ratio, int maxdelta,
int mindelta, int main_rssi_avg,
int alt_rssi_avg, int pkt_count)
{
- return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
- (alt_rssi_avg > main_rssi_avg + maxdelta)) ||
- (alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50);
+ if (pkt_count <= 50)
+ return false;
+
+ if (alt_rssi_avg > main_rssi_avg + mindelta)
+ return true;
+
+ if (alt_ratio >= antcomb->ant_ratio2 &&
+ alt_rssi_avg >= antcomb->low_rssi_thresh &&
+ (alt_rssi_avg > main_rssi_avg + maxdelta))
+ return true;
+
+ return false;
}
-static inline bool ath_ant_div_comb_alt_check(u8 div_group, int alt_ratio,
- int curr_main_set, int curr_alt_set,
- int alt_rssi_avg, int main_rssi_avg)
+static inline bool ath_ant_div_comb_alt_check(struct ath_hw_antcomb_conf *conf,
+ struct ath_ant_comb *antcomb,
+ int alt_ratio, int alt_rssi_avg,
+ int main_rssi_avg)
{
- bool result = false;
- switch (div_group) {
+ bool result, set1, set2;
+
+ result = set1 = set2 = false;
+
+ if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2 &&
+ conf->alt_lna_conf == ATH_ANT_DIV_COMB_LNA1)
+ set1 = true;
+
+ if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA1 &&
+ conf->alt_lna_conf == ATH_ANT_DIV_COMB_LNA2)
+ set2 = true;
+
+ switch (conf->div_group) {
case 0:
if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
result = true;
break;
case 1:
case 2:
- if ((((curr_main_set == ATH_ANT_DIV_COMB_LNA2) &&
- (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) &&
- (alt_rssi_avg >= (main_rssi_avg - 5))) ||
- ((curr_main_set == ATH_ANT_DIV_COMB_LNA1) &&
- (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) &&
- (alt_rssi_avg >= (main_rssi_avg - 2)))) &&
- (alt_rssi_avg >= 4))
+ if (alt_rssi_avg < 4 || alt_rssi_avg < antcomb->low_rssi_thresh)
+ break;
+
+ if ((set1 && (alt_rssi_avg >= (main_rssi_avg - 5))) ||
+ (set2 && (alt_rssi_avg >= (main_rssi_avg - 2))) ||
+ (alt_ratio > antcomb->ant_ratio))
result = true;
- else
- result = false;
+
+ break;
+ case 3:
+ if (alt_rssi_avg < 4 || alt_rssi_avg < antcomb->low_rssi_thresh)
+ break;
+
+ if ((set1 && (alt_rssi_avg >= (main_rssi_avg - 3))) ||
+ (set2 && (alt_rssi_avg >= (main_rssi_avg + 3))) ||
+ (alt_ratio > antcomb->ant_ratio))
+ result = true;
+
break;
}
@@ -108,6 +190,74 @@ static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
}
}
+static void ath_ant_set_alt_ratio(struct ath_ant_comb *antcomb,
+ struct ath_hw_antcomb_conf *conf)
+{
+	/* set alt to the conf with maximum ratio */
+ if (antcomb->first_ratio && antcomb->second_ratio) {
+ if (antcomb->rssi_second > antcomb->rssi_third) {
+ /* first alt*/
+ if ((antcomb->first_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) ||
+ (antcomb->first_quick_scan_conf == ATH_ANT_DIV_COMB_LNA2))
+ /* Set alt LNA1 or LNA2*/
+ if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ else
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ else
+ /* Set alt to A+B or A-B */
+ conf->alt_lna_conf =
+ antcomb->first_quick_scan_conf;
+ } else if ((antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) ||
+ (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA2)) {
+ /* Set alt LNA1 or LNA2 */
+ if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ else
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ } else {
+ /* Set alt to A+B or A-B */
+ conf->alt_lna_conf = antcomb->second_quick_scan_conf;
+ }
+ } else if (antcomb->first_ratio) {
+ /* first alt */
+ if ((antcomb->first_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) ||
+ (antcomb->first_quick_scan_conf == ATH_ANT_DIV_COMB_LNA2))
+ /* Set alt LNA1 or LNA2 */
+ if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ else
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ else
+ /* Set alt to A+B or A-B */
+ conf->alt_lna_conf = antcomb->first_quick_scan_conf;
+ } else if (antcomb->second_ratio) {
+ /* second alt */
+ if ((antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) ||
+ (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA2))
+ /* Set alt LNA1 or LNA2 */
+ if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ else
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ else
+ /* Set alt to A+B or A-B */
+ conf->alt_lna_conf = antcomb->second_quick_scan_conf;
+ } else {
+ /* main is largest */
+ if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
+ (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
+ /* Set alt LNA1 or LNA2 */
+ if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ else
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ else
+ /* Set alt to A+B or A-B */
+ conf->alt_lna_conf = antcomb->main_conf;
+ }
+}
+
static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
struct ath_hw_antcomb_conf *div_ant_conf,
int main_rssi_avg, int alt_rssi_avg,
@@ -129,7 +279,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
/* main is LNA1 */
- if (ath_is_alt_ant_ratio_better(alt_ratio,
+ if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
main_rssi_avg, alt_rssi_avg,
@@ -138,7 +288,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
else
antcomb->first_ratio = false;
} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
- if (ath_is_alt_ant_ratio_better(alt_ratio,
+ if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
main_rssi_avg, alt_rssi_avg,
@@ -147,11 +297,11 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
else
antcomb->first_ratio = false;
} else {
- if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
- (alt_rssi_avg > main_rssi_avg +
- ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
- (alt_rssi_avg > main_rssi_avg)) &&
- (antcomb->total_pkt_count > 50))
+ if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
+ ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
+ 0,
+ main_rssi_avg, alt_rssi_avg,
+ antcomb->total_pkt_count))
antcomb->first_ratio = true;
else
antcomb->first_ratio = false;
@@ -164,17 +314,21 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
antcomb->rssi_first = main_rssi_avg;
antcomb->rssi_third = alt_rssi_avg;
- if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
+ switch (antcomb->second_quick_scan_conf) {
+ case ATH_ANT_DIV_COMB_LNA1:
antcomb->rssi_lna1 = alt_rssi_avg;
- else if (antcomb->second_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA2)
+ break;
+ case ATH_ANT_DIV_COMB_LNA2:
antcomb->rssi_lna2 = alt_rssi_avg;
- else if (antcomb->second_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
+ break;
+ case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
antcomb->rssi_lna2 = main_rssi_avg;
else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
antcomb->rssi_lna1 = main_rssi_avg;
+ break;
+ default:
+ break;
}
if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
@@ -184,7 +338,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
- if (ath_is_alt_ant_ratio_better(alt_ratio,
+ if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
main_rssi_avg, alt_rssi_avg,
@@ -193,7 +347,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
else
antcomb->second_ratio = false;
} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
- if (ath_is_alt_ant_ratio_better(alt_ratio,
+ if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
main_rssi_avg, alt_rssi_avg,
@@ -202,105 +356,18 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
else
antcomb->second_ratio = false;
} else {
- if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
- (alt_rssi_avg > main_rssi_avg +
- ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
- (alt_rssi_avg > main_rssi_avg)) &&
- (antcomb->total_pkt_count > 50))
+ if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
+ ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
+ 0,
+ main_rssi_avg, alt_rssi_avg,
+ antcomb->total_pkt_count))
antcomb->second_ratio = true;
else
antcomb->second_ratio = false;
}
- /* set alt to the conf with maximun ratio */
- if (antcomb->first_ratio && antcomb->second_ratio) {
- if (antcomb->rssi_second > antcomb->rssi_third) {
- /* first alt*/
- if ((antcomb->first_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA1) ||
- (antcomb->first_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA2))
- /* Set alt LNA1 or LNA2*/
- if (div_ant_conf->main_lna_conf ==
- ATH_ANT_DIV_COMB_LNA2)
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- else
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- else
- /* Set alt to A+B or A-B */
- div_ant_conf->alt_lna_conf =
- antcomb->first_quick_scan_conf;
- } else if ((antcomb->second_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA1) ||
- (antcomb->second_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA2)) {
- /* Set alt LNA1 or LNA2 */
- if (div_ant_conf->main_lna_conf ==
- ATH_ANT_DIV_COMB_LNA2)
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- else
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- } else {
- /* Set alt to A+B or A-B */
- div_ant_conf->alt_lna_conf =
- antcomb->second_quick_scan_conf;
- }
- } else if (antcomb->first_ratio) {
- /* first alt */
- if ((antcomb->first_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA1) ||
- (antcomb->first_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA2))
- /* Set alt LNA1 or LNA2 */
- if (div_ant_conf->main_lna_conf ==
- ATH_ANT_DIV_COMB_LNA2)
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- else
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- else
- /* Set alt to A+B or A-B */
- div_ant_conf->alt_lna_conf =
- antcomb->first_quick_scan_conf;
- } else if (antcomb->second_ratio) {
- /* second alt */
- if ((antcomb->second_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA1) ||
- (antcomb->second_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA2))
- /* Set alt LNA1 or LNA2 */
- if (div_ant_conf->main_lna_conf ==
- ATH_ANT_DIV_COMB_LNA2)
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- else
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- else
- /* Set alt to A+B or A-B */
- div_ant_conf->alt_lna_conf =
- antcomb->second_quick_scan_conf;
- } else {
- /* main is largest */
- if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
- (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
- /* Set alt LNA1 or LNA2 */
- if (div_ant_conf->main_lna_conf ==
- ATH_ANT_DIV_COMB_LNA2)
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- else
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- else
- /* Set alt to A+B or A-B */
- div_ant_conf->alt_lna_conf = antcomb->main_conf;
- }
+ ath_ant_set_alt_ratio(antcomb, div_ant_conf);
+
break;
default:
break;
@@ -430,8 +497,7 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
ant_conf->fast_div_bias = 0x1;
break;
case 0x10: /* LNA2 A-B */
- if (!(antcomb->scan) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ if (!antcomb->scan && (alt_ratio > antcomb->ant_ratio))
ant_conf->fast_div_bias = 0x1;
else
ant_conf->fast_div_bias = 0x2;
@@ -440,15 +506,13 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
ant_conf->fast_div_bias = 0x1;
break;
case 0x13: /* LNA2 A+B */
- if (!(antcomb->scan) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ if (!antcomb->scan && (alt_ratio > antcomb->ant_ratio))
ant_conf->fast_div_bias = 0x1;
else
ant_conf->fast_div_bias = 0x2;
break;
case 0x20: /* LNA1 A-B */
- if (!(antcomb->scan) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ if (!antcomb->scan && (alt_ratio > antcomb->ant_ratio))
ant_conf->fast_div_bias = 0x1;
else
ant_conf->fast_div_bias = 0x2;
@@ -457,8 +521,7 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
ant_conf->fast_div_bias = 0x1;
break;
case 0x23: /* LNA1 A+B */
- if (!(antcomb->scan) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ if (!antcomb->scan && (alt_ratio > antcomb->ant_ratio))
ant_conf->fast_div_bias = 0x1;
else
ant_conf->fast_div_bias = 0x2;
@@ -475,6 +538,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
default:
break;
}
+
+ if (antcomb->fast_div_bias)
+ ant_conf->fast_div_bias = antcomb->fast_div_bias;
} else if (ant_conf->div_group == 3) {
switch ((ant_conf->main_lna_conf << 4) |
ant_conf->alt_lna_conf) {
@@ -540,6 +606,138 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
}
}
+static void ath_ant_try_scan(struct ath_ant_comb *antcomb,
+ struct ath_hw_antcomb_conf *conf,
+ int curr_alt_set, int alt_rssi_avg,
+ int main_rssi_avg)
+{
+ switch (curr_alt_set) {
+ case ATH_ANT_DIV_COMB_LNA2:
+ antcomb->rssi_lna2 = alt_rssi_avg;
+ antcomb->rssi_lna1 = main_rssi_avg;
+ antcomb->scan = true;
+ /* set to A+B */
+ conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ break;
+ case ATH_ANT_DIV_COMB_LNA1:
+ antcomb->rssi_lna1 = alt_rssi_avg;
+ antcomb->rssi_lna2 = main_rssi_avg;
+ antcomb->scan = true;
+ /* set to A+B */
+ conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ break;
+ case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
+ antcomb->rssi_add = alt_rssi_avg;
+ antcomb->scan = true;
+ /* set to A-B */
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ break;
+ case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
+ antcomb->rssi_sub = alt_rssi_avg;
+ antcomb->scan = false;
+ if (antcomb->rssi_lna2 >
+ (antcomb->rssi_lna1 + ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
+ /* use LNA2 as main LNA */
+ if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
+ (antcomb->rssi_add > antcomb->rssi_sub)) {
+ /* set to A+B */
+ conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ } else if (antcomb->rssi_sub >
+ antcomb->rssi_lna1) {
+ /* set to A-B */
+ conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ } else {
+ /* set to LNA1 */
+ conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ }
+ } else {
+ /* use LNA1 as main LNA */
+ if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
+ (antcomb->rssi_add > antcomb->rssi_sub)) {
+ /* set to A+B */
+ conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ } else if (antcomb->rssi_sub >
+ antcomb->rssi_lna1) {
+ /* set to A-B */
+ conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ } else {
+ /* set to LNA2 */
+ conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static bool ath_ant_try_switch(struct ath_hw_antcomb_conf *div_ant_conf,
+ struct ath_ant_comb *antcomb,
+ int alt_ratio, int alt_rssi_avg,
+ int main_rssi_avg, int curr_main_set,
+ int curr_alt_set)
+{
+ bool ret = false;
+
+ if (ath_ant_div_comb_alt_check(div_ant_conf, antcomb, alt_ratio,
+ alt_rssi_avg, main_rssi_avg)) {
+ if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
+ /*
+ * Switch main and alt LNA.
+ */
+ div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ div_ant_conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ } else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
+ div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ div_ant_conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ }
+
+ ret = true;
+ } else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
+ (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
+ /*
+ * Set alt to another LNA.
+ */
+ if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
+ div_ant_conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
+ div_ant_conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+
+ ret = true;
+ }
+
+ return ret;
+}
+
+static bool ath_ant_short_scan_check(struct ath_ant_comb *antcomb)
+{
+ int alt_ratio;
+
+ if (!antcomb->scan || !antcomb->alt_good)
+ return false;
+
+ if (time_after(jiffies, antcomb->scan_start_time +
+ msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
+ return true;
+
+ if (antcomb->total_pkt_count == ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
+ alt_ratio = ((antcomb->alt_recv_cnt * 100) /
+ antcomb->total_pkt_count);
+ if (alt_ratio < antcomb->ant_ratio)
+ return true;
+ }
+
+ return false;
+}
+
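
The ratio tested in ath_ant_short_scan_check() is a plain percentage of packets received on the alternative chain. A worked example under an assumed 32-packet sample window (the real ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT value is not shown in this diff):

    /* 6 of 32 packets arrived on the alternative chain:
     * alt_ratio = (6 * 100) / 32 = 18. With the default
     * ATH_ANT_DIV_COMB_ALT_ANT_RATIO of 30 this returns true
     * and the scan is cut short. */
    static bool short_scan_example(void)
    {
            int total_pkt_count = 32;       /* assumed window size */
            int alt_recv_cnt = 6;
            int alt_ratio = (alt_recv_cnt * 100) / total_pkt_count;

            return alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO;
    }
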
void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
{
struct ath_hw_antcomb_conf div_ant_conf;
@@ -549,41 +747,46 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
int main_rssi = rs->rs_rssi_ctl0;
int alt_rssi = rs->rs_rssi_ctl1;
int rx_ant_conf, main_ant_conf;
- bool short_scan = false;
+ bool short_scan = false, ret;
rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
ATH_ANT_RX_MASK;
main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
ATH_ANT_RX_MASK;
+ if (alt_rssi >= antcomb->low_rssi_thresh) {
+ antcomb->ant_ratio = ATH_ANT_DIV_COMB_ALT_ANT_RATIO;
+ antcomb->ant_ratio2 = ATH_ANT_DIV_COMB_ALT_ANT_RATIO2;
+ } else {
+ antcomb->ant_ratio = ATH_ANT_DIV_COMB_ALT_ANT_RATIO_LOW_RSSI;
+ antcomb->ant_ratio2 = ATH_ANT_DIV_COMB_ALT_ANT_RATIO2_LOW_RSSI;
+ }
+
/* Record packet only when both main_rssi and alt_rssi are positive */
if (main_rssi > 0 && alt_rssi > 0) {
antcomb->total_pkt_count++;
antcomb->main_total_rssi += main_rssi;
antcomb->alt_total_rssi += alt_rssi;
+
if (main_ant_conf == rx_ant_conf)
antcomb->main_recv_cnt++;
else
antcomb->alt_recv_cnt++;
}
- /* Short scan check */
- if (antcomb->scan && antcomb->alt_good) {
- if (time_after(jiffies, antcomb->scan_start_time +
- msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
- short_scan = true;
- else
- if (antcomb->total_pkt_count ==
- ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
- alt_ratio = ((antcomb->alt_recv_cnt * 100) /
- antcomb->total_pkt_count);
- if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
- short_scan = true;
- }
+ if (main_ant_conf == rx_ant_conf) {
+ ANT_STAT_INC(ANT_MAIN, recv_cnt);
+ ANT_LNA_INC(ANT_MAIN, rx_ant_conf);
+ } else {
+ ANT_STAT_INC(ANT_ALT, recv_cnt);
+ ANT_LNA_INC(ANT_ALT, rx_ant_conf);
}
+ /* Short scan check */
+ short_scan = ath_ant_short_scan_check(antcomb);
+
if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
- rs->rs_moreaggr) && !short_scan)
+ rs->rs_moreaggr) && !short_scan)
return;
if (antcomb->total_pkt_count) {
@@ -595,15 +798,13 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
antcomb->total_pkt_count);
}
-
ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
curr_alt_set = div_ant_conf.alt_lna_conf;
curr_main_set = div_ant_conf.main_lna_conf;
-
antcomb->count++;
if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
- if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
+ if (alt_ratio > antcomb->ant_ratio) {
ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
main_rssi_avg);
antcomb->alt_good = true;
@@ -617,153 +818,47 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
}
if (!antcomb->scan) {
- if (ath_ant_div_comb_alt_check(div_ant_conf.div_group,
- alt_ratio, curr_main_set, curr_alt_set,
- alt_rssi_avg, main_rssi_avg)) {
- if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
- /* Switch main and alt LNA */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- } else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- }
-
- goto div_comb_done;
- } else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
- (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
- /* Set alt to another LNA */
- if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
-
- goto div_comb_done;
- }
-
- if ((alt_rssi_avg < (main_rssi_avg +
- div_ant_conf.lna1_lna2_delta)))
+ ret = ath_ant_try_switch(&div_ant_conf, antcomb, alt_ratio,
+ alt_rssi_avg, main_rssi_avg,
+ curr_main_set, curr_alt_set);
+ if (ret)
goto div_comb_done;
}
+ if (!antcomb->scan &&
+ (alt_rssi_avg < (main_rssi_avg + div_ant_conf.lna1_lna2_delta)))
+ goto div_comb_done;
+
if (!antcomb->scan_not_start) {
- switch (curr_alt_set) {
- case ATH_ANT_DIV_COMB_LNA2:
- antcomb->rssi_lna2 = alt_rssi_avg;
- antcomb->rssi_lna1 = main_rssi_avg;
- antcomb->scan = true;
- /* set to A+B */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
- break;
- case ATH_ANT_DIV_COMB_LNA1:
- antcomb->rssi_lna1 = alt_rssi_avg;
- antcomb->rssi_lna2 = main_rssi_avg;
- antcomb->scan = true;
- /* set to A+B */
- div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
- break;
- case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
- antcomb->rssi_add = alt_rssi_avg;
- antcomb->scan = true;
- /* set to A-B */
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
- break;
- case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
- antcomb->rssi_sub = alt_rssi_avg;
- antcomb->scan = false;
- if (antcomb->rssi_lna2 >
- (antcomb->rssi_lna1 +
- ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
- /* use LNA2 as main LNA */
- if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
- (antcomb->rssi_add > antcomb->rssi_sub)) {
- /* set to A+B */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
- } else if (antcomb->rssi_sub >
- antcomb->rssi_lna1) {
- /* set to A-B */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
- } else {
- /* set to LNA1 */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- }
- } else {
- /* use LNA1 as main LNA */
- if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
- (antcomb->rssi_add > antcomb->rssi_sub)) {
- /* set to A+B */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
- } else if (antcomb->rssi_sub >
- antcomb->rssi_lna1) {
- /* set to A-B */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
- } else {
- /* set to LNA2 */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- }
- }
- break;
- default:
- break;
- }
+ ath_ant_try_scan(antcomb, &div_ant_conf, curr_alt_set,
+ alt_rssi_avg, main_rssi_avg);
} else {
if (!antcomb->alt_good) {
antcomb->scan_not_start = false;
/* Set alt to another LNA */
if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
+ ATH_ANT_DIV_COMB_LNA2;
div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
+ ATH_ANT_DIV_COMB_LNA1;
} else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
+ ATH_ANT_DIV_COMB_LNA1;
div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
+ ATH_ANT_DIV_COMB_LNA2;
}
goto div_comb_done;
}
+ ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
+ main_rssi_avg, alt_rssi_avg,
+ alt_ratio);
+ antcomb->quick_scan_cnt++;
}
- ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
- main_rssi_avg, alt_rssi_avg,
- alt_ratio);
-
- antcomb->quick_scan_cnt++;
-
div_comb_done:
ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio);
ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);
+ ath9k_debug_stat_ant(sc, &div_ant_conf, main_rssi_avg, alt_rssi_avg);
antcomb->scan_start_time = jiffies;
antcomb->total_pkt_count = 0;
@@ -772,26 +867,3 @@ div_comb_done:
antcomb->main_recv_cnt = 0;
antcomb->alt_recv_cnt = 0;
}
-
-void ath_ant_comb_update(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_hw_antcomb_conf div_ant_conf;
- u8 lna_conf;
-
- ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf);
-
- if (sc->ant_rx == 1)
- lna_conf = ATH_ANT_DIV_COMB_LNA1;
- else
- lna_conf = ATH_ANT_DIV_COMB_LNA2;
-
- div_ant_conf.main_lna_conf = lna_conf;
- div_ant_conf.alt_lna_conf = lna_conf;
-
- ath9k_hw_antdiv_comb_conf_set(ah, &div_ant_conf);
-
- if (common->antenna_diversity)
- ath9k_hw_antctrl_shared_chain_lnadiv(ah, true);
-}
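
ath_ant_comb_update() can go away because its work moves behind the per-family hook installed later in this diff (ops->set_bt_ant_diversity, attached for both AR9002 and AR9003). The debugfs write path below calls ath9k_hw_set_bt_ant_diversity(), which is referenced but not shown here; a minimal sketch of what such a wrapper presumably does:

    static inline void set_bt_ant_diversity_sketch(struct ath_hw *ah,
                                                   bool enable)
    {
            /* Dispatch to the chip-family implementation, if one was
             * installed by ar9002_hw_attach_phy_ops() or
             * ar9003_hw_attach_phy_ops(). */
            if (ath9k_hw_ops(ah)->set_bt_ant_diversity)
                    ath9k_hw_ops(ah)->set_bt_ant_diversity(ah, enable);
    }
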
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 1576d58291d4..08656473c63e 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -1160,8 +1160,6 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
*/
WARN_ON(1);
break;
- case ATH9K_ANI_PRESENT:
- break;
default:
ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
return false;
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index 8dc2d089cdef..fb61b081d172 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -269,13 +269,12 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
val |= AR_WA_D3_L1_DISABLE;
} else {
- if (((AR_SREV_9285(ah) ||
- AR_SREV_9271(ah) ||
- AR_SREV_9287(ah)) &&
- (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) ||
- (AR_SREV_9280(ah) &&
- (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) {
- val |= AR_WA_D3_L1_DISABLE;
+ if (AR_SREV_9285(ah) || AR_SREV_9271(ah) || AR_SREV_9287(ah)) {
+ if (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)
+ val |= AR_WA_D3_L1_DISABLE;
+ } else if (AR_SREV_9280(ah)) {
+ if (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE)
+ val |= AR_WA_D3_L1_DISABLE;
}
}
@@ -297,24 +296,18 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
} else {
if (ah->config.pcie_waen) {
val = ah->config.pcie_waen;
- if (!power_off)
- val &= (~AR_WA_D3_L1_DISABLE);
+ val &= (~AR_WA_D3_L1_DISABLE);
} else {
- if (AR_SREV_9285(ah) ||
- AR_SREV_9271(ah) ||
- AR_SREV_9287(ah)) {
+ if (AR_SREV_9285(ah) || AR_SREV_9271(ah) || AR_SREV_9287(ah)) {
val = AR9285_WA_DEFAULT;
- if (!power_off)
- val &= (~AR_WA_D3_L1_DISABLE);
- }
- else if (AR_SREV_9280(ah)) {
+ val &= (~AR_WA_D3_L1_DISABLE);
+ } else if (AR_SREV_9280(ah)) {
/*
* For AR9280 chips, bit 22 of 0x4004
* needs to be set.
*/
val = AR9280_WA_DEFAULT;
- if (!power_off)
- val &= (~AR_WA_D3_L1_DISABLE);
+ val &= (~AR_WA_D3_L1_DISABLE);
} else {
val = AR_WA_DEFAULT;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index f4003512d8d5..1fc1fa955d44 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -555,6 +555,69 @@ static void ar9002_hw_antdiv_comb_conf_set(struct ath_hw *ah,
REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regval);
}
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+
+static void ar9002_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
+{
+ struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
+ u8 antdiv_ctrl1, antdiv_ctrl2;
+ u32 regval;
+
+ if (enable) {
+ antdiv_ctrl1 = ATH_BT_COEX_ANTDIV_CONTROL1_ENABLE;
+ antdiv_ctrl2 = ATH_BT_COEX_ANTDIV_CONTROL2_ENABLE;
+
+ /*
+ * Don't disable the BT antenna; this lets the BB control SWCOM.
+ */
+ btcoex->bt_coex_mode2 &= (~(AR_BT_DISABLE_BT_ANT));
+ REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2);
+
+ REG_WRITE(ah, AR_PHY_SWITCH_COM, ATH_BT_COEX_ANT_DIV_SWITCH_COM);
+ REG_RMW(ah, AR_PHY_SWITCH_CHAIN_0, 0, 0xf0000000);
+ } else {
+ /*
+ * Disable antenna diversity, use LNA1 only.
+ */
+ antdiv_ctrl1 = ATH_BT_COEX_ANTDIV_CONTROL1_FIXED_A;
+ antdiv_ctrl2 = ATH_BT_COEX_ANTDIV_CONTROL2_FIXED_A;
+
+ /*
+ * Disable the BT antenna to allow concurrent BT and WLAN receive.
+ */
+ btcoex->bt_coex_mode2 |= AR_BT_DISABLE_BT_ANT;
+ REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2);
+
+ /*
+ * Program SWCOM table to make sure RF switch always parks
+ * at BT side.
+ */
+ REG_WRITE(ah, AR_PHY_SWITCH_COM, 0);
+ REG_RMW(ah, AR_PHY_SWITCH_CHAIN_0, 0, 0xf0000000);
+ }
+
+ regval = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL);
+ regval &= (~(AR_PHY_9285_ANT_DIV_CTL_ALL));
+ /*
+ * Clear ant_fast_div_bias [14:9] since for WB195,
+ * the main LNA is always LNA1.
+ */
+ regval &= (~(AR_PHY_9285_FAST_DIV_BIAS));
+ regval |= SM(antdiv_ctrl1, AR_PHY_9285_ANT_DIV_CTL);
+ regval |= SM(antdiv_ctrl2, AR_PHY_9285_ANT_DIV_ALT_LNACONF);
+ regval |= SM((antdiv_ctrl2 >> 2), AR_PHY_9285_ANT_DIV_MAIN_LNACONF);
+ regval |= SM((antdiv_ctrl1 >> 1), AR_PHY_9285_ANT_DIV_ALT_GAINTB);
+ regval |= SM((antdiv_ctrl1 >> 2), AR_PHY_9285_ANT_DIV_MAIN_GAINTB);
+ REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regval);
+
+ regval = REG_READ(ah, AR_PHY_CCK_DETECT);
+ regval &= (~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
+ regval |= SM((antdiv_ctrl1 >> 3), AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
+ REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
+}
+
+#endif
+
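
The register writes above rely on the driver's SM() shift-and-mask idiom, defined in hw.h as (((_v) << _f##_S) & _f). A standalone rendering with an illustrative field, for readers unfamiliar with the pattern:

    #include <stdint.h>

    /* Illustrative field only; not a real AR_PHY_* definition. */
    #define EXAMPLE_FIELD   0x00003000
    #define EXAMPLE_FIELD_S 12
    #define SM(_v, _f)      (((_v) << _f##_S) & _f)

    static uint32_t set_example_field(uint32_t regval, uint32_t v)
    {
            regval &= ~EXAMPLE_FIELD;        /* clear the target field */
            regval |= SM(v, EXAMPLE_FIELD);  /* shift the value in     */
            return regval;                   /* v = 2 -> bits 13:12    */
    }
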
static void ar9002_hw_spectral_scan_config(struct ath_hw *ah,
struct ath_spec_scan *param)
{
@@ -634,5 +697,9 @@ void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
ops->spectral_scan_trigger = ar9002_hw_spectral_scan_trigger;
ops->spectral_scan_wait = ar9002_hw_spectral_scan_wait;
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+ ops->set_bt_ant_diversity = ar9002_hw_set_bt_ant_diversity;
+#endif
+
ar9002_hw_set_nf_limits(ah);
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.h b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
index f9eb2c357169..6314ae2e93e3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
@@ -317,13 +317,15 @@
#define AR_PHY_9285_ANT_DIV_ALT_GAINTB_S 29
#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB 0x40000000
#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB_S 30
-#define AR_PHY_9285_ANT_DIV_LNA1 2
-#define AR_PHY_9285_ANT_DIV_LNA2 1
-#define AR_PHY_9285_ANT_DIV_LNA1_PLUS_LNA2 3
-#define AR_PHY_9285_ANT_DIV_LNA1_MINUS_LNA2 0
#define AR_PHY_9285_ANT_DIV_GAINTB_0 0
#define AR_PHY_9285_ANT_DIV_GAINTB_1 1
+#define ATH_BT_COEX_ANTDIV_CONTROL1_ENABLE 0x0b
+#define ATH_BT_COEX_ANTDIV_CONTROL2_ENABLE 0x09
+#define ATH_BT_COEX_ANTDIV_CONTROL1_FIXED_A 0x04
+#define ATH_BT_COEX_ANTDIV_CONTROL2_FIXED_A 0x09
+#define ATH_BT_COEX_ANT_DIV_SWITCH_COM 0x66666666
+
#define AR_PHY_EXT_CCA0 0x99b8
#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF
#define AR_PHY_EXT_CCA0_THRESH62_S 0
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index d105e43d22e1..f4864807e15b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3541,13 +3541,12 @@ static u16 ar9003_switch_com_spdt_get(struct ath_hw *ah, bool is2ghz)
return le16_to_cpu(ar9003_modal_header(ah, is2ghz)->switchcomspdt);
}
-
-static u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz)
+u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz)
{
return le32_to_cpu(ar9003_modal_header(ah, is2ghz)->antCtrlCommon);
}
-static u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz)
+u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz)
{
return le32_to_cpu(ar9003_modal_header(ah, is2ghz)->antCtrlCommon2);
}
@@ -3561,6 +3560,7 @@ static u16 ar9003_hw_ant_ctrl_chain_get(struct ath_hw *ah, int chain,
static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
{
+ struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_capabilities *pCap = &ah->caps;
int chain;
u32 regval, value, gpio;
@@ -3614,6 +3614,11 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
}
value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz);
+ if (AR_SREV_9485(ah) && common->bt_ant_diversity) {
+ value &= ~AR_SWITCH_TABLE_COM2_ALL;
+ value |= ah->config.ant_ctrl_comm2g_switch_enable;
+ }
REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2, AR_SWITCH_TABLE_COM2_ALL, value);
if ((AR_SREV_9462(ah)) && (ah->rxchainmask == 0x2)) {
@@ -3645,8 +3650,11 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
regval &= (~AR_PHY_ANT_DIV_LNADIV);
regval |= ((value >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S;
+ if (AR_SREV_9485(ah) && common->bt_ant_diversity)
+ regval |= AR_ANT_DIV_ENABLE;
+
if (AR_SREV_9565(ah)) {
- if (ah->shared_chain_lnadiv) {
+ if (common->bt_ant_diversity) {
regval |= (1 << AR_PHY_ANT_SW_RX_PROT_S);
} else {
regval &= ~(1 << AR_PHY_ANT_DIV_LNADIV_S);
@@ -3656,10 +3664,14 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
- /*enable fast_div */
+ /* enable fast_div */
regval = REG_READ(ah, AR_PHY_CCK_DETECT);
regval &= (~AR_FAST_DIV_ENABLE);
regval |= ((value >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
+
+ if (AR_SREV_9485(ah) && common->bt_ant_diversity)
+ regval |= AR_FAST_DIV_ENABLE;
+
REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
if (pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
@@ -3673,9 +3685,9 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
AR_PHY_ANT_DIV_ALT_GAINTB |
AR_PHY_ANT_DIV_MAIN_GAINTB));
/* by default use LNA1 for the main antenna */
- regval |= (AR_PHY_ANT_DIV_LNA1 <<
+ regval |= (ATH_ANT_DIV_COMB_LNA1 <<
AR_PHY_ANT_DIV_MAIN_LNACONF_S);
- regval |= (AR_PHY_ANT_DIV_LNA2 <<
+ regval |= (ATH_ANT_DIV_COMB_LNA2 <<
AR_PHY_ANT_DIV_ALT_LNACONF_S);
REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
}
@@ -3813,6 +3825,11 @@ static void ar9003_hw_atten_apply(struct ath_hw *ah, struct ath9k_channel *chan)
else
value = ar9003_hw_atten_chain_get_margin(ah, i, chan);
+ if (ah->config.alt_mingainidx)
+ REG_RMW_FIELD(ah, AR_PHY_EXT_ATTEN_CTL_0,
+ AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN,
+ value);
+
REG_RMW_FIELD(ah, ext_atten_reg[i],
AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN,
value);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 874f6570bd1c..75d4fb41962f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -334,6 +334,8 @@ struct ar9300_eeprom {
s32 ar9003_hw_get_tx_gain_idx(struct ath_hw *ah);
s32 ar9003_hw_get_rx_gain_idx(struct ath_hw *ah);
+u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz);
+u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz);
u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is_2ghz);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index d402cb32283f..608bb4824e2a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -153,7 +153,7 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
if (!ah->is_clk_25mhz)
INIT_INI_ARRAY(&ah->iniAdditional,
ar9340_1p0_radio_core_40M);
- } else if (AR_SREV_9485_11(ah)) {
+ } else if (AR_SREV_9485_11_OR_LATER(ah)) {
/* mac */
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
ar9485_1_1_mac_core);
@@ -424,7 +424,7 @@ static void ar9003_tx_gain_table_mode0(struct ath_hw *ah)
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9340Modes_lowest_ob_db_tx_gain_table_1p0);
- else if (AR_SREV_9485_11(ah))
+ else if (AR_SREV_9485_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9485_modes_lowest_ob_db_tx_gain_1_1);
else if (AR_SREV_9550(ah))
@@ -458,7 +458,7 @@ static void ar9003_tx_gain_table_mode1(struct ath_hw *ah)
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9340Modes_high_ob_db_tx_gain_table_1p0);
- else if (AR_SREV_9485_11(ah))
+ else if (AR_SREV_9485_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9485Modes_high_ob_db_tx_gain_1_1);
else if (AR_SREV_9580(ah))
@@ -492,7 +492,7 @@ static void ar9003_tx_gain_table_mode2(struct ath_hw *ah)
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9340Modes_low_ob_db_tx_gain_table_1p0);
- else if (AR_SREV_9485_11(ah))
+ else if (AR_SREV_9485_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9485Modes_low_ob_db_tx_gain_1_1);
else if (AR_SREV_9580(ah))
@@ -517,7 +517,7 @@ static void ar9003_tx_gain_table_mode3(struct ath_hw *ah)
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9340Modes_high_power_tx_gain_table_1p0);
- else if (AR_SREV_9485_11(ah))
+ else if (AR_SREV_9485_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9485Modes_high_power_tx_gain_1_1);
else if (AR_SREV_9580(ah))
@@ -552,7 +552,7 @@ static void ar9003_tx_gain_table_mode4(struct ath_hw *ah)
static void ar9003_tx_gain_table_mode5(struct ath_hw *ah)
{
- if (AR_SREV_9485_11(ah))
+ if (AR_SREV_9485_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9485Modes_green_ob_db_tx_gain_1_1);
else if (AR_SREV_9340(ah))
@@ -571,7 +571,7 @@ static void ar9003_tx_gain_table_mode6(struct ath_hw *ah)
if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9340Modes_low_ob_db_and_spur_tx_gain_table_1p0);
- else if (AR_SREV_9485_11(ah))
+ else if (AR_SREV_9485_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9485Modes_green_spur_ob_db_tx_gain_1_1);
else if (AR_SREV_9580(ah))
@@ -611,7 +611,7 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9340Common_rx_gain_table_1p0);
- else if (AR_SREV_9485_11(ah))
+ else if (AR_SREV_9485_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9485_common_rx_gain_1_1);
else if (AR_SREV_9550(ah)) {
@@ -644,7 +644,7 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9340Common_wo_xlna_rx_gain_table_1p0);
- else if (AR_SREV_9485_11(ah))
+ else if (AR_SREV_9485_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9485Common_wo_xlna_rx_gain_1_1);
else if (AR_SREV_9462_21(ah))
@@ -745,16 +745,25 @@ static void ar9003_hw_init_mode_gain_regs(struct ath_hw *ah)
static void ar9003_hw_configpcipowersave(struct ath_hw *ah,
bool power_off)
{
+ /*
+ * Increase L1 Entry Latency. Some WB222 boards don't have
+ * this change in eeprom/OTP.
+ */
+ if (AR_SREV_9462(ah)) {
+ u32 val = ah->config.aspm_l1_fix;
+ if ((val & 0xff000000) == 0x17000000) {
+ val &= 0x00ffffff;
+ val |= 0x27000000;
+ REG_WRITE(ah, 0x570c, val);
+ }
+ }
+
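
The fixup above rewrites only the top byte of the OTP-provided value, raising the encoded L1 entry latency. A worked example with an illustrative input:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t val = 0x17345678;      /* illustrative aspm_l1_fix */

            if ((val & 0xff000000) == 0x17000000) {
                    val &= 0x00ffffff;      /* keep the low 24 bits      */
                    val |= 0x27000000;      /* bump the L1 entry latency */
            }
            assert(val == 0x27345678);
            return 0;
    }
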
/* Nothing to do on restore for 11N */
if (!power_off /* !restore */) {
/* set bit 19 to allow forcing of pcie core into L1 state */
REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
-
- /* Several PCIe massages to ensure proper behaviour */
- if (ah->config.pcie_waen)
- REG_WRITE(ah, AR_WA, ah->config.pcie_waen);
- else
- REG_WRITE(ah, AR_WA, ah->WARegVal);
+ REG_WRITE(ah, AR_WA, ah->WARegVal);
}
/*
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 5163abd3937c..f6c5c1b50471 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -491,6 +491,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
rxs->rs_rate = MS(rxsp->status1, AR_RxRate);
rxs->rs_more = (rxsp->status2 & AR_RxMore) ? 1 : 0;
+ rxs->rs_firstaggr = (rxsp->status11 & AR_RxFirstAggr) ? 1 : 0;
rxs->rs_isaggr = (rxsp->status11 & AR_RxAggr) ? 1 : 0;
rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 1 : 0;
rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 1f694ab3cc78..e897648d3233 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -632,6 +632,22 @@ static void ar9003_hw_override_ini(struct ath_hw *ah)
REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
+
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+ REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
+ AR_GLB_SWREG_DISCONT_EN_BT_WLAN);
+
+ if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL))
+ ah->enabled_cals |= TX_IQ_CAL;
+ else
+ ah->enabled_cals &= ~TX_IQ_CAL;
+
+ if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE)
+ ah->enabled_cals |= TX_CL_CAL;
+ else
+ ah->enabled_cals &= ~TX_CL_CAL;
+ }
}
static void ar9003_hw_prog_ini(struct ath_hw *ah,
@@ -814,29 +830,12 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
if (chan->channel == 2484)
ar9003_hw_prog_ini(ah, &ah->iniCckfirJapan2484, 1);
- if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
- REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
- AR_GLB_SWREG_DISCONT_EN_BT_WLAN);
-
ah->modes_index = modesIndex;
ar9003_hw_override_ini(ah);
ar9003_hw_set_channel_regs(ah, chan);
ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
ath9k_hw_apply_txpower(ah, chan, false);
- if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
- if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
- AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL))
- ah->enabled_cals |= TX_IQ_CAL;
- else
- ah->enabled_cals &= ~TX_IQ_CAL;
-
- if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE)
- ah->enabled_cals |= TX_CL_CAL;
- else
- ah->enabled_cals &= ~TX_CL_CAL;
- }
-
return 0;
}
@@ -1173,6 +1172,10 @@ skip_ws_det:
* is_on == 0 means MRC CCK is OFF (more noise imm)
*/
bool is_on = param ? 1 : 0;
+
+ if (ah->caps.rx_chainmask == 1)
+ break;
+
REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
AR_PHY_MRC_CCK_ENABLE, is_on);
REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
@@ -1190,8 +1193,6 @@ skip_ws_det:
}
break;
}
- case ATH9K_ANI_PRESENT:
- break;
default:
ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
return false;
@@ -1413,65 +1414,111 @@ static void ar9003_hw_antdiv_comb_conf_set(struct ath_hw *ah,
REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
}
-static void ar9003_hw_antctrl_shared_chain_lnadiv(struct ath_hw *ah,
- bool enable)
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+
+static void ar9003_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
{
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
u8 ant_div_ctl1;
u32 regval;
- if (!AR_SREV_9565(ah))
+ if (!AR_SREV_9485(ah) && !AR_SREV_9565(ah))
return;
- ah->shared_chain_lnadiv = enable;
+ if (AR_SREV_9485(ah)) {
+ regval = ar9003_hw_ant_ctrl_common_2_get(ah,
+ IS_CHAN_2GHZ(ah->curchan));
+ if (enable) {
+ regval &= ~AR_SWITCH_TABLE_COM2_ALL;
+ regval |= ah->config.ant_ctrl_comm2g_switch_enable;
+ }
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2,
+ AR_SWITCH_TABLE_COM2_ALL, regval);
+ }
+
ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
+ /*
+ * Set MAIN/ALT LNA conf.
+ * Set MAIN/ALT gain_tb.
+ */
regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
regval &= (~AR_ANT_DIV_CTRL_ALL);
regval |= (ant_div_ctl1 & 0x3f) << AR_ANT_DIV_CTRL_ALL_S;
- regval &= ~AR_PHY_ANT_DIV_LNADIV;
- regval |= ((ant_div_ctl1 >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S;
-
- if (enable)
- regval |= AR_ANT_DIV_ENABLE;
-
REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
- regval = REG_READ(ah, AR_PHY_CCK_DETECT);
- regval &= ~AR_FAST_DIV_ENABLE;
- regval |= ((ant_div_ctl1 >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
-
- if (enable)
- regval |= AR_FAST_DIV_ENABLE;
-
- REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
-
- if (enable) {
- REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
- (1 << AR_PHY_ANT_SW_RX_PROT_S));
- if (ah->curchan && IS_CHAN_2GHZ(ah->curchan))
- REG_SET_BIT(ah, AR_PHY_RESTART,
- AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
- REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
- AR_BTCOEX_WL_LNADIV_FORCE_ON);
- } else {
- REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_ENABLE);
- REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
- (1 << AR_PHY_ANT_SW_RX_PROT_S));
- REG_CLR_BIT(ah, AR_PHY_CCK_DETECT, AR_FAST_DIV_ENABLE);
- REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
- AR_BTCOEX_WL_LNADIV_FORCE_ON);
-
+ if (AR_SREV_9485_11_OR_LATER(ah)) {
+ /*
+ * Enable LNA diversity.
+ */
regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
- regval &= ~(AR_PHY_ANT_DIV_MAIN_LNACONF |
- AR_PHY_ANT_DIV_ALT_LNACONF |
- AR_PHY_ANT_DIV_MAIN_GAINTB |
- AR_PHY_ANT_DIV_ALT_GAINTB);
- regval |= (AR_PHY_ANT_DIV_LNA1 << AR_PHY_ANT_DIV_MAIN_LNACONF_S);
- regval |= (AR_PHY_ANT_DIV_LNA2 << AR_PHY_ANT_DIV_ALT_LNACONF_S);
+ regval &= ~AR_PHY_ANT_DIV_LNADIV;
+ regval |= ((ant_div_ctl1 >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S;
+ if (enable)
+ regval |= AR_ANT_DIV_ENABLE;
+
REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
+
+ /*
+ * Enable fast antenna diversity.
+ */
+ regval = REG_READ(ah, AR_PHY_CCK_DETECT);
+ regval &= ~AR_FAST_DIV_ENABLE;
+ regval |= ((ant_div_ctl1 >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
+ if (enable)
+ regval |= AR_FAST_DIV_ENABLE;
+
+ REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
+
+ if (pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
+ regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
+ regval &= (~(AR_PHY_ANT_DIV_MAIN_LNACONF |
+ AR_PHY_ANT_DIV_ALT_LNACONF |
+ AR_PHY_ANT_DIV_ALT_GAINTB |
+ AR_PHY_ANT_DIV_MAIN_GAINTB));
+ /*
+ * Set MAIN to LNA1 and ALT to LNA2 at the
+ * beginning.
+ */
+ regval |= (ATH_ANT_DIV_COMB_LNA1 <<
+ AR_PHY_ANT_DIV_MAIN_LNACONF_S);
+ regval |= (ATH_ANT_DIV_COMB_LNA2 <<
+ AR_PHY_ANT_DIV_ALT_LNACONF_S);
+ REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
+ }
+ } else if (AR_SREV_9565(ah)) {
+ if (enable) {
+ REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+ (1 << AR_PHY_ANT_SW_RX_PROT_S));
+ if (ah->curchan && IS_CHAN_2GHZ(ah->curchan))
+ REG_SET_BIT(ah, AR_PHY_RESTART,
+ AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
+ REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
+ AR_BTCOEX_WL_LNADIV_FORCE_ON);
+ } else {
+ REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_ENABLE);
+ REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+ (1 << AR_PHY_ANT_SW_RX_PROT_S));
+ REG_CLR_BIT(ah, AR_PHY_CCK_DETECT, AR_FAST_DIV_ENABLE);
+ REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
+ AR_BTCOEX_WL_LNADIV_FORCE_ON);
+
+ regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
+ regval &= ~(AR_PHY_ANT_DIV_MAIN_LNACONF |
+ AR_PHY_ANT_DIV_ALT_LNACONF |
+ AR_PHY_ANT_DIV_MAIN_GAINTB |
+ AR_PHY_ANT_DIV_ALT_GAINTB);
+ regval |= (ATH_ANT_DIV_COMB_LNA1 <<
+ AR_PHY_ANT_DIV_MAIN_LNACONF_S);
+ regval |= (ATH_ANT_DIV_COMB_LNA2 <<
+ AR_PHY_ANT_DIV_ALT_LNACONF_S);
+ REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
+ }
}
}
+#endif
+
static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
struct ath9k_channel *chan,
u8 *ini_reloaded)
@@ -1518,6 +1565,18 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
+ if (AR_SREV_9462_20_OR_LATER(ah)) {
+ /*
+ * CUS217 mix LNA mode.
+ */
+ if (ar9003_hw_get_rx_gain_idx(ah) == 2) {
+ REG_WRITE_ARRAY(&ah->ini_modes_rxgain_bb_core,
+ 1, regWrites);
+ REG_WRITE_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
+ modesIndex, regWrites);
+ }
+ }
+
/*
* For 5GHz channels requiring Fast Clock, apply
* different modal values.
@@ -1528,7 +1587,11 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
if (AR_SREV_9565(ah))
REG_WRITE_ARRAY(&ah->iniModesFastClock, 1, regWrites);
- REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites);
+ /*
+ * JAPAN regulatory.
+ */
+ if (chan->channel == 2484)
+ ar9003_hw_prog_ini(ah, &ah->iniCckfirJapan2484, 1);
ah->modes_index = modesIndex;
*ini_reloaded = true;
@@ -1631,11 +1694,14 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
ops->antdiv_comb_conf_get = ar9003_hw_antdiv_comb_conf_get;
ops->antdiv_comb_conf_set = ar9003_hw_antdiv_comb_conf_set;
- ops->antctrl_shared_chain_lnadiv = ar9003_hw_antctrl_shared_chain_lnadiv;
ops->spectral_scan_config = ar9003_hw_spectral_scan_config;
ops->spectral_scan_trigger = ar9003_hw_spectral_scan_trigger;
ops->spectral_scan_wait = ar9003_hw_spectral_scan_wait;
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+ ops->set_bt_ant_diversity = ar9003_hw_set_bt_ant_diversity;
+#endif
+
ar9003_hw_set_nf_limits(ah);
ar9003_hw_set_radar_conf(ah);
memcpy(ah->nf_regs, ar9300_cca_regs, sizeof(ah->nf_regs));
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index d4d39f305a0b..6fd752321e36 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -148,6 +148,8 @@
#define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28
#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
#define AR_PHY_EXT_CCA_THRESH62_S 16
+#define AR_PHY_EXTCHN_PWRTHR1_ANT_DIV_ALT_ANT_MINGAINIDX 0x0000FF00
+#define AR_PHY_EXTCHN_PWRTHR1_ANT_DIV_ALT_ANT_MINGAINIDX_S 8
#define AR_PHY_EXT_MINCCA_PWR 0x01FF0000
#define AR_PHY_EXT_MINCCA_PWR_S 16
#define AR_PHY_EXT_CYCPWR_THR1 0x0000FE00L
@@ -296,11 +298,6 @@
#define AR_PHY_ANT_DIV_MAIN_GAINTB 0x40000000
#define AR_PHY_ANT_DIV_MAIN_GAINTB_S 30
-#define AR_PHY_ANT_DIV_LNA1_MINUS_LNA2 0x0
-#define AR_PHY_ANT_DIV_LNA2 0x1
-#define AR_PHY_ANT_DIV_LNA1 0x2
-#define AR_PHY_ANT_DIV_LNA1_PLUS_LNA2 0x3
-
#define AR_PHY_EXTCHN_PWRTHR1 (AR_AGC_BASE + 0x2c)
#define AR_PHY_EXT_CHN_WIN (AR_AGC_BASE + 0x30)
#define AR_PHY_20_40_DET_THR (AR_AGC_BASE + 0x34)
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index c1224b5a257b..2ee35f677c0e 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -72,17 +72,12 @@ struct ath_config {
/*************************/
#define ATH_TXBUF_RESET(_bf) do { \
- (_bf)->bf_stale = false; \
(_bf)->bf_lastbf = NULL; \
(_bf)->bf_next = NULL; \
memset(&((_bf)->bf_state), 0, \
sizeof(struct ath_buf_state)); \
} while (0)
-#define ATH_RXBUF_RESET(_bf) do { \
- (_bf)->bf_stale = false; \
- } while (0)
-
/**
* enum buffer_type - Buffer type flags
*
@@ -137,7 +132,8 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
#define ATH_AGGR_ENCRYPTDELIM 10
/* minimum h/w qdepth to be sustained to maximize aggregation */
#define ATH_AGGR_MIN_QDEPTH 2
-#define ATH_AMPDU_SUBFRAME_DEFAULT 32
+/* minimum h/w qdepth for non-aggregated traffic */
+#define ATH_NON_AGGR_MIN_QDEPTH 8
#define IEEE80211_SEQ_SEQ_SHIFT 4
#define IEEE80211_SEQ_MAX 4096
@@ -174,12 +170,6 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
#define ATH_TX_COMPLETE_POLL_INT 1000
-enum ATH_AGGR_STATUS {
- ATH_AGGR_DONE,
- ATH_AGGR_BAW_CLOSED,
- ATH_AGGR_LIMITED,
-};
-
#define ATH_TXFIFO_DEPTH 8
struct ath_txq {
int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */
@@ -201,10 +191,10 @@ struct ath_txq {
struct ath_atx_ac {
struct ath_txq *txq;
- int sched;
struct list_head list;
struct list_head tid_q;
bool clear_ps_filter;
+ bool sched;
};
struct ath_frame_info {
@@ -212,14 +202,16 @@ struct ath_frame_info {
int framelen;
enum ath9k_key_type keytype;
u8 keyix;
- u8 retries;
u8 rtscts_rate;
+ u8 retries : 7;
+ u8 baw_tracked : 1;
};
struct ath_buf_state {
u8 bf_type;
u8 bfs_paprd;
u8 ndelim;
+ bool stale;
u16 seqno;
unsigned long bfs_paprd_timestamp;
};
@@ -233,7 +225,6 @@ struct ath_buf {
void *bf_desc; /* virtual addr of desc */
dma_addr_t bf_daddr; /* physical addr of desc */
dma_addr_t bf_buf_addr; /* physical addr of data buffer, for DMA */
- bool bf_stale;
struct ieee80211_tx_rate rates[4];
struct ath_buf_state bf_state;
};
@@ -241,16 +232,18 @@ struct ath_buf {
struct ath_atx_tid {
struct list_head list;
struct sk_buff_head buf_q;
+ struct sk_buff_head retry_q;
struct ath_node *an;
struct ath_atx_ac *ac;
unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)];
- int bar_index;
u16 seq_start;
u16 seq_next;
u16 baw_size;
- int tidno;
+ u8 tidno;
int baw_head; /* first un-acked tx buffer */
int baw_tail; /* next unused tx buffer slot */
+
+ s8 bar_index;
bool sched;
bool paused;
bool active;
@@ -262,16 +255,13 @@ struct ath_node {
struct ieee80211_vif *vif; /* interface with which we're associated */
struct ath_atx_tid tid[IEEE80211_NUM_TIDS];
struct ath_atx_ac ac[IEEE80211_NUM_ACS];
- int ps_key;
u16 maxampdu;
u8 mpdudensity;
+ s8 ps_key;
bool sleeping;
-
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
- struct dentry *node_stat;
-#endif
+ bool no_ps_filter;
};
struct ath_tx_control {
@@ -317,6 +307,7 @@ struct ath_rx {
struct ath_descdma rxdma;
struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
+ struct ath_buf *buf_hold;
struct sk_buff *frag;
u32 ampdu_ref;
@@ -367,6 +358,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
/********/
struct ath_vif {
+ struct ath_node mcast_node;
int av_bslot;
bool primary_sta_vif;
__le64 tsf_adjust; /* TSF adjustment for staggered beacons */
@@ -428,6 +420,7 @@ void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_set_beacon(struct ath_softc *sc);
+bool ath9k_csa_is_finished(struct ath_softc *sc);
/*******************/
/* Link Monitoring */
@@ -585,19 +578,14 @@ static inline void ath_fill_led_pin(struct ath_softc *sc)
#define ATH_ANT_DIV_COMB_MAX_COUNT 100
#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO 30
#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO2 20
+#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO_LOW_RSSI 50
+#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO2_LOW_RSSI 50
#define ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA -1
#define ATH_ANT_DIV_COMB_LNA1_DELTA_HI -4
#define ATH_ANT_DIV_COMB_LNA1_DELTA_MID -2
#define ATH_ANT_DIV_COMB_LNA1_DELTA_LOW 2
-enum ath9k_ant_div_comb_lna_conf {
- ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2,
- ATH_ANT_DIV_COMB_LNA2,
- ATH_ANT_DIV_COMB_LNA1,
- ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2,
-};
-
struct ath_ant_comb {
u16 count;
u16 total_pkt_count;
@@ -614,27 +602,36 @@ struct ath_ant_comb {
int rssi_first;
int rssi_second;
int rssi_third;
+ int ant_ratio;
+ int ant_ratio2;
bool alt_good;
int quick_scan_cnt;
- int main_conf;
+ enum ath9k_ant_div_comb_lna_conf main_conf;
enum ath9k_ant_div_comb_lna_conf first_quick_scan_conf;
enum ath9k_ant_div_comb_lna_conf second_quick_scan_conf;
bool first_ratio;
bool second_ratio;
unsigned long scan_start_time;
+
+ /*
+ * Card-specific config values.
+ */
+ int low_rssi_thresh;
+ int fast_div_bias;
};
void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
-void ath_ant_comb_update(struct ath_softc *sc);
/********************/
/* Main driver core */
/********************/
-#define ATH9K_PCI_CUS198 0x0001
-#define ATH9K_PCI_CUS230 0x0002
-#define ATH9K_PCI_CUS217 0x0004
-#define ATH9K_PCI_WOW 0x0008
+#define ATH9K_PCI_CUS198 0x0001
+#define ATH9K_PCI_CUS230 0x0002
+#define ATH9K_PCI_CUS217 0x0004
+#define ATH9K_PCI_WOW 0x0008
+#define ATH9K_PCI_BT_ANT_DIV 0x0010
+#define ATH9K_PCI_D3_L1_WAR 0x0020
/*
* Default cache line size, in bytes.
@@ -761,6 +758,7 @@ struct ath_softc {
#endif
struct ath_descdma txsdma;
+ struct ieee80211_vif *csa_vif;
struct ath_ant_comb ant_comb;
u8 ant_tx, ant_rx;
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 1a17732bb089..b5c16b3a37b9 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -291,6 +291,23 @@ void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif)
(unsigned long long)tsfadjust, avp->av_bslot);
}
+bool ath9k_csa_is_finished(struct ath_softc *sc)
+{
+ struct ieee80211_vif *vif;
+
+ vif = sc->csa_vif;
+ if (!vif || !vif->csa_active)
+ return false;
+
+ if (!ieee80211_csa_is_complete(vif))
+ return false;
+
+ ieee80211_csa_finish(vif);
+
+ sc->csa_vif = NULL;
+ return true;
+}
+
void ath9k_beacon_tasklet(unsigned long data)
{
struct ath_softc *sc = (struct ath_softc *)data;
@@ -336,6 +353,10 @@ void ath9k_beacon_tasklet(unsigned long data)
return;
}
+ /* EDMA devices check that in the tx completion function. */
+ if (!edma && ath9k_csa_is_finished(sc))
+ return;
+
slot = ath9k_beacon_choose_slot(sc);
vif = sc->beacon.bslot[slot];
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index 344fdde1d7a3..d3063c21e16c 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -49,37 +49,40 @@ int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb)
}
EXPORT_SYMBOL(ath9k_cmn_get_hw_crypto_keytype);
-static u32 ath9k_get_extchanmode(struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type)
+static u32 ath9k_get_extchanmode(struct cfg80211_chan_def *chandef)
{
u32 chanmode = 0;
- switch (chan->band) {
+ switch (chandef->chan->band) {
case IEEE80211_BAND_2GHZ:
- switch (channel_type) {
- case NL80211_CHAN_NO_HT:
- case NL80211_CHAN_HT20:
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
chanmode = CHANNEL_G_HT20;
break;
- case NL80211_CHAN_HT40PLUS:
- chanmode = CHANNEL_G_HT40PLUS;
+ case NL80211_CHAN_WIDTH_40:
+ if (chandef->center_freq1 > chandef->chan->center_freq)
+ chanmode = CHANNEL_G_HT40PLUS;
+ else
+ chanmode = CHANNEL_G_HT40MINUS;
break;
- case NL80211_CHAN_HT40MINUS:
- chanmode = CHANNEL_G_HT40MINUS;
+ default:
break;
}
break;
case IEEE80211_BAND_5GHZ:
- switch (channel_type) {
- case NL80211_CHAN_NO_HT:
- case NL80211_CHAN_HT20:
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
chanmode = CHANNEL_A_HT20;
break;
- case NL80211_CHAN_HT40PLUS:
- chanmode = CHANNEL_A_HT40PLUS;
+ case NL80211_CHAN_WIDTH_40:
+ if (chandef->center_freq1 > chandef->chan->center_freq)
+ chanmode = CHANNEL_A_HT40PLUS;
+ else
+ chanmode = CHANNEL_A_HT40MINUS;
break;
- case NL80211_CHAN_HT40MINUS:
- chanmode = CHANNEL_A_HT40MINUS;
+ default:
break;
}
break;
@@ -94,13 +97,12 @@ static u32 ath9k_get_extchanmode(struct ieee80211_channel *chan,
* Update internal channel flags.
*/
void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type)
+ struct cfg80211_chan_def *chandef)
{
- ichan->channel = chan->center_freq;
- ichan->chan = chan;
+ ichan->channel = chandef->chan->center_freq;
+ ichan->chan = chandef->chan;
- if (chan->band == IEEE80211_BAND_2GHZ) {
+ if (chandef->chan->band == IEEE80211_BAND_2GHZ) {
ichan->chanmode = CHANNEL_G;
ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM;
} else {
@@ -108,8 +110,22 @@ void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
}
- if (channel_type != NL80211_CHAN_NO_HT)
- ichan->chanmode = ath9k_get_extchanmode(chan, channel_type);
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_5:
+ ichan->channelFlags |= CHANNEL_QUARTER;
+ break;
+ case NL80211_CHAN_WIDTH_10:
+ ichan->channelFlags |= CHANNEL_HALF;
+ break;
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ case NL80211_CHAN_WIDTH_40:
+ ichan->chanmode = ath9k_get_extchanmode(chandef);
+ break;
+ default:
+ WARN_ON(1);
+ }
}
EXPORT_SYMBOL(ath9k_cmn_update_ichannel);
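
The HT40 plus/minus decision in the rewritten ath9k_get_extchanmode() is purely geometric: a 40 MHz chandef is HT40+ when its center frequency sits above the primary channel's center. A worked example using 2.4 GHz channel-plan numbers:

    #include <stdbool.h>

    /* Channel 6 (primary at 2437 MHz) in a 40 MHz chandef centered at
     * 2447 MHz: the secondary channel lies above the primary, so the
     * mode is HT40+. A center of 2427 MHz would give HT40- instead. */
    static bool is_ht40plus(int chan_center_freq, int center_freq1)
    {
            return center_freq1 > chan_center_freq;
    }
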
@@ -125,8 +141,7 @@ struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
chan_idx = curchan->hw_value;
channel = &ah->channels[chan_idx];
- ath9k_cmn_update_ichannel(channel, curchan,
- cfg80211_get_chandef_type(&hw->conf.chandef));
+ ath9k_cmn_update_ichannel(channel, &hw->conf.chandef);
return channel;
}
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index 207d06995b15..e039bcbfbd79 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -44,8 +44,7 @@
int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type);
+ struct cfg80211_chan_def *chandef);
struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
struct ath_hw *ah);
int ath9k_cmn_count_streams(unsigned int chainmask, int max);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 87454f6c7b4f..c088744a6bfb 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -88,90 +88,6 @@ static const struct file_operations fops_debug = {
#define DMA_BUF_LEN 1024
-static ssize_t read_file_tx_chainmask(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- struct ath_hw *ah = sc->sc_ah;
- char buf[32];
- unsigned int len;
-
- len = sprintf(buf, "0x%08x\n", ah->txchainmask);
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t write_file_tx_chainmask(struct file *file, const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- struct ath_hw *ah = sc->sc_ah;
- unsigned long mask;
- char buf[32];
- ssize_t len;
-
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
-
- buf[len] = '\0';
- if (kstrtoul(buf, 0, &mask))
- return -EINVAL;
-
- ah->txchainmask = mask;
- ah->caps.tx_chainmask = mask;
- return count;
-}
-
-static const struct file_operations fops_tx_chainmask = {
- .read = read_file_tx_chainmask,
- .write = write_file_tx_chainmask,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-
-static ssize_t read_file_rx_chainmask(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- struct ath_hw *ah = sc->sc_ah;
- char buf[32];
- unsigned int len;
-
- len = sprintf(buf, "0x%08x\n", ah->rxchainmask);
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t write_file_rx_chainmask(struct file *file, const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- struct ath_hw *ah = sc->sc_ah;
- unsigned long mask;
- char buf[32];
- ssize_t len;
-
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
-
- buf[len] = '\0';
- if (kstrtoul(buf, 0, &mask))
- return -EINVAL;
-
- ah->rxchainmask = mask;
- ah->caps.rx_chainmask = mask;
- return count;
-}
-
-static const struct file_operations fops_rx_chainmask = {
- .read = read_file_rx_chainmask,
- .write = write_file_rx_chainmask,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
static ssize_t read_file_ani(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
@@ -270,25 +186,29 @@ static const struct file_operations fops_ani = {
.llseek = default_llseek,
};
-static ssize_t read_file_ant_diversity(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+
+static ssize_t read_file_bt_ant_diversity(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
char buf[32];
unsigned int len;
- len = sprintf(buf, "%d\n", common->antenna_diversity);
+ len = sprintf(buf, "%d\n", common->bt_ant_diversity);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
-static ssize_t write_file_ant_diversity(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
+static ssize_t write_file_bt_ant_diversity(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- unsigned long antenna_diversity;
+ struct ath9k_hw_capabilities *pCap = &sc->sc_ah->caps;
+ unsigned long bt_ant_diversity;
char buf[32];
ssize_t len;
@@ -296,26 +216,147 @@ static ssize_t write_file_ant_diversity(struct file *file,
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
- if (!AR_SREV_9565(sc->sc_ah))
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV))
goto exit;
buf[len] = '\0';
- if (kstrtoul(buf, 0, &antenna_diversity))
+ if (kstrtoul(buf, 0, &bt_ant_diversity))
return -EINVAL;
- common->antenna_diversity = !!antenna_diversity;
+ common->bt_ant_diversity = !!bt_ant_diversity;
ath9k_ps_wakeup(sc);
- ath_ant_comb_update(sc);
- ath_dbg(common, CONFIG, "Antenna diversity: %d\n",
- common->antenna_diversity);
+ ath9k_hw_set_bt_ant_diversity(sc->sc_ah, common->bt_ant_diversity);
+ ath_dbg(common, CONFIG, "Enable WLAN/BT RX Antenna diversity: %d\n",
+ common->bt_ant_diversity);
ath9k_ps_restore(sc);
exit:
return count;
}
-static const struct file_operations fops_ant_diversity = {
- .read = read_file_ant_diversity,
- .write = write_file_ant_diversity,
+static const struct file_operations fops_bt_ant_diversity = {
+ .read = read_file_bt_ant_diversity,
+ .write = write_file_bt_ant_diversity,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+#endif
+
+void ath9k_debug_stat_ant(struct ath_softc *sc,
+ struct ath_hw_antcomb_conf *div_ant_conf,
+ int main_rssi_avg, int alt_rssi_avg)
+{
+ struct ath_antenna_stats *as_main = &sc->debug.stats.ant_stats[ANT_MAIN];
+ struct ath_antenna_stats *as_alt = &sc->debug.stats.ant_stats[ANT_ALT];
+
+ as_main->lna_attempt_cnt[div_ant_conf->main_lna_conf]++;
+ as_alt->lna_attempt_cnt[div_ant_conf->alt_lna_conf]++;
+
+ as_main->rssi_avg = main_rssi_avg;
+ as_alt->rssi_avg = alt_rssi_avg;
+}
+
+static ssize_t read_file_antenna_diversity(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ struct ath_antenna_stats *as_main = &sc->debug.stats.ant_stats[ANT_MAIN];
+ struct ath_antenna_stats *as_alt = &sc->debug.stats.ant_stats[ANT_ALT];
+ struct ath_hw_antcomb_conf div_ant_conf;
+ unsigned int len = 0, size = 1024;
+ ssize_t retval = 0;
+ char *buf;
+ char *lna_conf_str[4] = {"LNA1_MINUS_LNA2",
+ "LNA2",
+ "LNA1",
+ "LNA1_PLUS_LNA2"};
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)) {
+ len += snprintf(buf + len, size - len, "%s\n",
+ "Antenna Diversity Combining is disabled");
+ goto exit;
+ }
+
+ ath9k_ps_wakeup(sc);
+ ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf);
+ len += snprintf(buf + len, size - len, "Current MAIN config : %s\n",
+ lna_conf_str[div_ant_conf.main_lna_conf]);
+ len += snprintf(buf + len, size - len, "Current ALT config : %s\n",
+ lna_conf_str[div_ant_conf.alt_lna_conf]);
+ len += snprintf(buf + len, size - len, "Average MAIN RSSI : %d\n",
+ as_main->rssi_avg);
+ len += snprintf(buf + len, size - len, "Average ALT RSSI : %d\n\n",
+ as_alt->rssi_avg);
+ ath9k_ps_restore(sc);
+
+ len += snprintf(buf + len, size - len, "Packet Receive Cnt:\n");
+ len += snprintf(buf + len, size - len, "-------------------\n");
+
+ len += snprintf(buf + len, size - len, "%30s%15s\n",
+ "MAIN", "ALT");
+ len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "TOTAL COUNT",
+ as_main->recv_cnt,
+ as_alt->recv_cnt);
+ len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1",
+ as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1],
+ as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1]);
+ len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA2",
+ as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2],
+ as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2]);
+ len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1 + LNA2",
+ as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
+ as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
+ len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1 - LNA2",
+ as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
+ as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
+
+ len += snprintf(buf + len, size - len, "\nLNA Config Attempts:\n");
+ len += snprintf(buf + len, size - len, "--------------------\n");
+
+ len += snprintf(buf + len, size - len, "%30s%15s\n",
+ "MAIN", "ALT");
+ len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1",
+ as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1],
+ as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1]);
+ len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA2",
+ as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2],
+ as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2]);
+ len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1 + LNA2",
+ as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
+ as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
+ len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1 - LNA2",
+ as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
+ as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
+
+exit:
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_antenna_diversity = {
+ .read = read_file_antenna_diversity,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
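
The read handler above follows the common debugfs pattern of accumulating formatted lines with len += snprintf(buf + len, size - len, ...) and clamping len to size before copying out, since snprintf() returns the length that *would* have been written when output is truncated. A stripped-down sketch of the same pattern, with hypothetical names and the usual kernel includes:

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static ssize_t example_read(struct file *file, char __user *user_buf,
                            size_t count, loff_t *ppos)
{
        unsigned int len = 0, size = 1024;
        ssize_t retval;
        char *buf;

        buf = kzalloc(size, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;

        /* Each call appends at the current offset with the space left. */
        len += snprintf(buf + len, size - len, "first : %d\n", 1);
        len += snprintf(buf + len, size - len, "second: %d\n", 2);

        /* On truncation len can exceed size; clamp before copying out. */
        if (len > size)
                len = size;

        retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
        kfree(buf);

        return retval;
}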
@@ -607,6 +648,28 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
return retval;
}
+static ssize_t print_queue(struct ath_softc *sc, struct ath_txq *txq,
+ char *buf, ssize_t size)
+{
+ ssize_t len = 0;
+
+ ath_txq_lock(sc, txq);
+
+ len += snprintf(buf + len, size - len, "%s: %d ",
+ "qnum", txq->axq_qnum);
+ len += snprintf(buf + len, size - len, "%s: %2d ",
+ "qdepth", txq->axq_depth);
+ len += snprintf(buf + len, size - len, "%s: %2d ",
+ "ampdu-depth", txq->axq_ampdu_depth);
+ len += snprintf(buf + len, size - len, "%s: %3d ",
+ "pending", txq->pending_frames);
+ len += snprintf(buf + len, size - len, "%s: %d\n",
+ "stopped", txq->stopped);
+
+ ath_txq_unlock(sc, txq);
+ return len;
+}
+
static ssize_t read_file_queues(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -624,24 +687,13 @@ static ssize_t read_file_queues(struct file *file, char __user *user_buf,
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
txq = sc->tx.txq_map[i];
- len += snprintf(buf + len, size - len, "(%s): ", qname[i]);
-
- ath_txq_lock(sc, txq);
-
- len += snprintf(buf + len, size - len, "%s: %d ",
- "qnum", txq->axq_qnum);
- len += snprintf(buf + len, size - len, "%s: %2d ",
- "qdepth", txq->axq_depth);
- len += snprintf(buf + len, size - len, "%s: %2d ",
- "ampdu-depth", txq->axq_ampdu_depth);
- len += snprintf(buf + len, size - len, "%s: %3d ",
- "pending", txq->pending_frames);
- len += snprintf(buf + len, size - len, "%s: %d\n",
- "stopped", txq->stopped);
-
- ath_txq_unlock(sc, txq);
+ len += snprintf(buf + len, size - len, "(%s): ", qname[i]);
+ len += print_queue(sc, txq, buf + len, size - len);
}
+ len += snprintf(buf + len, size - len, "(CAB): ");
+ len += print_queue(sc, sc->beacon.cabq, buf + len, size - len);
+
if (len > size)
len = size;
@@ -1589,17 +1641,7 @@ void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
struct dentry *dir)
{
struct ath_node *an = (struct ath_node *)sta->drv_priv;
- an->node_stat = debugfs_create_file("node_stat", S_IRUGO,
- dir, an, &fops_node_stat);
-}
-
-void ath9k_sta_remove_debugfs(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct dentry *dir)
-{
- struct ath_node *an = (struct ath_node *)sta->drv_priv;
- debugfs_remove(an->node_stat);
+ debugfs_create_file("node_stat", S_IRUGO, dir, an, &fops_node_stat);
}
/* Ethtool support for get-stats */
@@ -1770,10 +1812,10 @@ int ath9k_init_debug(struct ath_hw *ah)
&fops_reset);
debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_recv);
- debugfs_create_file("rx_chainmask", S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc, &fops_rx_chainmask);
- debugfs_create_file("tx_chainmask", S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc, &fops_tx_chainmask);
+ debugfs_create_u8("rx_chainmask", S_IRUSR, sc->debug.debugfs_phy,
+ &ah->rxchainmask);
+ debugfs_create_u8("tx_chainmask", S_IRUSR, sc->debug.debugfs_phy,
+ &ah->txchainmask);
debugfs_create_file("ani", S_IRUSR | S_IWUSR,
sc->debug.debugfs_phy, sc, &fops_ani);
debugfs_create_bool("paprd", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
@@ -1814,9 +1856,11 @@ int ath9k_init_debug(struct ath_hw *ah)
sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR,
sc->debug.debugfs_phy, &sc->sc_ah->gpio_val);
- debugfs_create_file("diversity", S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc, &fops_ant_diversity);
+ debugfs_create_file("antenna_diversity", S_IRUSR,
+ sc->debug.debugfs_phy, sc, &fops_antenna_diversity);
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+ debugfs_create_file("bt_ant_diversity", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_bt_ant_diversity);
debugfs_create_file("btcoex", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_btcoex);
#endif
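
The tx/rx chainmask files above no longer need hand-rolled file_operations: debugfs_create_u8() exposes a u8 variable directly (read-only here, since S_IWUSR was dropped along with the custom write handlers). A self-contained sketch of the helper in a hypothetical module, with illustrative names:

#include <linux/debugfs.h>
#include <linux/module.h>

static u8 example_chainmask = 0x7;
static struct dentry *example_dir;

static int __init example_init(void)
{
        example_dir = debugfs_create_dir("example", NULL);
        if (!example_dir)
                return -ENOMEM;

        /* One call replaces a read handler, a write handler and a fops. */
        debugfs_create_u8("chainmask", S_IRUSR, example_dir,
                          &example_chainmask);
        return 0;
}

static void __exit example_exit(void)
{
        debugfs_remove_recursive(example_dir);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");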
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index fc679198a0f3..6e1556fa2f3e 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -28,9 +28,13 @@ struct fft_sample_tlv;
#ifdef CONFIG_ATH9K_DEBUGFS
#define TX_STAT_INC(q, c) sc->debug.stats.txstats[q].c++
#define RESET_STAT_INC(sc, type) sc->debug.stats.reset[type]++
+#define ANT_STAT_INC(i, c) sc->debug.stats.ant_stats[i].c++
+#define ANT_LNA_INC(i, c) sc->debug.stats.ant_stats[i].lna_recv_cnt[c]++
#else
#define TX_STAT_INC(q, c) do { } while (0)
#define RESET_STAT_INC(sc, type) do { } while (0)
+#define ANT_STAT_INC(i, c) do { } while (0)
+#define ANT_LNA_INC(i, c) do { } while (0)
#endif
enum ath_reset_type {
@@ -243,11 +247,22 @@ struct ath_rx_stats {
u32 rx_spectral;
};
+#define ANT_MAIN 0
+#define ANT_ALT 1
+
+struct ath_antenna_stats {
+ u32 recv_cnt;
+ u32 rssi_avg;
+ u32 lna_recv_cnt[4];
+ u32 lna_attempt_cnt[4];
+};
+
struct ath_stats {
struct ath_interrupt_stats istats;
struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES];
struct ath_rx_stats rxstats;
struct ath_dfs_stats dfs_stats;
+ struct ath_antenna_stats ant_stats[2];
u32 reset[__RESET_TYPE_MAX];
};
@@ -277,14 +292,11 @@ void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct dentry *dir);
-void ath9k_sta_remove_debugfs(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct dentry *dir);
-
void ath_debug_send_fft_sample(struct ath_softc *sc,
struct fft_sample_tlv *fft_sample);
-
+void ath9k_debug_stat_ant(struct ath_softc *sc,
+ struct ath_hw_antcomb_conf *div_ant_conf,
+ int main_rssi_avg, int alt_rssi_avg);
#else
#define RX_STAT_INC(c) /* NOP */
@@ -297,12 +309,10 @@ static inline int ath9k_init_debug(struct ath_hw *ah)
static inline void ath9k_deinit_debug(struct ath_softc *sc)
{
}
-
static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
enum ath9k_int status)
{
}
-
static inline void ath_debug_stat_tx(struct ath_softc *sc,
struct ath_buf *bf,
struct ath_tx_status *ts,
@@ -310,11 +320,16 @@ static inline void ath_debug_stat_tx(struct ath_softc *sc,
unsigned int flags)
{
}
-
static inline void ath_debug_stat_rx(struct ath_softc *sc,
struct ath_rx_status *rs)
{
}
+static inline void ath9k_debug_stat_ant(struct ath_softc *sc,
+ struct ath_hw_antcomb_conf *div_ant_conf,
+ int main_rssi_avg, int alt_rssi_avg)
+{
+
+}
#endif /* CONFIG_ATH9K_DEBUGFS */
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index c2bfd748eed8..9ea8e4b779c9 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -812,6 +812,7 @@ static void ath9k_hw_4k_set_gain(struct ath_hw *ah,
static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
struct ath9k_channel *chan)
{
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
struct modal_eep_4k_header *pModal;
struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
struct base_eep_header_4k *pBase = &eep->baseEepHeader;
@@ -858,6 +859,24 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
REG_WRITE(ah, AR_PHY_CCK_DETECT, regVal);
regVal = REG_READ(ah, AR_PHY_CCK_DETECT);
+
+ if (pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
+ /*
+ * If diversity combining is enabled,
+ * set MAIN to LNA1 and ALT to LNA2 initially.
+ */
+ regVal = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL);
+ regVal &= (~(AR_PHY_9285_ANT_DIV_MAIN_LNACONF |
+ AR_PHY_9285_ANT_DIV_ALT_LNACONF));
+
+ regVal |= (ATH_ANT_DIV_COMB_LNA1 <<
+ AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S);
+ regVal |= (ATH_ANT_DIV_COMB_LNA2 <<
+ AR_PHY_9285_ANT_DIV_ALT_LNACONF_S);
+ regVal &= (~(AR_PHY_9285_FAST_DIV_BIAS));
+ regVal |= (0 << AR_PHY_9285_FAST_DIV_BIAS_S);
+ REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regVal);
+ }
}
if (pModal->version >= 2) {
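
The LNA diversity block above is a standard read-modify-write of packed register fields: clear each field with its mask, then OR in the new value shifted by the field's _S position. Reduced to its essence (the mask and shift names here are illustrative, not the driver's):

#include <linux/types.h>

#define EXAMPLE_FIELD   0x00000006      /* two-bit field at bits 2:1 */
#define EXAMPLE_FIELD_S 1

/* Return regval with EXAMPLE_FIELD replaced by val. */
static inline u32 example_field_set(u32 regval, u32 val)
{
        regval &= ~EXAMPLE_FIELD;
        regval |= (val << EXAMPLE_FIELD_S) & EXAMPLE_FIELD;
        return regval;
}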
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 5205a3625e84..6d5d716adc1b 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -115,10 +115,10 @@ static int hif_usb_send_regout(struct hif_device_usb *hif_dev,
cmd->skb = skb;
cmd->hif_dev = hif_dev;
- usb_fill_bulk_urb(urb, hif_dev->udev,
- usb_sndbulkpipe(hif_dev->udev, USB_REG_OUT_PIPE),
+ usb_fill_int_urb(urb, hif_dev->udev,
+ usb_sndintpipe(hif_dev->udev, USB_REG_OUT_PIPE),
skb->data, skb->len,
- hif_usb_regout_cb, cmd);
+ hif_usb_regout_cb, cmd, 1);
usb_anchor_urb(urb, &hif_dev->regout_submitted);
ret = usb_submit_urb(urb, GFP_KERNEL);
@@ -723,11 +723,11 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
return;
}
- usb_fill_bulk_urb(urb, hif_dev->udev,
- usb_rcvbulkpipe(hif_dev->udev,
+ usb_fill_int_urb(urb, hif_dev->udev,
+ usb_rcvintpipe(hif_dev->udev,
USB_REG_IN_PIPE),
nskb->data, MAX_REG_IN_BUF_SIZE,
- ath9k_hif_usb_reg_in_cb, nskb);
+ ath9k_hif_usb_reg_in_cb, nskb, 1);
}
resubmit:
@@ -909,11 +909,11 @@ static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
goto err_skb;
}
- usb_fill_bulk_urb(urb, hif_dev->udev,
- usb_rcvbulkpipe(hif_dev->udev,
+ usb_fill_int_urb(urb, hif_dev->udev,
+ usb_rcvintpipe(hif_dev->udev,
USB_REG_IN_PIPE),
skb->data, MAX_REG_IN_BUF_SIZE,
- ath9k_hif_usb_reg_in_cb, skb);
+ ath9k_hif_usb_reg_in_cb, skb, 1);
/* Anchor URB */
usb_anchor_urb(urb, &hif_dev->reg_in_submitted);
@@ -1031,9 +1031,7 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
{
- struct usb_host_interface *alt = &hif_dev->interface->altsetting[0];
- struct usb_endpoint_descriptor *endp;
- int ret, idx;
+ int ret;
ret = ath9k_hif_usb_download_fw(hif_dev);
if (ret) {
@@ -1043,20 +1041,6 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
return ret;
}
- /* On downloading the firmware to the target, the USB descriptor of EP4
- * is 'patched' to change the type of the endpoint to Bulk. This will
- * bring down CPU usage during the scan period.
- */
- for (idx = 0; idx < alt->desc.bNumEndpoints; idx++) {
- endp = &alt->endpoint[idx].desc;
- if ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
- == USB_ENDPOINT_XFER_INT) {
- endp->bmAttributes &= ~USB_ENDPOINT_XFERTYPE_MASK;
- endp->bmAttributes |= USB_ENDPOINT_XFER_BULK;
- endp->bInterval = 0;
- }
- }
-
/* Alloc URBs */
ret = ath9k_hif_usb_alloc_urbs(hif_dev);
if (ret) {
@@ -1268,7 +1252,7 @@ static void ath9k_hif_usb_reboot(struct usb_device *udev)
if (!buf)
return;
- ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, USB_REG_OUT_PIPE),
+ ret = usb_interrupt_msg(udev, usb_sndintpipe(udev, USB_REG_OUT_PIPE),
buf, 4, NULL, HZ);
if (ret)
dev_err(&udev->dev, "ath9k_htc: USB reboot failed\n");
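
The hunks above move the register pipes from bulk to interrupt URBs, which is why the endpoint-descriptor patching could be deleted. usb_fill_int_urb() differs from usb_fill_bulk_urb() only in taking a polling interval as its final argument (1 here, matching the driver). A minimal, hypothetical submit path for illustration:

#include <linux/usb.h>

static void example_complete(struct urb *urb)
{
        /* urb->status carries the completion result. */
}

static int example_submit_int(struct usb_device *udev, unsigned int ep,
                              void *buf, int len)
{
        struct urb *urb;
        int ret;

        urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!urb)
                return -ENOMEM;

        /* Same as usb_fill_bulk_urb(), plus the trailing interval. */
        usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, ep),
                         buf, len, example_complete, NULL, 1);

        ret = usb_submit_urb(urb, GFP_KERNEL);
        if (ret)
                usb_free_urb(urb);

        return ret;
}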
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 5c1bec18c9e3..d44258172c0f 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1203,16 +1203,13 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || chip_reset) {
struct ieee80211_channel *curchan = hw->conf.chandef.chan;
- enum nl80211_channel_type channel_type =
- cfg80211_get_chandef_type(&hw->conf.chandef);
int pos = curchan->hw_value;
ath_dbg(common, CONFIG, "Set channel: %d MHz\n",
curchan->center_freq);
ath9k_cmn_update_ichannel(&priv->ah->channels[pos],
- hw->conf.chandef.chan,
- channel_type);
+ &hw->conf.chandef);
if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
ath_err(common, "Unable to set channel\n");
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index 14b701140b49..83f4927aeaca 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -78,13 +78,16 @@ static inline void ath9k_hw_antdiv_comb_conf_set(struct ath_hw *ah,
ath9k_hw_ops(ah)->antdiv_comb_conf_set(ah, antconf);
}
-static inline void ath9k_hw_antctrl_shared_chain_lnadiv(struct ath_hw *ah,
- bool enable)
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+
+static inline void ath9k_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
{
- if (ath9k_hw_ops(ah)->antctrl_shared_chain_lnadiv)
- ath9k_hw_ops(ah)->antctrl_shared_chain_lnadiv(ah, enable);
+ if (ath9k_hw_ops(ah)->set_bt_ant_diversity)
+ ath9k_hw_ops(ah)->set_bt_ant_diversity(ah, enable);
}
+#endif
+
/* Private hardware call ops */
/* PHY ops */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 4ca0cb060106..ecc6ec4a1edb 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -450,7 +450,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
ah->config.ack_6mb = 0x0;
ah->config.cwm_ignore_extcca = 0;
ah->config.pcie_clock_req = 0;
- ah->config.pcie_waen = 0;
ah->config.analog_shiftreg = 1;
for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
@@ -575,18 +574,17 @@ static int __ath9k_hw_init(struct ath_hw *ah)
* We need to do this to avoid RMW of this register. We cannot
* read the reg when chip is asleep.
*/
- ah->WARegVal = REG_READ(ah, AR_WA);
- ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
- AR_WA_ASPM_TIMER_BASED_DISABLE);
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ ah->WARegVal = REG_READ(ah, AR_WA);
+ ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
+ AR_WA_ASPM_TIMER_BASED_DISABLE);
+ }
if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
ath_err(common, "Couldn't reset chip\n");
return -EIO;
}
- if (AR_SREV_9462(ah))
- ah->WARegVal &= ~AR_WA_D3_L1_DISABLE;
-
if (AR_SREV_9565(ah)) {
ah->WARegVal |= AR_WA_BIT22;
REG_WRITE(ah, AR_WA, ah->WARegVal);
@@ -656,8 +654,6 @@ static int __ath9k_hw_init(struct ath_hw *ah)
ath9k_hw_init_cal_settings(ah);
ah->ani_function = ATH9K_ANI_ALL;
- if (AR_SREV_9280_20_OR_LATER(ah) && !AR_SREV_9300_20_OR_LATER(ah))
- ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL;
if (!AR_SREV_9300_20_OR_LATER(ah))
ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
@@ -1069,7 +1065,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
if (IS_CHAN_A_FAST_CLOCK(ah, chan))
tx_lat += 11;
- sifstime *= 2;
+ sifstime = 32;
ack_offset = 16;
slottime = 13;
} else if (IS_CHAN_QUARTER_RATE(chan)) {
@@ -1079,7 +1075,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
if (IS_CHAN_A_FAST_CLOCK(ah, chan))
tx_lat += 22;
- sifstime *= 4;
+ sifstime = 64;
ack_offset = 32;
slottime = 21;
} else {
@@ -1116,7 +1112,6 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
ctstimeout += 48 - sifstime - ah->slottime;
}
-
ath9k_hw_set_sifs_time(ah, sifstime);
ath9k_hw_setslottime(ah, slottime);
ath9k_hw_set_ack_timeout(ah, acktimeout);
@@ -1496,16 +1491,18 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
struct ath9k_channel *chan)
{
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ bool band_switch = false, mode_diff = false;
+ u8 ini_reloaded = 0;
u32 qnum;
int r;
- bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
- bool band_switch, mode_diff;
- u8 ini_reloaded;
- band_switch = (chan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ)) !=
- (ah->curchan->channelFlags & (CHANNEL_2GHZ |
- CHANNEL_5GHZ));
- mode_diff = (chan->chanmode != ah->curchan->chanmode);
+ if (pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH) {
+ u32 cur = ah->curchan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ);
+ u32 new = chan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ);
+ band_switch = (cur != new);
+ mode_diff = (chan->chanmode != ah->curchan->chanmode);
+ }
for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
if (ath9k_hw_numtxpending(ah, qnum)) {
@@ -1520,11 +1517,12 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
return false;
}
- if (edma && (band_switch || mode_diff)) {
+ if (band_switch || mode_diff) {
ath9k_hw_mark_phy_inactive(ah);
udelay(5);
- ath9k_hw_init_pll(ah, NULL);
+ if (band_switch)
+ ath9k_hw_init_pll(ah, chan);
if (ath9k_hw_fast_chan_change(ah, chan, &ini_reloaded)) {
ath_err(common, "Failed to do fast channel change\n");
@@ -1541,22 +1539,21 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
}
ath9k_hw_set_clockrate(ah);
ath9k_hw_apply_txpower(ah, chan, false);
- ath9k_hw_rfbus_done(ah);
if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
ath9k_hw_set_delta_slope(ah, chan);
ath9k_hw_spur_mitigate_freq(ah, chan);
- if (edma && (band_switch || mode_diff)) {
- ah->ah_flags |= AH_FASTCC;
- if (band_switch || ini_reloaded)
- ah->eep_ops->set_board_values(ah, chan);
+ if (band_switch || ini_reloaded)
+ ah->eep_ops->set_board_values(ah, chan);
- ath9k_hw_init_bb(ah, chan);
+ ath9k_hw_init_bb(ah, chan);
+ ath9k_hw_rfbus_done(ah);
- if (band_switch || ini_reloaded)
- ath9k_hw_init_cal(ah, chan);
+ if (band_switch || ini_reloaded) {
+ ah->ah_flags |= AH_FASTCC;
+ ath9k_hw_init_cal(ah, chan);
ah->ah_flags &= ~AH_FASTCC;
}
@@ -1778,16 +1775,11 @@ static void ath9k_hw_init_desc(struct ath_hw *ah)
/*
* Fast channel change:
* (Change synthesizer based on channel freq without resetting chip)
- *
- * Don't do FCC when
- * - Flag is not set
- * - Chip is just coming out of full sleep
- * - Channel to be set is same as current channel
- * - Channel flags are different, (eg.,moving from 2GHz to 5GHz channel)
*/
static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
{
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
int ret;
if (AR_SREV_9280(ah) && common->bus_ops->ath_bus_type == ATH_PCI)
@@ -1806,9 +1798,21 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
(CHANNEL_HALF | CHANNEL_QUARTER))
goto fail;
- if ((chan->channelFlags & CHANNEL_ALL) !=
- (ah->curchan->channelFlags & CHANNEL_ALL))
- goto fail;
+ /*
+ * If cross-band fast channel change (FCC) is not supported,
+ * bail out if either channelFlags or chanmode differ.
+ *
+ * chanmode will be different if the HT operating mode
+ * changes because of CSA.
+ */
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH)) {
+ if ((chan->channelFlags & CHANNEL_ALL) !=
+ (ah->curchan->channelFlags & CHANNEL_ALL))
+ goto fail;
+
+ if (chan->chanmode != ah->curchan->chanmode)
+ goto fail;
+ }
if (!ath9k_hw_check_alive(ah))
goto fail;
@@ -2047,7 +2051,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_apply_gpio_override(ah);
- if (AR_SREV_9565(ah) && ah->shared_chain_lnadiv)
+ if (AR_SREV_9565(ah) && common->bt_ant_diversity)
REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV, AR_BTCOEX_WL_LNADIV_FORCE_ON);
return 0;
@@ -2504,7 +2508,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
else
pCap->rts_aggr_limit = (8 * 1024);
-#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
+#ifdef CONFIG_ATH9K_RFKILL
ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT);
if (ah->rfsilent & EEP_RFSILENT_ENABLED) {
ah->rfkill_gpio =
@@ -2550,34 +2554,28 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
if (AR_SREV_9287_11_OR_LATER(ah) || AR_SREV_9271(ah))
pCap->hw_caps |= ATH9K_HW_CAP_SGI_20;
- if (AR_SREV_9285(ah))
+ if (AR_SREV_9285(ah)) {
if (ah->eep_ops->get_eeprom(ah, EEP_MODAL_VER) >= 3) {
ant_div_ctl1 =
ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
- if ((ant_div_ctl1 & 0x1) && ((ant_div_ctl1 >> 3) & 0x1))
+ if ((ant_div_ctl1 & 0x1) && ((ant_div_ctl1 >> 3) & 0x1)) {
pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB;
+ ath_info(common, "Enable LNA combining\n");
+ }
}
+ }
+
if (AR_SREV_9300_20_OR_LATER(ah)) {
if (ah->eep_ops->get_eeprom(ah, EEP_CHAIN_MASK_REDUCE))
pCap->hw_caps |= ATH9K_HW_CAP_APM;
}
-
if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
- /*
- * enable the diversity-combining algorithm only when
- * both enable_lna_div and enable_fast_div are set
- * Table for Diversity
- * ant_div_alt_lnaconf bit 0-1
- * ant_div_main_lnaconf bit 2-3
- * ant_div_alt_gaintb bit 4
- * ant_div_main_gaintb bit 5
- * enable_ant_div_lnadiv bit 6
- * enable_ant_fast_div bit 7
- */
- if ((ant_div_ctl1 >> 0x6) == 0x3)
+ if ((ant_div_ctl1 >> 0x6) == 0x3) {
pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB;
+ ath_info(common, "Enable LNA combining\n");
+ }
}
if (ath9k_hw_dfs_tested(ah))
@@ -2610,6 +2608,13 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
+ /*
+ * Fast channel change across bands is available
+ * only for AR9462 and AR9565.
+ */
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
+ pCap->hw_caps |= ATH9K_HW_CAP_FCC_BAND_SWITCH;
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index cd74b3afef7d..69a907b55a73 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -247,6 +247,8 @@ enum ath9k_hw_caps {
ATH9K_HW_CAP_DFS = BIT(16),
ATH9K_HW_WOW_DEVICE_CAPABLE = BIT(17),
ATH9K_HW_CAP_PAPRD = BIT(18),
+ ATH9K_HW_CAP_FCC_BAND_SWITCH = BIT(19),
+ ATH9K_HW_CAP_BT_ANT_DIV = BIT(20),
};
/*
@@ -309,8 +311,11 @@ struct ath9k_ops_config {
u16 ani_poll_interval; /* ANI poll interval in ms */
/* Platform specific config */
+ u32 aspm_l1_fix;
u32 xlna_gpio;
+ u32 ant_ctrl_comm2g_switch_enable;
bool xatten_margin_cfg;
+ bool alt_mingainidx;
};
enum ath9k_int {
@@ -716,11 +721,14 @@ struct ath_hw_ops {
struct ath_hw_antcomb_conf *antconf);
void (*antdiv_comb_conf_set)(struct ath_hw *ah,
struct ath_hw_antcomb_conf *antconf);
- void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable);
void (*spectral_scan_config)(struct ath_hw *ah,
struct ath_spec_scan *param);
void (*spectral_scan_trigger)(struct ath_hw *ah);
void (*spectral_scan_wait)(struct ath_hw *ah);
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+ void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
+#endif
};
struct ath_nf_limits {
@@ -765,7 +773,6 @@ struct ath_hw {
bool aspm_enabled;
bool is_monitoring;
bool need_an_top2_fixup;
- bool shared_chain_lnadiv;
u16 tx_trig_level;
u32 nf_regs[6];
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 026a2a067b46..9a1f349f9260 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -53,9 +53,9 @@ static int ath9k_btcoex_enable;
module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
-static int ath9k_enable_diversity;
-module_param_named(enable_diversity, ath9k_enable_diversity, int, 0444);
-MODULE_PARM_DESC(enable_diversity, "Enable Antenna diversity for AR9565");
+static int ath9k_bt_ant_diversity;
+module_param_named(bt_ant_diversity, ath9k_bt_ant_diversity, int, 0444);
+MODULE_PARM_DESC(bt_ant_diversity, "Enable WLAN/BT RX antenna diversity");
bool is_ath9k_unloaded;
/* We use the hw_value as an index into our private channel structure */
@@ -146,14 +146,22 @@ static struct ieee80211_rate ath9k_legacy_rates[] = {
RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
- RATE(60, 0x0b, 0),
- RATE(90, 0x0f, 0),
- RATE(120, 0x0a, 0),
- RATE(180, 0x0e, 0),
- RATE(240, 0x09, 0),
- RATE(360, 0x0d, 0),
- RATE(480, 0x08, 0),
- RATE(540, 0x0c, 0),
+ RATE(60, 0x0b, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(90, 0x0f, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(120, 0x0a, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(180, 0x0e, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(240, 0x09, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(360, 0x0d, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(480, 0x08, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(540, 0x0c, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
};
#ifdef CONFIG_MAC80211_LEDS
@@ -516,6 +524,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
static void ath9k_init_platform(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
struct ath_common *common = ath9k_hw_common(ah);
if (common->bus_ops->ath_bus_type != ATH_PCI)
@@ -525,12 +534,27 @@ static void ath9k_init_platform(struct ath_softc *sc)
ATH9K_PCI_CUS230)) {
ah->config.xlna_gpio = 9;
ah->config.xatten_margin_cfg = true;
+ ah->config.alt_mingainidx = true;
+ ah->config.ant_ctrl_comm2g_switch_enable = 0x000BBB88;
+ sc->ant_comb.low_rssi_thresh = 20;
+ sc->ant_comb.fast_div_bias = 3;
ath_info(common, "Set parameters for %s\n",
(sc->driver_data & ATH9K_PCI_CUS198) ?
"CUS198" : "CUS230");
- } else if (sc->driver_data & ATH9K_PCI_CUS217) {
+ }
+
+ if (sc->driver_data & ATH9K_PCI_CUS217)
ath_info(common, "CUS217 card detected\n");
+
+ if (sc->driver_data & ATH9K_PCI_BT_ANT_DIV) {
+ pCap->hw_caps |= ATH9K_HW_CAP_BT_ANT_DIV;
+ ath_info(common, "Set BT/WLAN RX diversity capability\n");
+ }
+
+ if (sc->driver_data & ATH9K_PCI_D3_L1_WAR) {
+ ah->config.pcie_waen = 0x0040473b;
+ ath_info(common, "Enable WAR for ASPM D3/L1\n");
}
}
@@ -584,6 +608,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
{
struct ath9k_platform_data *pdata = sc->dev->platform_data;
struct ath_hw *ah = NULL;
+ struct ath9k_hw_capabilities *pCap;
struct ath_common *common;
int ret = 0, i;
int csz = 0;
@@ -600,6 +625,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
ah->reg_ops.rmw = ath9k_reg_rmw;
atomic_set(&ah->intr_ref_cnt, -1);
sc->sc_ah = ah;
+ pCap = &ah->caps;
sc->dfs_detector = dfs_pattern_detector_init(ah, NL80211_DFS_UNSET);
@@ -631,11 +657,15 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
ath9k_init_platform(sc);
/*
- * Enable Antenna diversity only when BTCOEX is disabled
- * and the user manually requests the feature.
+ * Enable WLAN/BT RX Antenna diversity only when:
+ *
+ * - BTCOEX is disabled.
+ * - the user manually requests the feature.
+ * - the HW cap is set using the platform data.
*/
- if (!common->btcoex_enabled && ath9k_enable_diversity)
- common->antenna_diversity = 1;
+ if (!common->btcoex_enabled && ath9k_bt_ant_diversity &&
+ (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV))
+ common->bt_ant_diversity = 1;
spin_lock_init(&common->cc_lock);
@@ -710,13 +740,15 @@ static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
struct ieee80211_supported_band *sband;
struct ieee80211_channel *chan;
struct ath_hw *ah = sc->sc_ah;
+ struct cfg80211_chan_def chandef;
int i;
sband = &sc->sbands[band];
for (i = 0; i < sband->n_channels; i++) {
chan = &sband->channels[i];
ah->curchan = &ah->channels[chan->hw_value];
- ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
+ cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20);
+ ath9k_cmn_update_ichannel(ah->curchan, &chandef);
ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
}
}
@@ -835,6 +867,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_5_10_MHZ;
+ hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
#ifdef CONFIG_PM_SLEEP
if ((ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) &&
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index fff5d3ccc663..2f831db396ac 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -41,7 +41,7 @@ void ath_tx_complete_poll_work(struct work_struct *work)
txq->axq_tx_inprogress = true;
}
}
- ath_txq_unlock_complete(sc, txq);
+ ath_txq_unlock(sc, txq);
}
if (needreset) {
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 2ef05ebffbcf..a3eff0986a3f 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -583,9 +583,9 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
rs->rs_rate = MS(ads.ds_rxstatus0, AR_RxRate);
rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;
+ rs->rs_firstaggr = (ads.ds_rxstatus8 & AR_RxFirstAggr) ? 1 : 0;
rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
- rs->rs_moreaggr =
- (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
+ rs->rs_moreaggr = (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
/* directly mapped flags for ieee80211_rx_status */
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index b02dfce964b4..bfccaceed44e 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -140,6 +140,7 @@ struct ath_rx_status {
int8_t rs_rssi_ext1;
int8_t rs_rssi_ext2;
u8 rs_isaggr;
+ u8 rs_firstaggr;
u8 rs_moreaggr;
u8 rs_num_delims;
u8 rs_flags;
@@ -569,6 +570,7 @@ struct ar5416_desc {
#define AR_RxAggr 0x00020000
#define AR_PostDelimCRCErr 0x00040000
#define AR_RxStatusRsvd71 0x3ff80000
+#define AR_RxFirstAggr 0x20000000
#define AR_DecryptBusyErr 0x40000000
#define AR_KeyMiss 0x80000000
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index cb5a65553ac7..e4f65900132d 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -237,9 +237,6 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
ath_restart_work(sc);
}
- if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3)
- ath_ant_comb_update(sc);
-
ieee80211_wake_queues(sc->hw);
return true;
@@ -965,6 +962,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_vif *avp = (void *)vif->drv_priv;
+ struct ath_node *an = &avp->mcast_node;
mutex_lock(&sc->mutex);
@@ -978,6 +977,12 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
if (ath9k_uses_beacons(vif->type))
ath9k_beacon_assign_slot(sc, vif);
+ an->sc = sc;
+ an->sta = NULL;
+ an->vif = vif;
+ an->no_ps_filter = true;
+ ath_tx_node_init(sc, an);
+
mutex_unlock(&sc->mutex);
return 0;
}
@@ -1015,6 +1020,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
{
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_vif *avp = (void *)vif->drv_priv;
ath_dbg(common, CONFIG, "Detach Interface\n");
@@ -1025,10 +1031,15 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
if (ath9k_uses_beacons(vif->type))
ath9k_beacon_remove_slot(sc, vif);
+ if (sc->csa_vif == vif)
+ sc->csa_vif = NULL;
+
ath9k_ps_wakeup(sc);
ath9k_calculate_summary_state(hw, NULL);
ath9k_ps_restore(sc);
+ ath_tx_node_cleanup(sc, &avp->mcast_node);
+
mutex_unlock(&sc->mutex);
}
@@ -1192,8 +1203,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || reset_channel) {
struct ieee80211_channel *curchan = hw->conf.chandef.chan;
- enum nl80211_channel_type channel_type =
- cfg80211_get_chandef_type(&conf->chandef);
int pos = curchan->hw_value;
int old_pos = -1;
unsigned long flags;
@@ -1201,8 +1210,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
if (ah->curchan)
old_pos = ah->curchan - &ah->channels[0];
- ath_dbg(common, CONFIG, "Set channel: %d MHz type: %d\n",
- curchan->center_freq, channel_type);
+ ath_dbg(common, CONFIG, "Set channel: %d MHz width: %d\n",
+ curchan->center_freq, hw->conf.chandef.width);
/* update survey stats for the old channel before switching */
spin_lock_irqsave(&common->cc_lock, flags);
@@ -1210,7 +1219,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
spin_unlock_irqrestore(&common->cc_lock, flags);
ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
- curchan, channel_type);
+ &conf->chandef);
/*
* If the operating channel changes, change the survey in-use flags
@@ -1373,9 +1382,6 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
struct ath_softc *sc = hw->priv;
struct ath_node *an = (struct ath_node *) sta->drv_priv;
- if (!sta->ht_cap.ht_supported)
- return;
-
switch (cmd) {
case STA_NOTIFY_SLEEP:
an->sleeping = true;
@@ -2093,7 +2099,7 @@ static void ath9k_wow_add_pattern(struct ath_softc *sc,
{
struct ath_hw *ah = sc->sc_ah;
struct ath9k_wow_pattern *wow_pattern = NULL;
- struct cfg80211_wowlan_trig_pkt_pattern *patterns = wowlan->patterns;
+ struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
int mask_len;
s8 i = 0;
@@ -2314,6 +2320,19 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
clear_bit(SC_OP_SCANNING, &sc->sc_flags);
}
+static void ath9k_channel_switch_beacon(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *chandef)
+{
+ struct ath_softc *sc = hw->priv;
+
+ /* mac80211 does not support CSA in multi-if cases (yet) */
+ if (WARN_ON(sc->csa_vif))
+ return;
+
+ sc->csa_vif = vif;
+}
+
struct ieee80211_ops ath9k_ops = {
.tx = ath9k_tx,
.start = ath9k_start,
@@ -2358,8 +2377,8 @@ struct ieee80211_ops ath9k_ops = {
#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
.sta_add_debugfs = ath9k_sta_add_debugfs,
- .sta_remove_debugfs = ath9k_sta_remove_debugfs,
#endif
.sw_scan_start = ath9k_sw_scan_start,
.sw_scan_complete = ath9k_sw_scan_complete,
+ .channel_switch_beacon = ath9k_channel_switch_beacon,
};
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index c585c9b35973..d089a7cf01c4 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -29,6 +29,60 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
+
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x1C71),
+ .driver_data = ATH9K_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE01F),
+ .driver_data = ATH9K_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ 0x11AD, /* LITEON */
+ 0x6632),
+ .driver_data = ATH9K_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ 0x11AD, /* LITEON */
+ 0x6642),
+ .driver_data = ATH9K_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ PCI_VENDOR_ID_QMI,
+ 0x0306),
+ .driver_data = ATH9K_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ 0x185F, /* WNC */
+ 0x309D),
+ .driver_data = ATH9K_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ 0x10CF, /* Fujitsu */
+ 0x147C),
+ .driver_data = ATH9K_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ 0x10CF, /* Fujitsu */
+ 0x147D),
+ .driver_data = ATH9K_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ 0x10CF, /* Fujitsu */
+ 0x1536),
+ .driver_data = ATH9K_PCI_D3_L1_WAR },
+
+ /* AR9285 card for Asus */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002B,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2C37),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+
{ PCI_VDEVICE(ATHEROS, 0x002B) }, /* PCI-E */
{ PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */
{ PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */
@@ -40,29 +94,106 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
0x0032,
PCI_VENDOR_ID_AZWAVE,
0x2086),
- .driver_data = ATH9K_PCI_CUS198 },
+ .driver_data = ATH9K_PCI_CUS198 | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0032,
PCI_VENDOR_ID_AZWAVE,
0x1237),
- .driver_data = ATH9K_PCI_CUS198 },
+ .driver_data = ATH9K_PCI_CUS198 | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0032,
PCI_VENDOR_ID_AZWAVE,
0x2126),
- .driver_data = ATH9K_PCI_CUS198 },
+ .driver_data = ATH9K_PCI_CUS198 | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x126A),
+ .driver_data = ATH9K_PCI_CUS198 | ATH9K_PCI_BT_ANT_DIV },
/* PCI-E CUS230 */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0032,
PCI_VENDOR_ID_AZWAVE,
0x2152),
- .driver_data = ATH9K_PCI_CUS230 },
+ .driver_data = ATH9K_PCI_CUS230 | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0032,
PCI_VENDOR_ID_FOXCONN,
0xE075),
- .driver_data = ATH9K_PCI_CUS230 },
+ .driver_data = ATH9K_PCI_CUS230 | ATH9K_PCI_BT_ANT_DIV },
+
+ /* WB225 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3119),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3122),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x185F, /* WNC */
+ 0x3119),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x185F, /* WNC */
+ 0x3027),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x4105),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x4106),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x410D),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x410E),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x410F),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0xC706),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0xC680),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0xC708),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_LENOVO,
+ 0x3218),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_LENOVO,
+ 0x3219),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
{ PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */
{ PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */
@@ -229,6 +360,22 @@ static void ath_pci_aspm_init(struct ath_common *common)
return;
}
+ /*
+ * 0x70c - Ack Frequency Register.
+ *
+ * Bits 27:29 - DEFAULT_L1_ENTRANCE_LATENCY.
+ *
+ * 000 : 1 us
+ * 001 : 2 us
+ * 010 : 4 us
+ * 011 : 8 us
+ * 100 : 16 us
+ * 101 : 32 us
+ * 110/111 : 64 us
+ */
+ if (AR_SREV_9462(ah))
+ pci_read_config_dword(pdev, 0x70c, &ah->config.aspm_l1_fix);
+
pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &aspm);
if (aspm & (PCI_EXP_LNKCTL_ASPM_L0S | PCI_EXP_LNKCTL_ASPM_L1)) {
ah->aspm_enabled = true;
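
Given the bit layout documented above, decoding the saved value is a three-bit extract at bit 27. A small illustrative helper (not part of the patch) mapping the field to the microsecond values from the table:

#include <linux/types.h>

/* DEFAULT_L1_ENTRANCE_LATENCY lives in bits 29:27 of register 0x70c. */
static unsigned int example_l1_entrance_latency_us(u32 ack_freq)
{
        static const unsigned int us[8] = { 1, 2, 4, 8, 16, 32, 64, 64 };

        return us[(ack_freq >> 27) & 0x7];
}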
diff --git a/drivers/net/wireless/ath/ath9k/phy.h b/drivers/net/wireless/ath/ath9k/phy.h
index 8b380305b0fc..4a1b99238ec2 100644
--- a/drivers/net/wireless/ath/ath9k/phy.h
+++ b/drivers/net/wireless/ath/ath9k/phy.h
@@ -48,4 +48,11 @@
#define AR_PHY_PLL_CONTROL 0x16180
#define AR_PHY_PLL_MODE 0x16184
+enum ath9k_ant_div_comb_lna_conf {
+ ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2,
+ ATH_ANT_DIV_COMB_LNA2,
+ ATH_ANT_DIV_COMB_LNA1,
+ ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2,
+};
+
#endif
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 7eb1f4b458e4..d3d7c51fa6c8 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1275,15 +1275,21 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
}
static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *priv_sta)
{
struct ath_softc *sc = priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_rate_priv *ath_rc_priv = priv_sta;
int i, j = 0;
+ u32 rate_flags = ieee80211_chandef_rate_flags(&sc->hw->conf.chandef);
for (i = 0; i < sband->n_bitrates; i++) {
if (sta->supp_rates[sband->band] & BIT(i)) {
+ if ((rate_flags & sband->bitrates[i].flags)
+ != rate_flags)
+ continue;
+
ath_rc_priv->neg_rates.rs_rates[j]
= (sband->bitrates[i].bitrate * 2) / 10;
j++;
@@ -1313,6 +1319,7 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
}
static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *priv_sta,
u32 changed)
{
@@ -1324,8 +1331,8 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
ath_rc_init(sc, priv_sta);
ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG,
- "Operating HT Bandwidth changed to: %d\n",
- cfg80211_get_chandef_type(&sc->hw->conf.chandef));
+ "Operating Bandwidth changed to: %d\n",
+ sc->hw->conf.chandef.width);
}
}
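
The new filter in ath_rate_init() keeps a legacy rate only if it advertises every flag the current chandef requires, e.g. IEEE80211_RATE_SUPPORTS_5MHZ on a 5 MHz channel. The test reduces to a mask-subset check; an illustrative helper:

#include <linux/types.h>

/* True iff the bitrate supports all flags required by the chandef. */
static bool example_rate_usable(u32 required_flags, u32 bitrate_flags)
{
        return (required_flags & bitrate_flags) == required_flags;
}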
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 865e043e8aa6..4ee472a5a4e4 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -42,8 +42,6 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
struct ath_desc *ds;
struct sk_buff *skb;
- ATH_RXBUF_RESET(bf);
-
ds = bf->bf_desc;
ds->ds_link = 0; /* link to null */
ds->ds_data = bf->bf_buf_addr;
@@ -70,6 +68,14 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
sc->rx.rxlink = &ds->ds_link;
}
+static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_buf *bf)
+{
+ if (sc->rx.buf_hold)
+ ath_rx_buf_link(sc, sc->rx.buf_hold);
+
+ sc->rx.buf_hold = bf;
+}
+
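
ath_rx_buf_relink() introduces a one-buffer holdback: a completed buffer is parked in buf_hold and only re-linked into the hardware's descriptor list when the next buffer completes, and ath_get_next_rx_buf() below stops when it reaches the held buffer. A skeleton of the idea with hypothetical types (the stub stands in for ath_rx_buf_link()):

struct example_buf;

struct example_ring {
        struct example_buf *buf_hold;
};

/* Stand-in for handing a buffer back to the DMA descriptor list. */
static void example_link_to_hw(struct example_ring *ring,
                               struct example_buf *bf)
{
}

static void example_relink(struct example_ring *ring, struct example_buf *bf)
{
        /* Re-link the previously held buffer, if any... */
        if (ring->buf_hold)
                example_link_to_hw(ring, ring->buf_hold);

        /* ...and hold the newest one back until the next completion. */
        ring->buf_hold = bf;
}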
static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
/* XXX block beacon interrupts */
@@ -117,7 +123,6 @@ static bool ath_rx_edma_buf_link(struct ath_softc *sc,
skb = bf->bf_mpdu;
- ATH_RXBUF_RESET(bf);
memset(skb->data, 0, ah->caps.rx_status_len);
dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
ah->caps.rx_status_len, DMA_TO_DEVICE);
@@ -185,7 +190,7 @@ static void ath_rx_edma_cleanup(struct ath_softc *sc)
static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
- skb_queue_head_init(&rx_edma->rx_fifo);
+ __skb_queue_head_init(&rx_edma->rx_fifo);
rx_edma->rx_fifo_hwsize = size;
}
@@ -432,6 +437,7 @@ int ath_startrecv(struct ath_softc *sc)
if (list_empty(&sc->rx.rxbuf))
goto start_recv;
+ sc->rx.buf_hold = NULL;
sc->rx.rxlink = NULL;
list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
ath_rx_buf_link(sc, bf);
@@ -677,6 +683,9 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
}
bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+ if (bf == sc->rx.buf_hold)
+ return NULL;
+
ds = bf->bf_desc;
/*
@@ -755,7 +764,6 @@ static bool ath9k_rx_accept(struct ath_common *common,
bool is_mc, is_valid_tkip, strip_mic, mic_error;
struct ath_hw *ah = common->ah;
__le16 fc;
- u8 rx_status_len = ah->caps.rx_status_len;
fc = hdr->frame_control;
@@ -777,25 +785,6 @@ static bool ath9k_rx_accept(struct ath_common *common,
!test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
- if (!rx_stats->rs_datalen) {
- RX_STAT_INC(rx_len_err);
- return false;
- }
-
- /*
- * rs_status follows rs_datalen so if rs_datalen is too large
- * we can take a hint that hardware corrupted it, so ignore
- * those frames.
- */
- if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) {
- RX_STAT_INC(rx_len_err);
- return false;
- }
-
- /* Only use error bits from the last fragment */
- if (rx_stats->rs_more)
- return true;
-
mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
!ieee80211_has_morefrags(fc) &&
!(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
@@ -814,8 +803,6 @@ static bool ath9k_rx_accept(struct ath_common *common,
rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
mic_error = false;
}
- if (rx_stats->rs_status & ATH9K_RXERR_PHY)
- return false;
if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
(!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
@@ -865,6 +852,17 @@ static int ath9k_process_rate(struct ath_common *common,
band = hw->conf.chandef.chan->band;
sband = hw->wiphy->bands[band];
+ switch (hw->conf.chandef.width) {
+ case NL80211_CHAN_WIDTH_5:
+ rxs->flag |= RX_FLAG_5MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_10:
+ rxs->flag |= RX_FLAG_10MHZ;
+ break;
+ default:
+ break;
+ }
+
if (rx_stats->rs_rate & 0x80) {
/* HT rate */
rxs->flag |= RX_FLAG_HT;
@@ -898,129 +896,65 @@ static int ath9k_process_rate(struct ath_common *common,
static void ath9k_process_rssi(struct ath_common *common,
struct ieee80211_hw *hw,
- struct ieee80211_hdr *hdr,
- struct ath_rx_status *rx_stats)
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs)
{
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = common->ah;
int last_rssi;
int rssi = rx_stats->rs_rssi;
- if (!rx_stats->is_mybeacon ||
- ((ah->opmode != NL80211_IFTYPE_STATION) &&
- (ah->opmode != NL80211_IFTYPE_ADHOC)))
+ /*
+ * RSSI is not available for subframes in an A-MPDU.
+ */
+ if (rx_stats->rs_moreaggr) {
+ rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
return;
-
- if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
- ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
-
- last_rssi = sc->last_rssi;
- if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
- rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
- if (rssi < 0)
- rssi = 0;
-
- /* Update Beacon RSSI, this is used by ANI. */
- ah->stats.avgbrssi = rssi;
-}
-
-/*
- * For Decrypt or Demic errors, we only mark packet status here and always push
- * up the frame up to let mac80211 handle the actual error case, be it no
- * decryption key or real decryption error. This let us keep statistics there.
- */
-static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
- struct ieee80211_hdr *hdr,
- struct ath_rx_status *rx_stats,
- struct ieee80211_rx_status *rx_status,
- bool *decrypt_error)
-{
- struct ieee80211_hw *hw = sc->hw;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- bool discard_current = sc->rx.discard_next;
-
- sc->rx.discard_next = rx_stats->rs_more;
- if (discard_current)
- return -EINVAL;
+ }
/*
- * everything but the rate is checked here, the rate check is done
- * separately to avoid doing two lookups for a rate for each frame.
+ * Check if the RSSI for the last subframe in an A-MPDU
+ * or an unaggregated frame is valid.
*/
- if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
- return -EINVAL;
-
- /* Only use status info from the last fragment */
- if (rx_stats->rs_more)
- return 0;
+ if (rx_stats->rs_rssi == ATH9K_RSSI_BAD) {
+ rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
+ return;
+ }
- if (ath9k_process_rate(common, hw, rx_stats, rx_status))
- return -EINVAL;
+ /*
+ * Update Beacon RSSI, this is used by ANI.
+ */
+ if (rx_stats->is_mybeacon &&
+ ((ah->opmode == NL80211_IFTYPE_STATION) ||
+ (ah->opmode == NL80211_IFTYPE_ADHOC))) {
+ ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
+ last_rssi = sc->last_rssi;
- ath9k_process_rssi(common, hw, hdr, rx_stats);
+ if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
+ rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
+ if (rssi < 0)
+ rssi = 0;
- rx_status->band = hw->conf.chandef.chan->band;
- rx_status->freq = hw->conf.chandef.chan->center_freq;
- rx_status->signal = ah->noise + rx_stats->rs_rssi;
- rx_status->antenna = rx_stats->rs_antenna;
- rx_status->flag |= RX_FLAG_MACTIME_END;
- if (rx_stats->rs_moreaggr)
- rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+ ah->stats.avgbrssi = rssi;
+ }
- sc->rx.discard_next = false;
- return 0;
+ rxs->signal = ah->noise + rx_stats->rs_rssi;
}
-static void ath9k_rx_skb_postprocess(struct ath_common *common,
- struct sk_buff *skb,
- struct ath_rx_status *rx_stats,
- struct ieee80211_rx_status *rxs,
- bool decrypt_error)
+static void ath9k_process_tsf(struct ath_rx_status *rs,
+ struct ieee80211_rx_status *rxs,
+ u64 tsf)
{
- struct ath_hw *ah = common->ah;
- struct ieee80211_hdr *hdr;
- int hdrlen, padpos, padsize;
- u8 keyix;
- __le16 fc;
-
- /* see if any padding is done by the hw and remove it */
- hdr = (struct ieee80211_hdr *) skb->data;
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- fc = hdr->frame_control;
- padpos = ieee80211_hdrlen(fc);
-
- /* The MAC header is padded to have 32-bit boundary if the
- * packet payload is non-zero. The general calculation for
- * padsize would take into account odd header lengths:
- * padsize = (4 - padpos % 4) % 4; However, since only
- * even-length headers are used, padding can only be 0 or 2
- * bytes and we can optimize this a bit. In addition, we must
- * not try to remove padding from short control frames that do
- * not have payload. */
- padsize = padpos & 3;
- if (padsize && skb->len>=padpos+padsize+FCS_LEN) {
- memmove(skb->data + padsize, skb->data, padpos);
- skb_pull(skb, padsize);
- }
+ u32 tsf_lower = tsf & 0xffffffff;
- keyix = rx_stats->rs_keyix;
+ rxs->mactime = (tsf & ~0xffffffffULL) | rs->rs_tstamp;
+ if (rs->rs_tstamp > tsf_lower &&
+ unlikely(rs->rs_tstamp - tsf_lower > 0x10000000))
+ rxs->mactime -= 0x100000000ULL;
- if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
- ieee80211_has_protected(fc)) {
- rxs->flag |= RX_FLAG_DECRYPTED;
- } else if (ieee80211_has_protected(fc)
- && !decrypt_error && skb->len >= hdrlen + 4) {
- keyix = skb->data[hdrlen + 3] >> 6;
-
- if (test_bit(keyix, common->keymap))
- rxs->flag |= RX_FLAG_DECRYPTED;
- }
- if (ah->sw_mgmt_crypto &&
- (rxs->flag & RX_FLAG_DECRYPTED) &&
- ieee80211_is_mgmt(fc))
- /* Use software decrypt for management frames. */
- rxs->flag &= ~RX_FLAG_DECRYPTED;
+ if (rs->rs_tstamp < tsf_lower &&
+ unlikely(tsf_lower - rs->rs_tstamp > 0x10000000))
+ rxs->mactime += 0x100000000ULL;
}
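
ath9k_process_tsf() extends the 32-bit hardware timestamp to 64 bits using the TSF sampled at interrupt time; when the two 32-bit values are more than 0x10000000 apart, the TSF must have wrapped between frame reception and the read, so one full 2^32 period is subtracted or added. A standalone restatement of the logic, with a worked value: for tsf = 0x100000010 and rs_tstamp = 0xfffffff0 the frame predates the wrap, giving mactime = 0xfffffff0.

#include <linux/types.h>

static u64 example_extend_tsf(u64 tsf, u32 rs_tstamp)
{
        u32 tsf_lower = tsf & 0xffffffff;
        u64 mactime = (tsf & ~0xffffffffULL) | rs_tstamp;

        /* Timestamp far ahead of the TSF: frame arrived before the wrap. */
        if (rs_tstamp > tsf_lower && rs_tstamp - tsf_lower > 0x10000000)
                mactime -= 0x100000000ULL;

        /* Timestamp far behind the TSF: frame arrived after the wrap. */
        if (rs_tstamp < tsf_lower && tsf_lower - rs_tstamp > 0x10000000)
                mactime += 0x100000000ULL;

        return mactime;
}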
#ifdef CONFIG_ATH9K_DEBUGFS
@@ -1133,6 +1067,234 @@ static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
#endif
}
+static bool ath9k_is_mybeacon(struct ath_softc *sc, struct ieee80211_hdr *hdr)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (ieee80211_is_beacon(hdr->frame_control)) {
+ RX_STAT_INC(rx_beacons);
+ if (!is_zero_ether_addr(common->curbssid) &&
+ ether_addr_equal(hdr->addr3, common->curbssid))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * For Decrypt or Demic errors, we only mark the packet status here and
+ * always push the frame up to let mac80211 handle the actual error case,
+ * be it a missing decryption key or a real decryption error. This lets
+ * us keep the statistics there.
+ */
+static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
+ struct sk_buff *skb,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rx_status,
+ bool *decrypt_error, u64 tsf)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_hdr *hdr;
+ bool discard_current = sc->rx.discard_next;
+ int ret = 0;
+
+ /*
+ * Discard corrupt descriptors which are marked in
+ * ath_get_next_rx_buf().
+ */
+ sc->rx.discard_next = rx_stats->rs_more;
+ if (discard_current)
+ return -EINVAL;
+
+ /*
+ * Discard zero-length packets.
+ */
+ if (!rx_stats->rs_datalen) {
+ RX_STAT_INC(rx_len_err);
+ return -EINVAL;
+ }
+
+ /*
+ * rs_status follows rs_datalen, so an oversized rs_datalen is a
+ * hint that the hardware corrupted the descriptor; ignore such
+ * frames.
+ */
+ if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
+ RX_STAT_INC(rx_len_err);
+ return -EINVAL;
+ }
+
+ /* Only use status info from the last fragment */
+ if (rx_stats->rs_more)
+ return 0;
+
+ /*
+ * Return immediately if the RX descriptor has been marked
+ * as corrupt based on the various error bits.
+ *
+ * This is different from the other corrupt descriptor
+ * condition handled above.
+ */
+ if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len);
+
+ ath9k_process_tsf(rx_stats, rx_status, tsf);
+ ath_debug_stat_rx(sc, rx_stats);
+
+ /*
+ * Process PHY errors and return so that the packet
+ * can be dropped.
+ */
+ if (rx_stats->rs_status & ATH9K_RXERR_PHY) {
+ ath9k_dfs_process_phyerr(sc, hdr, rx_stats, rx_status->mactime);
+ if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime))
+ RX_STAT_INC(rx_spectral);
+
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /*
+ * Everything but the rate is checked here; the rate check is done
+ * separately to avoid doing two rate lookups per frame.
+ */
+ if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ rx_stats->is_mybeacon = ath9k_is_mybeacon(sc, hdr);
+ if (rx_stats->is_mybeacon) {
+ sc->hw_busy_count = 0;
+ ath_start_rx_poll(sc, 3);
+ }
+
+ if (ath9k_process_rate(common, hw, rx_stats, rx_status)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ath9k_process_rssi(common, hw, rx_stats, rx_status);
+
+ rx_status->band = hw->conf.chandef.chan->band;
+ rx_status->freq = hw->conf.chandef.chan->center_freq;
+ rx_status->antenna = rx_stats->rs_antenna;
+ rx_status->flag |= RX_FLAG_MACTIME_END;
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+ if (ieee80211_is_data_present(hdr->frame_control) &&
+ !ieee80211_is_qos_nullfunc(hdr->frame_control))
+ sc->rx.num_pkts++;
+#endif
+
+exit:
+ sc->rx.discard_next = false;
+ return ret;
+}
+
+static void ath9k_rx_skb_postprocess(struct ath_common *common,
+ struct sk_buff *skb,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs,
+ bool decrypt_error)
+{
+ struct ath_hw *ah = common->ah;
+ struct ieee80211_hdr *hdr;
+ int hdrlen, padpos, padsize;
+ u8 keyix;
+ __le16 fc;
+
+ /* see if any padding is done by the hw and remove it */
+ hdr = (struct ieee80211_hdr *) skb->data;
+ hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+ fc = hdr->frame_control;
+ padpos = ieee80211_hdrlen(fc);
+
+ /* The MAC header is padded to a 32-bit boundary if the
+ * packet payload is non-zero. The general calculation for
+ * padsize would take into account odd header lengths:
+ * padsize = (4 - padpos % 4) % 4; However, since only
+ * even-length headers are used, padding can only be 0 or 2
+ * bytes and we can optimize this a bit. In addition, we must
+ * not try to remove padding from short control frames that do
+ * not have payload. */
+ padsize = padpos & 3;
+ if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
+ memmove(skb->data + padsize, skb->data, padpos);
+ skb_pull(skb, padsize);
+ }
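+ /* Example: a 26-byte QoS data header gives padpos = 26 and
+ * padsize = 26 & 3 = 2; the two hw pad bytes sit between the
+ * header and the payload, so the header is moved forward over
+ * them and skb_pull() drops them. A 24-byte header needs none. */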
+
+ keyix = rx_stats->rs_keyix;
+
+ if (keyix != ATH9K_RXKEYIX_INVALID && !decrypt_error &&
+ ieee80211_has_protected(fc)) {
+ rxs->flag |= RX_FLAG_DECRYPTED;
+ } else if (ieee80211_has_protected(fc)
+ && !decrypt_error && skb->len >= hdrlen + 4) {
+ keyix = skb->data[hdrlen + 3] >> 6;
+
+ if (test_bit(keyix, common->keymap))
+ rxs->flag |= RX_FLAG_DECRYPTED;
+ }
+ if (ah->sw_mgmt_crypto &&
+ (rxs->flag & RX_FLAG_DECRYPTED) &&
+ ieee80211_is_mgmt(fc))
+ /* Use software decrypt for management frames. */
+ rxs->flag &= ~RX_FLAG_DECRYPTED;
+}
+
+/*
+ * Run the LNA combining algorithm only in these cases:
+ *
+ * Standalone WLAN cards with both LNA/Antenna diversity
+ * enabled in the EEPROM.
+ *
+ * WLAN+BT cards that are in the supported card list
+ * in ath_pci_id_table, when the user has loaded the
+ * driver with "bt_ant_diversity" set to true.
+ */
+static void ath9k_antenna_check(struct ath_softc *sc,
+ struct ath_rx_status *rs)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB))
+ return;
+
+ /*
+ * All MPDUs in an aggregate will use the same LNA
+ * as the first MPDU.
+ */
+ if (rs->rs_isaggr && !rs->rs_firstaggr)
+ return;
+
+ /*
+ * Change the default rx antenna if rx diversity
+ * chooses the other antenna 3 times in a row.
+ */
+ if (sc->rx.defant != rs->rs_antenna) {
+ if (++sc->rx.rxotherant >= 3)
+ ath_setdefantenna(sc, rs->rs_antenna);
+ } else {
+ sc->rx.rxotherant = 0;
+ }
+
+ if (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV) {
+ if (common->bt_ant_diversity)
+ ath_ant_comb_scan(sc, rs);
+ } else {
+ ath_ant_comb_scan(sc, rs);
+ }
+}
+
static void ath9k_apply_ampdu_details(struct ath_softc *sc,
struct ath_rx_status *rs, struct ieee80211_rx_status *rxs)
{
@@ -1159,15 +1321,12 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_hw *hw = sc->hw;
- struct ieee80211_hdr *hdr;
int retval;
struct ath_rx_status rs;
enum ath9k_rx_qtype qtype;
bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
int dma_type;
- u8 rx_status_len = ah->caps.rx_status_len;
u64 tsf = 0;
- u32 tsf_lower = 0;
unsigned long flags;
dma_addr_t new_buf_addr;
@@ -1179,7 +1338,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
tsf = ath9k_hw_gettsf64(ah);
- tsf_lower = tsf & 0xffffffff;
do {
bool decrypt_error = false;
@@ -1206,55 +1364,14 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
else
hdr_skb = skb;
- hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
rxs = IEEE80211_SKB_RXCB(hdr_skb);
- if (ieee80211_is_beacon(hdr->frame_control)) {
- RX_STAT_INC(rx_beacons);
- if (!is_zero_ether_addr(common->curbssid) &&
- ether_addr_equal(hdr->addr3, common->curbssid))
- rs.is_mybeacon = true;
- else
- rs.is_mybeacon = false;
- }
- else
- rs.is_mybeacon = false;
-
- if (ieee80211_is_data_present(hdr->frame_control) &&
- !ieee80211_is_qos_nullfunc(hdr->frame_control))
- sc->rx.num_pkts++;
-
- ath_debug_stat_rx(sc, &rs);
-
memset(rxs, 0, sizeof(struct ieee80211_rx_status));
- rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
- if (rs.rs_tstamp > tsf_lower &&
- unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
- rxs->mactime -= 0x100000000ULL;
-
- if (rs.rs_tstamp < tsf_lower &&
- unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
- rxs->mactime += 0x100000000ULL;
-
- if (rs.rs_phyerr == ATH9K_PHYERR_RADAR)
- ath9k_dfs_process_phyerr(sc, hdr, &rs, rxs->mactime);
-
- if (rs.rs_status & ATH9K_RXERR_PHY) {
- if (ath_process_fft(sc, hdr, &rs, rxs->mactime)) {
- RX_STAT_INC(rx_spectral);
- goto requeue_drop_frag;
- }
- }
-
- retval = ath9k_rx_skb_preprocess(sc, hdr, &rs, rxs,
- &decrypt_error);
+ retval = ath9k_rx_skb_preprocess(sc, hdr_skb, &rs, rxs,
+ &decrypt_error, tsf);
if (retval)
goto requeue_drop_frag;
- if (rs.is_mybeacon) {
- sc->hw_busy_count = 0;
- ath_start_rx_poll(sc, 3);
- }
/* Ensure we always have an skb to requeue once we are done
* processing the current buffer's skb */
requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);
@@ -1308,8 +1425,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
sc->rx.frag = skb;
goto requeue;
}
- if (rs.rs_status & ATH9K_RXERR_CORRUPT_DESC)
- goto requeue_drop_frag;
if (sc->rx.frag) {
int space = skb->len - skb_tailroom(hdr_skb);
@@ -1328,22 +1443,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
skb = hdr_skb;
}
-
- if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
-
- /*
- * change the default rx antenna if rx diversity
- * chooses the other antenna 3 times in a row.
- */
- if (sc->rx.defant != rs.rs_antenna) {
- if (++sc->rx.rxotherant >= 3)
- ath_setdefantenna(sc, rs.rs_antenna);
- } else {
- sc->rx.rxotherant = 0;
- }
-
- }
-
if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
skb_trim(skb, skb->len - 8);
@@ -1355,8 +1454,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
ath_rx_ps(sc, skb, rs.is_mybeacon);
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
- if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx == 3)
- ath_ant_comb_scan(sc, &rs);
+ ath9k_antenna_check(sc, &rs);
ath9k_apply_ampdu_details(sc, &rs, rxs);
@@ -1375,7 +1473,7 @@ requeue:
if (edma) {
ath_rx_edma_buf_link(sc, qtype);
} else {
- ath_rx_buf_link(sc, bf);
+ ath_rx_buf_relink(sc, bf);
ath9k_hw_rxena(ah);
}
} while (1);
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 5af97442ac37..a13b2d143d9e 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -893,9 +893,9 @@
#define AR_SREV_9485(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9485))
-#define AR_SREV_9485_11(_ah) \
- (AR_SREV_9485(_ah) && \
- ((_ah)->hw_version.macRev == AR_SREV_REVISION_9485_11))
+#define AR_SREV_9485_11_OR_LATER(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9485) && \
+ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9485_11))
#define AR_SREV_9485_OR_LATER(_ah) \
(((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9485))
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 927992732620..35b515fe3ffa 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -135,6 +135,9 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
{
+ if (!tid->an->sta)
+ return;
+
ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
seqno << IEEE80211_SEQ_SEQ_SHIFT);
}
@@ -168,6 +171,71 @@ static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
}
}
+static struct ath_atx_tid *
+ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ u8 tidno = 0;
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ tidno = ieee80211_get_qos_ctl(hdr)[0];
+
+ tidno &= IEEE80211_QOS_CTL_TID_MASK;
+ return ATH_AN_2_TID(an, tidno);
+}
+
+static bool ath_tid_has_buffered(struct ath_atx_tid *tid)
+{
+ return !skb_queue_empty(&tid->buf_q) || !skb_queue_empty(&tid->retry_q);
+}
+
+static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid)
+{
+ struct sk_buff *skb;
+
+ skb = __skb_dequeue(&tid->retry_q);
+ if (!skb)
+ skb = __skb_dequeue(&tid->buf_q);
+
+ return skb;
+}
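+/*
+ * Draining the retry queue before the regular queue lets subframes at the
+ * head of the BlockAck window complete first, so the window can advance
+ * instead of stalling behind unacked sequence numbers.
+ */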
+
+/*
+ * ath_tx_tid_change_state:
+ * - clears the A-MPDU flag of the previous session
+ * - forces sequence number allocation to fix the next BlockAck window
+ */
+static void
+ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
+{
+ struct ath_txq *txq = tid->ac->txq;
+ struct ieee80211_tx_info *tx_info;
+ struct sk_buff *skb, *tskb;
+ struct ath_buf *bf;
+ struct ath_frame_info *fi;
+
+ skb_queue_walk_safe(&tid->buf_q, skb, tskb) {
+ fi = get_frame_info(skb);
+ bf = fi->bf;
+
+ tx_info = IEEE80211_SKB_CB(skb);
+ tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+
+ if (bf)
+ continue;
+
+ bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+ if (!bf) {
+ __skb_unlink(skb, &tid->buf_q);
+ ath_txq_skb_done(sc, txq, skb);
+ ieee80211_free_txskb(sc->hw, skb);
+ continue;
+ }
+ }
+}
+
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
struct ath_txq *txq = tid->ac->txq;
@@ -182,28 +250,22 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
memset(&ts, 0, sizeof(ts));
- while ((skb = __skb_dequeue(&tid->buf_q))) {
+ while ((skb = __skb_dequeue(&tid->retry_q))) {
fi = get_frame_info(skb);
bf = fi->bf;
-
if (!bf) {
- bf = ath_tx_setup_buffer(sc, txq, tid, skb);
- if (!bf) {
- ath_txq_skb_done(sc, txq, skb);
- ieee80211_free_txskb(sc->hw, skb);
- continue;
- }
+ ath_txq_skb_done(sc, txq, skb);
+ ieee80211_free_txskb(sc->hw, skb);
+ continue;
}
- if (fi->retries) {
- list_add_tail(&bf->list, &bf_head);
+ if (fi->baw_tracked) {
ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
sendbar = true;
- } else {
- ath_set_rates(tid->an->vif, tid->an->sta, bf);
- ath_tx_send_normal(sc, txq, NULL, skb);
}
+
+ list_add_tail(&bf->list, &bf_head);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
}
if (sendbar) {
@@ -232,13 +294,16 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
}
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
- u16 seqno)
+ struct ath_buf *bf)
{
+ struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
+ u16 seqno = bf->bf_state.seqno;
int index, cindex;
index = ATH_BA_INDEX(tid->seq_start, seqno);
cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
__set_bit(cindex, tid->tx_buf);
+ fi->baw_tracked = 1;
if (index >= ((tid->baw_tail - tid->baw_head) &
(ATH_TID_MAX_BUFS - 1))) {
@@ -247,12 +312,6 @@ static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
}
}
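+/*
+ * ATH_BA_INDEX() above is the frame's distance from the start of the
+ * BlockAck window; cindex maps that distance onto the circular tx_buf
+ * bitmap, which relies on ATH_TID_MAX_BUFS being a power of two. Setting
+ * baw_tracked ensures a retransmitted frame is not added to the window a
+ * second time (see the !fi->baw_tracked check in ath_tx_form_aggr()).
+ */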
-/*
- * TODO: For frame(s) that are in the retry state, we will reuse the
- * sequence number(s) without setting the retry bit. The
- * alternative is to give up on these and BAR the receiver's window
- * forward.
- */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
struct ath_atx_tid *tid)
@@ -266,7 +325,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
memset(&ts, 0, sizeof(ts));
INIT_LIST_HEAD(&bf_head);
- while ((skb = __skb_dequeue(&tid->buf_q))) {
+ while ((skb = ath_tid_dequeue(tid))) {
fi = get_frame_info(skb);
bf = fi->bf;
@@ -276,14 +335,8 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
}
list_add_tail(&bf->list, &bf_head);
-
- ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
}
-
- tid->seq_next = tid->seq_start;
- tid->baw_tail = tid->baw_head;
- tid->bar_index = -1;
}
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
@@ -403,7 +456,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ieee80211_tx_rate rates[4];
struct ath_frame_info *fi;
int nframes;
- u8 tidno;
bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
int i, retries;
int bar_index = -1;
@@ -429,7 +481,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
while (bf) {
bf_next = bf->bf_next;
- if (!bf->bf_stale || bf_next != NULL)
+ if (!bf->bf_state.stale || bf_next != NULL)
list_move_tail(&bf->list, &bf_head);
ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);
@@ -440,8 +492,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
}
an = (struct ath_node *)sta->drv_priv;
- tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
- tid = ATH_AN_2_TID(an, tidno);
+ tid = ath_get_skb_tid(sc, an, skb);
seq_first = tid->seq_start;
isba = ts->ts_flags & ATH9K_TX_BA;
@@ -453,7 +504,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
* Only BlockAcks have a TID and therefore normal Acks cannot be
* checked
*/
- if (isba && tidno != ts->tid)
+ if (isba && tid->tidno != ts->tid)
txok = false;
isaggr = bf_isaggr(bf);
@@ -489,7 +540,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
tx_info = IEEE80211_SKB_CB(skb);
fi = get_frame_info(skb);
- if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
+ if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno) ||
+ !tid->active) {
/*
* Outside of the current BlockAck window,
* maybe part of a previous session
@@ -522,7 +574,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
* not a holding desc.
*/
INIT_LIST_HEAD(&bf_head);
- if (bf_next != NULL || !bf_last->bf_stale)
+ if (bf_next != NULL || !bf_last->bf_state.stale)
list_move_tail(&bf->list, &bf_head);
if (!txpending) {
@@ -546,7 +598,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
ieee80211_sta_eosp(sta);
}
/* retry the un-acked ones */
- if (bf->bf_next == NULL && bf_last->bf_stale) {
+ if (bf->bf_next == NULL && bf_last->bf_state.stale) {
struct ath_buf *tbf;
tbf = ath_clone_txbuf(sc, bf_last);
@@ -583,7 +635,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
if (an->sleeping)
ieee80211_sta_set_buffered(sta, tid->tidno, true);
- skb_queue_splice(&bf_pending, &tid->buf_q);
+ skb_queue_splice_tail(&bf_pending, &tid->retry_q);
if (!an->sleeping) {
ath_tx_queue_tid(txq, tid);
@@ -641,7 +693,7 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
} else
ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok);
- if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && !flush)
+ if (!flush)
ath_txq_schedule(sc, txq);
}
@@ -815,15 +867,20 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
static struct ath_buf *
ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_atx_tid *tid)
+ struct ath_atx_tid *tid, struct sk_buff_head **q)
{
+ struct ieee80211_tx_info *tx_info;
struct ath_frame_info *fi;
struct sk_buff *skb;
struct ath_buf *bf;
u16 seqno;
while (1) {
- skb = skb_peek(&tid->buf_q);
+ *q = &tid->retry_q;
+ if (skb_queue_empty(*q))
+ *q = &tid->buf_q;
+
+ skb = skb_peek(*q);
if (!skb)
break;
@@ -831,14 +888,26 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
bf = fi->bf;
if (!fi->bf)
bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+ else
+ bf->bf_state.stale = false;
if (!bf) {
- __skb_unlink(skb, &tid->buf_q);
+ __skb_unlink(skb, *q);
ath_txq_skb_done(sc, txq, skb);
ieee80211_free_txskb(sc->hw, skb);
continue;
}
+ bf->bf_next = NULL;
+ bf->bf_lastbf = bf;
+
+ tx_info = IEEE80211_SKB_CB(skb);
+ tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
+ if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
+ bf->bf_state.bf_type = 0;
+ return bf;
+ }
+
bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
seqno = bf->bf_state.seqno;
@@ -852,73 +921,52 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
INIT_LIST_HEAD(&bf_head);
list_add(&bf->list, &bf_head);
- __skb_unlink(skb, &tid->buf_q);
+ __skb_unlink(skb, *q);
ath_tx_update_baw(sc, tid, seqno);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
continue;
}
- bf->bf_next = NULL;
- bf->bf_lastbf = bf;
return bf;
}
return NULL;
}
-static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
- struct ath_txq *txq,
- struct ath_atx_tid *tid,
- struct list_head *bf_q,
- int *aggr_len)
+static bool
+ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
+ struct ath_atx_tid *tid, struct list_head *bf_q,
+ struct ath_buf *bf_first, struct sk_buff_head *tid_q,
+ int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
- struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
- int rl = 0, nframes = 0, ndelim, prev_al = 0;
+ struct ath_buf *bf = bf_first, *bf_prev = NULL;
+ int nframes = 0, ndelim;
u16 aggr_limit = 0, al = 0, bpad = 0,
- al_delta, h_baw = tid->baw_size / 2;
- enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
+ al_delta, h_baw = tid->baw_size / 2;
struct ieee80211_tx_info *tx_info;
struct ath_frame_info *fi;
struct sk_buff *skb;
+ bool closed = false;
- do {
- bf = ath_tx_get_tid_subframe(sc, txq, tid);
- if (!bf) {
- status = ATH_AGGR_BAW_CLOSED;
- break;
- }
+ bf = bf_first;
+ aggr_limit = ath_lookup_rate(sc, bf, tid);
+ do {
skb = bf->bf_mpdu;
fi = get_frame_info(skb);
- if (!bf_first)
- bf_first = bf;
-
- if (!rl) {
- ath_set_rates(tid->an->vif, tid->an->sta, bf);
- aggr_limit = ath_lookup_rate(sc, bf, tid);
- rl = 1;
- }
-
/* do not exceed aggregation limit */
al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;
+ if (nframes) {
+ if (aggr_limit < al + bpad + al_delta ||
+ ath_lookup_legacy(bf) || nframes >= h_baw)
+ break;
- if (nframes &&
- ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
- ath_lookup_legacy(bf))) {
- status = ATH_AGGR_LIMITED;
- break;
- }
-
- tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
- if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
- break;
-
- /* do not exceed subframe limit */
- if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
- status = ATH_AGGR_LIMITED;
- break;
+ tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
+ if ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
+ !(tx_info->flags & IEEE80211_TX_CTL_AMPDU))
+ break;
}
/* add padding for previous frame to aggregation length */
@@ -936,22 +984,37 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
bf->bf_next = NULL;
/* link buffers of this frame to the aggregate */
- if (!fi->retries)
- ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
+ if (!fi->baw_tracked)
+ ath_tx_addto_baw(sc, tid, bf);
bf->bf_state.ndelim = ndelim;
- __skb_unlink(skb, &tid->buf_q);
+ __skb_unlink(skb, tid_q);
list_add_tail(&bf->list, bf_q);
if (bf_prev)
bf_prev->bf_next = bf;
bf_prev = bf;
- } while (!skb_queue_empty(&tid->buf_q));
+ bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
+ if (!bf) {
+ closed = true;
+ break;
+ }
+ } while (ath_tid_has_buffered(tid));
+
+ bf = bf_first;
+ bf->bf_lastbf = bf_prev;
+
+ if (bf == bf_prev) {
+ al = get_frame_info(bf->bf_mpdu)->framelen;
+ bf->bf_state.bf_type = BUF_AMPDU;
+ } else {
+ TX_STAT_INC(txq->axq_qnum, a_aggr);
+ }
*aggr_len = al;
- return status;
+ return closed;
#undef PADBYTES
}
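+/*
+ * PADBYTES() rounds each subframe up to the next 4-byte boundary when
+ * accumulating the aggregate length: a 1536-byte subframe adds 0 pad
+ * bytes, 1537 adds 3, 1538 adds 2 and 1539 adds 1.
+ */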
@@ -1023,7 +1086,7 @@ void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop)
}
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
- struct ath_tx_info *info, int len)
+ struct ath_tx_info *info, int len, bool rts)
{
struct ath_hw *ah = sc->sc_ah;
struct sk_buff *skb;
@@ -1032,6 +1095,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
const struct ieee80211_rate *rate;
struct ieee80211_hdr *hdr;
struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
+ u32 rts_thresh = sc->hw->wiphy->rts_threshold;
int i;
u8 rix = 0;
@@ -1054,7 +1118,17 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
rix = rates[i].idx;
info->rates[i].Tries = rates[i].count;
- if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
+ /*
+ * Handle RTS threshold for unaggregated HT frames.
+ */
+ if (bf_isampdu(bf) && !bf_isaggr(bf) &&
+ (rates[i].flags & IEEE80211_TX_RC_MCS) &&
+ unlikely(rts_thresh != (u32) -1)) {
+ if (!rts_thresh || (len > rts_thresh))
+ rts = true;
+ }
+
+ if (rts || rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
info->flags |= ATH9K_TXDESC_RTSENA;
} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
@@ -1147,6 +1221,8 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
struct ath_hw *ah = sc->sc_ah;
struct ath_buf *bf_first = NULL;
struct ath_tx_info info;
+ u32 rts_thresh = sc->hw->wiphy->rts_threshold;
+ bool rts = false;
memset(&info, 0, sizeof(info));
info.is_first = true;
@@ -1183,7 +1259,22 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
info.flags |= (u32) bf->bf_state.bfs_paprd <<
ATH9K_TXDESC_PAPRD_S;
- ath_buf_set_rate(sc, bf, &info, len);
+ /*
+ * mac80211 doesn't handle the RTS threshold for HT frames
+ * because the decision has to be based on the AMPDU length,
+ * and aggregation is done entirely inside ath9k.
+ * Set the RTS/CTS flag for the first subframe based
+ * on the threshold.
+ */
+ if (aggr && (bf == bf_first) &&
+ unlikely(rts_thresh != (u32) -1)) {
+ /*
+ * "len" is the size of the entire AMPDU.
+ */
+ if (!rts_thresh || (len > rts_thresh))
+ rts = true;
+ }
+ ath_buf_set_rate(sc, bf, &info, len, rts);
}
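+/*
+ * Both RTS checks (per-rate in ath_buf_set_rate() and per-AMPDU above)
+ * reduce to the same predicate: RTS is requested only when a threshold is
+ * configured (rts_thresh != (u32) -1) and either the threshold is 0 or the
+ * frame/aggregate length exceeds it.
+ */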
info.buf_addr[0] = bf->bf_buf_addr;
@@ -1212,53 +1303,86 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
}
}
-static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_atx_tid *tid)
+static void
+ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq,
+ struct ath_atx_tid *tid, struct list_head *bf_q,
+ struct ath_buf *bf_first, struct sk_buff_head *tid_q)
{
- struct ath_buf *bf;
- enum ATH_AGGR_STATUS status;
- struct ieee80211_tx_info *tx_info;
- struct list_head bf_q;
- int aggr_len;
+ struct ath_buf *bf = bf_first, *bf_prev = NULL;
+ struct sk_buff *skb;
+ int nframes = 0;
do {
- if (skb_queue_empty(&tid->buf_q))
- return;
+ struct ieee80211_tx_info *tx_info;
+ skb = bf->bf_mpdu;
- INIT_LIST_HEAD(&bf_q);
+ nframes++;
+ __skb_unlink(skb, tid_q);
+ list_add_tail(&bf->list, bf_q);
+ if (bf_prev)
+ bf_prev->bf_next = bf;
+ bf_prev = bf;
- status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);
+ if (nframes >= 2)
+ break;
- /*
- * no frames picked up to be aggregated;
- * block-ack window is not open.
- */
- if (list_empty(&bf_q))
+ bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
+ if (!bf)
break;
- bf = list_first_entry(&bf_q, struct ath_buf, list);
- bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
+ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
+ break;
- if (tid->ac->clear_ps_filter) {
- tid->ac->clear_ps_filter = false;
- tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
- } else {
- tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
- }
+ ath_set_rates(tid->an->vif, tid->an->sta, bf);
+ } while (1);
+}
- /* if only one frame, send as non-aggregate */
- if (bf == bf->bf_lastbf) {
- aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
- bf->bf_state.bf_type = BUF_AMPDU;
- } else {
- TX_STAT_INC(txq->axq_qnum, a_aggr);
- }
+static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
+ struct ath_atx_tid *tid, bool *stop)
+{
+ struct ath_buf *bf;
+ struct ieee80211_tx_info *tx_info;
+ struct sk_buff_head *tid_q;
+ struct list_head bf_q;
+ int aggr_len = 0;
+ bool aggr, last = true;
+
+ if (!ath_tid_has_buffered(tid))
+ return false;
+
+ INIT_LIST_HEAD(&bf_q);
+
+ bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
+ if (!bf)
+ return false;
+
+ tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
+ aggr = !!(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
+ if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) ||
+ (!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) {
+ *stop = true;
+ return false;
+ }
- ath_tx_fill_desc(sc, bf, txq, aggr_len);
- ath_tx_txqaddbuf(sc, txq, &bf_q, false);
- } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
- status != ATH_AGGR_BAW_CLOSED);
+ ath_set_rates(tid->an->vif, tid->an->sta, bf);
+ if (aggr)
+ last = ath_tx_form_aggr(sc, txq, tid, &bf_q, bf,
+ tid_q, &aggr_len);
+ else
+ ath_tx_form_burst(sc, txq, tid, &bf_q, bf, tid_q);
+
+ if (list_empty(&bf_q))
+ return false;
+
+ if (tid->ac->clear_ps_filter || tid->an->no_ps_filter) {
+ tid->ac->clear_ps_filter = false;
+ tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
+ }
+
+ ath_tx_fill_desc(sc, bf, txq, aggr_len);
+ ath_tx_txqaddbuf(sc, txq, &bf_q, false);
+ return true;
}
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
@@ -1282,6 +1406,9 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
an->mpdudensity = density;
}
+ /* force sequence number allocation for pending frames */
+ ath_tx_tid_change_state(sc, txtid);
+
txtid->active = true;
txtid->paused = true;
*ssn = txtid->seq_start = txtid->seq_next;
@@ -1301,8 +1428,9 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
ath_txq_lock(sc, txq);
txtid->active = false;
- txtid->paused = true;
+ txtid->paused = false;
ath_tx_flush_tid(sc, txtid);
+ ath_tx_tid_change_state(sc, txtid);
ath_txq_unlock_complete(sc, txq);
}
@@ -1326,7 +1454,7 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
ath_txq_lock(sc, txq);
- buffered = !skb_queue_empty(&tid->buf_q);
+ buffered = ath_tid_has_buffered(tid);
tid->sched = false;
list_del(&tid->list);
@@ -1358,7 +1486,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
ath_txq_lock(sc, txq);
ac->clear_ps_filter = true;
- if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
+ if (!tid->paused && ath_tid_has_buffered(tid)) {
ath_tx_queue_tid(txq, tid);
ath_txq_schedule(sc, txq);
}
@@ -1383,7 +1511,7 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
tid->paused = false;
- if (!skb_queue_empty(&tid->buf_q)) {
+ if (ath_tid_has_buffered(tid)) {
ath_tx_queue_tid(txq, tid);
ath_txq_schedule(sc, txq);
}
@@ -1403,6 +1531,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info;
struct list_head bf_q;
struct ath_buf *bf_tail = NULL, *bf;
+ struct sk_buff_head *tid_q;
int sent = 0;
int i;
@@ -1418,15 +1547,15 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
continue;
ath_txq_lock(sc, tid->ac->txq);
- while (!skb_queue_empty(&tid->buf_q) && nframes > 0) {
- bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid);
+ while (nframes > 0) {
+ bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q);
if (!bf)
break;
- __skb_unlink(bf->bf_mpdu, &tid->buf_q);
+ __skb_unlink(bf->bf_mpdu, tid_q);
list_add_tail(&bf->list, &bf_q);
ath_set_rates(tid->an->vif, tid->an->sta, bf);
- ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
+ ath_tx_addto_baw(sc, tid, bf);
bf->bf_state.bf_type &= ~BUF_AGGR;
if (bf_tail)
bf_tail->bf_next = bf;
@@ -1436,7 +1565,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
sent++;
TX_STAT_INC(txq->axq_qnum, a_queued_hw);
- if (skb_queue_empty(&tid->buf_q))
+ if (an->sta && !ath_tid_has_buffered(tid))
ieee80211_sta_set_buffered(an->sta, i, false);
}
ath_txq_unlock_complete(sc, tid->ac->txq);
@@ -1595,7 +1724,7 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
while (!list_empty(list)) {
bf = list_first_entry(list, struct ath_buf, list);
- if (bf->bf_stale) {
+ if (bf->bf_state.stale) {
list_del(&bf->list);
ath_tx_return_buffer(sc, bf);
@@ -1689,25 +1818,27 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
*/
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
- struct ath_atx_ac *ac, *ac_tmp, *last_ac;
+ struct ath_atx_ac *ac, *last_ac;
struct ath_atx_tid *tid, *last_tid;
+ bool sent = false;
if (test_bit(SC_OP_HW_RESET, &sc->sc_flags) ||
- list_empty(&txq->axq_acq) ||
- txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
+ list_empty(&txq->axq_acq))
return;
rcu_read_lock();
- ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
+ while (!list_empty(&txq->axq_acq)) {
+ bool stop = false;
- list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
+ ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
list_del(&ac->list);
ac->sched = false;
while (!list_empty(&ac->tid_q)) {
tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
list);
list_del(&tid->list);
@@ -1716,17 +1847,17 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
if (tid->paused)
continue;
- ath_tx_sched_aggr(sc, txq, tid);
+ if (ath_tx_sched_aggr(sc, txq, tid, &stop))
+ sent = true;
/*
* add tid to round-robin queue if more frames
* are pending for the tid
*/
- if (!skb_queue_empty(&tid->buf_q))
+ if (ath_tid_has_buffered(tid))
ath_tx_queue_tid(txq, tid);
- if (tid == last_tid ||
- txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
+ if (stop || tid == last_tid)
break;
}
@@ -1735,9 +1866,17 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
list_add_tail(&ac->list, &txq->axq_acq);
}
- if (ac == last_ac ||
- txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
+ if (stop)
break;
+
+ if (ac == last_ac) {
+ if (!sent)
+ break;
+
+ sent = false;
+ last_ac = list_entry(txq->axq_acq.prev,
+ struct ath_atx_ac, list);
+ }
}
rcu_read_unlock();
@@ -1816,58 +1955,6 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
}
}
-static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_atx_tid *tid, struct sk_buff *skb,
- struct ath_tx_control *txctl)
-{
- struct ath_frame_info *fi = get_frame_info(skb);
- struct list_head bf_head;
- struct ath_buf *bf;
-
- /*
- * Do not queue to h/w when any of the following conditions is true:
- * - there are pending frames in software queue
- * - the TID is currently paused for ADDBA/BAR request
- * - seqno is not within block-ack window
- * - h/w queue depth exceeds low water mark
- */
- if ((!skb_queue_empty(&tid->buf_q) || tid->paused ||
- !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
- txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) &&
- txq != sc->tx.uapsdq) {
- /*
- * Add this frame to software queue for scheduling later
- * for aggregation.
- */
- TX_STAT_INC(txq->axq_qnum, a_queued_sw);
- __skb_queue_tail(&tid->buf_q, skb);
- if (!txctl->an || !txctl->an->sleeping)
- ath_tx_queue_tid(txq, tid);
- return;
- }
-
- bf = ath_tx_setup_buffer(sc, txq, tid, skb);
- if (!bf) {
- ath_txq_skb_done(sc, txq, skb);
- ieee80211_free_txskb(sc->hw, skb);
- return;
- }
-
- ath_set_rates(tid->an->vif, tid->an->sta, bf);
- bf->bf_state.bf_type = BUF_AMPDU;
- INIT_LIST_HEAD(&bf_head);
- list_add(&bf->list, &bf_head);
-
- /* Add sub-frame to BAW */
- ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
-
- /* Queue to h/w without aggregation */
- TX_STAT_INC(txq->axq_qnum, a_queued_hw);
- bf->bf_lastbf = bf;
- ath_tx_fill_desc(sc, bf, txq, fi->framelen);
- ath_tx_txqaddbuf(sc, txq, &bf_head, false);
-}
-
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
struct ath_atx_tid *tid, struct sk_buff *skb)
{
@@ -2010,6 +2097,7 @@ static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = txctl->sta;
struct ieee80211_vif *vif = info->control.vif;
+ struct ath_vif *avp;
struct ath_softc *sc = hw->priv;
int frmlen = skb->len + FCS_LEN;
int padpos, padsize;
@@ -2017,6 +2105,10 @@ static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
/* NOTE: sta can be NULL according to net/mac80211.h */
if (sta)
txctl->an = (struct ath_node *)sta->drv_priv;
+ else if (vif && ieee80211_is_data(hdr->frame_control)) {
+ avp = (void *)vif->drv_priv;
+ txctl->an = &avp->mcast_node;
+ }
if (info->control.hw_key)
frmlen += info->control.hw_key->icv_len;
@@ -2066,7 +2158,6 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath_txq *txq = txctl->txq;
struct ath_atx_tid *tid = NULL;
struct ath_buf *bf;
- u8 tidno;
int q;
int ret;
@@ -2094,22 +2185,25 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
ath_txq_unlock(sc, txq);
txq = sc->tx.uapsdq;
ath_txq_lock(sc, txq);
- }
-
- if (txctl->an && ieee80211_is_data_qos(hdr->frame_control)) {
- tidno = ieee80211_get_qos_ctl(hdr)[0] &
- IEEE80211_QOS_CTL_TID_MASK;
- tid = ATH_AN_2_TID(txctl->an, tidno);
+ } else if (txctl->an &&
+ ieee80211_is_data_present(hdr->frame_control)) {
+ tid = ath_get_skb_tid(sc, txctl->an, skb);
WARN_ON(tid->ac->txq != txctl->txq);
- }
- if ((info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
+ if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
+ tid->ac->clear_ps_filter = true;
+
/*
- * Try aggregation if it's a unicast data frame
- * and the destination is HT capable.
+ * Add this frame to the software queue, to be scheduled
+ * for aggregation later.
*/
- ath_tx_send_ampdu(sc, txq, tid, skb, txctl);
+ TX_STAT_INC(txq->axq_qnum, a_queued_sw);
+ __skb_queue_tail(&tid->buf_q, skb);
+ if (!txctl->an->sleeping)
+ ath_tx_queue_tid(txq, tid);
+
+ ath_txq_schedule(sc, txq);
goto out;
}
@@ -2168,7 +2262,7 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
bf->bf_lastbf = bf;
ath_set_rates(vif, NULL, bf);
- ath_buf_set_rate(sc, bf, &info, fi->framelen);
+ ath_buf_set_rate(sc, bf, &info, fi->framelen, false);
duration += info.rates[0].PktDuration;
if (bf_tail)
bf_tail->bf_next = bf;
@@ -2372,8 +2466,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
if (list_empty(&txq->axq_q)) {
txq->axq_link = NULL;
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
- ath_txq_schedule(sc, txq);
+ ath_txq_schedule(sc, txq);
break;
}
bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
@@ -2387,7 +2480,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
* it with the STALE flag.
*/
bf_held = NULL;
- if (bf->bf_stale) {
+ if (bf->bf_state.stale) {
bf_held = bf;
if (list_is_last(&bf_held->list, &txq->axq_q))
break;
@@ -2411,7 +2504,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
* however leave the last descriptor back as the holding
* descriptor for hw.
*/
- lastbf->bf_stale = true;
+ lastbf->bf_state.stale = true;
INIT_LIST_HEAD(&bf_head);
if (!list_is_singular(&lastbf->list))
list_cut_position(&bf_head,
@@ -2466,6 +2559,8 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
if (ts.qid == sc->beacon.beaconq) {
sc->beacon.tx_processed = true;
sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
+
+ ath9k_csa_is_finished(sc);
continue;
}
@@ -2482,7 +2577,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
}
bf = list_first_entry(fifo_list, struct ath_buf, list);
- if (bf->bf_stale) {
+ if (bf->bf_state.stale) {
list_del(&bf->list);
ath_tx_return_buffer(sc, bf);
bf = list_first_entry(fifo_list, struct ath_buf, list);
@@ -2504,7 +2599,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
ath_tx_txqaddbuf(sc, txq, &bf_q, true);
}
} else {
- lastbf->bf_stale = true;
+ lastbf->bf_state.stale = true;
if (bf != lastbf)
list_cut_position(&bf_head, fifo_list,
lastbf->list.prev);
@@ -2595,6 +2690,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
tid->paused = false;
tid->active = false;
__skb_queue_head_init(&tid->buf_q);
+ __skb_queue_head_init(&tid->retry_q);
acno = TID_TO_WME_AC(tidno);
tid->ac = &an->ac[acno];
}
@@ -2602,6 +2698,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
for (acno = 0, ac = &an->ac[acno];
acno < IEEE80211_NUM_ACS; acno++, ac++) {
ac->sched = false;
+ ac->clear_ps_filter = true;
ac->txq = sc->tx.txq_map[acno];
INIT_LIST_HEAD(&ac->tid_q);
}
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 4684dd989496..e935f61c7fad 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -602,8 +602,8 @@ static void carl9170_ba_check(struct ar9170 *ar, void *data, unsigned int len)
if (bar->start_seq_num == entry_bar->start_seq_num &&
TID_CHECK(bar->control, entry_bar->control) &&
- compare_ether_addr(bar->ra, entry_bar->ta) == 0 &&
- compare_ether_addr(bar->ta, entry_bar->ra) == 0) {
+ ether_addr_equal(bar->ra, entry_bar->ta) &&
+ ether_addr_equal(bar->ta, entry_bar->ra)) {
struct ieee80211_tx_info *tx_info;
tx_info = IEEE80211_SKB_CB(entry_skb);
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index f891d514d881..990dd42ae79e 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -11,9 +11,6 @@ wil6210-y += txrx.o
wil6210-y += debug.o
wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
-ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
- subdir-ccflags-y += -Werror
-endif
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index ab636767fbde..1caa31992a7e 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -51,7 +51,7 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
if ((i % 64) == 0 && (i != 0))
seq_printf(s, "\n");
seq_printf(s, "%s", (d->dma.status & BIT(0)) ?
- "S" : (vring->ctx[i] ? "H" : "h"));
+ "S" : (vring->ctx[i].skb ? "H" : "h"));
}
seq_printf(s, "\n");
}
@@ -406,7 +406,7 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
volatile struct vring_tx_desc *d =
&(vring->va[dbg_txdesc_index].tx);
volatile u32 *u = (volatile u32 *)d;
- struct sk_buff *skb = vring->ctx[dbg_txdesc_index];
+ struct sk_buff *skb = vring->ctx[dbg_txdesc_index].skb;
seq_printf(s, "Tx[%3d] = {\n", dbg_txdesc_index);
seq_printf(s, " MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 29dd1e58cb17..717178f09aa8 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -127,6 +127,8 @@ void *wil_if_alloc(struct device *dev, void __iomem *csr)
ndev->netdev_ops = &wil_netdev_ops;
ndev->ieee80211_ptr = wdev;
+ ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ ndev->features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
wdev->netdev = ndev;
diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h
index eff1239be53a..e59239d22b94 100644
--- a/drivers/net/wireless/ath/wil6210/trace.h
+++ b/drivers/net/wireless/ath/wil6210/trace.h
@@ -37,36 +37,40 @@ static inline void trace_ ## name(proto) {}
#endif /* !CONFIG_WIL6210_TRACING || defined(__CHECKER__) */
DECLARE_EVENT_CLASS(wil6210_wmi,
- TP_PROTO(u16 id, void *buf, u16 buf_len),
+ TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len),
- TP_ARGS(id, buf, buf_len),
+ TP_ARGS(wmi, buf, buf_len),
TP_STRUCT__entry(
+ __field(u8, mid)
__field(u16, id)
+ __field(u32, timestamp)
__field(u16, buf_len)
__dynamic_array(u8, buf, buf_len)
),
TP_fast_assign(
- __entry->id = id;
+ __entry->mid = wmi->mid;
+ __entry->id = le16_to_cpu(wmi->id);
+ __entry->timestamp = le32_to_cpu(wmi->timestamp);
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
- "id 0x%04x len %d",
- __entry->id, __entry->buf_len
+ "MID %d id 0x%04x len %d timestamp %d",
+ __entry->mid, __entry->id, __entry->buf_len, __entry->timestamp
)
);
DEFINE_EVENT(wil6210_wmi, wil6210_wmi_cmd,
- TP_PROTO(u16 id, void *buf, u16 buf_len),
- TP_ARGS(id, buf, buf_len)
+ TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len),
+ TP_ARGS(wmi, buf, buf_len)
);
DEFINE_EVENT(wil6210_wmi, wil6210_wmi_event,
- TP_PROTO(u16 id, void *buf, u16 buf_len),
- TP_ARGS(id, buf, buf_len)
+ TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len),
+ TP_ARGS(wmi, buf, buf_len)
);
#define WIL6210_MSG_MAX (200)
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index d240b24e1ccf..d505b2676a73 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -18,6 +18,9 @@
#include <net/ieee80211_radiotap.h>
#include <linux/if_arp.h>
#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
#include "wil6210.h"
#include "wmi.h"
@@ -70,7 +73,7 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
vring->swhead = 0;
vring->swtail = 0;
- vring->ctx = kzalloc(vring->size * sizeof(vring->ctx[0]), GFP_KERNEL);
+ vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
if (!vring->ctx) {
vring->va = NULL;
return -ENOMEM;
@@ -108,39 +111,39 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
while (!wil_vring_is_empty(vring)) {
dma_addr_t pa;
- struct sk_buff *skb;
u16 dmalen;
+ struct wil_ctx *ctx;
if (tx) {
struct vring_tx_desc dd, *d = &dd;
volatile struct vring_tx_desc *_d =
&vring->va[vring->swtail].tx;
+ ctx = &vring->ctx[vring->swtail];
*d = *_d;
pa = wil_desc_addr(&d->dma.addr);
dmalen = le16_to_cpu(d->dma.length);
- skb = vring->ctx[vring->swtail];
- if (skb) {
- dma_unmap_single(dev, pa, dmalen,
- DMA_TO_DEVICE);
- dev_kfree_skb_any(skb);
- vring->ctx[vring->swtail] = NULL;
- } else {
+ if (vring->ctx[vring->swtail].mapped_as_page) {
dma_unmap_page(dev, pa, dmalen,
DMA_TO_DEVICE);
+ } else {
+ dma_unmap_single(dev, pa, dmalen,
+ DMA_TO_DEVICE);
}
+ if (ctx->skb)
+ dev_kfree_skb_any(ctx->skb);
vring->swtail = wil_vring_next_tail(vring);
} else { /* rx */
struct vring_rx_desc dd, *d = &dd;
volatile struct vring_rx_desc *_d =
- &vring->va[vring->swtail].rx;
+ &vring->va[vring->swhead].rx;
+ ctx = &vring->ctx[vring->swhead];
*d = *_d;
pa = wil_desc_addr(&d->dma.addr);
dmalen = le16_to_cpu(d->dma.length);
- skb = vring->ctx[vring->swhead];
dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
- kfree_skb(skb);
+ kfree_skb(ctx->skb);
wil_vring_advance_head(vring, 1);
}
}
@@ -187,7 +190,7 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
d->dma.length = cpu_to_le16(sz);
*_d = *d;
- vring->ctx[i] = skb;
+ vring->ctx[i].skb = skb;
return 0;
}
@@ -352,11 +355,11 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
return NULL;
}
- skb = vring->ctx[vring->swhead];
+ skb = vring->ctx[vring->swhead].skb;
d = wil_skb_rxdesc(skb);
*d = *_d;
pa = wil_desc_addr(&d->dma.addr);
- vring->ctx[vring->swhead] = NULL;
+ vring->ctx[vring->swhead].skb = NULL;
wil_vring_advance_head(vring, 1);
dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
@@ -407,6 +410,21 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
return NULL;
}
+ /* L4 IDENT is set when the HW has calculated the checksum;
+ * check the status and, on error, leave verification to the
+ * higher stack layers, which also handle retransmission if needed.
+ */
+ if (d->dma.status & RX_DMA_STATUS_L4_IDENT) {
+ /* L4 protocol identified, csum calculated */
+ if ((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* If the HW reports a bad checksum, let the IP stack re-check it.
+ * For example, the HW doesn't understand the Microsoft IP stack,
+ * which mis-calculates the TCP checksum: when it should be 0x0,
+ * it writes 0xffff, in violation of RFC 1624.
+ */
+ }
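+ /* Net effect: CHECKSUM_UNNECESSARY is claimed only when the HW
+ * both identified the L4 protocol (RX_DMA_STATUS_L4_IDENT) and
+ * reported no L4 error; otherwise skb->ip_summed stays at
+ * CHECKSUM_NONE and the stack verifies the checksum in software.
+ */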
+
ds_bits = wil_rxdesc_ds_bits(d);
if (ds_bits == 1) {
/*
@@ -646,6 +664,53 @@ static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
return 0;
}
+static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
+ struct vring_tx_desc *d,
+ struct sk_buff *skb)
+{
+ int protocol;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ switch (skb->protocol) {
+ case cpu_to_be16(ETH_P_IP):
+ protocol = ip_hdr(skb)->protocol;
+ break;
+ case cpu_to_be16(ETH_P_IPV6):
+ protocol = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (protocol) {
+ case IPPROTO_TCP:
+ d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
+ /* L4 header len: TCP header length */
+ d->dma.d0 |=
+ (tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
+ break;
+ case IPPROTO_UDP:
+ /* L4 header len: UDP header length */
+ d->dma.d0 |=
+ (sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ d->dma.ip_length = skb_network_header_len(skb);
+ d->dma.b11 = ETH_HLEN; /* MAC header length */
+ d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
+ /* Enable TCP/UDP checksum */
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
+ /* Calculate pseudo-header */
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
+
+ return 0;
+}
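+/*
+ * A non-zero return means the protocol cannot be offloaded; the caller in
+ * wil_tx_vring() treats this as fatal for the packet and drops it, since
+ * the skb arrived with CHECKSUM_PARTIAL expecting the HW to fill the csum.
+ */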
+
static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
struct sk_buff *skb)
{
@@ -655,7 +720,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
u32 swhead = vring->swhead;
int avail = wil_vring_avail_tx(vring);
int nr_frags = skb_shinfo(skb)->nr_frags;
- uint f;
+ uint f = 0;
int vring_index = vring - wil->vring_tx;
uint i = swhead;
dma_addr_t pa;
@@ -686,13 +751,20 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
return -EINVAL;
/* 1-st segment */
wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index);
+ /* Process TCP/UDP checksum offloading */
+ if (wil_tx_desc_offload_cksum_set(wil, d, skb)) {
+ wil_err(wil, "VRING #%d Failed to set cksum, drop packet\n",
+ vring_index);
+ goto dma_error;
+ }
+
d->mac.d[2] |= ((nr_frags + 1) <<
MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
if (nr_frags)
*_d = *d;
/* middle segments */
- for (f = 0; f < nr_frags; f++) {
+ for (; f < nr_frags; f++) {
const struct skb_frag_struct *frag =
&skb_shinfo(skb)->frags[f];
int len = skb_frag_size(frag);
@@ -703,7 +775,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
if (unlikely(dma_mapping_error(dev, pa)))
goto dma_error;
wil_tx_desc_map(d, pa, len, vring_index);
- vring->ctx[i] = NULL;
+ vring->ctx[i].mapped_as_page = 1;
*_d = *d;
}
/* for the last seg only */
@@ -712,6 +784,12 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
*_d = *d;
+ /* hold a reference to the skb to prevent it from being
+ * released before accounting, in case of an immediate
+ * "tx done"
+ */
+ vring->ctx[i].skb = skb_get(skb);
+
wil_hex_dump_txrx("Tx ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false);
@@ -720,29 +798,31 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
wil_dbg_txrx(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead);
trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));
- /* hold reference to skb
- * to prevent skb release before accounting
- * in case of immediate "tx done"
- */
- vring->ctx[i] = skb_get(skb);
return 0;
dma_error:
/* unmap what we have mapped */
- /* Note: increment @f to operate with positive index */
- for (f++; f > 0; f--) {
+ nr_frags = f + 1; /* frags mapped + one for skb head */
+ for (f = 0; f < nr_frags; f++) {
u16 dmalen;
+ struct wil_ctx *ctx;
i = (swhead + f) % vring->size;
+ ctx = &vring->ctx[i];
_d = &(vring->va[i].tx);
*d = *_d;
_d->dma.status = TX_DMA_STATUS_DU;
pa = wil_desc_addr(&d->dma.addr);
dmalen = le16_to_cpu(d->dma.length);
- if (vring->ctx[i])
- dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
- else
+ if (ctx->mapped_as_page)
dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
+
+ if (ctx->skb)
+ dev_kfree_skb_any(ctx->skb);
+
+ memset(ctx, 0, sizeof(*ctx));
}
return -EINVAL;
@@ -821,8 +901,9 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
&vring->va[vring->swtail].tx;
struct vring_tx_desc dd, *d = &dd;
dma_addr_t pa;
- struct sk_buff *skb;
u16 dmalen;
+ struct wil_ctx *ctx = &vring->ctx[vring->swtail];
+ struct sk_buff *skb = ctx->skb;
*d = *_d;
@@ -840,7 +921,11 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
(const void *)d, sizeof(*d), false);
pa = wil_desc_addr(&d->dma.addr);
- skb = vring->ctx[vring->swtail];
+ if (ctx->mapped_as_page)
+ dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
+
if (skb) {
if (d->dma.error == 0) {
ndev->stats.tx_packets++;
@@ -849,16 +934,15 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
ndev->stats.tx_errors++;
}
- dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
- vring->ctx[vring->swtail] = NULL;
- } else {
- dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
}
- d->dma.addr.addr_low = 0;
- d->dma.addr.addr_high = 0;
- d->dma.length = 0;
- d->dma.status = TX_DMA_STATUS_DU;
+ memset(ctx, 0, sizeof(*ctx));
+ /*
+ * There is no need to touch the HW descriptor:
+ * - the status bit TX_DMA_STATUS_DU is set by design,
+ * so hardware will not try to process this descriptor,
+ * - the rest of the descriptor will be initialized on Tx.
+ */
vring->swtail = wil_vring_next_tail(vring);
done++;
}
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index 859aea68a1fa..b3828279204c 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -235,7 +235,16 @@ struct vring_tx_mac {
#define DMA_CFG_DESC_TX_0_L4_TYPE_POS 30
#define DMA_CFG_DESC_TX_0_L4_TYPE_LEN 2
-#define DMA_CFG_DESC_TX_0_L4_TYPE_MSK 0xC0000000
+#define DMA_CFG_DESC_TX_0_L4_TYPE_MSK 0xC0000000 /* L4 type: 0-UDP, 2-TCP */
+
+#define DMA_CFG_DESC_TX_OFFLOAD_CFG_MAC_LEN_POS 0
+#define DMA_CFG_DESC_TX_OFFLOAD_CFG_MAC_LEN_LEN 7
+#define DMA_CFG_DESC_TX_OFFLOAD_CFG_MAC_LEN_MSK 0x7F /* MAC hdr len */
+
+#define DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS 7
+#define DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_LEN 1
+#define DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_MSK 0x80 /* 1-IPv4, 0-IPv6 */
#define TX_DMA_STATUS_DU BIT(0)
@@ -334,8 +343,17 @@ struct vring_rx_mac {
#define RX_DMA_D0_CMD_DMA_IT BIT(10)
+/* Error field, offload bits */
+#define RX_DMA_ERROR_L3_ERR BIT(4)
+#define RX_DMA_ERROR_L4_ERR BIT(5)
+
+/* Status field */
#define RX_DMA_STATUS_DU BIT(0)
#define RX_DMA_STATUS_ERROR BIT(2)
+
+#define RX_DMA_STATUS_L3_IDENT BIT(4)
+#define RX_DMA_STATUS_L4_IDENT BIT(5)
#define RX_DMA_STATUS_PHY_INFO BIT(6)
struct vring_rx_dma {
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 44fdab51de7e..c4a51638736a 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -156,11 +156,22 @@ struct wil6210_mbox_hdr {
/* max. value for wil6210_mbox_hdr.len */
#define MAX_MBOXITEM_SIZE (240)
+/**
+ * struct wil6210_mbox_hdr_wmi - WMI header
+ *
+ * @mid: MAC ID
+ * 00 - default, created by FW
+ * 01..0f - WiFi ports, driver to create
+ * 10..fe - debug
+ * ff - broadcast
+ * @id: command/event ID
+ * @timestamp: FW fills for events, free-running msec timer
+ */
struct wil6210_mbox_hdr_wmi {
- u8 reserved0[2];
+ u8 mid;
+ u8 reserved;
__le16 id;
- __le16 info1; /* bits [0..3] - device_id, rest - unused */
- u8 reserved1[2];
+ __le32 timestamp;
} __packed;
struct pending_wmi_event {
@@ -172,6 +183,14 @@ struct pending_wmi_event {
} __packed event;
};
+/**
+ * struct wil_ctx - software context for Vring descriptor
+ */
+struct wil_ctx {
+ struct sk_buff *skb;
+ u8 mapped_as_page:1;
+};
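+/*
+ * mapped_as_page records which DMA unmap call a Tx slot needs: the skb
+ * head is mapped with dma_map_single() while page fragments get a page
+ * mapping, and the unmap in wil_tx_complete() and in the error-unwind
+ * path must mirror the original mapping call.
+ */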
+
union vring_desc;
struct vring {
@@ -181,7 +200,7 @@ struct vring {
u32 swtail;
u32 swhead;
u32 hwtail; /* write here to inform hw */
- void **ctx; /* void *ctx[size] - software context */
+ struct wil_ctx *ctx; /* ctx[size] - software context */
};
enum { /* for wil6210_priv.status */
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index dc8059ad4bab..063963ee422a 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -172,8 +172,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
.len = cpu_to_le16(sizeof(cmd.wmi) + len),
},
.wmi = {
+ .mid = 0,
.id = cpu_to_le16(cmdid),
- .info1 = 0,
},
};
struct wil6210_mbox_ring *r = &wil->mbox_ctl.tx;
@@ -248,7 +248,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
iowrite32(r->head = next_head, wil->csr + HOST_MBOX +
offsetof(struct wil6210_mbox_ctl, tx.head));
- trace_wil6210_wmi_cmd(cmdid, buf, len);
+ trace_wil6210_wmi_cmd(&cmd.wmi, buf, len);
/* interrupt to FW */
iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT);
@@ -339,7 +339,7 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
}
} else {
cfg80211_rx_mgmt(wil->wdev, freq, signal,
- (void *)rx_mgmt_frame, d_len, GFP_KERNEL);
+ (void *)rx_mgmt_frame, d_len, 0, GFP_KERNEL);
}
}
@@ -640,9 +640,13 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
hdr.flags);
if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
(len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
- u16 id = le16_to_cpu(evt->event.wmi.id);
- wil_dbg_wmi(wil, "WMI event 0x%04x\n", id);
- trace_wil6210_wmi_event(id, &evt->event.wmi, len);
+ struct wil6210_mbox_hdr_wmi *wmi = &evt->event.wmi;
+ u16 id = le16_to_cpu(wmi->id);
+ u32 tstamp = le32_to_cpu(wmi->timestamp);
+ wil_dbg_wmi(wil, "WMI event 0x%04x MID %d @%d msec\n",
+ id, wmi->mid, tstamp);
+ trace_wil6210_wmi_event(wmi, &wmi[1],
+ len - sizeof(*wmi));
}
wil_hex_dump_wmi("evt ", DUMP_PREFIX_OFFSET, 16, 1,
&evt->event.hdr, sizeof(hdr) + len, true);
@@ -920,6 +924,12 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
cmd.sniffer_cfg.phy_support =
cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL)
? WMI_SNIFFER_CP : WMI_SNIFFER_DP);
+ } else {
+ /* Initialize offload (in non-sniffer mode).
+ * The Linux IP stack always calculates the IP checksum;
+ * the HW always calculates the TCP/UDP checksum.
+ */
+ cmd.l3_l4_ctrl |= (1 << L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS);
}
/* typical time for secure PCP is 840ms */
rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd),
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index f7c70b3a6ea9..c51d2dc489e4 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -431,9 +431,9 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
- ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
- ring_mem_size, &(ring->dmabase),
- GFP_KERNEL | __GFP_ZERO);
+ ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev,
+ ring_mem_size, &(ring->dmabase),
+ GFP_KERNEL);
if (!ring->descbase)
return -ENOMEM;
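
The conversion is behavior-preserving: dma_zalloc_coherent() is simply the
zeroing wrapper. A sketch of the equivalence:

/* before: explicit zeroing flag */
ring->descbase = dma_alloc_coherent(dev, size, &ring->dmabase,
				    GFP_KERNEL | __GFP_ZERO);
/* after: the wrapper passes __GFP_ZERO internally */
ring->descbase = dma_zalloc_coherent(dev, size, &ring->dmabase, GFP_KERNEL);
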
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 0e933bb71543..ccd24f0acb8d 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -4645,6 +4645,19 @@ static void b43_wireless_core_exit(struct b43_wldev *dev)
b43_maskset32(dev, B43_MMIO_MACCTL, ~B43_MACCTL_PSM_RUN,
B43_MACCTL_PSM_JMP0);
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ bcma_core_pci_down(dev->dev->bdev->bus);
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ /* TODO */
+ break;
+#endif
+ }
+
b43_dma_free(dev);
b43_pio_free(dev);
b43_chip_exit(dev);
@@ -4684,6 +4697,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
case B43_BUS_BCMA:
bcma_core_pci_irq_ctl(&dev->dev->bdev->bus->drv_pci[0],
dev->dev->bdev, true);
+ bcma_core_pci_up(dev->dev->bdev->bus);
break;
#endif
#ifdef CONFIG_B43_SSB
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index faeafe219c57..42eb26c99e11 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -331,10 +331,9 @@ void free_descriptor_buffer(struct b43legacy_dmaring *ring,
static int alloc_ringmemory(struct b43legacy_dmaring *ring)
{
/* GFP flags must match the flags in free_ringmemory()! */
- ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
- B43legacy_DMA_RINGMEMSIZE,
- &(ring->dmabase),
- GFP_KERNEL | __GFP_ZERO);
+ ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev,
+ B43legacy_DMA_RINGMEMSIZE,
+ &(ring->dmabase), GFP_KERNEL);
if (!ring->descbase)
return -ENOMEM;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index e3f3c48f86d4..e13b1a65c65f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -592,6 +592,7 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
uint flags, u8 *buf, uint nbytes)
{
struct sk_buff *mypkt;
+ struct sk_buff_head pktq;
int err;
mypkt = brcmu_pkt_buf_get_skb(nbytes);
@@ -602,7 +603,10 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
}
memcpy(mypkt->data, buf, nbytes);
- err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, mypkt);
+ __skb_queue_head_init(&pktq);
+ __skb_queue_tail(&pktq, mypkt);
+ err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, &pktq);
+ __skb_dequeue_tail(&pktq);
brcmu_pkt_buf_free_skb(mypkt);
return err;
@@ -611,22 +615,18 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
int
brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff *pkt)
+ uint flags, struct sk_buff_head *pktq)
{
uint width;
int err = 0;
- struct sk_buff_head pkt_list;
brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
- fn, addr, pkt->len);
+ fn, addr, pktq->qlen);
width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
brcmf_sdio_addrprep(sdiodev, width, &addr);
- skb_queue_head_init(&pkt_list);
- skb_queue_tail(&pkt_list, pkt);
- err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, &pkt_list);
- skb_dequeue_tail(&pkt_list);
+ err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, pktq);
return err;
}
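
Callers that still hold a single skb adapt to the new sk_buff_head API
exactly as brcmf_sdcard_send_buf() above does; a minimal sketch:

struct sk_buff_head pktq;

__skb_queue_head_init(&pktq);	/* private, lock-free queue on the stack */
__skb_queue_tail(&pktq, skb);	/* wrap the single packet */
err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, &pktq);
__skb_dequeue_tail(&pktq);	/* unlink so the caller keeps ownership */
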
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index 289e386f01f6..64f4a2bc8dde 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -350,7 +350,6 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
sdiodev->bus_if = bus_if;
bus_if->bus_priv.sdio = sdiodev;
- bus_if->align = BRCMF_SDALIGN;
dev_set_drvdata(&func->dev, bus_if);
dev_set_drvdata(&sdiodev->func[1]->dev, bus_if);
sdiodev->dev = &sdiodev->func[1]->dev;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 86cbfe2c7c6c..2eb9e642c9bf 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -194,6 +194,8 @@
#define BRCMF_E_IF_DEL 2
#define BRCMF_E_IF_CHANGE 3
+#define BRCMF_E_IF_FLAG_NOIF 1
+
#define BRCMF_E_IF_ROLE_STA 0
#define BRCMF_E_IF_ROLE_AP 1
#define BRCMF_E_IF_ROLE_WDS 2
@@ -209,6 +211,8 @@
#define BRCMF_DCMD_MEDLEN 1536
#define BRCMF_DCMD_MAXLEN 8192
+#define BRCMF_AMPDU_RX_REORDER_MAXFLOWS 256
+
/* Pattern matching filter. Specifies an offset within received packets to
* start matching, the pattern to match, the size of the pattern, and a bitmask
* that indicates which bits within the pattern should be matched.
@@ -505,6 +509,25 @@ struct brcmf_dcmd {
uint needed; /* bytes needed (optional) */
};
+/**
+ * struct brcmf_ampdu_rx_reorder - AMPDU receive reorder info
+ *
+ * @pktslots: dynamically allocated array for ordering AMPDU packets.
+ * @flow_id: AMPDU flow identifier.
+ * @cur_idx: last AMPDU index from firmware.
+ * @exp_idx: expected next AMPDU index.
+ * @max_idx: maximum number of packets per AMPDU.
+ * @pend_pkts: number of packets currently in @pktslots.
+ */
+struct brcmf_ampdu_rx_reorder {
+ struct sk_buff **pktslots;
+ u8 flow_id;
+ u8 cur_idx;
+ u8 exp_idx;
+ u8 max_idx;
+ u8 pend_pkts;
+};
+
/* Forward decls for struct brcmf_pub (see below) */
struct brcmf_proto; /* device communication protocol info */
struct brcmf_cfg80211_dev; /* cfg80211 device info */
@@ -536,9 +559,10 @@ struct brcmf_pub {
struct brcmf_fweh_info fweh;
- bool fw_signals;
struct brcmf_fws_info *fws;
- spinlock_t fws_spinlock;
+
+ struct brcmf_ampdu_rx_reorder
+ *reorder_flows[BRCMF_AMPDU_RX_REORDER_MAXFLOWS];
#ifdef DEBUG
struct dentry *dbgfs_dir;
#endif
@@ -604,6 +628,9 @@ struct brcmf_if {
wait_queue_head_t pend_8021x_wait;
};
+struct brcmf_skb_reorder_data {
+ u8 *reorder;
+};
extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
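
brcmf_skb_reorder_data is overlaid on skb->cb, so the TLV parser and the RX
path can hand off reorder metadata without extra allocations. A hedged
sketch of both sides (the size check is illustrative and must live in
function scope):

/* skb->cb is 48 bytes; the overlay must fit */
BUILD_BUG_ON(sizeof(struct brcmf_skb_reorder_data) >
	     sizeof(((struct sk_buff *)0)->cb));

/* producer (fwsignal TLV parser): stash a pointer to the reorder TLV */
struct brcmf_skb_reorder_data *rd = (struct brcmf_skb_reorder_data *)skb->cb;
rd->reorder = data;

/* consumer (brcmf_rx_frames): route through the reorder logic if present */
if (rd->reorder)
	brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
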
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index 080395f49fa5..f7c1985844e4 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -36,7 +36,11 @@ struct brcmf_bus_dcmd {
*
* @init: prepare for communication with dongle.
* @stop: clear pending frames, disable data flow.
- * @txdata: send a data frame to the dongle (callee disposes skb).
+ * @txdata: send a data frame to the dongle. When the data
+ * has been transferred, the common driver must be
+ * notified using brcmf_txcomplete(). The common
+ * driver calls this function with interrupts
+ * disabled.
* @txctl: transmit a control request message to dongle.
* @rxctl: receive a control response message from dongle.
* @gettxq: obtain a reference of bus transmit queue (optional).
@@ -65,7 +69,6 @@ struct brcmf_bus_ops {
* @maxctl: maximum size for rxctl request message.
* @tx_realloc: number of tx packets realloced for headroom.
* @dstats: dongle-based statistical data.
- * @align: alignment requirement for the bus.
* @dcmd_list: bus/device specific dongle initialization commands.
* @chip: device identifier of the dongle chip.
* @chiprev: revision of the dongle chip.
@@ -80,7 +83,6 @@ struct brcmf_bus {
enum brcmf_bus_state state;
uint maxctl;
unsigned long tx_realloc;
- u8 align;
u32 chip;
u32 chiprev;
struct list_head dcmd_list;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 80099016d21f..e067aec1fbf1 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -38,6 +38,19 @@ MODULE_LICENSE("Dual BSD/GPL");
#define MAX_WAIT_FOR_8021X_TX 50 /* msecs */
+/* AMPDU rx reordering definitions */
+#define BRCMF_RXREORDER_FLOWID_OFFSET 0
+#define BRCMF_RXREORDER_MAXIDX_OFFSET 2
+#define BRCMF_RXREORDER_FLAGS_OFFSET 4
+#define BRCMF_RXREORDER_CURIDX_OFFSET 6
+#define BRCMF_RXREORDER_EXPIDX_OFFSET 8
+
+#define BRCMF_RXREORDER_DEL_FLOW 0x01
+#define BRCMF_RXREORDER_FLUSH_ALL 0x02
+#define BRCMF_RXREORDER_CURIDX_VALID 0x04
+#define BRCMF_RXREORDER_EXPIDX_VALID 0x08
+#define BRCMF_RXREORDER_NEW_HOLE 0x10
+
/* Error bits */
int brcmf_msg_level;
module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR);
@@ -265,17 +278,234 @@ void brcmf_txflowblock(struct device *dev, bool state)
{
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
- int i;
brcmf_dbg(TRACE, "Enter\n");
- if (brcmf_fws_fc_active(drvr->fws)) {
- brcmf_fws_bus_blocked(drvr, state);
+ brcmf_fws_bus_blocked(drvr, state);
+}
+
+static void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
+{
+ skb->dev = ifp->ndev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if (skb->pkt_type == PACKET_MULTICAST)
+ ifp->stats.multicast++;
+
+ /* Process special event packets */
+ brcmf_fweh_process_skb(ifp->drvr, skb);
+
+ if (!(ifp->ndev->flags & IFF_UP)) {
+ brcmu_pkt_buf_free_skb(skb);
+ return;
+ }
+
+ ifp->stats.rx_bytes += skb->len;
+ ifp->stats.rx_packets++;
+
+ brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
+ if (in_interrupt())
+ netif_rx(skb);
+ else
+ /* If the receive is not processed inside an ISR,
+ * the softirqd must be woken explicitly to service
+ * the NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
+ */
+ netif_rx_ni(skb);
+}
+
+static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
+ u8 start, u8 end,
+ struct sk_buff_head *skb_list)
+{
+ /* initialize return list */
+ __skb_queue_head_init(skb_list);
+
+ if (rfi->pend_pkts == 0) {
+ brcmf_dbg(INFO, "no packets in reorder queue\n");
+ return;
+ }
+
+ do {
+ if (rfi->pktslots[start]) {
+ __skb_queue_tail(skb_list, rfi->pktslots[start]);
+ rfi->pktslots[start] = NULL;
+ }
+ start++;
+ if (start > rfi->max_idx)
+ start = 0;
+ } while (start != end);
+ rfi->pend_pkts -= skb_queue_len(skb_list);
+}
+
+static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
+ struct sk_buff *pkt)
+{
+ u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
+ struct brcmf_ampdu_rx_reorder *rfi;
+ struct sk_buff_head reorder_list;
+ struct sk_buff *pnext;
+ u8 flags;
+ u32 buf_size;
+
+ flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
+ flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];
+
+ /* validate flags and flow id */
+ if (flags == 0xFF) {
+ brcmf_err("invalid flags...so ignore this packet\n");
+ brcmf_netif_rx(ifp, pkt);
+ return;
+ }
+
+ rfi = ifp->drvr->reorder_flows[flow_id];
+ if (flags & BRCMF_RXREORDER_DEL_FLOW) {
+ brcmf_dbg(INFO, "flow-%d: delete\n",
+ flow_id);
+
+ if (rfi == NULL) {
+ brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
+ flow_id);
+ brcmf_netif_rx(ifp, pkt);
+ return;
+ }
+
+ brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
+ &reorder_list);
+ /* add the last packet */
+ __skb_queue_tail(&reorder_list, pkt);
+ kfree(rfi);
+ ifp->drvr->reorder_flows[flow_id] = NULL;
+ goto netif_rx;
+ }
+ /* from here on we need a flow reorder instance */
+ if (rfi == NULL) {
+ buf_size = sizeof(*rfi);
+ max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
+
+ buf_size += (max_idx + 1) * sizeof(pkt);
+
+ /* allocate space for flow reorder info */
+ brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
+ flow_id, max_idx);
+ rfi = kzalloc(buf_size, GFP_ATOMIC);
+ if (rfi == NULL) {
+ brcmf_err("failed to alloc buffer\n");
+ brcmf_netif_rx(ifp, pkt);
+ return;
+ }
+
+ ifp->drvr->reorder_flows[flow_id] = rfi;
+ rfi->pktslots = (struct sk_buff **)(rfi+1);
+ rfi->max_idx = max_idx;
+ }
+ if (flags & BRCMF_RXREORDER_NEW_HOLE) {
+ if (rfi->pend_pkts) {
+ brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
+ rfi->exp_idx,
+ &reorder_list);
+ WARN_ON(rfi->pend_pkts);
+ } else {
+ __skb_queue_head_init(&reorder_list);
+ }
+ rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
+ rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
+ rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
+ rfi->pktslots[rfi->cur_idx] = pkt;
+ rfi->pend_pkts++;
+ brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
+ flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
+ } else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
+ cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
+ exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
+
+ if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
+ /* still in the current hole */
+ /* enqueue the current on the buffer chain */
+ if (rfi->pktslots[cur_idx] != NULL) {
+ brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
+ brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
+ rfi->pktslots[cur_idx] = NULL;
+ }
+ rfi->pktslots[cur_idx] = pkt;
+ rfi->pend_pkts++;
+ rfi->cur_idx = cur_idx;
+ brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
+ flow_id, cur_idx, exp_idx, rfi->pend_pkts);
+
+ /* can return now as there is no reorder
+ * list to process.
+ */
+ return;
+ }
+ if (rfi->exp_idx == cur_idx) {
+ if (rfi->pktslots[cur_idx] != NULL) {
+ brcmf_dbg(INFO, "error buffer pending..free it\n");
+ brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
+ rfi->pktslots[cur_idx] = NULL;
+ }
+ rfi->pktslots[cur_idx] = pkt;
+ rfi->pend_pkts++;
+
+ /* got the expected one. flush from current to expected
+ * and update expected
+ */
+ brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
+ flow_id, cur_idx, exp_idx, rfi->pend_pkts);
+
+ rfi->cur_idx = cur_idx;
+ rfi->exp_idx = exp_idx;
+
+ brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
+ &reorder_list);
+ brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
+ flow_id, skb_queue_len(&reorder_list),
+ rfi->pend_pkts);
+ } else {
+ u8 end_idx;
+
+ brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
+ flow_id, flags, rfi->cur_idx, rfi->exp_idx,
+ cur_idx, exp_idx);
+ if (flags & BRCMF_RXREORDER_FLUSH_ALL)
+ end_idx = rfi->exp_idx;
+ else
+ end_idx = exp_idx;
+
+ /* flush pkts first */
+ brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
+ &reorder_list);
+
+ if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
+ __skb_queue_tail(&reorder_list, pkt);
+ } else {
+ rfi->pktslots[cur_idx] = pkt;
+ rfi->pend_pkts++;
+ }
+ rfi->exp_idx = exp_idx;
+ rfi->cur_idx = cur_idx;
+ }
} else {
- for (i = 0; i < BRCMF_MAX_IFS; i++)
- brcmf_txflowblock_if(drvr->iflist[i],
- BRCMF_NETIF_STOP_REASON_BLOCK_BUS,
- state);
+ /* explicit window move: update the expected index */
+ exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
+
+ brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
+ flow_id, flags, rfi->exp_idx, exp_idx);
+ if (flags & BRCMF_RXREORDER_FLUSH_ALL)
+ end_idx = rfi->exp_idx;
+ else
+ end_idx = exp_idx;
+
+ brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
+ &reorder_list);
+ __skb_queue_tail(&reorder_list, pkt);
+ /* set the new expected idx */
+ rfi->exp_idx = exp_idx;
+ }
+netif_rx:
+ skb_queue_walk_safe(&reorder_list, pkt, pnext) {
+ __skb_unlink(pkt, &reorder_list);
+ brcmf_netif_rx(ifp, pkt);
}
}
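
Note the wrap semantics used throughout: because
brcmf_rxreorder_get_skb_list() walks with a do/while, calling it with
start == end drains every occupied slot. A usage sketch of the full-flush
case:

/* start == end: the do/while advances before comparing, so this walks
 * the whole ring once - a full flush, as in the DEL_FLOW path above.
 */
brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx, &reorder_list);
WARN_ON(rfi->pend_pkts);	/* nothing should remain pending */
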
@@ -285,16 +515,18 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
struct brcmf_if *ifp;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
+ struct brcmf_skb_reorder_data *rd;
u8 ifidx;
int ret;
- brcmf_dbg(DATA, "Enter\n");
+ brcmf_dbg(DATA, "Enter: %s: count=%u\n", dev_name(dev),
+ skb_queue_len(skb_list));
skb_queue_walk_safe(skb_list, skb, pnext) {
skb_unlink(skb, skb_list);
/* process and remove protocol-specific header */
- ret = brcmf_proto_hdrpull(drvr, drvr->fw_signals, &ifidx, skb);
+ ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
ifp = drvr->iflist[ifidx];
if (ret || !ifp || !ifp->ndev) {
@@ -304,31 +536,11 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
continue;
}
- skb->dev = ifp->ndev;
- skb->protocol = eth_type_trans(skb, skb->dev);
-
- if (skb->pkt_type == PACKET_MULTICAST)
- ifp->stats.multicast++;
-
- /* Process special event packets */
- brcmf_fweh_process_skb(drvr, skb);
-
- if (!(ifp->ndev->flags & IFF_UP)) {
- brcmu_pkt_buf_free_skb(skb);
- continue;
- }
-
- ifp->stats.rx_bytes += skb->len;
- ifp->stats.rx_packets++;
-
- if (in_interrupt())
- netif_rx(skb);
+ rd = (struct brcmf_skb_reorder_data *)skb->cb;
+ if (rd->reorder)
+ brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
else
- /* If the receive is not processed inside an ISR,
- * the softirqd must be woken explicitly to service the
- * NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
- */
- netif_rx_ni(skb);
+ brcmf_netif_rx(ifp, skb);
}
}
@@ -889,7 +1101,6 @@ int brcmf_bus_start(struct device *dev)
if (ret < 0)
goto fail;
- drvr->fw_signals = true;
ret = brcmf_fws_init(drvr);
if (ret < 0)
goto fail;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 264111968320..1aa75d5951b8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -201,13 +201,6 @@ struct rte_console {
#define SFC_CRC4WOOS (1 << 2) /* CRC error for write out of sync */
#define SFC_ABORTALL (1 << 3) /* Abort all in-progress frames */
-/* HW frame tag */
-#define SDPCM_FRAMETAG_LEN 4 /* 2 bytes len, 2 bytes check val */
-
-/* Total length of frame header for dongle protocol */
-#define SDPCM_HDRLEN (SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN)
-#define SDPCM_RESERVE (SDPCM_HDRLEN + BRCMF_SDALIGN)
-
/*
* Software allocation of To SB Mailbox resources
*/
@@ -250,38 +243,6 @@ struct rte_console {
/* Current protocol version */
#define SDPCM_PROT_VERSION 4
-/* SW frame header */
-#define SDPCM_PACKET_SEQUENCE(p) (((u8 *)p)[0] & 0xff)
-
-#define SDPCM_CHANNEL_MASK 0x00000f00
-#define SDPCM_CHANNEL_SHIFT 8
-#define SDPCM_PACKET_CHANNEL(p) (((u8 *)p)[1] & 0x0f)
-
-#define SDPCM_NEXTLEN_OFFSET 2
-
-/* Data Offset from SOF (HW Tag, SW Tag, Pad) */
-#define SDPCM_DOFFSET_OFFSET 3 /* Data Offset */
-#define SDPCM_DOFFSET_VALUE(p) (((u8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff)
-#define SDPCM_DOFFSET_MASK 0xff000000
-#define SDPCM_DOFFSET_SHIFT 24
-#define SDPCM_FCMASK_OFFSET 4 /* Flow control */
-#define SDPCM_FCMASK_VALUE(p) (((u8 *)p)[SDPCM_FCMASK_OFFSET] & 0xff)
-#define SDPCM_WINDOW_OFFSET 5 /* Credit based fc */
-#define SDPCM_WINDOW_VALUE(p) (((u8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff)
-
-#define SDPCM_SWHEADER_LEN 8 /* SW header is 64 bits */
-
-/* logical channel numbers */
-#define SDPCM_CONTROL_CHANNEL 0 /* Control channel Id */
-#define SDPCM_EVENT_CHANNEL 1 /* Asyc Event Indication Channel Id */
-#define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv Channel Id */
-#define SDPCM_GLOM_CHANNEL 3 /* For coalesced packets */
-#define SDPCM_TEST_CHANNEL 15 /* Reserved for test/debug packets */
-
-#define SDPCM_SEQUENCE_WRAP 256 /* wrap-around val for 8bit frame seq */
-
-#define SDPCM_GLOMDESC(p) (((u8 *)p)[1] & 0x80)
-
/*
* Shared structure between dongle and the host.
* The structure contains pointers to trap or assert information.
@@ -396,8 +357,8 @@ struct sdpcm_shared_le {
__le32 brpt_addr;
};
-/* SDIO read frame info */
-struct brcmf_sdio_read {
+/* dongle SDIO bus specific header info */
+struct brcmf_sdio_hdrinfo {
u8 seq_num;
u8 channel;
u16 len;
@@ -431,7 +392,7 @@ struct brcmf_sdio {
u8 hdrbuf[MAX_HDR_READ + BRCMF_SDALIGN];
u8 *rxhdr; /* Header of current rx frame (in hdrbuf) */
u8 rx_seq; /* Receive sequence number (expected) */
- struct brcmf_sdio_read cur_read;
+ struct brcmf_sdio_hdrinfo cur_read;
/* info of current read frame */
bool rxskip; /* Skip receive (awaiting NAK ACK) */
bool rxpending; /* Data frame pending in dongle */
@@ -500,6 +461,8 @@ struct brcmf_sdio {
struct brcmf_sdio_count sdcnt;
bool sr_enabled; /* SaveRestore enabled */
bool sleeping; /* SDIO bus sleeping */
+
+ u8 tx_hdrlen; /* sdio bus header length for tx packet */
};
/* clkstate */
@@ -510,7 +473,6 @@ struct brcmf_sdio {
#ifdef DEBUG
static int qcount[NUMPRIO];
-static int tx_packets[NUMPRIO];
#endif /* DEBUG */
#define DEFAULT_SDIO_DRIVE_STRENGTH 6 /* in milliamps */
@@ -1043,18 +1005,63 @@ static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
}
}
-static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
- struct brcmf_sdio_read *rd,
- enum brcmf_sdio_frmtype type)
+/**
+ * brcmfmac sdio bus specific header
+ * This is the lowest-layer header wrapped around packets exchanged between
+ * the host and the WiFi dongle; it carries information needed by the SDIO
+ * core and the firmware.
+ *
+ * It consists of two parts: a hardware header and a software header.
+ * hardware header (frame tag) - 4 bytes
+ * Byte 0~1: Frame length
+ * Byte 2~3: Checksum, bit-wise inverse of frame length
+ * software header - 8 bytes
+ * Byte 0: Rx/Tx sequence number
+ * Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag
+ * Byte 2: Length of next data frame, reserved for Tx
+ * Byte 3: Data offset
+ * Byte 4: Flow control bits, reserved for Tx
+ * Byte 5: Maximum sequence number allowed by firmware for Tx, N/A for Tx packets
+ * Byte 6~7: Reserved
+ */
+#define SDPCM_HWHDR_LEN 4
+#define SDPCM_SWHDR_LEN 8
+#define SDPCM_HDRLEN (SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN)
+/* software header */
+#define SDPCM_SEQ_MASK 0x000000ff
+#define SDPCM_SEQ_WRAP 256
+#define SDPCM_CHANNEL_MASK 0x00000f00
+#define SDPCM_CHANNEL_SHIFT 8
+#define SDPCM_CONTROL_CHANNEL 0 /* Control */
+#define SDPCM_EVENT_CHANNEL 1 /* Async Event Indication */
+#define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv */
+#define SDPCM_GLOM_CHANNEL 3 /* Coalesced packets */
+#define SDPCM_TEST_CHANNEL 15 /* Test/debug packets */
+#define SDPCM_GLOMDESC(p) (((u8 *)p)[1] & 0x80)
+#define SDPCM_NEXTLEN_MASK 0x00ff0000
+#define SDPCM_NEXTLEN_SHIFT 16
+#define SDPCM_DOFFSET_MASK 0xff000000
+#define SDPCM_DOFFSET_SHIFT 24
+#define SDPCM_FCMASK_MASK 0x000000ff
+#define SDPCM_WINDOW_MASK 0x0000ff00
+#define SDPCM_WINDOW_SHIFT 8
+
+static inline u8 brcmf_sdio_getdatoffset(u8 *swheader)
+{
+ u32 hdrvalue;
+ hdrvalue = *(u32 *)swheader;
+ return (u8)((hdrvalue & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT);
+}
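
For reference, a hedged sketch decoding the full 8-byte software header with
the masks above (the word-1 fields are only meaningful on receive):

static void sdpcm_swhdr_decode(const u8 *swheader)
{
	u32 w0 = le32_to_cpu(*(__le32 *)swheader);
	u32 w1 = le32_to_cpu(*(__le32 *)(swheader + 4));
	u8 seq = w0 & SDPCM_SEQ_MASK;
	u8 chan = (w0 & SDPCM_CHANNEL_MASK) >> SDPCM_CHANNEL_SHIFT;
	u8 doff = (w0 & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT;
	u8 fc = w1 & SDPCM_FCMASK_MASK;
	u8 window = (w1 & SDPCM_WINDOW_MASK) >> SDPCM_WINDOW_SHIFT;

	pr_debug("seq %u chan %u doff %u fc %#x window %u\n",
		 seq, chan, doff, fc, window);
}
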
+
+static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
+ struct brcmf_sdio_hdrinfo *rd,
+ enum brcmf_sdio_frmtype type)
{
u16 len, checksum;
u8 rx_seq, fc, tx_seq_max;
+ u32 swheader;
- /*
- * 4 bytes hardware header (frame tag)
- * Byte 0~1: Frame length
- * Byte 2~3: Checksum, bit-wise inverse of frame length
- */
+ /* hw header */
len = get_unaligned_le16(header);
checksum = get_unaligned_le16(header + sizeof(u16));
/* All zero means no more to read */
@@ -1083,24 +1090,16 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
}
rd->len = len;
- /*
- * 8 bytes hardware header
- * Byte 0: Rx sequence number
- * Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag
- * Byte 2: Length of next data frame
- * Byte 3: Data offset
- * Byte 4: Flow control bits
- * Byte 5: Maximum Sequence number allow for Tx
- * Byte 6~7: Reserved
- */
- if (type == BRCMF_SDIO_FT_SUPER &&
- SDPCM_GLOMDESC(&header[SDPCM_FRAMETAG_LEN])) {
+ /* software header */
+ header += SDPCM_HWHDR_LEN;
+ swheader = le32_to_cpu(*(__le32 *)header);
+ if (type == BRCMF_SDIO_FT_SUPER && SDPCM_GLOMDESC(header)) {
brcmf_err("Glom descriptor found in superframe head\n");
rd->len = 0;
return -EINVAL;
}
- rx_seq = SDPCM_PACKET_SEQUENCE(&header[SDPCM_FRAMETAG_LEN]);
- rd->channel = SDPCM_PACKET_CHANNEL(&header[SDPCM_FRAMETAG_LEN]);
+ rx_seq = (u8)(swheader & SDPCM_SEQ_MASK);
+ rd->channel = (swheader & SDPCM_CHANNEL_MASK) >> SDPCM_CHANNEL_SHIFT;
if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL &&
type != BRCMF_SDIO_FT_SUPER) {
brcmf_err("HW header length too long\n");
@@ -1120,7 +1119,7 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
rd->len = 0;
return -EINVAL;
}
- rd->dat_offset = SDPCM_DOFFSET_VALUE(&header[SDPCM_FRAMETAG_LEN]);
+ rd->dat_offset = brcmf_sdio_getdatoffset(header);
if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) {
brcmf_err("seq %d: bad data offset\n", rx_seq);
bus->sdcnt.rx_badhdr++;
@@ -1137,14 +1136,15 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
/* no need to check the reset for subframe */
if (type == BRCMF_SDIO_FT_SUB)
return 0;
- rd->len_nxtfrm = header[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+ rd->len_nxtfrm = (swheader & SDPCM_NEXTLEN_MASK) >> SDPCM_NEXTLEN_SHIFT;
if (rd->len_nxtfrm << 4 > MAX_RX_DATASZ) {
/* only warn for non-glom packets */
if (rd->channel != SDPCM_GLOM_CHANNEL)
brcmf_err("seq %d: next length error\n", rx_seq);
rd->len_nxtfrm = 0;
}
- fc = SDPCM_FCMASK_VALUE(&header[SDPCM_FRAMETAG_LEN]);
+ swheader = le32_to_cpu(*(__le32 *)(header + 4));
+ fc = swheader & SDPCM_FCMASK_MASK;
if (bus->flowcontrol != fc) {
if (~bus->flowcontrol & fc)
bus->sdcnt.fc_xoff++;
@@ -1153,7 +1153,7 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
bus->sdcnt.fc_rcvd++;
bus->flowcontrol = fc;
}
- tx_seq_max = SDPCM_WINDOW_VALUE(&header[SDPCM_FRAMETAG_LEN]);
+ tx_seq_max = (swheader & SDPCM_WINDOW_MASK) >> SDPCM_WINDOW_SHIFT;
if ((u8)(tx_seq_max - bus->tx_seq) > 0x40) {
brcmf_err("seq %d: max tx seq number error\n", rx_seq);
tx_seq_max = bus->tx_seq + 2;
@@ -1163,18 +1163,40 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
return 0;
}
+static inline void brcmf_sdio_update_hwhdr(u8 *header, u16 frm_length)
+{
+ *(__le16 *)header = cpu_to_le16(frm_length);
+ *(((__le16 *)header) + 1) = cpu_to_le16(~frm_length);
+}
+
+static void brcmf_sdio_hdpack(struct brcmf_sdio *bus, u8 *header,
+ struct brcmf_sdio_hdrinfo *hd_info)
+{
+ u32 sw_header;
+
+ brcmf_sdio_update_hwhdr(header, hd_info->len);
+
+ sw_header = bus->tx_seq;
+ sw_header |= (hd_info->channel << SDPCM_CHANNEL_SHIFT) &
+ SDPCM_CHANNEL_MASK;
+ sw_header |= (hd_info->dat_offset << SDPCM_DOFFSET_SHIFT) &
+ SDPCM_DOFFSET_MASK;
+ *(((__le32 *)header) + 1) = cpu_to_le32(sw_header);
+ *(((__le32 *)header) + 2) = 0;
+}
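
Typical usage, mirroring brcmf_sdbrcm_bus_txctl() later in this patch: fill
a brcmf_sdio_hdrinfo and let brcmf_sdio_hdpack() write both header words:

struct brcmf_sdio_hdrinfo hd_info = {0};

hd_info.len = (u16)msglen;		/* frame length, header included */
hd_info.channel = SDPCM_CONTROL_CHANNEL;
hd_info.dat_offset = doff;		/* payload offset from start of frame */
brcmf_sdio_hdpack(bus, frame, &hd_info);
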
+
static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
{
u16 dlen, totlen;
u8 *dptr, num = 0;
-
+ u32 align = 0;
u16 sublen;
struct sk_buff *pfirst, *pnext;
int errcode;
u8 doff, sfdoff;
- struct brcmf_sdio_read rd_new;
+ struct brcmf_sdio_hdrinfo rd_new;
/* If packets, issue read(s) and send up packet chain */
/* Return sequence numbers consumed? */
@@ -1182,6 +1204,11 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
brcmf_dbg(SDIO, "start: glomd %p glom %p\n",
bus->glomd, skb_peek(&bus->glom));
+ if (bus->sdiodev->pdata)
+ align = bus->sdiodev->pdata->sd_sgentry_align;
+ if (align < 4)
+ align = 4;
+
/* If there's a descriptor, generate the packet chain */
if (bus->glomd) {
pfirst = pnext = NULL;
@@ -1205,9 +1232,9 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
pnext = NULL;
break;
}
- if (sublen % BRCMF_SDALIGN) {
+ if (sublen % align) {
brcmf_err("sublen %d not multiple of %d\n",
- sublen, BRCMF_SDALIGN);
+ sublen, align);
}
totlen += sublen;
@@ -1220,7 +1247,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
}
/* Allocate/chain packet for next subframe */
- pnext = brcmu_pkt_buf_get_skb(sublen + BRCMF_SDALIGN);
+ pnext = brcmu_pkt_buf_get_skb(sublen + align);
if (pnext == NULL) {
brcmf_err("bcm_pkt_buf_get_skb failed, num %d len %d\n",
num, sublen);
@@ -1229,7 +1256,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
skb_queue_tail(&bus->glom, pnext);
/* Adhere to start alignment requirements */
- pkt_align(pnext, sublen, BRCMF_SDALIGN);
+ pkt_align(pnext, sublen, align);
}
/* If all allocations succeeded, save packet chain
@@ -1305,8 +1332,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
rd_new.seq_num = rxseq;
rd_new.len = dlen;
sdio_claim_host(bus->sdiodev->func[1]);
- errcode = brcmf_sdio_hdparser(bus, pfirst->data, &rd_new,
- BRCMF_SDIO_FT_SUPER);
+ errcode = brcmf_sdio_hdparse(bus, pfirst->data, &rd_new,
+ BRCMF_SDIO_FT_SUPER);
sdio_release_host(bus->sdiodev->func[1]);
bus->cur_read.len = rd_new.len_nxtfrm << 4;
@@ -1324,8 +1351,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
rd_new.len = pnext->len;
rd_new.seq_num = rxseq++;
sdio_claim_host(bus->sdiodev->func[1]);
- errcode = brcmf_sdio_hdparser(bus, pnext->data, &rd_new,
- BRCMF_SDIO_FT_SUB);
+ errcode = brcmf_sdio_hdparse(bus, pnext->data, &rd_new,
+ BRCMF_SDIO_FT_SUB);
sdio_release_host(bus->sdiodev->func[1]);
brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
pnext->data, 32, "subframe:\n");
@@ -1357,7 +1384,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
dptr = (u8 *) (pfirst->data);
sublen = get_unaligned_le16(dptr);
- doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+ doff = brcmf_sdio_getdatoffset(&dptr[SDPCM_HWHDR_LEN]);
brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
dptr, pfirst->len,
@@ -1535,7 +1562,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
uint rxleft = 0; /* Remaining number of frames allowed */
int ret; /* Return code from calls */
uint rxcount = 0; /* Total frames read */
- struct brcmf_sdio_read *rd = &bus->cur_read, rd_new;
+ struct brcmf_sdio_hdrinfo *rd = &bus->cur_read, rd_new;
u8 head_read = 0;
brcmf_dbg(TRACE, "Enter\n");
@@ -1583,8 +1610,8 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
bus->rxhdr, SDPCM_HDRLEN,
"RxHdr:\n");
- if (brcmf_sdio_hdparser(bus, bus->rxhdr, rd,
- BRCMF_SDIO_FT_NORMAL)) {
+ if (brcmf_sdio_hdparse(bus, bus->rxhdr, rd,
+ BRCMF_SDIO_FT_NORMAL)) {
sdio_release_host(bus->sdiodev->func[1]);
if (!bus->rxpending)
break;
@@ -1648,8 +1675,8 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN);
rd_new.seq_num = rd->seq_num;
sdio_claim_host(bus->sdiodev->func[1]);
- if (brcmf_sdio_hdparser(bus, bus->rxhdr, &rd_new,
- BRCMF_SDIO_FT_NORMAL)) {
+ if (brcmf_sdio_hdparse(bus, bus->rxhdr, &rd_new,
+ BRCMF_SDIO_FT_NORMAL)) {
rd->len = 0;
brcmu_pkt_buf_free_skb(pkt);
}
@@ -1693,7 +1720,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
/* Save superframe descriptor and allocate packet frame */
if (rd->channel == SDPCM_GLOM_CHANNEL) {
- if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) {
+ if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_HWHDR_LEN])) {
brcmf_dbg(GLOM, "glom descriptor, %d bytes:\n",
rd->len);
brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
@@ -1759,85 +1786,168 @@ brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
return;
}
+/* flag marking a dummy skb added for DMA alignment requirement */
+#define DUMMY_SKB_FLAG 0x10000
+/* bit mask of data length chopped from the previous packet */
+#define DUMMY_SKB_CHOP_LEN_MASK 0xffff
+/**
+ * brcmf_sdio_txpkt_prep - packet preparation for transmit
+ * @bus: brcmf_sdio structure pointer
+ * @pktq: packet list pointer
+ * @chan: virtual channel to transmit the packet
+ *
+ * Processing applied to the packet:
+ * - align the data buffer pointer
+ * - align the data buffer length
+ * - prepare the header
+ * Return: 0 on success, negative error code on failure
+ */
+static int
+brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
+ uint chan)
+{
+ u16 head_pad, tail_pad, tail_chop, head_align, sg_align;
+ int ntail;
+ struct sk_buff *pkt_next, *pkt_new;
+ u8 *dat_buf;
+ unsigned blksize = bus->sdiodev->func[SDIO_FUNC_2]->cur_blksize;
+ struct brcmf_sdio_hdrinfo hd_info = {0};
+
+ /* SDIO ADMA requires at least 32 bit alignment */
+ head_align = 4;
+ sg_align = 4;
+ if (bus->sdiodev->pdata) {
+ head_align = bus->sdiodev->pdata->sd_head_align > 4 ?
+ bus->sdiodev->pdata->sd_head_align : 4;
+ sg_align = bus->sdiodev->pdata->sd_sgentry_align > 4 ?
+ bus->sdiodev->pdata->sd_sgentry_align : 4;
+ }
+ /* sg entry alignment should be a divisor of block size */
+ WARN_ON(blksize % sg_align);
+
+ pkt_next = pktq->next;
+ dat_buf = (u8 *)(pkt_next->data);
+
+ /* Check head padding */
+ head_pad = ((unsigned long)dat_buf % head_align);
+ if (head_pad) {
+ if (skb_headroom(pkt_next) < head_pad) {
+ bus->sdiodev->bus_if->tx_realloc++;
+ head_pad = 0;
+ if (skb_cow(pkt_next, head_pad))
+ return -ENOMEM;
+ }
+ skb_push(pkt_next, head_pad);
+ dat_buf = (u8 *)(pkt_next->data);
+ memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
+ }
+
+ /* Check tail padding */
+ pkt_new = NULL;
+ tail_chop = pkt_next->len % sg_align;
+ tail_pad = sg_align - tail_chop;
+ tail_pad += blksize - (pkt_next->len + tail_pad) % blksize;
+ if (skb_tailroom(pkt_next) < tail_pad && pkt_next->len > blksize) {
+ pkt_new = brcmu_pkt_buf_get_skb(tail_pad + tail_chop);
+ if (pkt_new == NULL)
+ return -ENOMEM;
+ memcpy(pkt_new->data,
+ pkt_next->data + pkt_next->len - tail_chop,
+ tail_chop);
+ *(u32 *)(pkt_new->cb) = DUMMY_SKB_FLAG + tail_chop;
+ skb_trim(pkt_next, pkt_next->len - tail_chop);
+ __skb_queue_after(pktq, pkt_next, pkt_new);
+ } else {
+ ntail = pkt_next->data_len + tail_pad -
+ (pkt_next->end - pkt_next->tail);
+ if (skb_cloned(pkt_next) || ntail > 0)
+ if (pskb_expand_head(pkt_next, 0, ntail, GFP_ATOMIC))
+ return -ENOMEM;
+ if (skb_linearize(pkt_next))
+ return -ENOMEM;
+ dat_buf = (u8 *)(pkt_next->data);
+ __skb_put(pkt_next, tail_pad);
+ }
+
+ /* Now prep the header */
+ if (pkt_new)
+ hd_info.len = pkt_next->len + tail_chop;
+ else
+ hd_info.len = pkt_next->len - tail_pad;
+ hd_info.channel = chan;
+ hd_info.dat_offset = head_pad + bus->tx_hdrlen;
+ brcmf_sdio_hdpack(bus, dat_buf, &hd_info);
+
+ if (BRCMF_BYTES_ON() &&
+ ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
+ (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)))
+ brcmf_dbg_hex_dump(true, pkt_next, hd_info.len, "Tx Frame:\n");
+ else if (BRCMF_HDRS_ON())
+ brcmf_dbg_hex_dump(true, pkt_next, head_pad + bus->tx_hdrlen,
+ "Tx Header:\n");
+
+ return 0;
+}
+
+/**
+ * brcmf_sdio_txpkt_postp - packet post processing for transmit
+ * @bus: brcmf_sdio structure pointer
+ * @pktq: packet list pointer
+ *
+ * Processes to be applied to the packet
+ * - Remove head padding
+ * - Remove tail padding
+ */
+static void
+brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
+{
+ u8 *hdr;
+ u32 dat_offset;
+ u32 dummy_flags, chop_len;
+ struct sk_buff *pkt_next, *tmp, *pkt_prev;
+
+ skb_queue_walk_safe(pktq, pkt_next, tmp) {
+ dummy_flags = *(u32 *)(pkt_next->cb);
+ if (dummy_flags & DUMMY_SKB_FLAG) {
+ chop_len = dummy_flags & DUMMY_SKB_CHOP_LEN_MASK;
+ if (chop_len) {
+ pkt_prev = pkt_next->prev;
+ memcpy(pkt_prev->data + pkt_prev->len,
+ pkt_next->data, chop_len);
+ skb_put(pkt_prev, chop_len);
+ }
+ __skb_unlink(pkt_next, pktq);
+ brcmu_pkt_buf_free_skb(pkt_next);
+ } else {
+ hdr = pkt_next->data + SDPCM_HWHDR_LEN;
+ dat_offset = le32_to_cpu(*(__le32 *)hdr);
+ dat_offset = (dat_offset & SDPCM_DOFFSET_MASK) >>
+ SDPCM_DOFFSET_SHIFT;
+ skb_pull(pkt_next, dat_offset);
+ }
+ }
+}
+
/* Writes a HW/SW header into the packet and sends it. */
/* Assumes: (a) header space already there, (b) caller holds lock */
static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
uint chan)
{
int ret;
- u8 *frame;
- u16 len, pad = 0;
- u32 swheader;
int i;
+ struct sk_buff_head localq;
brcmf_dbg(TRACE, "Enter\n");
- frame = (u8 *) (pkt->data);
-
- /* Add alignment padding, allocate new packet if needed */
- pad = ((unsigned long)frame % BRCMF_SDALIGN);
- if (pad) {
- if (skb_headroom(pkt) < pad) {
- brcmf_dbg(INFO, "insufficient headroom %d for %d pad\n",
- skb_headroom(pkt), pad);
- bus->sdiodev->bus_if->tx_realloc++;
- ret = skb_cow(pkt, BRCMF_SDALIGN);
- if (ret)
- goto done;
- pad = ((unsigned long)frame % BRCMF_SDALIGN);
- }
- skb_push(pkt, pad);
- frame = (u8 *) (pkt->data);
- memset(frame, 0, pad + SDPCM_HDRLEN);
- }
- /* precondition: pad < BRCMF_SDALIGN */
-
- /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
- len = (u16) (pkt->len);
- *(__le16 *) frame = cpu_to_le16(len);
- *(((__le16 *) frame) + 1) = cpu_to_le16(~len);
-
- /* Software tag: channel, sequence number, data offset */
- swheader =
- ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) | bus->tx_seq |
- (((pad +
- SDPCM_HDRLEN) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
-
- *(((__le32 *) frame) + 1) = cpu_to_le32(swheader);
- *(((__le32 *) frame) + 2) = 0;
-
-#ifdef DEBUG
- tx_packets[pkt->priority]++;
-#endif
-
- brcmf_dbg_hex_dump(BRCMF_BYTES_ON() &&
- ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
- (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)),
- frame, len, "Tx Frame:\n");
- brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
- ((BRCMF_CTL_ON() &&
- chan == SDPCM_CONTROL_CHANNEL) ||
- (BRCMF_DATA_ON() &&
- chan != SDPCM_CONTROL_CHANNEL))) &&
- BRCMF_HDRS_ON(),
- frame, min_t(u16, len, 16), "TxHdr:\n");
-
- /* Raise len to next SDIO block to eliminate tail command */
- if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
- u16 pad = bus->blocksize - (len % bus->blocksize);
- if ((pad <= bus->roundup) && (pad < bus->blocksize))
- len += pad;
- } else if (len % BRCMF_SDALIGN) {
- len += BRCMF_SDALIGN - (len % BRCMF_SDALIGN);
- }
-
- /* Some controllers have trouble with odd bytes -- round to even */
- if (len & (ALIGNMENT - 1))
- len = roundup(len, ALIGNMENT);
+ __skb_queue_head_init(&localq);
+ __skb_queue_tail(&localq, pkt);
+ ret = brcmf_sdio_txpkt_prep(bus, &localq, chan);
+ if (ret)
+ goto done;
sdio_claim_host(bus->sdiodev->func[1]);
ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, pkt);
+ SDIO_FUNC_2, F2SYNC, &localq);
bus->sdcnt.f2txdata++;
if (ret < 0) {
@@ -1865,11 +1975,11 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
}
sdio_release_host(bus->sdiodev->func[1]);
if (ret == 0)
- bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
done:
- /* restore pkt buffer pointer before calling tx complete routine */
- skb_pull(pkt, SDPCM_HDRLEN + pad);
+ brcmf_sdio_txpkt_postp(bus, &localq);
+ __skb_dequeue_tail(&localq);
brcmf_txcomplete(bus->sdiodev->dev, pkt, ret == 0);
return ret;
}
@@ -1880,7 +1990,6 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
u32 intstatus = 0;
int ret = 0, prec_out;
uint cnt = 0;
- uint datalen;
u8 tx_prec_map;
brcmf_dbg(TRACE, "Enter\n");
@@ -1896,7 +2005,6 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
break;
}
spin_unlock_bh(&bus->txqlock);
- datalen = pkt->len - SDPCM_HDRLEN;
ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL);
@@ -2221,7 +2329,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
}
} else {
- bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
}
sdio_release_host(bus->sdiodev->func[1]);
bus->ctrl_frame_stat = false;
@@ -2276,13 +2384,14 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
+ ulong flags;
brcmf_dbg(TRACE, "Enter\n");
datalen = pkt->len;
/* Add space for the header */
- skb_push(pkt, SDPCM_HDRLEN);
+ skb_push(pkt, bus->tx_hdrlen);
/* precondition: IS_ALIGNED((unsigned long)(pkt->data), 2) */
prec = prio2prec((pkt->priority & PRIOMASK));
@@ -2293,10 +2402,9 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
bus->sdcnt.fcqueued++;
/* Priority based enq */
- spin_lock_bh(&bus->txqlock);
+ spin_lock_irqsave(&bus->txqlock, flags);
if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) {
- skb_pull(pkt, SDPCM_HDRLEN);
- brcmf_txcomplete(bus->sdiodev->dev, pkt, false);
+ skb_pull(pkt, bus->tx_hdrlen);
brcmf_err("out of bus->txq !!!\n");
ret = -ENOSR;
} else {
@@ -2307,7 +2415,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
bus->txoff = true;
brcmf_txflowblock(bus->sdiodev->dev, true);
}
- spin_unlock_bh(&bus->txqlock);
+ spin_unlock_irqrestore(&bus->txqlock, flags);
#ifdef DEBUG
if (pktq_plen(&bus->txq, prec) > qcount[prec])
@@ -2436,7 +2544,7 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
return ret;
}
- bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
return ret;
}
@@ -2446,19 +2554,19 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
{
u8 *frame;
u16 len;
- u32 swheader;
uint retries = 0;
u8 doff = 0;
int ret = -1;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
+ struct brcmf_sdio_hdrinfo hd_info = {0};
brcmf_dbg(TRACE, "Enter\n");
/* Back the pointer to make room for the bus header */
- frame = msg - SDPCM_HDRLEN;
- len = (msglen += SDPCM_HDRLEN);
+ frame = msg - bus->tx_hdrlen;
+ len = (msglen += bus->tx_hdrlen);
/* Add alignment padding (optional for ctl frames) */
doff = ((unsigned long)frame % BRCMF_SDALIGN);
@@ -2466,10 +2574,10 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
frame -= doff;
len += doff;
msglen += doff;
- memset(frame, 0, doff + SDPCM_HDRLEN);
+ memset(frame, 0, doff + bus->tx_hdrlen);
}
/* precondition: doff < BRCMF_SDALIGN */
- doff += SDPCM_HDRLEN;
+ doff += bus->tx_hdrlen;
/* Round send length to next SDIO block */
if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
@@ -2491,18 +2599,10 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
brcmf_sdbrcm_bus_sleep(bus, false, false);
sdio_release_host(bus->sdiodev->func[1]);
- /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
- *(__le16 *) frame = cpu_to_le16((u16) msglen);
- *(((__le16 *) frame) + 1) = cpu_to_le16(~msglen);
-
- /* Software tag: channel, sequence number, data offset */
- swheader =
- ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) &
- SDPCM_CHANNEL_MASK)
- | bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) &
- SDPCM_DOFFSET_MASK);
- put_unaligned_le32(swheader, frame + SDPCM_FRAMETAG_LEN);
- put_unaligned_le32(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
+ hd_info.len = (u16)msglen;
+ hd_info.channel = SDPCM_CONTROL_CHANNEL;
+ hd_info.dat_offset = doff;
+ brcmf_sdio_hdpack(bus, frame, &hd_info);
if (!data_ok(bus)) {
brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n",
@@ -3733,7 +3833,7 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
struct brcmf_sdio *bus;
struct brcmf_bus_dcmd *dlst;
u32 dngl_txglom;
- u32 dngl_txglomalign;
+ u32 txglomalign = 0;
u8 idx;
brcmf_dbg(TRACE, "Enter\n");
@@ -3752,7 +3852,7 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
bus->txbound = BRCMF_TXBOUND;
bus->rxbound = BRCMF_RXBOUND;
bus->txminmax = BRCMF_TXMINMAX;
- bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
+ bus->tx_seq = SDPCM_SEQ_WRAP - 1;
INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq");
@@ -3794,8 +3894,11 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
bus->sdiodev->bus_if->chip = bus->ci->chip;
bus->sdiodev->bus_if->chiprev = bus->ci->chiprev;
- /* Attach to the brcmf/OS/network interface */
- ret = brcmf_attach(SDPCM_RESERVE, bus->sdiodev->dev);
+ /* default sdio bus header length for tx packet */
+ bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN;
+
+ /* Attach to the common layer, reserve hdr space */
+ ret = brcmf_attach(bus->tx_hdrlen, bus->sdiodev->dev);
if (ret != 0) {
brcmf_err("brcmf_attach failed\n");
goto fail;
@@ -3827,9 +3930,13 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
dlst->param_len = sizeof(u32);
} else {
/* otherwise, set txglomalign */
- dngl_txglomalign = bus->sdiodev->bus_if->align;
+ if (sdiodev->pdata)
+ txglomalign = sdiodev->pdata->sd_sgentry_align;
+ /* SDIO ADMA requires at least 32 bit alignment */
+ if (txglomalign < 4)
+ txglomalign = 4;
dlst->name = "bus:txglomalign";
- dlst->param = (char *)&dngl_txglomalign;
+ dlst->param = (char *)&txglomalign;
dlst->param_len = sizeof(u32);
}
list_add(&dlst->list, &bus->sdiodev->bus_if->dcmd_list);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
index 83ee53a7c76e..fad77dd2a3a5 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
@@ -185,6 +185,10 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
ifevent->action, ifevent->ifidx, ifevent->bssidx,
ifevent->flags, ifevent->role);
+ if (ifevent->flags & BRCMF_E_IF_FLAG_NOIF) {
+ brcmf_dbg(EVENT, "event can be ignored\n");
+ return;
+ }
if (ifevent->ifidx >= BRCMF_MAX_IFS) {
brcmf_err("invalid interface index: %u\n",
ifevent->ifidx);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
index 665ef69e974b..ecabb04f33c3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
@@ -69,4 +69,25 @@ struct brcmf_fil_bss_enable_le {
__le32 enable;
};
+/**
+ * struct brcmf_tdls_iovar_le - common structure for TDLS iovars.
+ *
+ * @ea: ether address of peer station.
+ * @mode: mode value depending on specific tdls iovar.
+ * @chanspec: channel specification.
+ * @pad: unused (for future use).
+ */
+struct brcmf_tdls_iovar_le {
+ u8 ea[ETH_ALEN]; /* Station address */
+ u8 mode; /* mode: depends on iovar */
+ __le16 chanspec;
+ __le32 pad; /* future */
+};
+
+enum brcmf_tdls_manual_ep_ops {
+ BRCMF_TDLS_MANUAL_EP_CREATE = 1,
+ BRCMF_TDLS_MANUAL_EP_DELETE = 3,
+ BRCMF_TDLS_MANUAL_EP_DISCOVERY = 6
+};
+
#endif /* FWIL_TYPES_H_ */
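
A hedged sketch of building a manual TDLS endpoint request with the new
structure; the iovar name and the set-var call are assumptions, not taken
from this hunk:

struct brcmf_tdls_iovar_le info;

memset(&info, 0, sizeof(info));
memcpy(info.ea, peer_mac, ETH_ALEN);		/* peer station address */
info.mode = BRCMF_TDLS_MANUAL_EP_CREATE;	/* from the enum above */
/* assumed helper and iovar name:
 * brcmf_fil_iovar_data_set(ifp, "tdls_endpoint", &info, sizeof(info));
 */
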
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
index 29b1f24c2d0f..82f9140f3d35 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -422,9 +422,12 @@ struct brcmf_fws_macdesc_table {
struct brcmf_fws_info {
struct brcmf_pub *drvr;
+ spinlock_t spinlock;
+ ulong flags;
struct brcmf_fws_stats stats;
struct brcmf_fws_hanger hanger;
enum brcmf_fws_fcmode fcmode;
+ bool fw_signals;
bool bcmc_credit_check;
struct brcmf_fws_macdesc_table desc;
struct workqueue_struct *fws_wq;
@@ -483,6 +486,18 @@ static int brcmf_fws_get_tlv_len(struct brcmf_fws_info *fws,
}
#undef BRCMF_FWS_TLV_DEF
+static void brcmf_fws_lock(struct brcmf_fws_info *fws)
+ __acquires(&fws->spinlock)
+{
+ spin_lock_irqsave(&fws->spinlock, fws->flags);
+}
+
+static void brcmf_fws_unlock(struct brcmf_fws_info *fws)
+ __releases(&fws->spinlock)
+{
+ spin_unlock_irqrestore(&fws->spinlock, fws->flags);
+}
+
static bool brcmf_fws_ifidx_match(struct sk_buff *skb, void *arg)
{
u32 ifidx = brcmf_skb_if_flags_get_field(skb, INDEX);
@@ -869,8 +884,11 @@ static bool brcmf_fws_tim_update(struct brcmf_fws_info *fws,
skcb->state = BRCMF_FWS_SKBSTATE_TIM;
bus = fws->drvr->bus_if;
err = brcmf_fws_hdrpush(fws, skb);
- if (err == 0)
+ if (err == 0) {
+ brcmf_fws_unlock(fws);
err = brcmf_bus_txdata(bus, skb);
+ brcmf_fws_lock(fws);
+ }
if (err)
brcmu_pkt_buf_free_skb(skb);
return true;
@@ -905,26 +923,10 @@ static int brcmf_fws_rssi_indicate(struct brcmf_fws_info *fws, s8 rssi)
return 0;
}
-/* using macro so sparse checking does not complain
- * about locking imbalance.
- */
-#define brcmf_fws_lock(drvr, flags) \
-do { \
- flags = 0; \
- spin_lock_irqsave(&((drvr)->fws_spinlock), (flags)); \
-} while (0)
-
-/* using macro so sparse checking does not complain
- * about locking imbalance.
- */
-#define brcmf_fws_unlock(drvr, flags) \
- spin_unlock_irqrestore(&((drvr)->fws_spinlock), (flags))
-
static
int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
{
struct brcmf_fws_mac_descriptor *entry, *existing;
- ulong flags;
u8 mac_handle;
u8 ifidx;
u8 *addr;
@@ -938,10 +940,10 @@ int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
if (entry->occupied) {
brcmf_dbg(TRACE, "deleting %s mac %pM\n",
entry->name, addr);
- brcmf_fws_lock(fws->drvr, flags);
+ brcmf_fws_lock(fws);
brcmf_fws_macdesc_cleanup(fws, entry, -1);
brcmf_fws_macdesc_deinit(entry);
- brcmf_fws_unlock(fws->drvr, flags);
+ brcmf_fws_unlock(fws);
} else
fws->stats.mac_update_failed++;
return 0;
@@ -950,13 +952,13 @@ int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
existing = brcmf_fws_macdesc_lookup(fws, addr);
if (IS_ERR(existing)) {
if (!entry->occupied) {
- brcmf_fws_lock(fws->drvr, flags);
+ brcmf_fws_lock(fws);
entry->mac_handle = mac_handle;
brcmf_fws_macdesc_init(entry, addr, ifidx);
brcmf_fws_macdesc_set_name(fws, entry);
brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT,
BRCMF_FWS_PSQ_LEN);
- brcmf_fws_unlock(fws->drvr, flags);
+ brcmf_fws_unlock(fws);
brcmf_dbg(TRACE, "add %s mac %pM\n", entry->name, addr);
} else {
fws->stats.mac_update_failed++;
@@ -964,13 +966,13 @@ int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
} else {
if (entry != existing) {
brcmf_dbg(TRACE, "copy mac %s\n", existing->name);
- brcmf_fws_lock(fws->drvr, flags);
+ brcmf_fws_lock(fws);
memcpy(entry, existing,
offsetof(struct brcmf_fws_mac_descriptor, psq));
entry->mac_handle = mac_handle;
brcmf_fws_macdesc_deinit(existing);
brcmf_fws_macdesc_set_name(fws, entry);
- brcmf_fws_unlock(fws->drvr, flags);
+ brcmf_fws_unlock(fws);
brcmf_dbg(TRACE, "relocate %s mac %pM\n", entry->name,
addr);
} else {
@@ -986,7 +988,6 @@ static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws,
u8 type, u8 *data)
{
struct brcmf_fws_mac_descriptor *entry;
- ulong flags;
u8 mac_handle;
int ret;
@@ -996,7 +997,7 @@ static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws,
fws->stats.mac_ps_update_failed++;
return -ESRCH;
}
- brcmf_fws_lock(fws->drvr, flags);
+ brcmf_fws_lock(fws);
/* a state update should wipe old credits */
entry->requested_credit = 0;
entry->requested_packet = 0;
@@ -1011,7 +1012,7 @@ static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws,
brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_VO, true);
ret = BRCMF_FWS_RET_OK_NOSCHEDULE;
}
- brcmf_fws_unlock(fws->drvr, flags);
+ brcmf_fws_unlock(fws);
return ret;
}
@@ -1019,7 +1020,6 @@ static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws,
u8 type, u8 *data)
{
struct brcmf_fws_mac_descriptor *entry;
- ulong flags;
u8 ifidx;
int ret;
@@ -1038,7 +1038,7 @@ static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws,
brcmf_dbg(TRACE, "%s (%d): %s\n", brcmf_fws_get_tlv_name(type), type,
entry->name);
- brcmf_fws_lock(fws->drvr, flags);
+ brcmf_fws_lock(fws);
switch (type) {
case BRCMF_FWS_TYPE_INTERFACE_OPEN:
entry->state = BRCMF_FWS_STATE_OPEN;
@@ -1050,10 +1050,10 @@ static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws,
break;
default:
ret = -EINVAL;
- brcmf_fws_unlock(fws->drvr, flags);
+ brcmf_fws_unlock(fws);
goto fail;
}
- brcmf_fws_unlock(fws->drvr, flags);
+ brcmf_fws_unlock(fws);
return ret;
fail:
@@ -1065,7 +1065,6 @@ static int brcmf_fws_request_indicate(struct brcmf_fws_info *fws, u8 type,
u8 *data)
{
struct brcmf_fws_mac_descriptor *entry;
- ulong flags;
entry = &fws->desc.nodes[data[1] & 0x1F];
if (!entry->occupied) {
@@ -1079,14 +1078,14 @@ static int brcmf_fws_request_indicate(struct brcmf_fws_info *fws, u8 type,
brcmf_dbg(TRACE, "%s (%d): %s cnt %d bmp %d\n",
brcmf_fws_get_tlv_name(type), type, entry->name,
data[0], data[2]);
- brcmf_fws_lock(fws->drvr, flags);
+ brcmf_fws_lock(fws);
if (type == BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT)
entry->requested_credit = data[0];
else
entry->requested_packet = data[0];
entry->ac_bitmap = data[2];
- brcmf_fws_unlock(fws->drvr, flags);
+ brcmf_fws_unlock(fws);
return BRCMF_FWS_RET_OK_SCHEDULE;
}
@@ -1160,7 +1159,8 @@ static void brcmf_fws_return_credits(struct brcmf_fws_info *fws,
static void brcmf_fws_schedule_deq(struct brcmf_fws_info *fws)
{
/* only schedule dequeue when there are credits for delayed traffic */
- if (fws->fifo_credit_map & fws->fifo_delay_map)
+ if ((fws->fifo_credit_map & fws->fifo_delay_map) ||
+ (!brcmf_fws_fc_active(fws) && fws->fifo_delay_map))
queue_work(fws->fws_wq, &fws->fws_dequeue_work);
}
@@ -1383,7 +1383,6 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws,
u8 *data)
{
- ulong flags;
int i;
if (fws->fcmode != BRCMF_FWS_FCMODE_EXPLICIT_CREDIT) {
@@ -1392,19 +1391,18 @@ static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws,
}
brcmf_dbg(DATA, "enter: data %pM\n", data);
- brcmf_fws_lock(fws->drvr, flags);
+ brcmf_fws_lock(fws);
for (i = 0; i < BRCMF_FWS_FIFO_COUNT; i++)
brcmf_fws_return_credits(fws, i, data[i]);
brcmf_dbg(DATA, "map: credit %x delay %x\n", fws->fifo_credit_map,
fws->fifo_delay_map);
- brcmf_fws_unlock(fws->drvr, flags);
+ brcmf_fws_unlock(fws);
return BRCMF_FWS_RET_OK_SCHEDULE;
}
static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data)
{
- ulong lflags;
__le32 status_le;
u32 status;
u32 hslot;
@@ -1418,9 +1416,9 @@ static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data)
hslot = brcmf_txstatus_get_field(status, HSLOT);
genbit = brcmf_txstatus_get_field(status, GENERATION);
- brcmf_fws_lock(fws->drvr, lflags);
+ brcmf_fws_lock(fws);
brcmf_fws_txs_process(fws, flags, hslot, genbit);
- brcmf_fws_unlock(fws->drvr, lflags);
+ brcmf_fws_unlock(fws);
return BRCMF_FWS_RET_OK_NOSCHEDULE;
}
@@ -1440,7 +1438,6 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
{
struct brcmf_fws_info *fws = ifp->drvr->fws;
int i;
- ulong flags;
u8 *credits = data;
if (e->datalen < BRCMF_FWS_FIFO_COUNT) {
@@ -1453,7 +1450,7 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
fws->creditmap_received = true;
brcmf_dbg(TRACE, "enter: credits %pM\n", credits);
- brcmf_fws_lock(ifp->drvr, flags);
+ brcmf_fws_lock(fws);
for (i = 0; i < ARRAY_SIZE(fws->fifo_credit); i++) {
if (*credits)
fws->fifo_credit_map |= 1 << i;
@@ -1462,7 +1459,7 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
fws->fifo_credit[i] = *credits++;
}
brcmf_fws_schedule_deq(fws);
- brcmf_fws_unlock(ifp->drvr, flags);
+ brcmf_fws_unlock(fws);
return 0;
}
@@ -1471,18 +1468,18 @@ static int brcmf_fws_notify_bcmc_credit_support(struct brcmf_if *ifp,
void *data)
{
struct brcmf_fws_info *fws = ifp->drvr->fws;
- ulong flags;
- brcmf_fws_lock(ifp->drvr, flags);
+ brcmf_fws_lock(fws);
if (fws)
fws->bcmc_credit_check = true;
- brcmf_fws_unlock(ifp->drvr, flags);
+ brcmf_fws_unlock(fws);
return 0;
}
int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
struct sk_buff *skb)
{
+ struct brcmf_skb_reorder_data *rd;
struct brcmf_fws_info *fws = drvr->fws;
u8 *signal_data;
s16 data_len;
@@ -1497,8 +1494,10 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
WARN_ON(signal_len > skb->len);
+ if (!signal_len)
+ return 0;
/* if flow control disabled, skip to packet data and leave */
- if (!signal_len || !drvr->fw_signals) {
+ if (!fws->fw_signals) {
skb_pull(skb, signal_len);
return 0;
}
@@ -1536,9 +1535,12 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
err = BRCMF_FWS_RET_OK_NOSCHEDULE;
switch (type) {
- case BRCMF_FWS_TYPE_HOST_REORDER_RXPKTS:
case BRCMF_FWS_TYPE_COMP_TXSTATUS:
break;
+ case BRCMF_FWS_TYPE_HOST_REORDER_RXPKTS:
+ rd = (struct brcmf_skb_reorder_data *)skb->cb;
+ rd->reorder = data;
+ break;
case BRCMF_FWS_TYPE_MACDESC_ADD:
case BRCMF_FWS_TYPE_MACDESC_DEL:
brcmf_fws_macdesc_indicate(fws, type, data);
@@ -1694,17 +1696,22 @@ static int brcmf_fws_commit_skb(struct brcmf_fws_info *fws, int fifo,
return PTR_ERR(entry);
brcmf_fws_precommit_skb(fws, fifo, skb);
+ entry->transit_count++;
+ if (entry->suppressed)
+ entry->suppr_transit_count++;
+ brcmf_fws_unlock(fws);
rc = brcmf_bus_txdata(bus, skb);
+ brcmf_fws_lock(fws);
brcmf_dbg(DATA, "%s flags %X htod %X bus_tx %d\n", entry->name,
skcb->if_flags, skcb->htod, rc);
if (rc < 0) {
+ entry->transit_count--;
+ if (entry->suppressed)
+ entry->suppr_transit_count--;
brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb);
goto rollback;
}
- entry->transit_count++;
- if (entry->suppressed)
- entry->suppr_transit_count++;
fws->stats.pkt2bus++;
fws->stats.send_pkts[fifo]++;
if (brcmf_skb_if_flags_get_field(skb, REQUESTED))
@@ -1741,11 +1748,11 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
struct brcmf_fws_info *fws = drvr->fws;
struct brcmf_skbuff_cb *skcb = brcmf_skbcb(skb);
struct ethhdr *eh = (struct ethhdr *)(skb->data);
- ulong flags;
int fifo = BRCMF_FWS_FIFO_BCMC;
bool multicast = is_multicast_ether_addr(eh->h_dest);
bool pae = eh->h_proto == htons(ETH_P_PAE);
+ brcmf_dbg(DATA, "tx proto=0x%X\n", ntohs(eh->h_proto));
/* determine the priority */
if (!skb->priority)
skb->priority = cfg80211_classify8021d(skb);
@@ -1754,14 +1761,6 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
if (pae)
atomic_inc(&ifp->pend_8021x_cnt);
- if (!brcmf_fws_fc_active(fws)) {
- /* If the protocol uses a data header, apply it */
- brcmf_proto_hdrpush(drvr, ifp->ifidx, 0, skb);
-
- /* Use bus module to send data frame */
- return brcmf_bus_txdata(drvr->bus_if, skb);
- }
-
/* set control buffer information */
skcb->if_flags = 0;
skcb->state = BRCMF_FWS_SKBSTATE_NEW;
@@ -1769,7 +1768,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
if (!multicast)
fifo = brcmf_fws_prio2fifo[skb->priority];
- brcmf_fws_lock(drvr, flags);
+ brcmf_fws_lock(fws);
if (fifo != BRCMF_FWS_FIFO_AC_BE && fifo < BRCMF_FWS_FIFO_BCMC)
fws->borrow_defer_timestamp = jiffies +
BRCMF_FWS_BORROW_DEFER_PERIOD;
@@ -1789,7 +1788,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
}
brcmu_pkt_buf_free_skb(skb);
}
- brcmf_fws_unlock(drvr, flags);
+ brcmf_fws_unlock(fws);
return 0;
}
@@ -1809,7 +1808,7 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp)
struct brcmf_fws_info *fws = ifp->drvr->fws;
struct brcmf_fws_mac_descriptor *entry;
- if (!ifp->ndev || !ifp->drvr->fw_signals)
+ if (!ifp->ndev)
return;
entry = &fws->desc.iface[ifp->ifidx];
@@ -1824,31 +1823,54 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp)
void brcmf_fws_del_interface(struct brcmf_if *ifp)
{
struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc;
- ulong flags;
if (!entry)
return;
- brcmf_fws_lock(ifp->drvr, flags);
+ brcmf_fws_lock(ifp->drvr->fws);
ifp->fws_desc = NULL;
brcmf_dbg(TRACE, "deleting %s\n", entry->name);
brcmf_fws_macdesc_deinit(entry);
brcmf_fws_cleanup(ifp->drvr->fws, ifp->ifidx);
- brcmf_fws_unlock(ifp->drvr, flags);
+ brcmf_fws_unlock(ifp->drvr->fws);
}
static void brcmf_fws_dequeue_worker(struct work_struct *worker)
{
struct brcmf_fws_info *fws;
+ struct brcmf_pub *drvr;
struct sk_buff *skb;
- ulong flags;
int fifo;
+ u32 hslot;
+ u32 ifidx;
+ int ret;
fws = container_of(worker, struct brcmf_fws_info, fws_dequeue_work);
+ drvr = fws->drvr;
- brcmf_fws_lock(fws->drvr, flags);
+ brcmf_fws_lock(fws);
for (fifo = BRCMF_FWS_FIFO_BCMC; fifo >= 0 && !fws->bus_flow_blocked;
fifo--) {
+ if (!brcmf_fws_fc_active(fws)) {
+ while ((skb = brcmf_fws_deq(fws, fifo)) != NULL) {
+ hslot = brcmf_skb_htod_tag_get_field(skb,
+ HSLOT);
+ brcmf_fws_hanger_poppkt(&fws->hanger, hslot,
+ &skb, true);
+ ifidx = brcmf_skb_if_flags_get_field(skb,
+ INDEX);
+ brcmf_proto_hdrpush(drvr, ifidx, 0, skb);
+ /* Use bus module to send data frame */
+ brcmf_fws_unlock(fws);
+ ret = brcmf_bus_txdata(drvr->bus_if, skb);
+ brcmf_fws_lock(fws);
+ if (ret < 0)
+ brcmf_txfinalize(drvr, skb, false);
+ if (fws->bus_flow_blocked)
+ break;
+ }
+ continue;
+ }
while ((fws->fifo_credit[fifo]) || ((!fws->bcmc_credit_check) &&
(fifo == BRCMF_FWS_FIFO_BCMC))) {
skb = brcmf_fws_deq(fws, fifo);
@@ -1876,42 +1898,43 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker)
}
}
}
- brcmf_fws_unlock(fws->drvr, flags);
+ brcmf_fws_unlock(fws);
}
int brcmf_fws_init(struct brcmf_pub *drvr)
{
+ struct brcmf_fws_info *fws;
u32 tlv = BRCMF_FWS_FLAGS_RSSI_SIGNALS;
int rc;
- if (!drvr->fw_signals)
- return 0;
-
- spin_lock_init(&drvr->fws_spinlock);
-
drvr->fws = kzalloc(sizeof(*(drvr->fws)), GFP_KERNEL);
if (!drvr->fws) {
rc = -ENOMEM;
goto fail;
}
+ fws = drvr->fws;
+
+ spin_lock_init(&fws->spinlock);
+
/* set linkage back */
- drvr->fws->drvr = drvr;
- drvr->fws->fcmode = fcmode;
+ fws->drvr = drvr;
+ fws->fcmode = fcmode;
- drvr->fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq");
- if (drvr->fws->fws_wq == NULL) {
+ fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq");
+ if (fws->fws_wq == NULL) {
brcmf_err("workqueue creation failed\n");
rc = -EBADF;
goto fail;
}
- INIT_WORK(&drvr->fws->fws_dequeue_work, brcmf_fws_dequeue_worker);
+ INIT_WORK(&fws->fws_dequeue_work, brcmf_fws_dequeue_worker);
/* enable firmware signalling if fcmode active */
- if (drvr->fws->fcmode != BRCMF_FWS_FCMODE_NONE)
+ if (fws->fcmode != BRCMF_FWS_FCMODE_NONE)
tlv |= BRCMF_FWS_FLAGS_XONXOFF_SIGNALS |
BRCMF_FWS_FLAGS_CREDIT_STATUS_SIGNALS |
- BRCMF_FWS_FLAGS_HOST_PROPTXSTATUS_ACTIVE;
+ BRCMF_FWS_FLAGS_HOST_PROPTXSTATUS_ACTIVE |
+ BRCMF_FWS_FLAGS_HOST_RXREORDER_ACTIVE;
rc = brcmf_fweh_register(drvr, BRCMF_E_FIFO_CREDIT_MAP,
brcmf_fws_notify_credit_map);
@@ -1927,31 +1950,33 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
goto fail;
}
- /* setting the iovar may fail if feature is unsupported
+ /* Setting the iovar may fail if feature is unsupported
* so leave the rc as is so driver initialization can
- * continue.
+	 * continue. Set the mode back to none to indicate it is not enabled.
*/
+ fws->fw_signals = true;
if (brcmf_fil_iovar_int_set(drvr->iflist[0], "tlv", tlv)) {
brcmf_err("failed to set bdcv2 tlv signaling\n");
- goto fail_event;
+ fws->fcmode = BRCMF_FWS_FCMODE_NONE;
+ fws->fw_signals = false;
}
- brcmf_fws_hanger_init(&drvr->fws->hanger);
- brcmf_fws_macdesc_init(&drvr->fws->desc.other, NULL, 0);
- brcmf_fws_macdesc_set_name(drvr->fws, &drvr->fws->desc.other);
- brcmu_pktq_init(&drvr->fws->desc.other.psq, BRCMF_FWS_PSQ_PREC_COUNT,
+ if (brcmf_fil_iovar_int_set(drvr->iflist[0], "ampdu_hostreorder", 1))
+ brcmf_dbg(INFO, "enabling AMPDU host-reorder failed\n");
+
+ brcmf_fws_hanger_init(&fws->hanger);
+ brcmf_fws_macdesc_init(&fws->desc.other, NULL, 0);
+ brcmf_fws_macdesc_set_name(fws, &fws->desc.other);
+ brcmu_pktq_init(&fws->desc.other.psq, BRCMF_FWS_PSQ_PREC_COUNT,
BRCMF_FWS_PSQ_LEN);
/* create debugfs file for statistics */
- brcmf_debugfs_create_fws_stats(drvr, &drvr->fws->stats);
+ brcmf_debugfs_create_fws_stats(drvr, &fws->stats);
brcmf_dbg(INFO, "%s bdcv2 tlv signaling [%x]\n",
- drvr->fw_signals ? "enabled" : "disabled", tlv);
+ fws->fw_signals ? "enabled" : "disabled", tlv);
return 0;
-fail_event:
- brcmf_fweh_unregister(drvr, BRCMF_E_BCMC_CREDIT_SUPPORT);
- brcmf_fweh_unregister(drvr, BRCMF_E_FIFO_CREDIT_MAP);
fail:
brcmf_fws_deinit(drvr);
return rc;
@@ -1960,24 +1985,18 @@ fail:
void brcmf_fws_deinit(struct brcmf_pub *drvr)
{
struct brcmf_fws_info *fws = drvr->fws;
- ulong flags;
if (!fws)
return;
- /* disable firmware signalling entirely
- * to avoid using the workqueue.
- */
- drvr->fw_signals = false;
-
if (drvr->fws->fws_wq)
destroy_workqueue(drvr->fws->fws_wq);
/* cleanup */
- brcmf_fws_lock(drvr, flags);
+ brcmf_fws_lock(fws);
brcmf_fws_cleanup(fws, -1);
drvr->fws = NULL;
- brcmf_fws_unlock(drvr, flags);
+ brcmf_fws_unlock(fws);
/* free top structure */
kfree(fws);
@@ -1985,7 +2004,7 @@ void brcmf_fws_deinit(struct brcmf_pub *drvr)
bool brcmf_fws_fc_active(struct brcmf_fws_info *fws)
{
- if (!fws)
+ if (!fws->creditmap_received)
return false;
return fws->fcmode != BRCMF_FWS_FCMODE_NONE;
@@ -1993,17 +2012,16 @@ bool brcmf_fws_fc_active(struct brcmf_fws_info *fws)
void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb)
{
- ulong flags;
u32 hslot;
if (brcmf_skbcb(skb)->state == BRCMF_FWS_SKBSTATE_TIM) {
brcmu_pkt_buf_free_skb(skb);
return;
}
- brcmf_fws_lock(fws->drvr, flags);
+ brcmf_fws_lock(fws);
hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot, 0);
- brcmf_fws_unlock(fws->drvr, flags);
+ brcmf_fws_unlock(fws);
}
void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked)
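The fwsignal.c hunks above all follow one conversion: the old brcmf_fws_lock(drvr, flags)/brcmf_fws_unlock(drvr, flags) pair, which forced every caller to carry a local `ulong flags`, becomes a pair of helpers keyed on the brcmf_fws_info itself, with the spinlock moved from brcmf_pub into the fws structure (see the spin_lock_init change in brcmf_fws_init). A minimal sketch of such helpers, assuming the saved irq flags live in the locked object next to the new spinlock field (the `flags` member name here is illustrative, not from the patch):

static void brcmf_fws_lock(struct brcmf_fws_info *fws)
		__acquires(&fws->spinlock)
{
	spin_lock_irqsave(&fws->spinlock, fws->flags);
}

static void brcmf_fws_unlock(struct brcmf_fws_info *fws)
		__releases(&fws->spinlock)
{
	spin_unlock_irqrestore(&fws->spinlock, fws->flags);
}

Stashing the irq flags inside the locked object works because only the current lock holder ever reads or writes fws->flags, and it is what lets every caller drop its local `ulong flags` declaration, which is why the hunks delete them. Note also how brcmf_fws_commit_skb now drops the lock around brcmf_bus_txdata() and moves the transit-count bookkeeping before the call, undoing it on failure.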
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
index 79555f006d53..d7a974532909 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -1430,7 +1430,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
IEEE80211_BAND_5GHZ);
wdev = &ifp->vif->wdev;
- cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len,
+ cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len, 0,
GFP_ATOMIC);
kfree(mgmt_frame);
@@ -1895,7 +1895,7 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
IEEE80211_BAND_2GHZ :
IEEE80211_BAND_5GHZ);
- cfg80211_rx_mgmt(&vif->wdev, freq, 0, mgmt_frame, mgmt_frame_len,
+ cfg80211_rx_mgmt(&vif->wdev, freq, 0, mgmt_frame, mgmt_frame_len, 0,
GFP_ATOMIC);
brcmf_dbg(INFO, "mgmt_frame_len (%d) , e->datalen (%d), chanspec (%04x), freq (%d)\n",
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 09786a539950..2b5407f002e5 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -208,7 +208,7 @@ extern int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
*/
extern int
brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff *pkt);
+ uint flags, struct sk_buff_head *pktq);
extern int
brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
uint flags, u8 *buf, uint nbytes);
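brcmf_sdcard_send_pkt() now takes a whole sk_buff_head instead of a single skb, so the SDIO code can batch several frames into one transfer. An illustrative caller, not taken from the patch:

struct sk_buff_head pktq;
int err;

skb_queue_head_init(&pktq);
__skb_queue_tail(&pktq, skb);	/* queue one or more frames */
err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, &pktq);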
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 322cadc51ded..39e01a7c8556 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -614,7 +614,6 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
return 0;
fail:
- brcmf_txcomplete(dev, skb, false);
return ret;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 7fa71f73cfe8..571f013cebbb 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -3155,7 +3155,9 @@ static int brcmf_cfg80211_sched_scan_stop(struct wiphy *wiphy,
}
#ifdef CONFIG_NL80211_TESTMODE
-static int brcmf_cfg80211_testmode(struct wiphy *wiphy, void *data, int len)
+static int brcmf_cfg80211_testmode(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ void *data, int len)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct net_device *ndev = cfg_to_ndev(cfg);
@@ -4126,6 +4128,53 @@ static void brcmf_cfg80211_crit_proto_stop(struct wiphy *wiphy,
clear_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status);
}
+static int brcmf_convert_nl80211_tdls_oper(enum nl80211_tdls_operation oper)
+{
+ int ret;
+
+ switch (oper) {
+ case NL80211_TDLS_DISCOVERY_REQ:
+ ret = BRCMF_TDLS_MANUAL_EP_DISCOVERY;
+ break;
+ case NL80211_TDLS_SETUP:
+ ret = BRCMF_TDLS_MANUAL_EP_CREATE;
+ break;
+ case NL80211_TDLS_TEARDOWN:
+ ret = BRCMF_TDLS_MANUAL_EP_DELETE;
+ break;
+ default:
+ brcmf_err("unsupported operation: %d\n", oper);
+ ret = -EOPNOTSUPP;
+ }
+ return ret;
+}
+
+static int brcmf_cfg80211_tdls_oper(struct wiphy *wiphy,
+ struct net_device *ndev, u8 *peer,
+ enum nl80211_tdls_operation oper)
+{
+ struct brcmf_if *ifp;
+ struct brcmf_tdls_iovar_le info;
+ int ret = 0;
+
+ ret = brcmf_convert_nl80211_tdls_oper(oper);
+ if (ret < 0)
+ return ret;
+
+ ifp = netdev_priv(ndev);
+ memset(&info, 0, sizeof(info));
+ info.mode = (u8)ret;
+ if (peer)
+ memcpy(info.ea, peer, ETH_ALEN);
+
+ ret = brcmf_fil_iovar_data_set(ifp, "tdls_endpoint",
+ &info, sizeof(info));
+ if (ret < 0)
+ brcmf_err("tdls_endpoint iovar failed: ret=%d\n", ret);
+
+ return ret;
+}
+
static struct cfg80211_ops wl_cfg80211_ops = {
.add_virtual_intf = brcmf_cfg80211_add_iface,
.del_virtual_intf = brcmf_cfg80211_del_iface,
@@ -4164,9 +4213,8 @@ static struct cfg80211_ops wl_cfg80211_ops = {
.stop_p2p_device = brcmf_p2p_stop_device,
.crit_proto_start = brcmf_cfg80211_crit_proto_start,
.crit_proto_stop = brcmf_cfg80211_crit_proto_stop,
-#ifdef CONFIG_NL80211_TESTMODE
- .testmode_cmd = brcmf_cfg80211_testmode
-#endif
+ .tdls_oper = brcmf_cfg80211_tdls_oper,
+ CFG80211_TESTMODE_CMD(brcmf_cfg80211_testmode)
};
static s32 brcmf_nl80211_iftype_to_mode(enum nl80211_iftype type)
@@ -4287,7 +4335,8 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT |
WIPHY_FLAG_OFFCHAN_TX |
- WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+ WIPHY_FLAG_SUPPORTS_TDLS;
wiphy->mgmt_stypes = brcmf_txrx_stypes;
wiphy->max_remain_on_channel_duration = 5000;
brcmf_wiphy_pno_params(wiphy);
@@ -4908,6 +4957,12 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
goto cfg80211_p2p_attach_out;
}
+ err = brcmf_fil_iovar_int_set(ifp, "tdls_enable", 1);
+ if (err) {
+ brcmf_dbg(INFO, "TDLS not enabled (%d)\n", err);
+ wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_TDLS;
+ }
+
err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_VERSION,
&io_type);
if (err) {
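Two things happen in the wl_cfg80211.c hunks: TDLS support is wired up (a .tdls_oper handler that maps the nl80211 operation onto the firmware's tdls_endpoint iovar, with WIPHY_FLAG_SUPPORTS_TDLS cleared again if the tdls_enable probe fails), and the open-coded #ifdef around .testmode_cmd is replaced by the CFG80211_TESTMODE_CMD() helper. The helper keeps the ops initializer clean by expanding to a designated initializer only when testmode is configured; a sketch of how cfg80211.h defines it:

#ifdef CONFIG_NL80211_TESTMODE
#define CFG80211_TESTMODE_CMD(cmd)	.testmode_cmd = (cmd),
#else
#define CFG80211_TESTMODE_CMD(cmd)
#endif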
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
index e4fd1ee3d690..53365977bfd6 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
@@ -679,27 +679,6 @@ bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode)
return mode == BCMA_CLKMODE_FAST;
}
-void ai_pci_up(struct si_pub *sih)
-{
- struct si_info *sii;
-
- sii = container_of(sih, struct si_info, pub);
-
- if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI)
- bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci[0], true);
-}
-
-/* Unconfigure and/or apply various WARs when going down */
-void ai_pci_down(struct si_pub *sih)
-{
- struct si_info *sii;
-
- sii = container_of(sih, struct si_info, pub);
-
- if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI)
- bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci[0], false);
-}
-
/* Enable BT-COEX & Ex-PA for 4313 */
void ai_epa_4313war(struct si_pub *sih)
{
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
index 89562c1fbf49..a8a267b5b87a 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
@@ -183,9 +183,6 @@ extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
extern bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode);
extern bool ai_deviceremoved(struct si_pub *sih);
-extern void ai_pci_down(struct si_pub *sih);
-extern void ai_pci_up(struct si_pub *sih);
-
/* Enable Ex-PA for 4313 */
extern void ai_epa_4313war(struct si_pub *sih);
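The deleted ai_pci_up()/ai_pci_down() wrappers did nothing but look up the bcma bus and toggle the PCIe L1 entry timer, so the brcmsmac callers (see main.c below) now call the bcma core helpers directly. A rough equivalence, assuming the bcma helpers are a superset of the old L1-timer toggle:

/*
 * Rough equivalence (the bcma helpers may apply further PCIe
 * workarounds beyond extending the L1 entry timer):
 *
 *   ai_pci_up(wlc_hw->sih)   ->  bcma_core_pci_up(wlc_hw->d11core->bus);
 *   ai_pci_down(wlc_hw->sih) ->  bcma_core_pci_down(wlc_hw->d11core->bus);
 */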
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
index bd982856d385..fa391e4eb098 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
@@ -928,9 +928,9 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
}
} else if (txs->phyerr) {
update_rate = false;
- brcms_err(wlc->hw->d11core,
- "%s: ampdu tx phy error (0x%x)\n",
- __func__, txs->phyerr);
+ brcms_dbg_ht(wlc->hw->d11core,
+ "%s: ampdu tx phy error (0x%x)\n",
+ __func__, txs->phyerr);
}
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
index 1860c572b3c4..4fb9635d3919 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
@@ -1015,9 +1015,10 @@ static bool dma64_txidle(struct dma_info *di)
/*
* post receive buffers
- * return false is refill failed completely and ring is empty this will stall
- * the rx dma and user might want to call rxfill again asap. This unlikely
- * happens on memory-rich NIC, but often on memory-constrained dongle
+ * Return false if refill failed completely or dma mapping failed. The ring
+ * is empty, which will stall the rx dma, and the user may want to call rxfill
+ * again asap. This is unlikely to happen on a memory-rich NIC, but often on
+ * memory-constrained dongle.
*/
bool dma_rxfill(struct dma_pub *pub)
{
@@ -1078,6 +1079,8 @@ bool dma_rxfill(struct dma_pub *pub)
pa = dma_map_single(di->dmadev, p->data, di->rxbufsize,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(di->dmadev, pa))
+ return false;
/* save the free packet pointer */
di->rxp[rxout] = p;
@@ -1284,7 +1287,11 @@ static void dma_txenq(struct dma_info *di, struct sk_buff *p)
/* get physical address of buffer start */
pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE);
-
+ /* if mapping failed, free skb */
+ if (dma_mapping_error(di->dmadev, pa)) {
+ brcmu_pkt_buf_free_skb(p);
+ return;
+ }
/* With a DMA segment list, Descriptor table is filled
* using the segment list instead of looping over
* buffers in multi-chain DMA. Therefore, EOF for SGLIST
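Both dma.c hunks add the same missing step: checking the result of dma_map_single() with dma_mapping_error() before the address is handed to hardware. A mapping can fail (IOMMU exhaustion, swiotlb pressure), and an unchecked handle is exactly what CONFIG_DMA_API_DEBUG flags. A minimal sketch of the pattern, with generic names rather than the driver's:

static int example_map_and_post(struct device *dev, struct sk_buff *skb)
{
	dma_addr_t pa = dma_map_single(dev, skb->data, skb->len,
				       DMA_TO_DEVICE);

	if (dma_mapping_error(dev, pa)) {
		dev_kfree_skb_any(skb);	/* never touch pa: not a valid bus address */
		return -ENOMEM;
	}
	/* ... write pa/len into the descriptor ring ... */
	return 0;
}

Note the asymmetry retained above: the rx refill path just returns false and lets the caller retry later, while the tx path owns the skb and must free it.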
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 9fd6f2fef11b..4608e0eb1493 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -882,8 +882,8 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
mcl = le16_to_cpu(txh->MacTxControlLow);
if (txs->phyerr)
- brcms_err(wlc->hw->d11core, "phyerr 0x%x, rate 0x%x\n",
- txs->phyerr, txh->MainRates);
+ brcms_dbg_tx(wlc->hw->d11core, "phyerr 0x%x, rate 0x%x\n",
+ txs->phyerr, txh->MainRates);
if (txs->frameid != le16_to_cpu(txh->TxFrameID)) {
brcms_err(wlc->hw->d11core, "frameid != txh->TxFrameID\n");
@@ -4652,7 +4652,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
wlc->band->phyrev = wlc_hw->band->phyrev;
wlc->band->radioid = wlc_hw->band->radioid;
wlc->band->radiorev = wlc_hw->band->radiorev;
-
+ brcms_dbg_info(core, "wl%d: phy %u/%u radio %x/%u\n", unit,
+ wlc->band->phytype, wlc->band->phyrev,
+ wlc->band->radioid, wlc->band->radiorev);
/* default contention windows size limits */
wlc_hw->band->CWmin = APHY_CWMIN;
wlc_hw->band->CWmax = PHY_CWMAX;
@@ -4667,7 +4669,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
brcms_c_coredisable(wlc_hw);
/* Match driver "down" state */
- ai_pci_down(wlc_hw->sih);
+ bcma_core_pci_down(wlc_hw->d11core->bus);
/* turn off pll and xtal to match driver "down" state */
brcms_b_xtal(wlc_hw, OFF);
@@ -5010,12 +5012,12 @@ static int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
*/
if (brcms_b_radio_read_hwdisabled(wlc_hw)) {
/* put SB PCI in down state again */
- ai_pci_down(wlc_hw->sih);
+ bcma_core_pci_down(wlc_hw->d11core->bus);
brcms_b_xtal(wlc_hw, OFF);
return -ENOMEDIUM;
}
- ai_pci_up(wlc_hw->sih);
+ bcma_core_pci_up(wlc_hw->d11core->bus);
/* reset the d11 core */
brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS);
@@ -5212,7 +5214,7 @@ static int brcms_b_down_finish(struct brcms_hardware *wlc_hw)
/* turn off primary xtal and pll */
if (!wlc_hw->noreset) {
- ai_pci_down(wlc_hw->sih);
+ bcma_core_pci_down(wlc_hw->d11core->bus);
brcms_b_xtal(wlc_hw, OFF);
}
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
index 3d6b16ce4687..b2d6d6da3daf 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -1137,8 +1137,9 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,
gain0_15 = ((biq1 & 0xf) << 12) |
((tia & 0xf) << 8) |
((lna2 & 0x3) << 6) |
- ((lna2 &
- 0x3) << 4) | ((lna1 & 0x3) << 2) | ((lna1 & 0x3) << 0);
+ ((lna2 & 0x3) << 4) |
+ ((lna1 & 0x3) << 2) |
+ ((lna1 & 0x3) << 0);
mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0);
mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0);
@@ -1328,6 +1329,43 @@ static u32 wlc_lcnphy_measure_digital_power(struct brcms_phy *pi, u16 nsamples)
return (iq_est.i_pwr + iq_est.q_pwr) / nsamples;
}
+static bool wlc_lcnphy_rx_iq_cal_gain(struct brcms_phy *pi, u16 biq1_gain,
+ u16 tia_gain, u16 lna2_gain)
+{
+ u32 i_thresh_l, q_thresh_l;
+ u32 i_thresh_h, q_thresh_h;
+ struct lcnphy_iq_est iq_est_h, iq_est_l;
+
+ wlc_lcnphy_set_rx_gain_by_distribution(pi, 0, 0, 0, biq1_gain, tia_gain,
+ lna2_gain, 0);
+
+ wlc_lcnphy_rx_gain_override_enable(pi, true);
+ wlc_lcnphy_start_tx_tone(pi, 2000, (40 >> 1), 0);
+ udelay(500);
+ write_radio_reg(pi, RADIO_2064_REG112, 0);
+ if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_l))
+ return false;
+
+ wlc_lcnphy_start_tx_tone(pi, 2000, 40, 0);
+ udelay(500);
+ write_radio_reg(pi, RADIO_2064_REG112, 0);
+ if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_h))
+ return false;
+
+ i_thresh_l = (iq_est_l.i_pwr << 1);
+ i_thresh_h = (iq_est_l.i_pwr << 2) + iq_est_l.i_pwr;
+
+ q_thresh_l = (iq_est_l.q_pwr << 1);
+ q_thresh_h = (iq_est_l.q_pwr << 2) + iq_est_l.q_pwr;
+ if ((iq_est_h.i_pwr > i_thresh_l) &&
+ (iq_est_h.i_pwr < i_thresh_h) &&
+ (iq_est_h.q_pwr > q_thresh_l) &&
+ (iq_est_h.q_pwr < q_thresh_h))
+ return true;
+
+ return false;
+}
+
static bool
wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
const struct lcnphy_rx_iqcomp *iqcomp,
@@ -1342,8 +1380,8 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old,
rfoverride3_old, rfoverride3val_old, rfoverride4_old,
rfoverride4val_old, afectrlovr_old, afectrlovrval_old;
- int tia_gain;
- u32 received_power, rx_pwr_threshold;
+ int tia_gain, lna2_gain, biq1_gain;
+ bool set_gain;
u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl;
u16 values_to_save[11];
s16 *ptr;
@@ -1368,126 +1406,125 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
goto cal_done;
}
- if (module == 1) {
+ WARN_ON(module != 1);
+ tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
+ wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
- tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
- wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
+ for (i = 0; i < 11; i++)
+ values_to_save[i] =
+ read_radio_reg(pi, rxiq_cal_rf_reg[i]);
+ Core1TxControl_old = read_phy_reg(pi, 0x631);
+
+ or_phy_reg(pi, 0x631, 0x0015);
+
+ RFOverride0_old = read_phy_reg(pi, 0x44c);
+ RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
+ rfoverride2_old = read_phy_reg(pi, 0x4b0);
+ rfoverride2val_old = read_phy_reg(pi, 0x4b1);
+ rfoverride3_old = read_phy_reg(pi, 0x4f9);
+ rfoverride3val_old = read_phy_reg(pi, 0x4fa);
+ rfoverride4_old = read_phy_reg(pi, 0x938);
+ rfoverride4val_old = read_phy_reg(pi, 0x939);
+ afectrlovr_old = read_phy_reg(pi, 0x43b);
+ afectrlovrval_old = read_phy_reg(pi, 0x43c);
+ old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
+ old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
- for (i = 0; i < 11; i++)
- values_to_save[i] =
- read_radio_reg(pi, rxiq_cal_rf_reg[i]);
- Core1TxControl_old = read_phy_reg(pi, 0x631);
-
- or_phy_reg(pi, 0x631, 0x0015);
-
- RFOverride0_old = read_phy_reg(pi, 0x44c);
- RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
- rfoverride2_old = read_phy_reg(pi, 0x4b0);
- rfoverride2val_old = read_phy_reg(pi, 0x4b1);
- rfoverride3_old = read_phy_reg(pi, 0x4f9);
- rfoverride3val_old = read_phy_reg(pi, 0x4fa);
- rfoverride4_old = read_phy_reg(pi, 0x938);
- rfoverride4val_old = read_phy_reg(pi, 0x939);
- afectrlovr_old = read_phy_reg(pi, 0x43b);
- afectrlovrval_old = read_phy_reg(pi, 0x43c);
- old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
- old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
-
- tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
- if (tx_gain_override_old) {
- wlc_lcnphy_get_tx_gain(pi, &old_gains);
- tx_gain_index_old = pi_lcn->lcnphy_current_index;
- }
+ tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
+ if (tx_gain_override_old) {
+ wlc_lcnphy_get_tx_gain(pi, &old_gains);
+ tx_gain_index_old = pi_lcn->lcnphy_current_index;
+ }
- wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx);
+ wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx);
- mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
- mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);
+ mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
+ mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);
- mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
- mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);
+ mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
+ mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);
- write_radio_reg(pi, RADIO_2064_REG116, 0x06);
- write_radio_reg(pi, RADIO_2064_REG12C, 0x07);
- write_radio_reg(pi, RADIO_2064_REG06A, 0xd3);
- write_radio_reg(pi, RADIO_2064_REG098, 0x03);
- write_radio_reg(pi, RADIO_2064_REG00B, 0x7);
- mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4);
- write_radio_reg(pi, RADIO_2064_REG01D, 0x01);
- write_radio_reg(pi, RADIO_2064_REG114, 0x01);
- write_radio_reg(pi, RADIO_2064_REG02E, 0x10);
- write_radio_reg(pi, RADIO_2064_REG12A, 0x08);
-
- mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0);
- mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0);
- mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1);
- mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1);
- mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2);
- mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2);
- mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3);
- mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3);
- mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5);
- mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5);
-
- mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
- mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
-
- wlc_lcnphy_start_tx_tone(pi, 2000, 120, 0);
- write_phy_reg(pi, 0x6da, 0xffff);
- or_phy_reg(pi, 0x6db, 0x3);
- wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
- wlc_lcnphy_rx_gain_override_enable(pi, true);
-
- tia_gain = 8;
- rx_pwr_threshold = 950;
- while (tia_gain > 0) {
- tia_gain -= 1;
- wlc_lcnphy_set_rx_gain_by_distribution(pi,
- 0, 0, 2, 2,
- (u16)
- tia_gain, 1, 0);
- udelay(500);
+ write_radio_reg(pi, RADIO_2064_REG116, 0x06);
+ write_radio_reg(pi, RADIO_2064_REG12C, 0x07);
+ write_radio_reg(pi, RADIO_2064_REG06A, 0xd3);
+ write_radio_reg(pi, RADIO_2064_REG098, 0x03);
+ write_radio_reg(pi, RADIO_2064_REG00B, 0x7);
+ mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4);
+ write_radio_reg(pi, RADIO_2064_REG01D, 0x01);
+ write_radio_reg(pi, RADIO_2064_REG114, 0x01);
+ write_radio_reg(pi, RADIO_2064_REG02E, 0x10);
+ write_radio_reg(pi, RADIO_2064_REG12A, 0x08);
+
+ mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0);
+ mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0);
+ mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1);
+ mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1);
+ mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2);
+ mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2);
+ mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3);
+ mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3);
+ mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5);
+ mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5);
- received_power =
- wlc_lcnphy_measure_digital_power(pi, 2000);
- if (received_power < rx_pwr_threshold)
- break;
- }
- result = wlc_lcnphy_calc_rx_iq_comp(pi, 0xffff);
+ mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
+ mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
- wlc_lcnphy_stop_tx_tone(pi);
+ write_phy_reg(pi, 0x6da, 0xffff);
+ or_phy_reg(pi, 0x6db, 0x3);
+
+ wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
+ for (lna2_gain = 3; lna2_gain >= 0; lna2_gain--) {
+ for (tia_gain = 4; tia_gain >= 0; tia_gain--) {
+ for (biq1_gain = 6; biq1_gain >= 0; biq1_gain--) {
+ set_gain = wlc_lcnphy_rx_iq_cal_gain(pi,
+ (u16)
+ biq1_gain,
+ (u16)
+ tia_gain,
+ (u16)
+ lna2_gain);
+ if (!set_gain)
+ continue;
+
+ result = wlc_lcnphy_calc_rx_iq_comp(pi, 1024);
+ goto stop_tone;
+ }
+ }
+ }
- write_phy_reg(pi, 0x631, Core1TxControl_old);
+stop_tone:
+ wlc_lcnphy_stop_tx_tone(pi);
- write_phy_reg(pi, 0x44c, RFOverrideVal0_old);
- write_phy_reg(pi, 0x44d, RFOverrideVal0_old);
- write_phy_reg(pi, 0x4b0, rfoverride2_old);
- write_phy_reg(pi, 0x4b1, rfoverride2val_old);
- write_phy_reg(pi, 0x4f9, rfoverride3_old);
- write_phy_reg(pi, 0x4fa, rfoverride3val_old);
- write_phy_reg(pi, 0x938, rfoverride4_old);
- write_phy_reg(pi, 0x939, rfoverride4val_old);
- write_phy_reg(pi, 0x43b, afectrlovr_old);
- write_phy_reg(pi, 0x43c, afectrlovrval_old);
- write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
- write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl);
+ write_phy_reg(pi, 0x631, Core1TxControl_old);
+
+ write_phy_reg(pi, 0x44c, RFOverrideVal0_old);
+ write_phy_reg(pi, 0x44d, RFOverrideVal0_old);
+ write_phy_reg(pi, 0x4b0, rfoverride2_old);
+ write_phy_reg(pi, 0x4b1, rfoverride2val_old);
+ write_phy_reg(pi, 0x4f9, rfoverride3_old);
+ write_phy_reg(pi, 0x4fa, rfoverride3val_old);
+ write_phy_reg(pi, 0x938, rfoverride4_old);
+ write_phy_reg(pi, 0x939, rfoverride4val_old);
+ write_phy_reg(pi, 0x43b, afectrlovr_old);
+ write_phy_reg(pi, 0x43c, afectrlovrval_old);
+ write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
+ write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl);
- wlc_lcnphy_clear_trsw_override(pi);
+ wlc_lcnphy_clear_trsw_override(pi);
- mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2);
+ mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2);
- for (i = 0; i < 11; i++)
- write_radio_reg(pi, rxiq_cal_rf_reg[i],
- values_to_save[i]);
+ for (i = 0; i < 11; i++)
+ write_radio_reg(pi, rxiq_cal_rf_reg[i],
+ values_to_save[i]);
- if (tx_gain_override_old)
- wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old);
- else
- wlc_lcnphy_disable_tx_gain_override(pi);
+ if (tx_gain_override_old)
+ wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old);
+ else
+ wlc_lcnphy_disable_tx_gain_override(pi);
- wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl);
- wlc_lcnphy_rx_gain_override_enable(pi, false);
- }
+ wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl);
+ wlc_lcnphy_rx_gain_override_enable(pi, false);
cal_done:
kfree(ptr);
@@ -1789,6 +1826,19 @@ wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi, u8 channel)
write_radio_reg(pi, RADIO_2064_REG038, 3);
write_radio_reg(pi, RADIO_2064_REG091, 7);
}
+
+ if (!(pi->sh->boardflags & BFL_FEM)) {
+ static const u8 reg038[14] = {
+ 0xd, 0xe, 0xd, 0xd, 0xd, 0xc, 0xa,
+ 0xb, 0xb, 0x3, 0x3, 0x2, 0x0, 0x0
+ };
+
+ write_radio_reg(pi, RADIO_2064_REG02A, 0xf);
+ write_radio_reg(pi, RADIO_2064_REG091, 0x3);
+ write_radio_reg(pi, RADIO_2064_REG038, 0x3);
+
+ write_radio_reg(pi, RADIO_2064_REG038, reg038[channel - 1]);
+ }
}
static int
@@ -1983,6 +2033,16 @@ wlc_lcnphy_set_tssi_mux(struct brcms_phy *pi, enum lcnphy_tssi_mode pos)
} else {
mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1);
mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8);
+ mod_radio_reg(pi, RADIO_2064_REG028, 0x1, 0x0);
+ mod_radio_reg(pi, RADIO_2064_REG11A, 0x4, 1<<2);
+ mod_radio_reg(pi, RADIO_2064_REG036, 0x10, 0x0);
+ mod_radio_reg(pi, RADIO_2064_REG11A, 0x10, 1<<4);
+ mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0);
+ mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x77);
+ mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0xe<<1);
+ mod_radio_reg(pi, RADIO_2064_REG112, 0x80, 1<<7);
+ mod_radio_reg(pi, RADIO_2064_REG005, 0x7, 1<<1);
+ mod_radio_reg(pi, RADIO_2064_REG029, 0xf0, 0<<4);
}
} else {
mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2);
@@ -2069,13 +2129,23 @@ static void wlc_lcnphy_pwrctrl_rssiparams(struct brcms_phy *pi)
(auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12));
mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5));
+ mod_radio_reg(pi, RADIO_2064_REG07C, (1 << 0), (1 << 0));
}
static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
{
struct phytbl_info tab;
u32 rfseq, ind;
+ enum lcnphy_tssi_mode mode;
+ u8 tssi_sel;
+ if (pi->sh->boardflags & BFL_FEM) {
+ tssi_sel = 0x1;
+ mode = LCNPHY_TSSI_EXT;
+ } else {
+ tssi_sel = 0xe;
+ mode = LCNPHY_TSSI_POST_PA;
+ }
tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
tab.tbl_width = 32;
tab.tbl_ptr = &ind;
@@ -2096,7 +2166,7 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
mod_phy_reg(pi, 0x503, (0x1 << 4), (1) << 4);
- wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT);
+ wlc_lcnphy_set_tssi_mux(pi, mode);
mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14);
mod_phy_reg(pi, 0x4a4, (0x1 << 15), (1) << 15);
@@ -2132,9 +2202,10 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
mod_phy_reg(pi, 0x49a, (0x1ff << 0), (0xff) << 0);
if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
- mod_radio_reg(pi, RADIO_2064_REG028, 0xf, 0xe);
+ mod_radio_reg(pi, RADIO_2064_REG028, 0xf, tssi_sel);
mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4);
} else {
+ mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, tssi_sel << 1);
mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1);
mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 1 << 3);
}
@@ -2181,6 +2252,10 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8);
+ mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x0);
+ mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0);
+ mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8);
+
wlc_lcnphy_pwrctrl_rssiparams(pi);
}
@@ -2799,6 +2874,8 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
read_radio_reg(pi, RADIO_2064_REG007) & 1;
u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10;
u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4;
+ u8 SAVE_bbmult = wlc_lcnphy_get_bbmult(pi);
+
idleTssi = read_phy_reg(pi, 0x4ab);
suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
MCTL_EN_MAC));
@@ -2816,6 +2893,12 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4);
mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 1 << 2);
wlc_lcnphy_tssi_setup(pi);
+
+ mod_phy_reg(pi, 0x4d7, (0x1 << 0), (1 << 0));
+ mod_phy_reg(pi, 0x4d7, (0x1 << 6), (1 << 6));
+
+ wlc_lcnphy_set_bbmult(pi, 0x0);
+
wlc_phy_do_dummy_tx(pi, true, OFF);
idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0))
>> 0);
@@ -2837,6 +2920,7 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12);
+ wlc_lcnphy_set_bbmult(pi, SAVE_bbmult);
wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old);
wlc_lcnphy_set_tx_gain(pi, &old_gains);
wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl);
@@ -3050,6 +3134,11 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
wlc_lcnphy_write_table(pi, &tab);
tab.tbl_offset++;
}
+ mod_phy_reg(pi, 0x4d0, (0x1 << 0), (0) << 0);
+ mod_phy_reg(pi, 0x4d3, (0xff << 0), (0) << 0);
+ mod_phy_reg(pi, 0x4d3, (0xff << 8), (0) << 8);
+ mod_phy_reg(pi, 0x4d0, (0x1 << 4), (0) << 4);
+ mod_phy_reg(pi, 0x4d0, (0x1 << 2), (0) << 2);
mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7);
@@ -3851,7 +3940,6 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
target_gains.pad_gain = 21;
target_gains.dac_gain = 0;
wlc_lcnphy_set_tx_gain(pi, &target_gains);
- wlc_lcnphy_set_tx_pwr_by_index(pi, 16);
if (LCNREV_IS(pi->pubpi.phy_rev, 1) || pi_lcn->lcnphy_hw_iqcal_en) {
@@ -3862,6 +3950,7 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
lcnphy_recal ? LCNPHY_CAL_RECAL :
LCNPHY_CAL_FULL), false);
} else {
+ wlc_lcnphy_set_tx_pwr_by_index(pi, 16);
wlc_lcnphy_tx_iqlo_soft_cal_full(pi);
}
@@ -4283,20 +4372,20 @@ wlc_lcnphy_load_tx_gain_table(struct brcms_phy *pi,
u16 pa_gain;
u16 gm_gain;
- if (CHSPEC_IS5G(pi->radio_chanspec))
- pa_gain = 0x70;
- else
- pa_gain = 0x70;
-
if (pi->sh->boardflags & BFL_FEM)
pa_gain = 0x10;
+ else
+ pa_gain = 0x60;
tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
tab.tbl_width = 32;
tab.tbl_len = 1;
tab.tbl_ptr = &val;
+ /* fixed gm_gain value for iPA */
+ gm_gain = 15;
for (j = 0; j < 128; j++) {
- gm_gain = gain_table[j].gm;
+ if (pi->sh->boardflags & BFL_FEM)
+ gm_gain = gain_table[j].gm;
val = (((u32) pa_gain << 24) |
(gain_table[j].pad << 16) |
(gain_table[j].pga << 8) | gm_gain);
@@ -4507,7 +4596,10 @@ static void wlc_radio_2064_init(struct brcms_phy *pi)
write_phy_reg(pi, 0x4ea, 0x4688);
- mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0);
+ if (pi->sh->boardflags & BFL_FEM)
+ mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0);
+ else
+ mod_phy_reg(pi, 0x4eb, (0x7 << 0), 3 << 0);
mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6);
@@ -4518,6 +4610,13 @@ static void wlc_radio_2064_init(struct brcms_phy *pi)
wlc_lcnphy_rcal(pi);
wlc_lcnphy_rc_cal(pi);
+
+ if (!(pi->sh->boardflags & BFL_FEM)) {
+ write_radio_reg(pi, RADIO_2064_REG032, 0x6f);
+ write_radio_reg(pi, RADIO_2064_REG033, 0x19);
+ write_radio_reg(pi, RADIO_2064_REG039, 0xe);
+ }
+
}
static void wlc_lcnphy_radio_init(struct brcms_phy *pi)
@@ -4530,6 +4629,7 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
uint idx;
u8 phybw40;
struct phytbl_info tab;
+ const struct phytbl_info *tb;
u32 val;
phybw40 = CHSPEC_IS40(pi->radio_chanspec);
@@ -4547,22 +4647,20 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
wlc_lcnphy_write_table(pi, &tab);
}
- tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
- tab.tbl_width = 16;
- tab.tbl_ptr = &val;
- tab.tbl_len = 1;
-
- val = 114;
- tab.tbl_offset = 0;
- wlc_lcnphy_write_table(pi, &tab);
+ if (!(pi->sh->boardflags & BFL_FEM)) {
+ tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
+ tab.tbl_width = 16;
+ tab.tbl_ptr = &val;
+ tab.tbl_len = 1;
- val = 130;
- tab.tbl_offset = 1;
- wlc_lcnphy_write_table(pi, &tab);
+ val = 150;
+ tab.tbl_offset = 0;
+ wlc_lcnphy_write_table(pi, &tab);
- val = 6;
- tab.tbl_offset = 8;
- wlc_lcnphy_write_table(pi, &tab);
+ val = 220;
+ tab.tbl_offset = 1;
+ wlc_lcnphy_write_table(pi, &tab);
+ }
if (CHSPEC_IS2G(pi->radio_chanspec)) {
if (pi->sh->boardflags & BFL_FEM)
@@ -4576,7 +4674,6 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
}
if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
- const struct phytbl_info *tb;
int l;
if (CHSPEC_IS2G(pi->radio_chanspec)) {
@@ -4597,21 +4694,22 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
wlc_lcnphy_write_table(pi, &tb[idx]);
}
- if ((pi->sh->boardflags & BFL_FEM)
- && !(pi->sh->boardflags & BFL_FEM_BT))
- wlc_lcnphy_write_table(pi, &dot11lcn_sw_ctrl_tbl_info_4313_epa);
- else if (pi->sh->boardflags & BFL_FEM_BT) {
- if (pi->sh->boardrev < 0x1250)
- wlc_lcnphy_write_table(
- pi,
- &dot11lcn_sw_ctrl_tbl_info_4313_bt_epa);
+ if (pi->sh->boardflags & BFL_FEM) {
+ if (pi->sh->boardflags & BFL_FEM_BT) {
+ if (pi->sh->boardrev < 0x1250)
+ tb = &dot11lcn_sw_ctrl_tbl_info_4313_bt_epa;
+ else
+ tb = &dot11lcn_sw_ctrl_tbl_info_4313_bt_epa_p250;
+ } else {
+ tb = &dot11lcn_sw_ctrl_tbl_info_4313_epa;
+ }
+ } else {
+ if (pi->sh->boardflags & BFL_FEM_BT)
+ tb = &dot11lcn_sw_ctrl_tbl_info_4313_bt_ipa;
else
- wlc_lcnphy_write_table(
- pi,
- &dot11lcn_sw_ctrl_tbl_info_4313_bt_epa_p250);
- } else
- wlc_lcnphy_write_table(pi, &dot11lcn_sw_ctrl_tbl_info_4313);
-
+ tb = &dot11lcn_sw_ctrl_tbl_info_4313;
+ }
+ wlc_lcnphy_write_table(pi, tb);
wlc_lcnphy_load_rfpower(pi);
wlc_lcnphy_clear_papd_comptable(pi);
@@ -4955,6 +5053,8 @@ void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec)
wlc_lcnphy_load_tx_iir_filter(pi, true, 3);
mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3);
+ if (wlc_lcnphy_tssi_based_pwr_ctrl_enabled(pi))
+ wlc_lcnphy_tssi_setup(pi);
}
void wlc_phy_detach_lcnphy(struct brcms_phy *pi)
@@ -4993,8 +5093,7 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
if (!wlc_phy_txpwr_srom_read_lcnphy(pi))
return false;
- if ((pi->sh->boardflags & BFL_FEM) &&
- (LCNREV_IS(pi->pubpi.phy_rev, 1))) {
+ if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
if (pi_lcn->lcnphy_tempsense_option == 3) {
pi->hwpwrctrl = true;
pi->hwpwrctrl_capable = true;
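The restructured rx IQ cal replaces the old "walk tia_gain down until received power drops below a fixed threshold" loop with a search over (lna2, tia, biq1) gain triples, using wlc_lcnphy_rx_iq_cal_gain() as the acceptance test: the tone is transmitted at half amplitude (40 >> 1) and then at full amplitude, and a gain setting is accepted only if the full-amplitude power lands inside a window around the ideal 4x of the half-amplitude power. Since (x << 2) + x is 5x, the window is (2*p_low, 5*p_low). The acceptance test in isolation:

/* doubling the tone amplitude should roughly quadruple the measured
 * power if the rx chain is still linear at this gain setting
 */
static bool gain_in_linear_range(u32 p_low, u32 p_high)
{
	u32 lo = p_low << 1;		/* 2 * p_low */
	u32 hi = (p_low << 2) + p_low;	/* 5 * p_low */

	return p_high > lo && p_high < hi;
}

The first triple that passes feeds wlc_lcnphy_calc_rx_iq_comp(); the goto stop_tone simply breaks out of all three nested loops.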
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
index 622c01ca72c5..d7fa312214f3 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
@@ -1507,117 +1507,103 @@ static const u32 dot11lcn_gain_tbl_5G[] = {
const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev0[] = {
{&dot11lcn_gain_tbl_rev0,
- sizeof(dot11lcn_gain_tbl_rev0) / sizeof(dot11lcn_gain_tbl_rev0[0]), 18,
+ ARRAY_SIZE(dot11lcn_gain_tbl_rev0), 18,
0, 32}
,
{&dot11lcn_aux_gain_idx_tbl_rev0,
- sizeof(dot11lcn_aux_gain_idx_tbl_rev0) /
- sizeof(dot11lcn_aux_gain_idx_tbl_rev0[0]), 14, 0, 16}
+ ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_rev0), 14, 0, 16}
,
{&dot11lcn_gain_idx_tbl_rev0,
- sizeof(dot11lcn_gain_idx_tbl_rev0) /
- sizeof(dot11lcn_gain_idx_tbl_rev0[0]), 13, 0, 32}
+ ARRAY_SIZE(dot11lcn_gain_idx_tbl_rev0), 13, 0, 32}
,
};
static const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev1[] = {
{&dot11lcn_gain_tbl_rev1,
- sizeof(dot11lcn_gain_tbl_rev1) / sizeof(dot11lcn_gain_tbl_rev1[0]), 18,
+ ARRAY_SIZE(dot11lcn_gain_tbl_rev1), 18,
0, 32}
,
{&dot11lcn_aux_gain_idx_tbl_rev0,
- sizeof(dot11lcn_aux_gain_idx_tbl_rev0) /
- sizeof(dot11lcn_aux_gain_idx_tbl_rev0[0]), 14, 0, 16}
+ ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_rev0), 14, 0, 16}
,
{&dot11lcn_gain_idx_tbl_rev0,
- sizeof(dot11lcn_gain_idx_tbl_rev0) /
- sizeof(dot11lcn_gain_idx_tbl_rev0[0]), 13, 0, 32}
+ ARRAY_SIZE(dot11lcn_gain_idx_tbl_rev0), 13, 0, 32}
,
};
const struct phytbl_info dot11lcnphytbl_rx_gain_info_2G_rev2[] = {
{&dot11lcn_gain_tbl_2G,
- sizeof(dot11lcn_gain_tbl_2G) / sizeof(dot11lcn_gain_tbl_2G[0]), 18, 0,
+ ARRAY_SIZE(dot11lcn_gain_tbl_2G), 18, 0,
32}
,
{&dot11lcn_aux_gain_idx_tbl_2G,
- sizeof(dot11lcn_aux_gain_idx_tbl_2G) /
- sizeof(dot11lcn_aux_gain_idx_tbl_2G[0]), 14, 0, 16}
+ ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_2G), 14, 0, 16}
,
{&dot11lcn_gain_idx_tbl_2G,
- sizeof(dot11lcn_gain_idx_tbl_2G) / sizeof(dot11lcn_gain_idx_tbl_2G[0]),
+ ARRAY_SIZE(dot11lcn_gain_idx_tbl_2G),
13, 0, 32}
,
{&dot11lcn_gain_val_tbl_2G,
- sizeof(dot11lcn_gain_val_tbl_2G) / sizeof(dot11lcn_gain_val_tbl_2G[0]),
+ ARRAY_SIZE(dot11lcn_gain_val_tbl_2G),
17, 0, 8}
};
const struct phytbl_info dot11lcnphytbl_rx_gain_info_5G_rev2[] = {
{&dot11lcn_gain_tbl_5G,
- sizeof(dot11lcn_gain_tbl_5G) / sizeof(dot11lcn_gain_tbl_5G[0]), 18, 0,
+ ARRAY_SIZE(dot11lcn_gain_tbl_5G), 18, 0,
32}
,
{&dot11lcn_aux_gain_idx_tbl_5G,
- sizeof(dot11lcn_aux_gain_idx_tbl_5G) /
- sizeof(dot11lcn_aux_gain_idx_tbl_5G[0]), 14, 0, 16}
+ ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_5G), 14, 0, 16}
,
{&dot11lcn_gain_idx_tbl_5G,
- sizeof(dot11lcn_gain_idx_tbl_5G) / sizeof(dot11lcn_gain_idx_tbl_5G[0]),
+ ARRAY_SIZE(dot11lcn_gain_idx_tbl_5G),
13, 0, 32}
,
{&dot11lcn_gain_val_tbl_5G,
- sizeof(dot11lcn_gain_val_tbl_5G) / sizeof(dot11lcn_gain_val_tbl_5G[0]),
+ ARRAY_SIZE(dot11lcn_gain_val_tbl_5G),
17, 0, 8}
};
const struct phytbl_info dot11lcnphytbl_rx_gain_info_extlna_2G_rev2[] = {
{&dot11lcn_gain_tbl_extlna_2G,
- sizeof(dot11lcn_gain_tbl_extlna_2G) /
- sizeof(dot11lcn_gain_tbl_extlna_2G[0]), 18, 0, 32}
+ ARRAY_SIZE(dot11lcn_gain_tbl_extlna_2G), 18, 0, 32}
,
{&dot11lcn_aux_gain_idx_tbl_extlna_2G,
- sizeof(dot11lcn_aux_gain_idx_tbl_extlna_2G) /
- sizeof(dot11lcn_aux_gain_idx_tbl_extlna_2G[0]), 14, 0, 16}
+ ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_extlna_2G), 14, 0, 16}
,
{&dot11lcn_gain_idx_tbl_extlna_2G,
- sizeof(dot11lcn_gain_idx_tbl_extlna_2G) /
- sizeof(dot11lcn_gain_idx_tbl_extlna_2G[0]), 13, 0, 32}
+ ARRAY_SIZE(dot11lcn_gain_idx_tbl_extlna_2G), 13, 0, 32}
,
{&dot11lcn_gain_val_tbl_extlna_2G,
- sizeof(dot11lcn_gain_val_tbl_extlna_2G) /
- sizeof(dot11lcn_gain_val_tbl_extlna_2G[0]), 17, 0, 8}
+ ARRAY_SIZE(dot11lcn_gain_val_tbl_extlna_2G), 17, 0, 8}
};
const struct phytbl_info dot11lcnphytbl_rx_gain_info_extlna_5G_rev2[] = {
{&dot11lcn_gain_tbl_5G,
- sizeof(dot11lcn_gain_tbl_5G) / sizeof(dot11lcn_gain_tbl_5G[0]), 18, 0,
+ ARRAY_SIZE(dot11lcn_gain_tbl_5G), 18, 0,
32}
,
{&dot11lcn_aux_gain_idx_tbl_5G,
- sizeof(dot11lcn_aux_gain_idx_tbl_5G) /
- sizeof(dot11lcn_aux_gain_idx_tbl_5G[0]), 14, 0, 16}
+ ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_5G), 14, 0, 16}
,
{&dot11lcn_gain_idx_tbl_5G,
- sizeof(dot11lcn_gain_idx_tbl_5G) / sizeof(dot11lcn_gain_idx_tbl_5G[0]),
+ ARRAY_SIZE(dot11lcn_gain_idx_tbl_5G),
13, 0, 32}
,
{&dot11lcn_gain_val_tbl_5G,
- sizeof(dot11lcn_gain_val_tbl_5G) / sizeof(dot11lcn_gain_val_tbl_5G[0]),
+ ARRAY_SIZE(dot11lcn_gain_val_tbl_5G),
17, 0, 8}
};
const u32 dot11lcnphytbl_rx_gain_info_sz_rev0 =
- sizeof(dot11lcnphytbl_rx_gain_info_rev0) /
- sizeof(dot11lcnphytbl_rx_gain_info_rev0[0]);
+ ARRAY_SIZE(dot11lcnphytbl_rx_gain_info_rev0);
const u32 dot11lcnphytbl_rx_gain_info_2G_rev2_sz =
- sizeof(dot11lcnphytbl_rx_gain_info_2G_rev2) /
- sizeof(dot11lcnphytbl_rx_gain_info_2G_rev2[0]);
+ ARRAY_SIZE(dot11lcnphytbl_rx_gain_info_2G_rev2);
const u32 dot11lcnphytbl_rx_gain_info_5G_rev2_sz =
- sizeof(dot11lcnphytbl_rx_gain_info_5G_rev2) /
- sizeof(dot11lcnphytbl_rx_gain_info_5G_rev2[0]);
+ ARRAY_SIZE(dot11lcnphytbl_rx_gain_info_5G_rev2);
static const u16 dot11lcn_min_sig_sq_tbl_rev0[] = {
0x014d,
@@ -2058,6 +2044,73 @@ static const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = {
0x0005,
};
+static const u16 dot11lcn_sw_ctrl_tbl_4313_ipa_rev0_combo[] = {
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+};
+
static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = {
0x0004,
0x0004,
@@ -2771,89 +2824,79 @@ static const u32 dot11lcn_papd_compdelta_tbl_rev0[] = {
const struct phytbl_info dot11lcnphytbl_info_rev0[] = {
{&dot11lcn_min_sig_sq_tbl_rev0,
- sizeof(dot11lcn_min_sig_sq_tbl_rev0) /
- sizeof(dot11lcn_min_sig_sq_tbl_rev0[0]), 2, 0, 16}
+ ARRAY_SIZE(dot11lcn_min_sig_sq_tbl_rev0), 2, 0, 16}
,
{&dot11lcn_noise_scale_tbl_rev0,
- sizeof(dot11lcn_noise_scale_tbl_rev0) /
- sizeof(dot11lcn_noise_scale_tbl_rev0[0]), 1, 0, 16}
+ ARRAY_SIZE(dot11lcn_noise_scale_tbl_rev0), 1, 0, 16}
,
{&dot11lcn_fltr_ctrl_tbl_rev0,
- sizeof(dot11lcn_fltr_ctrl_tbl_rev0) /
- sizeof(dot11lcn_fltr_ctrl_tbl_rev0[0]), 11, 0, 32}
+ ARRAY_SIZE(dot11lcn_fltr_ctrl_tbl_rev0), 11, 0, 32}
,
{&dot11lcn_ps_ctrl_tbl_rev0,
- sizeof(dot11lcn_ps_ctrl_tbl_rev0) /
- sizeof(dot11lcn_ps_ctrl_tbl_rev0[0]), 12, 0, 32}
+ ARRAY_SIZE(dot11lcn_ps_ctrl_tbl_rev0), 12, 0, 32}
,
{&dot11lcn_gain_idx_tbl_rev0,
- sizeof(dot11lcn_gain_idx_tbl_rev0) /
- sizeof(dot11lcn_gain_idx_tbl_rev0[0]), 13, 0, 32}
+ ARRAY_SIZE(dot11lcn_gain_idx_tbl_rev0), 13, 0, 32}
,
{&dot11lcn_aux_gain_idx_tbl_rev0,
- sizeof(dot11lcn_aux_gain_idx_tbl_rev0) /
- sizeof(dot11lcn_aux_gain_idx_tbl_rev0[0]), 14, 0, 16}
+ ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_rev0), 14, 0, 16}
,
{&dot11lcn_sw_ctrl_tbl_rev0,
- sizeof(dot11lcn_sw_ctrl_tbl_rev0) /
- sizeof(dot11lcn_sw_ctrl_tbl_rev0[0]), 15, 0, 16}
+ ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_rev0), 15, 0, 16}
,
{&dot11lcn_nf_table_rev0,
- sizeof(dot11lcn_nf_table_rev0) / sizeof(dot11lcn_nf_table_rev0[0]), 16,
+ ARRAY_SIZE(dot11lcn_nf_table_rev0), 16,
0, 8}
,
{&dot11lcn_gain_val_tbl_rev0,
- sizeof(dot11lcn_gain_val_tbl_rev0) /
- sizeof(dot11lcn_gain_val_tbl_rev0[0]), 17, 0, 8}
+ ARRAY_SIZE(dot11lcn_gain_val_tbl_rev0), 17, 0, 8}
,
{&dot11lcn_gain_tbl_rev0,
- sizeof(dot11lcn_gain_tbl_rev0) / sizeof(dot11lcn_gain_tbl_rev0[0]), 18,
+ ARRAY_SIZE(dot11lcn_gain_tbl_rev0), 18,
0, 32}
,
{&dot11lcn_spur_tbl_rev0,
- sizeof(dot11lcn_spur_tbl_rev0) / sizeof(dot11lcn_spur_tbl_rev0[0]), 20,
+ ARRAY_SIZE(dot11lcn_spur_tbl_rev0), 20,
0, 8}
,
{&dot11lcn_unsup_mcs_tbl_rev0,
- sizeof(dot11lcn_unsup_mcs_tbl_rev0) /
- sizeof(dot11lcn_unsup_mcs_tbl_rev0[0]), 23, 0, 16}
+ ARRAY_SIZE(dot11lcn_unsup_mcs_tbl_rev0), 23, 0, 16}
,
{&dot11lcn_iq_local_tbl_rev0,
- sizeof(dot11lcn_iq_local_tbl_rev0) /
- sizeof(dot11lcn_iq_local_tbl_rev0[0]), 0, 0, 16}
+ ARRAY_SIZE(dot11lcn_iq_local_tbl_rev0), 0, 0, 16}
,
{&dot11lcn_papd_compdelta_tbl_rev0,
- sizeof(dot11lcn_papd_compdelta_tbl_rev0) /
- sizeof(dot11lcn_papd_compdelta_tbl_rev0[0]), 24, 0, 32}
+ ARRAY_SIZE(dot11lcn_papd_compdelta_tbl_rev0), 24, 0, 32}
,
};
const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313 = {
&dot11lcn_sw_ctrl_tbl_4313_rev0,
- sizeof(dot11lcn_sw_ctrl_tbl_4313_rev0) /
- sizeof(dot11lcn_sw_ctrl_tbl_4313_rev0[0]), 15, 0, 16
+ ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_rev0), 15, 0, 16
+};
+
+const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_ipa = {
+ &dot11lcn_sw_ctrl_tbl_4313_ipa_rev0_combo,
+ ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_ipa_rev0_combo), 15, 0, 16
};
const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_epa = {
&dot11lcn_sw_ctrl_tbl_4313_epa_rev0,
- sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0) /
- sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0[0]), 15, 0, 16
+ ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_epa_rev0), 15, 0, 16
};
const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_epa = {
&dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo,
- sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo) /
- sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo[0]), 15, 0, 16
+ ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo), 15, 0, 16
};
const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_epa_p250 = {
&dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0,
- sizeof(dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0) /
- sizeof(dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0[0]), 15, 0, 16
+ ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0), 15, 0, 16
};
const u32 dot11lcnphytbl_info_sz_rev0 =
- sizeof(dot11lcnphytbl_info_rev0) / sizeof(dot11lcnphytbl_info_rev0[0]);
+ ARRAY_SIZE(dot11lcnphytbl_info_rev0);
const struct lcnphy_tx_gain_tbl_entry
dot11lcnphy_2GHz_extPA_gaintable_rev0[128] = {
@@ -2988,134 +3031,134 @@ dot11lcnphy_2GHz_extPA_gaintable_rev0[128] = {
};
const struct lcnphy_tx_gain_tbl_entry dot11lcnphy_2GHz_gaintable_rev0[128] = {
- {7, 0, 31, 0, 72},
- {7, 0, 31, 0, 70},
- {7, 0, 31, 0, 68},
- {7, 0, 30, 0, 67},
- {7, 0, 29, 0, 68},
- {7, 0, 28, 0, 68},
- {7, 0, 27, 0, 69},
- {7, 0, 26, 0, 70},
- {7, 0, 25, 0, 70},
- {7, 0, 24, 0, 71},
- {7, 0, 23, 0, 72},
- {7, 0, 23, 0, 70},
- {7, 0, 22, 0, 71},
- {7, 0, 21, 0, 72},
- {7, 0, 21, 0, 70},
- {7, 0, 21, 0, 68},
- {7, 0, 21, 0, 66},
- {7, 0, 21, 0, 64},
- {7, 0, 21, 0, 63},
- {7, 0, 20, 0, 64},
- {7, 0, 19, 0, 65},
- {7, 0, 19, 0, 64},
- {7, 0, 18, 0, 65},
- {7, 0, 18, 0, 64},
- {7, 0, 17, 0, 65},
- {7, 0, 17, 0, 64},
- {7, 0, 16, 0, 65},
- {7, 0, 16, 0, 64},
- {7, 0, 16, 0, 62},
- {7, 0, 16, 0, 60},
- {7, 0, 16, 0, 58},
- {7, 0, 15, 0, 61},
- {7, 0, 15, 0, 59},
- {7, 0, 14, 0, 61},
- {7, 0, 14, 0, 60},
- {7, 0, 14, 0, 58},
- {7, 0, 13, 0, 60},
- {7, 0, 13, 0, 59},
- {7, 0, 12, 0, 62},
- {7, 0, 12, 0, 60},
- {7, 0, 12, 0, 58},
- {7, 0, 11, 0, 62},
- {7, 0, 11, 0, 60},
- {7, 0, 11, 0, 59},
- {7, 0, 11, 0, 57},
- {7, 0, 10, 0, 61},
- {7, 0, 10, 0, 59},
- {7, 0, 10, 0, 57},
- {7, 0, 9, 0, 62},
- {7, 0, 9, 0, 60},
- {7, 0, 9, 0, 58},
- {7, 0, 9, 0, 57},
- {7, 0, 8, 0, 62},
- {7, 0, 8, 0, 60},
- {7, 0, 8, 0, 58},
- {7, 0, 8, 0, 57},
- {7, 0, 8, 0, 55},
- {7, 0, 7, 0, 61},
+ {15, 0, 31, 0, 72},
+ {15, 0, 31, 0, 70},
+ {15, 0, 31, 0, 68},
+ {15, 0, 30, 0, 68},
+ {15, 0, 29, 0, 69},
+ {15, 0, 28, 0, 69},
+ {15, 0, 27, 0, 70},
+ {15, 0, 26, 0, 70},
+ {15, 0, 25, 0, 71},
+ {15, 0, 24, 0, 72},
+ {15, 0, 23, 0, 73},
+ {15, 0, 23, 0, 71},
+ {15, 0, 22, 0, 72},
+ {15, 0, 21, 0, 73},
+ {15, 0, 21, 0, 71},
+ {15, 0, 21, 0, 69},
+ {15, 0, 21, 0, 67},
+ {15, 0, 21, 0, 65},
+ {15, 0, 21, 0, 63},
+ {15, 0, 20, 0, 65},
+ {15, 0, 19, 0, 66},
+ {15, 0, 19, 0, 64},
+ {15, 0, 18, 0, 66},
+ {15, 0, 18, 0, 64},
+ {15, 0, 17, 0, 66},
+ {15, 0, 17, 0, 64},
+ {15, 0, 16, 0, 66},
+ {15, 0, 16, 0, 64},
+ {15, 0, 16, 0, 62},
+ {15, 0, 16, 0, 61},
+ {15, 0, 16, 0, 59},
+ {15, 0, 15, 0, 61},
+ {15, 0, 15, 0, 59},
+ {15, 0, 14, 0, 62},
+ {15, 0, 14, 0, 60},
+ {15, 0, 14, 0, 58},
+ {15, 0, 13, 0, 61},
+ {15, 0, 13, 0, 59},
+ {15, 0, 12, 0, 62},
+ {15, 0, 12, 0, 61},
+ {15, 0, 12, 0, 59},
+ {15, 0, 11, 0, 62},
+ {15, 0, 11, 0, 61},
+ {15, 0, 11, 0, 59},
+ {15, 0, 11, 0, 57},
+ {15, 0, 10, 0, 61},
+ {15, 0, 10, 0, 59},
+ {15, 0, 10, 0, 58},
+ {15, 0, 9, 0, 62},
+ {15, 0, 9, 0, 61},
+ {15, 0, 9, 0, 59},
+ {15, 0, 9, 0, 57},
+ {15, 0, 8, 0, 62},
+ {15, 0, 8, 0, 61},
+ {15, 0, 8, 0, 59},
+ {15, 0, 8, 0, 57},
+ {15, 0, 8, 0, 56},
+ {15, 0, 8, 0, 54},
+ {15, 0, 8, 0, 53},
+ {15, 0, 8, 0, 51},
+ {15, 0, 8, 0, 50},
+ {7, 0, 7, 0, 69},
+ {7, 0, 7, 0, 67},
+ {7, 0, 7, 0, 65},
+ {7, 0, 7, 0, 64},
+ {7, 0, 7, 0, 62},
{7, 0, 7, 0, 60},
{7, 0, 7, 0, 58},
- {7, 0, 7, 0, 56},
+ {7, 0, 7, 0, 57},
{7, 0, 7, 0, 55},
{7, 0, 6, 0, 62},
- {7, 0, 6, 0, 60},
- {7, 0, 6, 0, 58},
+ {7, 0, 6, 0, 61},
+ {7, 0, 6, 0, 59},
{7, 0, 6, 0, 57},
- {7, 0, 6, 0, 55},
+ {7, 0, 6, 0, 56},
{7, 0, 6, 0, 54},
- {7, 0, 6, 0, 52},
+ {7, 0, 6, 0, 53},
{7, 0, 5, 0, 61},
- {7, 0, 5, 0, 59},
- {7, 0, 5, 0, 57},
+ {7, 0, 5, 0, 60},
+ {7, 0, 5, 0, 58},
{7, 0, 5, 0, 56},
- {7, 0, 5, 0, 54},
+ {7, 0, 5, 0, 55},
{7, 0, 5, 0, 53},
- {7, 0, 5, 0, 51},
- {7, 0, 4, 0, 62},
- {7, 0, 4, 0, 60},
- {7, 0, 4, 0, 58},
+ {7, 0, 5, 0, 52},
+ {7, 0, 5, 0, 50},
+ {7, 0, 5, 0, 49},
+ {7, 0, 5, 0, 47},
{7, 0, 4, 0, 57},
- {7, 0, 4, 0, 55},
+ {7, 0, 4, 0, 56},
{7, 0, 4, 0, 54},
- {7, 0, 4, 0, 52},
+ {7, 0, 4, 0, 53},
{7, 0, 4, 0, 51},
- {7, 0, 4, 0, 49},
+ {7, 0, 4, 0, 50},
{7, 0, 4, 0, 48},
+ {7, 0, 4, 0, 47},
{7, 0, 4, 0, 46},
- {7, 0, 3, 0, 60},
- {7, 0, 3, 0, 58},
- {7, 0, 3, 0, 57},
- {7, 0, 3, 0, 55},
- {7, 0, 3, 0, 54},
- {7, 0, 3, 0, 52},
+ {7, 0, 4, 0, 44},
+ {7, 0, 4, 0, 43},
+ {7, 0, 4, 0, 42},
+ {7, 0, 4, 0, 41},
+ {7, 0, 4, 0, 40},
{7, 0, 3, 0, 51},
- {7, 0, 3, 0, 49},
+ {7, 0, 3, 0, 50},
{7, 0, 3, 0, 48},
+ {7, 0, 3, 0, 47},
{7, 0, 3, 0, 46},
- {7, 0, 3, 0, 45},
{7, 0, 3, 0, 44},
{7, 0, 3, 0, 43},
+ {7, 0, 3, 0, 42},
{7, 0, 3, 0, 41},
- {7, 0, 2, 0, 61},
- {7, 0, 2, 0, 59},
- {7, 0, 2, 0, 57},
- {7, 0, 2, 0, 56},
- {7, 0, 2, 0, 54},
- {7, 0, 2, 0, 53},
- {7, 0, 2, 0, 51},
- {7, 0, 2, 0, 50},
- {7, 0, 2, 0, 48},
- {7, 0, 2, 0, 47},
- {7, 0, 2, 0, 46},
- {7, 0, 2, 0, 44},
- {7, 0, 2, 0, 43},
- {7, 0, 2, 0, 42},
- {7, 0, 2, 0, 41},
- {7, 0, 2, 0, 39},
- {7, 0, 2, 0, 38},
- {7, 0, 2, 0, 37},
- {7, 0, 2, 0, 36},
- {7, 0, 2, 0, 35},
- {7, 0, 2, 0, 34},
- {7, 0, 2, 0, 33},
- {7, 0, 2, 0, 32},
- {7, 0, 1, 0, 63},
- {7, 0, 1, 0, 61},
- {7, 0, 1, 0, 59},
- {7, 0, 1, 0, 57},
+ {3, 0, 3, 0, 56},
+ {3, 0, 3, 0, 54},
+ {3, 0, 3, 0, 53},
+ {3, 0, 3, 0, 51},
+ {3, 0, 3, 0, 50},
+ {3, 0, 3, 0, 48},
+ {3, 0, 3, 0, 47},
+ {3, 0, 3, 0, 46},
+ {3, 0, 3, 0, 44},
+ {3, 0, 3, 0, 43},
+ {3, 0, 3, 0, 42},
+ {3, 0, 3, 0, 41},
+ {3, 0, 3, 0, 39},
+ {3, 0, 3, 0, 38},
+ {3, 0, 3, 0, 37},
+ {3, 0, 3, 0, 36},
+ {3, 0, 3, 0, 35},
+ {3, 0, 3, 0, 34},
};
const struct lcnphy_tx_gain_tbl_entry dot11lcnphy_5GHz_gaintable_rev0[128] = {
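The phytbl_lcn.c churn above is largely mechanical: every open-coded sizeof(tbl)/sizeof(tbl[0]) becomes ARRAY_SIZE(). Beyond brevity, the kernel macro is safer, because it refuses to compile when handed a pointer rather than an actual array. As defined in include/linux/kernel.h (sketch):

/* __must_be_array() evaluates to 0 for a real array and breaks the
 * build if arr has decayed to a pointer
 */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))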
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.h
index 5f75e16bf5a7..489422a36085 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.h
@@ -20,6 +20,7 @@
extern const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev0[];
extern const u32 dot11lcnphytbl_rx_gain_info_sz_rev0;
extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313;
+extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_ipa;
extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_epa;
extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_epa_combo;
extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_epa;
diff --git a/drivers/net/wireless/cw1200/bh.c b/drivers/net/wireless/cw1200/bh.c
index c1ec2a4dd8c0..92d299aa257c 100644
--- a/drivers/net/wireless/cw1200/bh.c
+++ b/drivers/net/wireless/cw1200/bh.c
@@ -465,8 +465,8 @@ static int cw1200_bh(void *arg)
(rx || tx || term || suspend || priv->bh_error);
}), status);
- pr_debug("[BH] - rx: %d, tx: %d, term: %d, suspend: %d, status: %ld\n",
- rx, tx, term, suspend, status);
+ pr_debug("[BH] - rx: %d, tx: %d, term: %d, bh_err: %d, suspend: %d, status: %ld\n",
+ rx, tx, term, priv->bh_error, suspend, status);
/* Did an error occur? */
if ((status < 0 && status != -ERESTARTSYS) ||
diff --git a/drivers/net/wireless/cw1200/main.c b/drivers/net/wireless/cw1200/main.c
index 3724e739cbf4..090f01577dd2 100644
--- a/drivers/net/wireless/cw1200/main.c
+++ b/drivers/net/wireless/cw1200/main.c
@@ -507,7 +507,7 @@ u32 cw1200_dpll_from_clk(u16 clk_khz)
case 0xCB20: /* 52000 KHz */
return 0x07627091;
default:
- pr_err("Unknown Refclk freq (0x%04x), using 2600KHz\n",
+ pr_err("Unknown Refclk freq (0x%04x), using 26000KHz\n",
clk_khz);
return 0x0EC4F121;
}
diff --git a/drivers/net/wireless/cw1200/wsm.h b/drivers/net/wireless/cw1200/wsm.h
index 7afc613c3706..48086e849515 100644
--- a/drivers/net/wireless/cw1200/wsm.h
+++ b/drivers/net/wireless/cw1200/wsm.h
@@ -832,7 +832,7 @@ struct wsm_tx {
/* the MSDU shall be terminated. Overrides the global */
/* dot11MaxTransmitMsduLifeTime setting [optional] */
/* Device will set the default value if this is 0. */
- u32 expire_time;
+ __le32 expire_time;
/* WSM_HT_TX_... */
__le32 ht_tx_parameters;
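Turning expire_time from u32 into __le32 is an annotation fix rather than a behavior change: the field crosses to the device in little-endian order, and the sparse checker (make C=1) can now catch any assignment that bypasses the byte-order helpers. Usage sketch:

struct wsm_tx tx;
u32 lifetime_ms = 500;

tx.expire_time = cpu_to_le32(lifetime_ms);	/* host -> device order */
lifetime_ms = le32_to_cpu(tx.expire_time);	/* device -> host order */
/* tx.expire_time = lifetime_ms;  <- sparse would now warn here */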
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index 6307a4e36c85..c275dc1623fe 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -1425,7 +1425,7 @@ static int prism2_hw_init2(struct net_device *dev, int initial)
}
list_for_each(ptr, &local->hostap_interfaces) {
iface = list_entry(ptr, struct hostap_interface, list);
- memcpy(iface->dev->dev_addr, dev->dev_addr, ETH_ALEN);
+ eth_hw_addr_inherit(iface->dev, dev);
}
} else if (local->fw_ap)
prism2_check_sta_fw_version(local);
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 15f0fad39add..a1257c92afc4 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -66,7 +66,7 @@ struct net_device * hostap_add_interface(struct local_info *local,
list_add(&iface->list, &local->hostap_interfaces);
mdev = local->dev;
- memcpy(dev->dev_addr, mdev->dev_addr, ETH_ALEN);
+ eth_hw_addr_inherit(dev, mdev);
dev->base_addr = mdev->base_addr;
dev->irq = mdev->irq;
dev->mem_start = mdev->mem_start;
@@ -667,7 +667,7 @@ static int prism2_open(struct net_device *dev)
if (local->no_pri) {
printk(KERN_DEBUG "%s: could not set interface UP - no PRI "
"f/w\n", dev->name);
- return 1;
+ return -ENODEV;
}
if ((local->func->card_present && !local->func->card_present(local)) ||
@@ -682,7 +682,7 @@ static int prism2_open(struct net_device *dev)
printk(KERN_WARNING "%s: could not enable MAC port\n",
dev->name);
prism2_close(dev);
- return 1;
+ return -ENODEV;
}
if (!local->dev_enabled)
prism2_callback(local, PRISM2_CALLBACK_ENABLE);
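
Both hostap hunks above replace an open-coded memcpy() with
eth_hw_addr_inherit(), which also propagates the parent device's
addr_assign_type so userspace can tell the address was inherited rather
than burned in. The helper is roughly equivalent to this sketch (see
linux/etherdevice.h for the authoritative definition):

	static inline void eth_hw_addr_inherit(struct net_device *dst,
					       struct net_device *src)
	{
		dst->addr_assign_type = src->addr_assign_type;
		memcpy(dst->dev_addr, src->dev_addr, ETH_ALEN);
	}
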
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
index fe31590a51b2..aea667b430c3 100644
--- a/drivers/net/wireless/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -887,6 +887,7 @@ il3945_remove_debugfs(void *il, void *il_sta)
*/
static void
il3945_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *il_sta)
{
}
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
index c092033945cc..f09e257759d5 100644
--- a/drivers/net/wireless/iwlegacy/3945.c
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -475,6 +475,8 @@ il3945_is_network_packet(struct il_priv *il, struct ieee80211_hdr *header)
}
}
+#define SMALL_PACKET_SIZE 256
+
static void
il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
struct ieee80211_rx_status *stats)
@@ -483,14 +485,13 @@ il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
- u16 len = le16_to_cpu(rx_hdr->len);
+ u32 len = le16_to_cpu(rx_hdr->len);
struct sk_buff *skb;
__le16 fc = hdr->frame_control;
+ u32 fraglen = PAGE_SIZE << il->hw_params.rx_page_order;
/* We received data from the HW, so stop the watchdog */
- if (unlikely
- (len + IL39_RX_FRAME_SIZE >
- PAGE_SIZE << il->hw_params.rx_page_order)) {
+ if (unlikely(len + IL39_RX_FRAME_SIZE > fraglen)) {
D_DROP("Corruption detected!\n");
return;
}
@@ -506,26 +507,32 @@ il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
D_INFO("Woke queues - frame received on passive channel\n");
}
- skb = dev_alloc_skb(128);
+ skb = dev_alloc_skb(SMALL_PACKET_SIZE);
if (!skb) {
IL_ERR("dev_alloc_skb failed\n");
return;
}
if (!il3945_mod_params.sw_crypto)
- il_set_decrypted_flag(il, (struct ieee80211_hdr *)rxb_addr(rxb),
+ il_set_decrypted_flag(il, (struct ieee80211_hdr *)pkt,
le32_to_cpu(rx_end->status), stats);
- skb_add_rx_frag(skb, 0, rxb->page,
- (void *)rx_hdr->payload - (void *)pkt, len,
- len);
-
+ /* If frame is small enough to fit into skb->head, copy it
+ * and do not consume a full page
+ */
+ if (len <= SMALL_PACKET_SIZE) {
+ memcpy(skb_put(skb, len), rx_hdr->payload, len);
+ } else {
+ skb_add_rx_frag(skb, 0, rxb->page,
+ (void *)rx_hdr->payload - (void *)pkt, len,
+ fraglen);
+ il->alloc_rxb_page--;
+ rxb->page = NULL;
+ }
il_update_stats(il, false, fc, len);
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
ieee80211_rx(il->hw, skb);
- il->alloc_rxb_page--;
- rxb->page = NULL;
}
#define IL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
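
The il3945 RX path above stops consuming a full receive page per frame:
anything up to SMALL_PACKET_SIZE is copied into the skb's linear area, so
the page stays with the driver, while larger frames still attach the page
as a fragment and charge the skb's truesize with the whole page (fraglen)
rather than just len. Condensed from the hunk above (same variable names):

	if (len <= SMALL_PACKET_SIZE) {
		/* copy: cheap for small frames, RX page is not consumed */
		memcpy(skb_put(skb, len), rx_hdr->payload, len);
	} else {
		/* frag: hand the page to the skb, account for all of it */
		skb_add_rx_frag(skb, 0, rxb->page,
				(void *)rx_hdr->payload - (void *)pkt,
				len, fraglen);
		il->alloc_rxb_page--;
		rxb->page = NULL;
	}

The il4965 hunk below applies the same pattern.
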
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 7acf5ee23582..5ab50a5b48b1 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -574,9 +574,11 @@ il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
return decrypt_out;
}
+#define SMALL_PACKET_SIZE 256
+
static void
il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
- u16 len, u32 ampdu_status, struct il_rx_buf *rxb,
+ u32 len, u32 ampdu_status, struct il_rx_buf *rxb,
struct ieee80211_rx_status *stats)
{
struct sk_buff *skb;
@@ -598,21 +600,25 @@ il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
il_set_decrypted_flag(il, hdr, ampdu_status, stats))
return;
- skb = dev_alloc_skb(128);
+ skb = dev_alloc_skb(SMALL_PACKET_SIZE);
if (!skb) {
IL_ERR("dev_alloc_skb failed\n");
return;
}
- skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len,
- len);
+ if (len <= SMALL_PACKET_SIZE) {
+ memcpy(skb_put(skb, len), hdr, len);
+ } else {
+ skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb),
+ len, PAGE_SIZE << il->hw_params.rx_page_order);
+ il->alloc_rxb_page--;
+ rxb->page = NULL;
+ }
il_update_stats(il, false, fc, len);
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
ieee80211_rx(il->hw, skb);
- il->alloc_rxb_page--;
- rxb->page = NULL;
}
/* Called for N_RX (legacy ABG frames), or
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
index ed3c42a63a43..3ccbaf791b48 100644
--- a/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -2803,6 +2803,7 @@ il4965_rs_remove_debugfs(void *il, void *il_sta)
*/
static void
il4965_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *il_sta)
{
}
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index cbaa5c2c410f..3eb2102ce236 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -22,6 +22,8 @@ config IWLWIFI
Intel Wireless WiFi Link 6150BGN 2 Adapter
Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN)
Intel 2000 Series Wi-Fi Adapters
+ Intel 7260 Wi-Fi Adapter
+ Intel 3160 Wi-Fi Adapter
This driver uses the kernel's mac80211 subsystem.
@@ -46,17 +48,16 @@ config IWLDVM
depends on IWLWIFI
default IWLWIFI
help
- This is the driver supporting the DVM firmware which is
- currently the only firmware available for existing devices.
+ This is the driver that supports the DVM firmware which is
+ used by most existing devices (with the exception of 7260
+ and 3160).
config IWLMVM
tristate "Intel Wireless WiFi MVM Firmware support"
depends on IWLWIFI
help
- This is the driver supporting the MVM firmware which is
- currently only available for 7000 series devices.
-
- Say yes if you have such a device.
+ This is the driver that supports the MVM firmware which is
+ currently only available for 7260 and 3160 devices.
# don't call it _MODULE -- will confuse Kconfig/fixdep/...
config IWLWIFI_OPMODE_MODULAR
@@ -127,20 +128,3 @@ config IWLWIFI_DEVICE_TRACING
If unsure, say Y so we can help you better when problems
occur.
endmenu
-
-config IWLWIFI_P2P
- def_bool y
- bool "iwlwifi experimental P2P support"
- depends on IWLWIFI
- help
- This option enables experimental P2P support for some devices
- based on microcode support. Since P2P support is still under
- development, this option may even enable it for some devices
- now that turn out to not support it in the future due to
- microcode restrictions.
-
- To determine if your microcode supports the experimental P2P
- offered by this option, check if the driver advertises AP
- support when it is loaded.
-
- Say Y only if you want to experiment with P2P.
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 18355110deff..f2a86ffc3b4c 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -106,7 +106,6 @@ extern const struct iwl_dvm_cfg iwl_dvm_6030_cfg;
#define STATUS_CHANNEL_SWITCH_PENDING 11
#define STATUS_SCAN_COMPLETE 12
#define STATUS_POWER_PMI 13
-#define STATUS_SCAN_ROC_EXPIRED 14
struct iwl_ucode_capabilities;
@@ -250,7 +249,6 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
/* scan */
void iwlagn_post_scan(struct iwl_priv *priv);
-void iwlagn_disable_roc(struct iwl_priv *priv);
int iwl_force_rf_reset(struct iwl_priv *priv, bool external);
void iwl_init_scan_params(struct iwl_priv *priv);
int iwl_scan_cancel(struct iwl_priv *priv);
@@ -265,10 +263,6 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
enum iwl_scan_type scan_type,
enum ieee80211_band band);
-void iwl_scan_roc_expired(struct iwl_priv *priv);
-void iwl_scan_offchannel_skb(struct iwl_priv *priv);
-void iwl_scan_offchannel_skb_status(struct iwl_priv *priv);
-
/* For faster active scanning, scan will move to the next channel if fewer than
* PLCP_QUIET_THRESH packets are heard on this channel within
* ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index d5329489245a..d94f8ab15004 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -69,19 +69,7 @@
} while (0)
/* file operation */
-#define DEBUGFS_READ_FUNC(name) \
-static ssize_t iwl_dbgfs_##name##_read(struct file *file, \
- char __user *user_buf, \
- size_t count, loff_t *ppos);
-
-#define DEBUGFS_WRITE_FUNC(name) \
-static ssize_t iwl_dbgfs_##name##_write(struct file *file, \
- const char __user *user_buf, \
- size_t count, loff_t *ppos);
-
-
#define DEBUGFS_READ_FILE_OPS(name) \
- DEBUGFS_READ_FUNC(name); \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.read = iwl_dbgfs_##name##_read, \
.open = simple_open, \
@@ -89,7 +77,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
};
#define DEBUGFS_WRITE_FILE_OPS(name) \
- DEBUGFS_WRITE_FUNC(name); \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.write = iwl_dbgfs_##name##_write, \
.open = simple_open, \
@@ -98,8 +85,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
- DEBUGFS_READ_FUNC(name); \
- DEBUGFS_WRITE_FUNC(name); \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.write = iwl_dbgfs_##name##_write, \
.read = iwl_dbgfs_##name##_read, \
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 60a4e0d15715..a79fdd137f95 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -540,7 +540,6 @@ struct iwl_rxon_context {
enum iwl_scan_type {
IWL_SCAN_NORMAL,
IWL_SCAN_RADIO_RESET,
- IWL_SCAN_ROC,
};
/**
@@ -825,12 +824,6 @@ struct iwl_priv {
struct reply_tx_error_statistics reply_tx_stats;
struct reply_agg_tx_error_statistics reply_agg_tx_stats;
- /* remain-on-channel offload support */
- struct ieee80211_channel *hw_roc_channel;
- struct delayed_work hw_roc_disable_work;
- int hw_roc_duration;
- bool hw_roc_setup, hw_roc_start_notified;
-
/* bt coex */
u8 bt_enable_flag;
u8 bt_status;
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 319387263e12..cae4d3182e33 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -76,29 +76,6 @@ static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = {
},
};
-static const struct ieee80211_iface_limit iwlagn_p2p_sta_go_limits[] = {
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_STATION),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_P2P_GO) |
- BIT(NL80211_IFTYPE_AP),
- },
-};
-
-static const struct ieee80211_iface_limit iwlagn_p2p_2sta_limits[] = {
- {
- .max = 2,
- .types = BIT(NL80211_IFTYPE_STATION),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
- },
-};
-
static const struct ieee80211_iface_combination
iwlagn_iface_combinations_dualmode[] = {
{ .num_different_channels = 1,
@@ -114,21 +91,6 @@ iwlagn_iface_combinations_dualmode[] = {
},
};
-static const struct ieee80211_iface_combination
-iwlagn_iface_combinations_p2p[] = {
- { .num_different_channels = 1,
- .max_interfaces = 2,
- .beacon_int_infra_match = true,
- .limits = iwlagn_p2p_sta_go_limits,
- .n_limits = ARRAY_SIZE(iwlagn_p2p_sta_go_limits),
- },
- { .num_different_channels = 1,
- .max_interfaces = 2,
- .limits = iwlagn_p2p_2sta_limits,
- .n_limits = ARRAY_SIZE(iwlagn_p2p_2sta_limits),
- },
-};
-
/*
* Not a mac80211 entry point function, but it fits in with all the
* other mac80211 functions grouped here.
@@ -186,19 +148,13 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
- if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) {
- hw->wiphy->iface_combinations = iwlagn_iface_combinations_p2p;
- hw->wiphy->n_iface_combinations =
- ARRAY_SIZE(iwlagn_iface_combinations_p2p);
- } else if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
+ if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
hw->wiphy->iface_combinations =
iwlagn_iface_combinations_dualmode;
hw->wiphy->n_iface_combinations =
ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
}
- hw->wiphy->max_remain_on_channel_duration = 500;
-
hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
WIPHY_FLAG_DISABLE_BEACON_HINTS |
WIPHY_FLAG_IBSS_RSN;
@@ -1159,126 +1115,6 @@ done:
IWL_DEBUG_MAC80211(priv, "leave\n");
}
-static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_channel *channel,
- int duration,
- enum ieee80211_roc_type type)
-{
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
- int err = 0;
-
- if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
- return -EOPNOTSUPP;
-
- if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)))
- return -EOPNOTSUPP;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->mutex);
-
- if (test_bit(STATUS_SCAN_HW, &priv->status)) {
- /* mac80211 should not scan while ROC or ROC while scanning */
- if (WARN_ON_ONCE(priv->scan_type != IWL_SCAN_RADIO_RESET)) {
- err = -EBUSY;
- goto out;
- }
-
- iwl_scan_cancel_timeout(priv, 100);
-
- if (test_bit(STATUS_SCAN_HW, &priv->status)) {
- err = -EBUSY;
- goto out;
- }
- }
-
- priv->hw_roc_channel = channel;
- /* convert from ms to TU */
- priv->hw_roc_duration = DIV_ROUND_UP(1000 * duration, 1024);
- priv->hw_roc_start_notified = false;
- cancel_delayed_work(&priv->hw_roc_disable_work);
-
- if (!ctx->is_active) {
- static const struct iwl_qos_info default_qos_data = {
- .def_qos_parm = {
- .ac[0] = {
- .cw_min = cpu_to_le16(3),
- .cw_max = cpu_to_le16(7),
- .aifsn = 2,
- .edca_txop = cpu_to_le16(1504),
- },
- .ac[1] = {
- .cw_min = cpu_to_le16(7),
- .cw_max = cpu_to_le16(15),
- .aifsn = 2,
- .edca_txop = cpu_to_le16(3008),
- },
- .ac[2] = {
- .cw_min = cpu_to_le16(15),
- .cw_max = cpu_to_le16(1023),
- .aifsn = 3,
- },
- .ac[3] = {
- .cw_min = cpu_to_le16(15),
- .cw_max = cpu_to_le16(1023),
- .aifsn = 7,
- },
- },
- };
-
- ctx->is_active = true;
- ctx->qos_data = default_qos_data;
- ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
- memcpy(ctx->staging.node_addr,
- priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
- ETH_ALEN);
- memcpy(ctx->staging.bssid_addr,
- priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
- ETH_ALEN);
- err = iwlagn_commit_rxon(priv, ctx);
- if (err)
- goto out;
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK |
- RXON_FILTER_PROMISC_MSK |
- RXON_FILTER_CTL2HOST_MSK;
-
- err = iwlagn_commit_rxon(priv, ctx);
- if (err) {
- iwlagn_disable_roc(priv);
- goto out;
- }
- priv->hw_roc_setup = true;
- }
-
- err = iwl_scan_initiate(priv, ctx->vif, IWL_SCAN_ROC, channel->band);
- if (err)
- iwlagn_disable_roc(priv);
-
- out:
- mutex_unlock(&priv->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return err;
-}
-
-static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
-
- if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
- return -EOPNOTSUPP;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->mutex);
- iwl_scan_cancel_timeout(priv, priv->hw_roc_duration);
- iwlagn_disable_roc(priv);
- mutex_unlock(&priv->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return 0;
-}
-
static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_rssi_event rssi_event)
@@ -1434,12 +1270,8 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
viftype, vif->addr);
- cancel_delayed_work_sync(&priv->hw_roc_disable_work);
-
mutex_lock(&priv->mutex);
- iwlagn_disable_roc(priv);
-
if (!iwl_is_ready_rf(priv)) {
IWL_WARN(priv, "Try to add interface when device not ready\n");
err = -EINVAL;
@@ -1766,8 +1598,6 @@ struct ieee80211_ops iwlagn_hw_ops = {
.channel_switch = iwlagn_mac_channel_switch,
.flush = iwlagn_mac_flush,
.tx_last_beacon = iwlagn_mac_tx_last_beacon,
- .remain_on_channel = iwlagn_mac_remain_on_channel,
- .cancel_remain_on_channel = iwlagn_mac_cancel_remain_on_channel,
.rssi_callback = iwlagn_mac_rssi_callback,
.set_tim = iwlagn_mac_set_tim,
};
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 1531a4fc0960..7aad766865cf 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -587,11 +587,6 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
priv->contexts[IWL_RXON_CTX_PAN].interface_modes =
BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP);
- if (ucode_flags & IWL_UCODE_TLV_FLAGS_P2P)
- priv->contexts[IWL_RXON_CTX_PAN].interface_modes |=
- BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_P2P_GO);
-
priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
@@ -854,14 +849,6 @@ void iwl_down(struct iwl_priv *priv)
iwl_scan_cancel_timeout(priv, 200);
- /*
- * If active, scanning won't cancel it, so say it expired.
- * No race since we hold the mutex here and a new one
- * can't come in at this time.
- */
- if (priv->ucode_loaded && priv->cur_ucode != IWL_UCODE_INIT)
- ieee80211_remain_on_channel_expired(priv->hw);
-
exit_pending =
test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
@@ -1002,41 +989,6 @@ static void iwl_bg_restart(struct work_struct *data)
}
}
-
-
-
-void iwlagn_disable_roc(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
-
- lockdep_assert_held(&priv->mutex);
-
- if (!priv->hw_roc_setup)
- return;
-
- ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-
- priv->hw_roc_channel = NULL;
-
- memset(ctx->staging.node_addr, 0, ETH_ALEN);
-
- iwlagn_commit_rxon(priv, ctx);
-
- ctx->is_active = false;
- priv->hw_roc_setup = false;
-}
-
-static void iwlagn_disable_roc_work(struct work_struct *work)
-{
- struct iwl_priv *priv = container_of(work, struct iwl_priv,
- hw_roc_disable_work.work);
-
- mutex_lock(&priv->mutex);
- iwlagn_disable_roc(priv);
- mutex_unlock(&priv->mutex);
-}
-
/*****************************************************************************
*
* driver setup and teardown
@@ -1053,8 +1005,6 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush);
INIT_WORK(&priv->bt_full_concurrency, iwl_bg_bt_full_concurrency);
INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config);
- INIT_DELAYED_WORK(&priv->hw_roc_disable_work,
- iwlagn_disable_roc_work);
iwl_setup_scan_deferred_work(priv);
@@ -1082,7 +1032,6 @@ void iwl_cancel_deferred_work(struct iwl_priv *priv)
cancel_work_sync(&priv->bt_full_concurrency);
cancel_work_sync(&priv->bt_runtime_config);
- cancel_delayed_work_sync(&priv->hw_roc_disable_work);
del_timer_sync(&priv->statistics_periodic);
del_timer_sync(&priv->ucode_trace);
@@ -1169,12 +1118,6 @@ static void iwl_option_config(struct iwl_priv *priv)
#else
IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TRACING disabled\n");
#endif
-
-#ifdef CONFIG_IWLWIFI_P2P
- IWL_INFO(priv, "CONFIG_IWLWIFI_P2P enabled\n");
-#else
- IWL_INFO(priv, "CONFIG_IWLWIFI_P2P disabled\n");
-#endif
}
static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
@@ -1315,10 +1258,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
ucode_flags = fw->ucode_capa.flags;
-#ifndef CONFIG_IWLWIFI_P2P
- ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
-#endif
-
if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) {
priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
@@ -1413,7 +1352,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
* if not PAN, then don't support P2P -- might be a uCode
* packaging bug or due to the eeprom check above
*/
- ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
priv->sta_key_max_num = STA_KEY_MAX_NUM;
trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 1b693944123b..b647e506564c 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -2826,9 +2826,6 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
lq_sta->flush_timer = 0;
lq_sta->supp_rates = sta->supp_rates[sband->band];
- for (j = 0; j < LQ_SIZE; j++)
- for (i = 0; i < IWL_RATE_COUNT; i++)
- rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
IWL_DEBUG_RATE(priv, "LQ: *** rate scale station global init for station %d ***\n",
sta_id);
@@ -3319,7 +3316,8 @@ static void rs_remove_debugfs(void *priv, void *priv_sta)
* station is added we ignore it.
*/
static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta)
+ struct cfg80211_chan_def *chandef,
+ struct ieee80211_sta *sta, void *priv_sta)
{
}
static struct rate_control_ops rs_ops = {
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index cd1ad0019185..d7ce2f12a907 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -564,11 +564,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
cmd.slots[0].type = 0; /* BSS */
cmd.slots[1].type = 1; /* PAN */
- if (priv->hw_roc_setup) {
- /* both contexts must be used for this to happen */
- slot1 = IWL_MIN_SLOT_TIME;
- slot0 = 3000;
- } else if (ctx_bss->vif && ctx_pan->vif) {
+ if (ctx_bss->vif && ctx_pan->vif) {
int bcnint = ctx_pan->beacon_int;
int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;
diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
index 8c686a5b90ac..35e0ee8b4e5b 100644
--- a/drivers/net/wireless/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/dvm/scan.c
@@ -100,9 +100,6 @@ static void iwl_complete_scan(struct iwl_priv *priv, bool aborted)
ieee80211_scan_completed(priv->hw, aborted);
}
- if (priv->scan_type == IWL_SCAN_ROC)
- iwl_scan_roc_expired(priv);
-
priv->scan_type = IWL_SCAN_NORMAL;
priv->scan_vif = NULL;
priv->scan_request = NULL;
@@ -130,9 +127,6 @@ static void iwl_process_scan_complete(struct iwl_priv *priv)
goto out_settings;
}
- if (priv->scan_type == IWL_SCAN_ROC)
- iwl_scan_roc_expired(priv);
-
if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) {
int err;
@@ -284,12 +278,6 @@ static int iwl_rx_scan_start_notif(struct iwl_priv *priv,
le32_to_cpu(notif->tsf_low),
notif->status, notif->beacon_timer);
- if (priv->scan_type == IWL_SCAN_ROC &&
- !priv->hw_roc_start_notified) {
- ieee80211_ready_on_channel(priv->hw);
- priv->hw_roc_start_notified = true;
- }
-
return 0;
}
@@ -697,8 +685,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
- if (priv->scan_type != IWL_SCAN_ROC &&
- iwl_is_any_associated(priv)) {
+ if (iwl_is_any_associated(priv)) {
u16 interval = 0;
u32 extra;
u32 suspend_time = 100;
@@ -706,9 +693,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
switch (priv->scan_type) {
- case IWL_SCAN_ROC:
- WARN_ON(1);
- break;
case IWL_SCAN_RADIO_RESET:
interval = 0;
break;
@@ -728,11 +712,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
scan->suspend_time = cpu_to_le32(scan_suspend_time);
IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
scan_suspend_time, interval);
- } else if (priv->scan_type == IWL_SCAN_ROC) {
- scan->suspend_time = 0;
- scan->max_out_time = 0;
- scan->quiet_time = 0;
- scan->quiet_plcp_th = 0;
}
switch (priv->scan_type) {
@@ -774,9 +753,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
} else
IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
break;
- case IWL_SCAN_ROC:
- IWL_DEBUG_SCAN(priv, "Start ROC scan.\n");
- break;
}
scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
@@ -898,7 +874,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
scan_cmd_size - sizeof(*scan));
break;
case IWL_SCAN_RADIO_RESET:
- case IWL_SCAN_ROC:
/* use bcast addr, will not be transmitted but must be valid */
cmd_len = iwl_fill_probe_req(
(struct ieee80211_mgmt *)scan->data,
@@ -926,46 +901,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
is_active, n_probes,
(void *)&scan->data[cmd_len]);
break;
- case IWL_SCAN_ROC: {
- struct iwl_scan_channel *scan_ch;
- int n_chan, i;
- u16 dwell;
-
- dwell = iwl_limit_dwell(priv, priv->hw_roc_duration);
- n_chan = DIV_ROUND_UP(priv->hw_roc_duration, dwell);
-
- scan->channel_count = n_chan;
-
- scan_ch = (void *)&scan->data[cmd_len];
-
- for (i = 0; i < n_chan; i++) {
- scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
- scan_ch->channel =
- cpu_to_le16(priv->hw_roc_channel->hw_value);
-
- if (i == n_chan - 1)
- dwell = priv->hw_roc_duration - i * dwell;
-
- scan_ch->active_dwell =
- scan_ch->passive_dwell = cpu_to_le16(dwell);
-
- /* Set txpower levels to defaults */
- scan_ch->dsp_atten = 110;
-
- /* NOTE: if we were doing 6Mb OFDM for scans we'd use
- * power level:
- * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
- */
- if (priv->hw_roc_channel->band == IEEE80211_BAND_5GHZ)
- scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
- else
- scan_ch->tx_gain = ((1 << 5) | (5 << 3));
-
- scan_ch++;
- }
- }
-
- break;
}
if (scan->channel_count == 0) {
@@ -1035,7 +970,6 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
IWL_DEBUG_SCAN(priv, "Starting %sscan...\n",
scan_type == IWL_SCAN_NORMAL ? "" :
- scan_type == IWL_SCAN_ROC ? "remain-on-channel " :
"internal short ");
set_bit(STATUS_SCANNING, &priv->status);
@@ -1149,40 +1083,3 @@ void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
mutex_unlock(&priv->mutex);
}
}
-
-void iwl_scan_roc_expired(struct iwl_priv *priv)
-{
- /*
- * The status bit should be set here, to prevent a race
- * where the atomic_read returns 1, but before the execution continues
- * iwl_scan_offchannel_skb_status() checks if the status bit is set
- */
- set_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status);
-
- if (atomic_read(&priv->num_aux_in_flight) == 0) {
- ieee80211_remain_on_channel_expired(priv->hw);
- priv->hw_roc_channel = NULL;
- schedule_delayed_work(&priv->hw_roc_disable_work,
- 10 * HZ);
-
- clear_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status);
- } else {
- IWL_DEBUG_SCAN(priv, "ROC done with %d frames in aux\n",
- atomic_read(&priv->num_aux_in_flight));
- }
-}
-
-void iwl_scan_offchannel_skb(struct iwl_priv *priv)
-{
- WARN_ON(!priv->hw_roc_start_notified);
- atomic_inc(&priv->num_aux_in_flight);
-}
-
-void iwl_scan_offchannel_skb_status(struct iwl_priv *priv)
-{
- if (atomic_dec_return(&priv->num_aux_in_flight) == 0 &&
- test_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "0 aux frames. Calling ROC expired\n");
- iwl_scan_roc_expired(priv);
- }
-}
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index 5ee983faa679..da442b81370a 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -87,7 +87,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
priv->lib->bt_params->advanced_bt_coexist &&
(ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
ieee80211_is_reassoc_req(fc) ||
- skb->protocol == cpu_to_be16(ETH_P_PAE)))
+ info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
tx_flags |= TX_CMD_FLG_IGNORE_BT;
@@ -478,9 +478,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
if (sta_priv && sta_priv->client && !is_agg)
atomic_inc(&sta_priv->pending_frames);
- if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
- iwl_scan_offchannel_skb(priv);
-
return 0;
drop_unlock_sta:
@@ -1158,7 +1155,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
struct sk_buff *skb;
struct iwl_rxon_context *ctx;
bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
- bool is_offchannel_skb;
tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
IWLAGN_TX_RES_TID_POS;
@@ -1178,8 +1174,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
__skb_queue_head_init(&skbs);
- is_offchannel_skb = false;
-
if (tx_resp->frame_count == 1) {
u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
next_reclaimed = IEEE80211_SEQ_TO_SN(next_reclaimed + 0x10);
@@ -1256,8 +1250,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
if (!is_agg)
iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
- is_offchannel_skb =
- (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);
freed++;
}
@@ -1271,14 +1263,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
if (!is_agg && freed != 1)
IWL_ERR(priv, "Q: %d, freed %d\n", txq_id, freed);
- /*
- * An offchannel frame can be send only on the AUX queue, where
- * there is no aggregation (and reordering) so it only is single
- * skb is expected to be processed.
- */
- if (is_offchannel_skb && freed != 1)
- IWL_ERR(priv, "OFFCHANNEL SKB freed %d\n", freed);
-
IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x)\n", txq_id,
iwl_get_tx_fail_reason(status), status);
@@ -1298,9 +1282,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
ieee80211_tx_status_ni(priv->hw, skb);
}
- if (is_offchannel_skb)
- iwl_scan_offchannel_skb_status(priv);
-
return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 22b7fa5b971a..76e14c046d94 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -99,6 +99,7 @@ static const struct iwl_base_params iwl7000_base_params = {
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = true,
+ .pcie_l1_allowed = true,
};
static const struct iwl_ht_params iwl7000_ht_params = {
@@ -126,6 +127,16 @@ const struct iwl_cfg iwl7260_2ac_cfg = {
.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
};
+const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
+ .name = "Intel(R) Dual Band Wireless AC 7260",
+ .fw_name_pre = IWL7260_FW_PRE,
+ IWL_DEVICE_7000,
+ .ht_params = &iwl7000_ht_params,
+ .nvm_ver = IWL7260_NVM_VERSION,
+ .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
+ .high_temp = true,
+};
+
const struct iwl_cfg iwl7260_2n_cfg = {
.name = "Intel(R) Dual Band Wireless N 7260",
.fw_name_pre = IWL7260_FW_PRE,
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 83b9ff6ff3ad..e4d370bff306 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -152,6 +152,7 @@ struct iwl_base_params {
unsigned int wd_timeout;
u32 max_event_log_size;
const bool shadow_reg_enable;
+ const bool pcie_l1_allowed;
};
/*
@@ -205,6 +206,7 @@ struct iwl_eeprom_params {
* @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
* @rx_with_siso_diversity: 1x1 device with rx antenna diversity
* @internal_wimax_coex: internal wifi/wimax combo device
+ * @high_temp: whether this NIC is designated for high-temperature environments
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -233,6 +235,7 @@ struct iwl_cfg {
enum iwl_led_mode led_mode;
const bool rx_with_siso_diversity;
const bool internal_wimax_coex;
+ bool high_temp;
};
/*
@@ -283,6 +286,7 @@ extern const struct iwl_cfg iwl135_bgn_cfg;
#endif /* CONFIG_IWLDVM */
#if IS_ENABLED(CONFIG_IWLMVM)
extern const struct iwl_cfg iwl7260_2ac_cfg;
+extern const struct iwl_cfg iwl7260_2ac_cfg_high_temp;
extern const struct iwl_cfg iwl7260_2n_cfg;
extern const struct iwl_cfg iwl7260_n_cfg;
extern const struct iwl_cfg iwl3160_2ac_cfg;
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 7edb8519c8a4..b2bb32a781dd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -145,6 +145,7 @@ do { \
#define IWL_DL_RX 0x01000000
#define IWL_DL_ISR 0x02000000
#define IWL_DL_HT 0x04000000
+#define IWL_DL_EXTERNAL 0x08000000
/* 0xF0000000 - 0x10000000 */
#define IWL_DL_11H 0x10000000
#define IWL_DL_STATS 0x20000000
@@ -153,6 +154,7 @@ do { \
#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
+#define IWL_DEBUG_EXTERNAL(p, f, a...) IWL_DEBUG(p, IWL_DL_EXTERNAL, f, ## a)
#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 4491c1c72cc7..684c416d3493 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -33,10 +33,11 @@
static inline bool iwl_trace_data(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- if (ieee80211_is_data(hdr->frame_control))
- return skb->protocol != cpu_to_be16(ETH_P_PAE);
- return false;
+ if (!ieee80211_is_data(hdr->frame_control))
+ return false;
+ return !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO);
}
static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans,
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index d0162d426f88..99e1da3123c9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -843,7 +843,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
int i;
bool load_module = false;
- fw->ucode_capa.max_probe_length = 200;
+ fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH;
fw->ucode_capa.standard_phy_calibration_size =
IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
@@ -1032,8 +1032,10 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
int ret;
drv = kzalloc(sizeof(*drv), GFP_KERNEL);
- if (!drv)
- return NULL;
+ if (!drv) {
+ ret = -ENOMEM;
+ goto err;
+ }
drv->trans = trans;
drv->dev = trans->dev;
@@ -1078,7 +1080,7 @@ err_free_dbgfs:
err_free_drv:
#endif
kfree(drv);
-
+err:
return ERR_PTR(ret);
}
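
With the error label above, iwl_drv_start() reports allocation failure as
ERR_PTR(-ENOMEM) instead of a bare NULL, so every failure leaves through
the same ERR_PTR() exit. Callers then use the usual err.h helpers; an
illustrative call site:

	drv = iwl_drv_start(trans, cfg);
	if (IS_ERR(drv))
		return PTR_ERR(drv);
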
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index f844d5c748c0..a1223680bc70 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -74,13 +74,24 @@
* @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
* @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
* @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
+ * @IWL_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD
+ * @IWL_UCODE_TLV_FLAGS_RX_ENERGY_API: supports rx signal strength api
+ * @IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2: using the new time event API.
+ * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
+ * (rather than two) IPv6 addresses
+ * @IWL_UCODE_TLV_FLAGS_BF_UPDATED: new beacon filtering API
*/
enum iwl_ucode_tlv_flag {
- IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
- IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
- IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
- IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
- IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
+ IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
+ IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
+ IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
+ IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
+ IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
+ IWL_UCODE_TLV_FLAGS_UAPSD = BIT(6),
+ IWL_UCODE_TLV_FLAGS_RX_ENERGY_API = BIT(8),
+ IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2 = BIT(9),
+ IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10),
+ IWL_UCODE_TLV_FLAGS_BF_UPDATED = BIT(11),
};
/* The default calibrate table size if not specified by firmware file */
@@ -88,6 +99,9 @@ enum iwl_ucode_tlv_flag {
#define IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE 19
#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE 253
+/* The default max probe length if not specified by the firmware file */
+#define IWL_DEFAULT_MAX_PROBE_LENGTH 200
+
/**
* enum iwl_ucode_type
*
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 305c81f2c2b4..dfa4d2e3aaa2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -33,6 +33,7 @@
#include "iwl-io.h"
#include "iwl-csr.h"
#include "iwl-debug.h"
+#include "iwl-fh.h"
+#include "iwl-csr.h"
#define IWL_POLL_INTERVAL 10 /* microseconds */
@@ -166,3 +168,68 @@ void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
}
}
IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);
+
+static const char *get_fh_string(int cmd)
+{
+#define IWL_CMD(x) case x: return #x
+ switch (cmd) {
+ IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
+ IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
+ IWL_CMD(FH_RSCSR_CHNL0_WPTR);
+ IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
+ IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
+ IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
+ IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
+ IWL_CMD(FH_TSSR_TX_STATUS_REG);
+ IWL_CMD(FH_TSSR_TX_ERROR_REG);
+ default:
+ return "UNKNOWN";
+ }
+#undef IWL_CMD
+}
+
+int iwl_dump_fh(struct iwl_trans *trans, char **buf)
+{
+ int i;
+ static const u32 fh_tbl[] = {
+ FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+ FH_RSCSR_CHNL0_WPTR,
+ FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH_MEM_RSSR_SHARED_CTRL_REG,
+ FH_MEM_RSSR_RX_STATUS_REG,
+ FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
+ FH_TSSR_TX_STATUS_REG,
+ FH_TSSR_TX_ERROR_REG
+ };
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (buf) {
+ int pos = 0;
+ size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+
+ *buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!*buf)
+ return -ENOMEM;
+
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "FH register values:\n");
+
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ " %34s: 0X%08x\n",
+ get_fh_string(fh_tbl[i]),
+ iwl_read_direct32(trans, fh_tbl[i]));
+
+ return pos;
+ }
+#endif
+
+ IWL_ERR(trans, "FH register values:\n");
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
+ IWL_ERR(trans, " %34s: 0X%08x\n",
+ get_fh_string(fh_tbl[i]),
+ iwl_read_direct32(trans, fh_tbl[i]));
+
+ return 0;
+}
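
get_fh_string() above uses a common kernel idiom: the local IWL_CMD(x)
macro expands to "case x: return #x", so each register listed in fh_tbl[]
prints under its own name with no separate string table to keep in sync.
The debugfs buffer size, ARRAY_SIZE(fh_tbl) * 48 + 40, is one worst-case
" %34s: 0X%08x\n" line (48 bytes) per register plus the header line. A
stripped-down sketch of the idiom, reusing two registers from fh_tbl[]
(REG_CASE is an illustrative name, not from this patch):

	#define REG_CASE(x) case x: return #x
	static const char *reg_name(u32 reg)
	{
		switch (reg) {
		REG_CASE(FH_TSSR_TX_STATUS_REG);
		REG_CASE(FH_TSSR_TX_ERROR_REG);
		default:
			return "UNKNOWN";
		}
	}
	#undef REG_CASE
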
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index fd9f5b97fff3..63d10ec08dbc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -77,4 +77,7 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
u32 bits, u32 mask);
void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
+/* Error handling */
+int iwl_dump_fh(struct iwl_trans *trans, char **buf);
+
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index acd2665afb8c..b76a9a8fc0b3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -118,6 +118,7 @@ static const u8 iwl_nvm_channels[] = {
#define LAST_2GHZ_HT_PLUS 9
#define LAST_5GHZ_HT 161
+#define DEFAULT_MAX_TX_POWER 16
/* rate data (static) */
static struct ieee80211_rate iwl_cfg80211_rates[] = {
@@ -232,8 +233,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
/* Initialize regulatory-based run-time data */
- /* TODO: read the real value from the NVM */
- channel->max_power = 0;
+ /*
+ * Default to the highest tx power value; max_power is not used
+ * by mvm and is kept only for backwards compatibility.
+ */
+ channel->max_power = DEFAULT_MAX_TX_POWER;
is_5ghz = channel->band == IEEE80211_BAND_5GHZ;
IWL_DEBUG_EEPROM(dev,
"Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index 98c7aa7346da..976448a57d02 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -93,7 +93,7 @@ struct iwl_cfg;
* 1) The driver layer (iwl-drv.c) chooses the op_mode based on the
* capabilities advertized by the fw file (in TLV format).
* 2) The driver layer starts the op_mode (ops->start)
- * 3) The op_mode registers registers mac80211
+ * 3) The op_mode registers mac80211
* 4) The op_mode is governed by mac80211
* 5) The driver layer stops the op_mode
*/
@@ -112,7 +112,7 @@ struct iwl_cfg;
* @stop: stop the op_mode. Must free all the memory allocated.
* May sleep
* @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
- * HCMD the this Rx responds to.
+ * HCMD this Rx responds to.
* This callback may sleep, it is called from a threaded IRQ handler.
* @queue_full: notifies that a HW queue is full.
* Must be atomic and called with BH disabled.
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 8d91422c5982..dd57a36ecb10 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -180,7 +180,7 @@ struct iwl_rx_packet {
* enum CMD_MODE - how to send the host commands ?
*
* @CMD_SYNC: The caller will be stalled until the fw responds to the command
- * @CMD_ASYNC: Return right away and don't want for the response
+ * @CMD_ASYNC: Return right away and don't wait for the response
* @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
* response. The caller needs to call iwl_free_resp when done.
*/
@@ -218,7 +218,7 @@ struct iwl_device_cmd {
*
* @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
* ring. The transport layer doesn't map the command's buffer to DMA, but
- * rather copies it to an previously allocated DMA buffer. This flag tells
+ * rather copies it to a previously allocated DMA buffer. This flag tells
* the transport layer not to copy the command, but to map the existing
* buffer (that is passed in) instead. This saves the memcpy and allows
* commands that are bigger than the fixed buffer to be submitted.
@@ -243,7 +243,7 @@ enum iwl_hcmd_dataflag {
* @handler_status: return value of the handler of the command
* (put in setup_rx_handlers) - valid for SYNC mode only
* @flags: can be CMD_*
- * @len: array of the lenths of the chunks in data
+ * @len: array of the lengths of the chunks in data
* @dataflags: IWL_HCMD_DFL_*
* @id: id of the host command
*/
@@ -396,8 +396,6 @@ struct iwl_trans;
* May sleep
* @dbgfs_register: add the dbgfs files under this directory. Files will be
* automatically deleted.
- * @suspend: stop the device unless WoWLAN is configured
- * @resume: resume activity of the device
* @write8: write a u8 to a register at offset ofs from the BAR
* @write32: write a u32 to a register at offset ofs from the BAR
* @read32: read a u32 register at offset ofs from the BAR
@@ -443,10 +441,7 @@ struct iwl_trans_ops {
int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
int (*wait_tx_queue_empty)(struct iwl_trans *trans);
-#ifdef CONFIG_PM_SLEEP
- int (*suspend)(struct iwl_trans *trans);
- int (*resume)(struct iwl_trans *trans);
-#endif
+
void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
u32 (*read32)(struct iwl_trans *trans, u32 ofs);
@@ -700,18 +695,6 @@ static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
return trans->ops->dbgfs_register(trans, dir);
}
-#ifdef CONFIG_PM_SLEEP
-static inline int iwl_trans_suspend(struct iwl_trans *trans)
-{
- return trans->ops->suspend(trans);
-}
-
-static inline int iwl_trans_resume(struct iwl_trans *trans)
-{
- return trans->ops->resume(trans);
-}
-#endif
-
static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
trans->ops->write8(trans, ofs, val);
diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile
index ff856e543ae8..6d73817850ce 100644
--- a/drivers/net/wireless/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/iwlwifi/mvm/Makefile
@@ -2,7 +2,7 @@ obj-$(CONFIG_IWLMVM) += iwlmvm.o
iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o
iwlmvm-y += scan.o time-event.o rs.o
-iwlmvm-y += power.o bt-coex.o
+iwlmvm-y += power.o power_legacy.o bt-coex.o
iwlmvm-y += led.o tt.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
index dbd622a3929c..0fad98b85f60 100644
--- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
@@ -220,66 +220,87 @@ static const __le32 iwl_single_shared_ant_lookup[BT_COEX_LUT_SIZE] = {
int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
{
- struct iwl_bt_coex_cmd cmd = {
- .max_kill = 5,
- .bt3_time_t7_value = 1,
- .bt3_prio_sample_time = 2,
- .bt3_timer_t2_value = 0xc,
+ struct iwl_bt_coex_cmd *bt_cmd;
+ struct iwl_host_cmd cmd = {
+ .id = BT_CONFIG,
+ .len = { sizeof(*bt_cmd), },
+ .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+ .flags = CMD_SYNC,
};
int ret;
- cmd.flags = iwlwifi_mod_params.bt_coex_active ?
+ /* go to CALIB state in internal BT-Coex state machine */
+ ret = iwl_send_bt_env(mvm, BT_COEX_ENV_OPEN,
+ BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
+ if (ret)
+ return ret;
+
+ ret = iwl_send_bt_env(mvm, BT_COEX_ENV_CLOSE,
+ BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
+ if (ret)
+ return ret;
+
+ bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
+ if (!bt_cmd)
+ return -ENOMEM;
+ cmd.data[0] = bt_cmd;
+
+ bt_cmd->max_kill = 5;
+ bt_cmd->bt3_time_t7_value = 1;
+ bt_cmd->bt3_prio_sample_time = 2;
+ bt_cmd->bt3_timer_t2_value = 0xc;
+
+ bt_cmd->flags = iwlwifi_mod_params.bt_coex_active ?
BT_COEX_NW : BT_COEX_DISABLE;
- cmd.flags |= BT_CH_PRIMARY_EN | BT_SYNC_2_BT_DISABLE;
+ bt_cmd->flags |= BT_CH_PRIMARY_EN | BT_SYNC_2_BT_DISABLE;
- cmd.valid_bit_msk = cpu_to_le16(BT_VALID_ENABLE |
- BT_VALID_BT_PRIO_BOOST |
- BT_VALID_MAX_KILL |
- BT_VALID_3W_TMRS |
- BT_VALID_KILL_ACK |
- BT_VALID_KILL_CTS |
- BT_VALID_REDUCED_TX_POWER |
- BT_VALID_LUT);
+ bt_cmd->valid_bit_msk = cpu_to_le16(BT_VALID_ENABLE |
+ BT_VALID_BT_PRIO_BOOST |
+ BT_VALID_MAX_KILL |
+ BT_VALID_3W_TMRS |
+ BT_VALID_KILL_ACK |
+ BT_VALID_KILL_CTS |
+ BT_VALID_REDUCED_TX_POWER |
+ BT_VALID_LUT);
if (mvm->cfg->bt_shared_single_ant)
- memcpy(&cmd.decision_lut, iwl_single_shared_ant_lookup,
+ memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant_lookup,
sizeof(iwl_single_shared_ant_lookup));
else if (is_loose_coex())
- memcpy(&cmd.decision_lut, iwl_loose_lookup,
+ memcpy(&bt_cmd->decision_lut, iwl_loose_lookup,
sizeof(iwl_tight_lookup));
else
- memcpy(&cmd.decision_lut, iwl_tight_lookup,
+ memcpy(&bt_cmd->decision_lut, iwl_tight_lookup,
sizeof(iwl_tight_lookup));
- cmd.bt_prio_boost = cpu_to_le32(IWL_BT_DEFAULT_BOOST);
- cmd.kill_ack_msk =
+ bt_cmd->bt_prio_boost = cpu_to_le32(IWL_BT_DEFAULT_BOOST);
+ bt_cmd->kill_ack_msk =
cpu_to_le32(iwl_bt_ack_kill_msk[BT_KILL_MSK_DEFAULT]);
- cmd.kill_cts_msk =
+ bt_cmd->kill_cts_msk =
cpu_to_le32(iwl_bt_cts_kill_msk[BT_KILL_MSK_DEFAULT]);
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
- /* go to CALIB state in internal BT-Coex state machine */
- ret = iwl_send_bt_env(mvm, BT_COEX_ENV_OPEN,
- BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
- if (ret)
- return ret;
-
- ret = iwl_send_bt_env(mvm, BT_COEX_ENV_CLOSE,
- BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
- if (ret)
- return ret;
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
- return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, CMD_SYNC,
- sizeof(cmd), &cmd);
+ kfree(bt_cmd);
+ return ret;
}
static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
bool reduced_tx_power)
{
enum iwl_bt_kill_msk bt_kill_msk;
- struct iwl_bt_coex_cmd cmd = {};
+ struct iwl_bt_coex_cmd *bt_cmd;
struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
+ struct iwl_host_cmd cmd = {
+ .id = BT_CONFIG,
+ .data[0] = &bt_cmd,
+ .len = { sizeof(*bt_cmd), },
+ .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+ .flags = CMD_SYNC,
+ };
+ int ret = 0;
lockdep_assert_held(&mvm->mutex);
@@ -308,24 +329,40 @@ static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
return 0;
mvm->bt_kill_msk = bt_kill_msk;
- cmd.kill_ack_msk = cpu_to_le32(iwl_bt_ack_kill_msk[bt_kill_msk]);
- cmd.kill_cts_msk = cpu_to_le32(iwl_bt_cts_kill_msk[bt_kill_msk]);
- cmd.valid_bit_msk = cpu_to_le16(BT_VALID_KILL_ACK | BT_VALID_KILL_CTS);
+
+ bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
+ if (!bt_cmd)
+ return -ENOMEM;
+ cmd.data[0] = bt_cmd;
+
+ bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ack_kill_msk[bt_kill_msk]);
+ bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_cts_kill_msk[bt_kill_msk]);
+ bt_cmd->valid_bit_msk =
+ cpu_to_le16(BT_VALID_KILL_ACK | BT_VALID_KILL_CTS);
IWL_DEBUG_COEX(mvm, "bt_kill_msk = %d\n", bt_kill_msk);
- return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, CMD_SYNC,
- sizeof(cmd), &cmd);
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+
+ kfree(bt_cmd);
+ return ret;
}
static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
bool enable)
{
- struct iwl_bt_coex_cmd cmd = {
- .valid_bit_msk = cpu_to_le16(BT_VALID_REDUCED_TX_POWER),
- .bt_reduced_tx_power = sta_id,
+ struct iwl_bt_coex_cmd *bt_cmd;
+ /* Send ASYNC since this can be sent from an atomic context */
+ struct iwl_host_cmd cmd = {
+ .id = BT_CONFIG,
+ .len = { sizeof(*bt_cmd), },
+ .dataflags = { IWL_HCMD_DFL_DUP, },
+ .flags = CMD_ASYNC,
};
+
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
+ int ret;
/* This can happen if the station has been removed right now */
if (sta_id == IWL_MVM_STATION_COUNT)
@@ -339,17 +376,26 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
if (mvmsta->bt_reduced_txpower == enable)
return 0;
+ bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC);
+ if (!bt_cmd)
+ return -ENOMEM;
+ cmd.data[0] = bt_cmd;
+
+ bt_cmd->valid_bit_msk = cpu_to_le16(BT_VALID_REDUCED_TX_POWER);
+ bt_cmd->bt_reduced_tx_power = sta_id;
+
if (enable)
- cmd.bt_reduced_tx_power |= BT_REDUCED_TX_POWER_BIT;
+ bt_cmd->bt_reduced_tx_power |= BT_REDUCED_TX_POWER_BIT;
IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n",
enable ? "en" : "dis", sta_id);
mvmsta->bt_reduced_txpower = enable;
- /* Send ASYNC since this can be sent from an atomic context */
- return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, CMD_ASYNC,
- sizeof(cmd), &cmd);
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+
+ kfree(bt_cmd);
+ return ret;
}
struct iwl_bt_iterator_data {
@@ -384,6 +430,10 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
smps_mode = IEEE80211_SMPS_AUTOMATIC;
+ /* non-associated BSSes aren't considered */
+ if (!vif->bss_conf.assoc)
+ return;
+
if (band != IEEE80211_BAND_2GHZ) {
iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
smps_mode);
@@ -523,6 +573,8 @@ static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
lockdep_is_held(&mvm->mutex));
mvmsta = (void *)sta->drv_priv;
+ data->num_bss_ifaces++;
+
/*
* This interface doesn't support reduced Tx power (because of low
* RSSI probably), then set bt_kill_msk to default values.
@@ -588,23 +640,5 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
void iwl_mvm_bt_coex_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
- struct ieee80211_chanctx_conf *chanctx_conf;
- enum ieee80211_band band;
-
- rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
- if (chanctx_conf && chanctx_conf->def.chan)
- band = chanctx_conf->def.chan->band;
- else
- band = -1;
- rcu_read_unlock();
-
- /* if we are in 2GHz we will get a notification from the fw */
- if (band == IEEE80211_BAND_2GHZ)
- return;
-
- /* else, we can remove all the constraints */
- memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
-
iwl_mvm_bt_coex_notif_handle(mvm);
}
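
The bt-coex refactor above moves struct iwl_bt_coex_cmd off the stack
because the command buffer is now handed to the transport by reference:
IWL_HCMD_DFL_NOCOPY maps the caller's buffer for DMA, which stack memory
cannot safely provide, while IWL_HCMD_DFL_DUP has the transport duplicate
the buffer, making the CMD_ASYNC path safe to kfree() right after queuing.
A minimal sketch of the resulting pattern (error handling trimmed):

	struct iwl_bt_coex_cmd *bt_cmd;
	struct iwl_host_cmd cmd = {
		.id = BT_CONFIG,
		.len = { sizeof(*bt_cmd), },
		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
		.flags = CMD_SYNC,
	};
	int ret;

	bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
	if (!bt_cmd)
		return -ENOMEM;
	cmd.data[0] = bt_cmd;	/* fill *bt_cmd before sending */
	ret = iwl_mvm_send_cmd(mvm, &cmd);
	kfree(bt_cmd);
	return ret;
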
diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h
new file mode 100644
index 000000000000..2bf29f7992ee
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/constants.h
@@ -0,0 +1,80 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __MVM_CONSTANTS_H
+#define __MVM_CONSTANTS_H
+
+#define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
+#define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
+#define IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
+#define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
+#define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
+#define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
+#define IWL_MVM_PS_HEAVY_TX_THLD_PACKETS 20
+#define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS 20
+#define IWL_MVM_PS_HEAVY_TX_THLD_PERCENT 50
+#define IWL_MVM_PS_HEAVY_RX_THLD_PERCENT 50
+#define IWL_MVM_PS_SNOOZE_INTERVAL 25
+#define IWL_MVM_PS_SNOOZE_WINDOW 50
+#define IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW 25
+
+#endif /* __MVM_CONSTANTS_H */
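These timeouts are expressed in microseconds (hence the USEC_PER_MSEC factor), matching the units of the rx/tx_data_timeout fields of the power commands defined later in this series. As a minimal sketch, assuming the __le32 field layout of iwl_powertable_cmd shown further down, applying the defaults needs nothing more than an endianness conversion (the helper name is invented for illustration):

/* Hypothetical helper: apply the default power-save timeouts.
 * The constants are already in usec; only byte order is adjusted. */
static void set_default_ps_timeouts(struct iwl_powertable_cmd *cmd)
{
	cmd->rx_data_timeout =
		cpu_to_le32(IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT);
	cmd->tx_data_timeout =
		cpu_to_le32(IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT);
}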
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 83da884cf303..417639f77b01 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -105,7 +105,7 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
list_for_each_entry(ifa, &idev->addr_list, if_list) {
mvmvif->target_ipv6_addrs[idx] = ifa->addr;
idx++;
- if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS)
+ if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)
break;
}
read_unlock_bh(&idev->lock);
@@ -378,36 +378,68 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
- struct iwl_proto_offload_cmd cmd = {};
+ union {
+ struct iwl_proto_offload_cmd_v1 v1;
+ struct iwl_proto_offload_cmd_v2 v2;
+ } cmd = {};
+ struct iwl_proto_offload_cmd_common *common;
+ u32 enabled = 0, size;
#if IS_ENABLED(CONFIG_IPV6)
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int i;
- if (mvmvif->num_target_ipv6_addrs) {
- cmd.enabled |= cpu_to_le32(IWL_D3_PROTO_OFFLOAD_NS);
- memcpy(cmd.ndp_mac_addr, vif->addr, ETH_ALEN);
- }
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
+ if (mvmvif->num_target_ipv6_addrs) {
+ enabled |= IWL_D3_PROTO_OFFLOAD_NS;
+ memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
+ }
+
+ BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) !=
+ sizeof(mvmvif->target_ipv6_addrs[0]));
- BUILD_BUG_ON(sizeof(cmd.target_ipv6_addr[i]) !=
- sizeof(mvmvif->target_ipv6_addrs[i]));
+ for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
+ IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++)
+ memcpy(cmd.v2.target_ipv6_addr[i],
+ &mvmvif->target_ipv6_addrs[i],
+ sizeof(cmd.v2.target_ipv6_addr[i]));
+ } else {
+ if (mvmvif->num_target_ipv6_addrs) {
+ enabled |= IWL_D3_PROTO_OFFLOAD_NS;
+ memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN);
+ }
- for (i = 0; i < mvmvif->num_target_ipv6_addrs; i++)
- memcpy(cmd.target_ipv6_addr[i],
- &mvmvif->target_ipv6_addrs[i],
- sizeof(cmd.target_ipv6_addr[i]));
+ BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) !=
+ sizeof(mvmvif->target_ipv6_addrs[0]));
+
+ for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
+ IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++)
+ memcpy(cmd.v1.target_ipv6_addr[i],
+ &mvmvif->target_ipv6_addrs[i],
+ sizeof(cmd.v1.target_ipv6_addr[i]));
+ }
#endif
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
+ common = &cmd.v2.common;
+ size = sizeof(cmd.v2);
+ } else {
+ common = &cmd.v1.common;
+ size = sizeof(cmd.v1);
+ }
+
if (vif->bss_conf.arp_addr_cnt) {
- cmd.enabled |= cpu_to_le32(IWL_D3_PROTO_OFFLOAD_ARP);
- cmd.host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
- memcpy(cmd.arp_mac_addr, vif->addr, ETH_ALEN);
+ enabled |= IWL_D3_PROTO_OFFLOAD_ARP;
+ common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
+ memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
}
- if (!cmd.enabled)
+ if (!enabled)
return 0;
+ common->enabled = cpu_to_le32(enabled);
+
return iwl_mvm_send_cmd_pdu(mvm, PROT_OFFLOAD_CONFIG_CMD, CMD_SYNC,
- sizeof(cmd), &cmd);
+ size, &cmd);
}
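The rewritten iwl_mvm_send_proto_offload() above is an instance of a common pattern for versioned firmware ABIs: build the command in a union of every supported layout, fill the version-specific part under a capability check, then touch the shared fields through a pointer to the common sub-struct so the ARP/IPv4 path is written only once. A stripped-down sketch of the same shape, with made-up structures standing in for the real command:

/* Sketch only: cmd_common/cmd_v1/cmd_v2 are invented stand-ins. */
struct cmd_common { __le32 enabled; } __packed;
struct cmd_v1 { struct cmd_common common; u8 addrs[2][16]; } __packed;
struct cmd_v2 { struct cmd_common common; u8 addrs[6][16]; } __packed;

static int send_versioned(struct iwl_mvm *mvm, bool has_v2)
{
	union {
		struct cmd_v1 v1;
		struct cmd_v2 v2;
	} cmd = {};
	struct cmd_common *common = has_v2 ? &cmd.v2.common : &cmd.v1.common;
	u32 size = has_v2 ? sizeof(cmd.v2) : sizeof(cmd.v1);

	/* version-independent part, written exactly once */
	common->enabled = cpu_to_le32(1);
	return iwl_mvm_send_cmd_pdu(mvm, PROT_OFFLOAD_CONFIG_CMD, CMD_SYNC,
				    size, &cmd);
}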
enum iwl_mvm_tcp_packet_type {
@@ -1077,73 +1109,16 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
return __iwl_mvm_suspend(hw, wowlan, false);
}
-static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_wowlan_status *status)
{
- u32 base = mvm->error_event_table;
- struct error_table_start {
- /* cf. struct iwl_error_event_table */
- u32 valid;
- u32 error_id;
- } err_info;
+ struct sk_buff *pkt = NULL;
struct cfg80211_wowlan_wakeup wakeup = {
.pattern_idx = -1,
};
struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
- struct iwl_host_cmd cmd = {
- .id = WOWLAN_GET_STATUSES,
- .flags = CMD_SYNC | CMD_WANT_SKB,
- };
- struct iwl_wowlan_status *status;
- u32 reasons;
- int ret, len;
- struct sk_buff *pkt = NULL;
-
- iwl_trans_read_mem_bytes(mvm->trans, base,
- &err_info, sizeof(err_info));
-
- if (err_info.valid) {
- IWL_INFO(mvm, "error table is valid (%d)\n",
- err_info.valid);
- if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
- wakeup.rfkill_release = true;
- ieee80211_report_wowlan_wakeup(vif, &wakeup,
- GFP_KERNEL);
- }
- return;
- }
-
- /* only for tracing for now */
- ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, CMD_SYNC, 0, NULL);
- if (ret)
- IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
-
- ret = iwl_mvm_send_cmd(mvm, &cmd);
- if (ret) {
- IWL_ERR(mvm, "failed to query status (%d)\n", ret);
- return;
- }
-
- /* RF-kill already asserted again... */
- if (!cmd.resp_pkt)
- return;
-
- len = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- if (len - sizeof(struct iwl_cmd_header) < sizeof(*status)) {
- IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
- goto out;
- }
-
- status = (void *)cmd.resp_pkt->data;
-
- if (len - sizeof(struct iwl_cmd_header) !=
- sizeof(*status) +
- ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4)) {
- IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
- goto out;
- }
-
- reasons = le32_to_cpu(status->wakeup_reasons);
+ u32 reasons = le32_to_cpu(status->wakeup_reasons);
if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
wakeup_report = NULL;
@@ -1206,6 +1181,12 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
pktsize -= hdrlen;
if (ieee80211_has_protected(hdr->frame_control)) {
+ /*
+ * This is unlocked and using gtk_i(c)vlen,
+ * but since everything is under RTNL still
+ * that's not really a problem - changing
+ * it would be difficult.
+ */
if (is_multicast_ether_addr(hdr->addr1)) {
ivlen = mvm->gtk_ivlen;
icvlen += mvm->gtk_icvlen;
@@ -1256,9 +1237,82 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
report:
ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
kfree_skb(pkt);
+}
- out:
+/* releases the MVM mutex */
+static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ u32 base = mvm->error_event_table;
+ struct error_table_start {
+ /* cf. struct iwl_error_event_table */
+ u32 valid;
+ u32 error_id;
+ } err_info;
+ struct iwl_host_cmd cmd = {
+ .id = WOWLAN_GET_STATUSES,
+ .flags = CMD_SYNC | CMD_WANT_SKB,
+ };
+ struct iwl_wowlan_status *status;
+ int ret, len;
+
+ iwl_trans_read_mem_bytes(mvm->trans, base,
+ &err_info, sizeof(err_info));
+
+ if (err_info.valid) {
+ IWL_INFO(mvm, "error table is valid (%d)\n",
+ err_info.valid);
+ if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .rfkill_release = true,
+ };
+ ieee80211_report_wowlan_wakeup(vif, &wakeup,
+ GFP_KERNEL);
+ }
+ goto out_unlock;
+ }
+
+ /* only for tracing for now */
+ ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, CMD_SYNC, 0, NULL);
+ if (ret)
+ IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret) {
+ IWL_ERR(mvm, "failed to query status (%d)\n", ret);
+ goto out_unlock;
+ }
+
+ /* RF-kill already asserted again... */
+ if (!cmd.resp_pkt)
+ goto out_unlock;
+
+ len = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ if (len - sizeof(struct iwl_cmd_header) < sizeof(*status)) {
+ IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+ goto out_free_resp;
+ }
+
+ status = (void *)cmd.resp_pkt->data;
+
+ if (len - sizeof(struct iwl_cmd_header) !=
+ sizeof(*status) +
+ ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4)) {
+ IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+ goto out_free_resp;
+ }
+
+ /* now we have all the data we need, unlock to avoid mac80211 issues */
+ mutex_unlock(&mvm->mutex);
+
+ iwl_mvm_report_wakeup_reasons(mvm, vif, status);
+ iwl_free_resp(&cmd);
+ return;
+
+ out_free_resp:
iwl_free_resp(&cmd);
+ out_unlock:
+ mutex_unlock(&mvm->mutex);
}
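Before handing the status to the new reporting helper, the function validates the response length twice: the payload must be at least as large as the fixed struct, and the total must equal the struct plus the 4-byte-aligned wake packet buffer that trails it. The arithmetic in isolation, as a sketch with a hypothetical helper name:

/* Sketch: validate a response made of a fixed struct plus a
 * variable, 4-byte-aligned trailing buffer (cf. the checks above). */
static bool wowlan_resp_len_ok(u32 payload_len, u32 fixed_len, u32 buf_len)
{
	if (payload_len < fixed_len)
		return false;	/* too short to even hold the struct */
	return payload_len == fixed_len + ALIGN(buf_len, 4);
}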
static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
@@ -1315,10 +1369,13 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
iwl_mvm_read_d3_sram(mvm);
iwl_mvm_query_wakeup_reasons(mvm, vif);
+ /* has unlocked the mutex, so skip that */
+ goto out;
out_unlock:
mutex_unlock(&mvm->mutex);
+ out:
if (!test && vif)
ieee80211_resume_disconnect(vif);
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index c24a744910ac..aac81b8984b0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -352,6 +352,10 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val);
dbgfs_pm->lprx_rssi_threshold = val;
break;
+ case MVM_DEBUGFS_PM_SNOOZE_ENABLE:
+ IWL_DEBUG_POWER(mvm, "snooze_enable=%d\n", val);
+ dbgfs_pm->snooze_ena = val;
+ break;
}
}
@@ -405,6 +409,10 @@ static ssize_t iwl_dbgfs_pm_params_write(struct file *file,
POWER_LPRX_RSSI_THRESHOLD_MIN)
return -EINVAL;
param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD;
+ } else if (!strncmp("snooze_enable=", buf, 14)) {
+ if (sscanf(buf + 14, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_SNOOZE_ENABLE;
} else {
return -EINVAL;
}
@@ -424,40 +432,11 @@ static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
struct ieee80211_vif *vif = file->private_data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->dbgfs_data;
- struct iwl_powertable_cmd cmd = {};
- char buf[256];
+ char buf[512];
int bufsz = sizeof(buf);
- int pos = 0;
+ int pos;
- iwl_mvm_power_build_cmd(mvm, vif, &cmd);
-
- pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
- (cmd.flags &
- cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
- 0 : 1);
- pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
- le32_to_cpu(cmd.skip_dtim_periods));
- pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
- iwlmvm_mod_params.power_scheme);
- pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
- le16_to_cpu(cmd.flags));
- pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
- cmd.keep_alive_seconds);
-
- if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
- pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
- (cmd.flags &
- cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ?
- 1 : 0);
- pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
- le32_to_cpu(cmd.rx_data_timeout));
- pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
- le32_to_cpu(cmd.tx_data_timeout));
- if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
- pos += scnprintf(buf+pos, bufsz-pos,
- "lprx_rssi_threshold = %d\n",
- le32_to_cpu(cmd.lprx_rssi_threshold));
- }
+ pos = iwl_mvm_power_dbgfs_read(mvm, vif, buf, bufsz);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
@@ -621,25 +600,160 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
}
#undef BT_MBOX_PRINT
+#define PRINT_STATS_LE32(_str, _val) \
+ pos += scnprintf(buf + pos, bufsz - pos, \
+ fmt_table, _str, \
+ le32_to_cpu(_val))
+
+static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ static const char *fmt_table = "\t%-30s %10u\n";
+ static const char *fmt_header = "%-32s\n";
+ int pos = 0;
+ char *buf;
+ int ret;
+ int bufsz = sizeof(struct mvm_statistics_rx_phy) * 20 +
+ sizeof(struct mvm_statistics_rx_non_phy) * 10 +
+ sizeof(struct mvm_statistics_rx_ht_phy) * 10 + 200;
+ struct mvm_statistics_rx_phy *ofdm;
+ struct mvm_statistics_rx_phy *cck;
+ struct mvm_statistics_rx_non_phy *general;
+ struct mvm_statistics_rx_ht_phy *ht;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&mvm->mutex);
+
+ ofdm = &mvm->rx_stats.ofdm;
+ cck = &mvm->rx_stats.cck;
+ general = &mvm->rx_stats.general;
+ ht = &mvm->rx_stats.ofdm_ht;
+
+ pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - OFDM");
+ PRINT_STATS_LE32("ina_cnt", ofdm->ina_cnt);
+ PRINT_STATS_LE32("fina_cnt", ofdm->fina_cnt);
+ PRINT_STATS_LE32("plcp_err", ofdm->plcp_err);
+ PRINT_STATS_LE32("crc32_err", ofdm->crc32_err);
+ PRINT_STATS_LE32("overrun_err", ofdm->overrun_err);
+ PRINT_STATS_LE32("early_overrun_err", ofdm->early_overrun_err);
+ PRINT_STATS_LE32("crc32_good", ofdm->crc32_good);
+ PRINT_STATS_LE32("false_alarm_cnt", ofdm->false_alarm_cnt);
+ PRINT_STATS_LE32("fina_sync_err_cnt", ofdm->fina_sync_err_cnt);
+ PRINT_STATS_LE32("sfd_timeout", ofdm->sfd_timeout);
+ PRINT_STATS_LE32("fina_timeout", ofdm->fina_timeout);
+ PRINT_STATS_LE32("unresponded_rts", ofdm->unresponded_rts);
+ PRINT_STATS_LE32("rxe_frame_lmt_overrun",
+ ofdm->rxe_frame_limit_overrun);
+ PRINT_STATS_LE32("sent_ack_cnt", ofdm->sent_ack_cnt);
+ PRINT_STATS_LE32("sent_cts_cnt", ofdm->sent_cts_cnt);
+ PRINT_STATS_LE32("sent_ba_rsp_cnt", ofdm->sent_ba_rsp_cnt);
+ PRINT_STATS_LE32("dsp_self_kill", ofdm->dsp_self_kill);
+ PRINT_STATS_LE32("mh_format_err", ofdm->mh_format_err);
+ PRINT_STATS_LE32("re_acq_main_rssi_sum", ofdm->re_acq_main_rssi_sum);
+ PRINT_STATS_LE32("reserved", ofdm->reserved);
+
+ pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - CCK");
+ PRINT_STATS_LE32("ina_cnt", cck->ina_cnt);
+ PRINT_STATS_LE32("fina_cnt", cck->fina_cnt);
+ PRINT_STATS_LE32("plcp_err", cck->plcp_err);
+ PRINT_STATS_LE32("crc32_err", cck->crc32_err);
+ PRINT_STATS_LE32("overrun_err", cck->overrun_err);
+ PRINT_STATS_LE32("early_overrun_err", cck->early_overrun_err);
+ PRINT_STATS_LE32("crc32_good", cck->crc32_good);
+ PRINT_STATS_LE32("false_alarm_cnt", cck->false_alarm_cnt);
+ PRINT_STATS_LE32("fina_sync_err_cnt", cck->fina_sync_err_cnt);
+ PRINT_STATS_LE32("sfd_timeout", cck->sfd_timeout);
+ PRINT_STATS_LE32("fina_timeout", cck->fina_timeout);
+ PRINT_STATS_LE32("unresponded_rts", cck->unresponded_rts);
+ PRINT_STATS_LE32("rxe_frame_lmt_overrun",
+ cck->rxe_frame_limit_overrun);
+ PRINT_STATS_LE32("sent_ack_cnt", cck->sent_ack_cnt);
+ PRINT_STATS_LE32("sent_cts_cnt", cck->sent_cts_cnt);
+ PRINT_STATS_LE32("sent_ba_rsp_cnt", cck->sent_ba_rsp_cnt);
+ PRINT_STATS_LE32("dsp_self_kill", cck->dsp_self_kill);
+ PRINT_STATS_LE32("mh_format_err", cck->mh_format_err);
+ PRINT_STATS_LE32("re_acq_main_rssi_sum", cck->re_acq_main_rssi_sum);
+ PRINT_STATS_LE32("reserved", cck->reserved);
+
+ pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - GENERAL");
+ PRINT_STATS_LE32("bogus_cts", general->bogus_cts);
+ PRINT_STATS_LE32("bogus_ack", general->bogus_ack);
+ PRINT_STATS_LE32("non_bssid_frames", general->non_bssid_frames);
+ PRINT_STATS_LE32("filtered_frames", general->filtered_frames);
+ PRINT_STATS_LE32("non_channel_beacons", general->non_channel_beacons);
+ PRINT_STATS_LE32("channel_beacons", general->channel_beacons);
+ PRINT_STATS_LE32("num_missed_bcon", general->num_missed_bcon);
+ PRINT_STATS_LE32("adc_rx_saturation_time",
+ general->adc_rx_saturation_time);
+ PRINT_STATS_LE32("ina_detection_search_time",
+ general->ina_detection_search_time);
+ PRINT_STATS_LE32("beacon_silence_rssi_a",
+ general->beacon_silence_rssi_a);
+ PRINT_STATS_LE32("beacon_silence_rssi_b",
+ general->beacon_silence_rssi_b);
+ PRINT_STATS_LE32("beacon_silence_rssi_c",
+ general->beacon_silence_rssi_c);
+ PRINT_STATS_LE32("interference_data_flag",
+ general->interference_data_flag);
+ PRINT_STATS_LE32("channel_load", general->channel_load);
+ PRINT_STATS_LE32("dsp_false_alarms", general->dsp_false_alarms);
+ PRINT_STATS_LE32("beacon_rssi_a", general->beacon_rssi_a);
+ PRINT_STATS_LE32("beacon_rssi_b", general->beacon_rssi_b);
+ PRINT_STATS_LE32("beacon_rssi_c", general->beacon_rssi_c);
+ PRINT_STATS_LE32("beacon_energy_a", general->beacon_energy_a);
+ PRINT_STATS_LE32("beacon_energy_b", general->beacon_energy_b);
+ PRINT_STATS_LE32("beacon_energy_c", general->beacon_energy_c);
+ PRINT_STATS_LE32("num_bt_kills", general->num_bt_kills);
+ PRINT_STATS_LE32("directed_data_mpdu", general->directed_data_mpdu);
+
+ pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - HT");
+ PRINT_STATS_LE32("plcp_err", ht->plcp_err);
+ PRINT_STATS_LE32("overrun_err", ht->overrun_err);
+ PRINT_STATS_LE32("early_overrun_err", ht->early_overrun_err);
+ PRINT_STATS_LE32("crc32_good", ht->crc32_good);
+ PRINT_STATS_LE32("crc32_err", ht->crc32_err);
+ PRINT_STATS_LE32("mh_format_err", ht->mh_format_err);
+ PRINT_STATS_LE32("agg_crc32_good", ht->agg_crc32_good);
+ PRINT_STATS_LE32("agg_mpdu_cnt", ht->agg_mpdu_cnt);
+ PRINT_STATS_LE32("agg_cnt", ht->agg_cnt);
+ PRINT_STATS_LE32("unsupport_mcs", ht->unsupport_mcs);
+
+ mutex_unlock(&mvm->mutex);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+
+ return ret;
+}
+#undef PRINT_STATS_LE32
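The dump above leans on the usual scnprintf accumulation idiom: each call appends at buf + pos and is clamped to bufsz - pos, so pos can never run past the buffer even if the generous size estimate turns out wrong. The idiom in isolation, as a sketch:

/* Sketch of the accumulation pattern used by the stats dump. */
static int fill_two_counters(char *buf, int bufsz)
{
	int pos = 0;

	pos += scnprintf(buf + pos, bufsz - pos, "\t%-30s %10u\n",
			 "ina_cnt", 1u);
	pos += scnprintf(buf + pos, bufsz - pos, "\t%-30s %10u\n",
			 "fina_cnt", 2u);
	return pos;	/* byte count for simple_read_from_buffer() */
}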
+
static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
- bool restart_fw = iwlwifi_mod_params.restart_fw;
int ret;
- iwlwifi_mod_params.restart_fw = true;
-
mutex_lock(&mvm->mutex);
+ /* allow one more restart that we're provoking here */
+ if (mvm->restart_fw >= 0)
+ mvm->restart_fw++;
+
/* take the return value to make compiler happy - it will fail anyway */
ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, CMD_SYNC, 0, NULL);
mutex_unlock(&mvm->mutex);
- iwlwifi_mod_params.restart_fw = restart_fw;
-
return count;
}
@@ -661,8 +775,14 @@ static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
case MVM_DEBUGFS_BF_ROAMING_STATE:
dbgfs_bf->bf_roaming_state = value;
break;
- case MVM_DEBUGFS_BF_TEMPERATURE_DELTA:
- dbgfs_bf->bf_temperature_delta = value;
+ case MVM_DEBUGFS_BF_TEMP_THRESHOLD:
+ dbgfs_bf->bf_temp_threshold = value;
+ break;
+ case MVM_DEBUGFS_BF_TEMP_FAST_FILTER:
+ dbgfs_bf->bf_temp_fast_filter = value;
+ break;
+ case MVM_DEBUGFS_BF_TEMP_SLOW_FILTER:
+ dbgfs_bf->bf_temp_slow_filter = value;
break;
case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER:
dbgfs_bf->bf_enable_beacon_filter = value;
@@ -721,13 +841,27 @@ static ssize_t iwl_dbgfs_bf_params_write(struct file *file,
value > IWL_BF_ROAMING_STATE_MAX)
return -EINVAL;
param = MVM_DEBUGFS_BF_ROAMING_STATE;
- } else if (!strncmp("bf_temperature_delta=", buf, 21)) {
- if (sscanf(buf+21, "%d", &value) != 1)
+ } else if (!strncmp("bf_temp_threshold=", buf, 18)) {
+ if (sscanf(buf+18, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_TEMP_THRESHOLD_MIN ||
+ value > IWL_BF_TEMP_THRESHOLD_MAX)
return -EINVAL;
- if (value < IWL_BF_TEMPERATURE_DELTA_MIN ||
- value > IWL_BF_TEMPERATURE_DELTA_MAX)
+ param = MVM_DEBUGFS_BF_TEMP_THRESHOLD;
+ } else if (!strncmp("bf_temp_fast_filter=", buf, 20)) {
+ if (sscanf(buf+20, "%d", &value) != 1)
return -EINVAL;
- param = MVM_DEBUGFS_BF_TEMPERATURE_DELTA;
+ if (value < IWL_BF_TEMP_FAST_FILTER_MIN ||
+ value > IWL_BF_TEMP_FAST_FILTER_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_TEMP_FAST_FILTER;
+ } else if (!strncmp("bf_temp_slow_filter=", buf, 20)) {
+ if (sscanf(buf+20, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_TEMP_SLOW_FILTER_MIN ||
+ value > IWL_BF_TEMP_SLOW_FILTER_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_TEMP_SLOW_FILTER;
} else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) {
if (sscanf(buf+24, "%d", &value) != 1)
return -EINVAL;
@@ -769,10 +903,7 @@ static ssize_t iwl_dbgfs_bf_params_write(struct file *file,
if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value) {
ret = iwl_mvm_disable_beacon_filter(mvm, vif);
} else {
- if (mvmvif->bf_enabled)
- ret = iwl_mvm_enable_beacon_filter(mvm, vif);
- else
- ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif);
}
mutex_unlock(&mvm->mutex);
@@ -789,41 +920,41 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
int pos = 0;
const size_t bufsz = sizeof(buf);
struct iwl_beacon_filter_cmd cmd = {
- .bf_energy_delta = IWL_BF_ENERGY_DELTA_DEFAULT,
- .bf_roaming_energy_delta = IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT,
- .bf_roaming_state = IWL_BF_ROAMING_STATE_DEFAULT,
- .bf_temperature_delta = IWL_BF_TEMPERATURE_DELTA_DEFAULT,
- .bf_enable_beacon_filter = IWL_BF_ENABLE_BEACON_FILTER_DEFAULT,
- .bf_debug_flag = IWL_BF_DEBUG_FLAG_DEFAULT,
- .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT),
- .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_DEFAULT),
- .ba_enable_beacon_abort = IWL_BA_ENABLE_BEACON_ABORT_DEFAULT,
+ IWL_BF_CMD_CONFIG_DEFAULTS,
+ .bf_enable_beacon_filter =
+ cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT),
+ .ba_enable_beacon_abort =
+ cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT),
};
iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
- if (mvmvif->bf_enabled)
- cmd.bf_enable_beacon_filter = 1;
+ if (mvmvif->bf_data.bf_enabled)
+ cmd.bf_enable_beacon_filter = cpu_to_le32(1);
else
cmd.bf_enable_beacon_filter = 0;
pos += scnprintf(buf+pos, bufsz-pos, "bf_energy_delta = %d\n",
- cmd.bf_energy_delta);
+ le32_to_cpu(cmd.bf_energy_delta));
pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_energy_delta = %d\n",
- cmd.bf_roaming_energy_delta);
+ le32_to_cpu(cmd.bf_roaming_energy_delta));
pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_state = %d\n",
- cmd.bf_roaming_state);
- pos += scnprintf(buf+pos, bufsz-pos, "bf_temperature_delta = %d\n",
- cmd.bf_temperature_delta);
+ le32_to_cpu(cmd.bf_roaming_state));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_threshold = %d\n",
+ le32_to_cpu(cmd.bf_temp_threshold));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_fast_filter = %d\n",
+ le32_to_cpu(cmd.bf_temp_fast_filter));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_slow_filter = %d\n",
+ le32_to_cpu(cmd.bf_temp_slow_filter));
pos += scnprintf(buf+pos, bufsz-pos, "bf_enable_beacon_filter = %d\n",
- cmd.bf_enable_beacon_filter);
+ le32_to_cpu(cmd.bf_enable_beacon_filter));
pos += scnprintf(buf+pos, bufsz-pos, "bf_debug_flag = %d\n",
- cmd.bf_debug_flag);
+ le32_to_cpu(cmd.bf_debug_flag));
pos += scnprintf(buf+pos, bufsz-pos, "bf_escape_timer = %d\n",
- cmd.bf_escape_timer);
+ le32_to_cpu(cmd.bf_escape_timer));
pos += scnprintf(buf+pos, bufsz-pos, "ba_escape_timer = %d\n",
- cmd.ba_escape_timer);
+ le32_to_cpu(cmd.ba_escape_timer));
pos += scnprintf(buf+pos, bufsz-pos, "ba_enable_beacon_abort = %d\n",
- cmd.ba_enable_beacon_abort);
+ le32_to_cpu(cmd.ba_enable_beacon_abort));
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
@@ -934,6 +1065,7 @@ MVM_DEBUGFS_READ_FILE_OPS(stations);
MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
MVM_DEBUGFS_WRITE_FILE_OPS(power_down_allow);
MVM_DEBUGFS_WRITE_FILE_OPS(power_down_d3_allow);
+MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart);
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram);
@@ -957,6 +1089,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(power_down_allow, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(power_down_d3_allow, mvm->debugfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index 6f8b2c16ae17..df72fcdf8170 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -98,34 +98,63 @@ enum iwl_proto_offloads {
IWL_D3_PROTO_OFFLOAD_NS = BIT(1),
};
-#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS 2
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1 2
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2 6
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX 6
/**
- * struct iwl_proto_offload_cmd - ARP/NS offload configuration
+ * struct iwl_proto_offload_cmd_common - ARP/NS offload common part
* @enabled: enable flags
* @remote_ipv4_addr: remote address to answer to (or zero if all)
* @host_ipv4_addr: our IPv4 address to respond to queries for
* @arp_mac_addr: our MAC address for ARP responses
- * @remote_ipv6_addr: remote address to answer to (or zero if all)
- * @solicited_node_ipv6_addr: broken -- solicited node address exists
- * for each target address
- * @target_ipv6_addr: our target addresses
- * @ndp_mac_addr: neighbor soliciation response MAC address
+ * @reserved: unused
*/
-struct iwl_proto_offload_cmd {
+struct iwl_proto_offload_cmd_common {
__le32 enabled;
__be32 remote_ipv4_addr;
__be32 host_ipv4_addr;
u8 arp_mac_addr[ETH_ALEN];
- __le16 reserved1;
+ __le16 reserved;
+} __packed;
+/**
+ * struct iwl_proto_offload_cmd_v1 - ARP/NS offload configuration
+ * @common: common/IPv4 configuration
+ * @remote_ipv6_addr: remote address to answer to (or zero if all)
+ * @solicited_node_ipv6_addr: broken -- solicited node address exists
+ * for each target address
+ * @target_ipv6_addr: our target addresses
+ * @ndp_mac_addr: neighbor solicitation response MAC address
+ */
+struct iwl_proto_offload_cmd_v1 {
+ struct iwl_proto_offload_cmd_common common;
u8 remote_ipv6_addr[16];
u8 solicited_node_ipv6_addr[16];
- u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS][16];
+ u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1][16];
u8 ndp_mac_addr[ETH_ALEN];
__le16 reserved2;
} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_1 */
+/**
+ * struct iwl_proto_offload_cmd_v2 - ARP/NS offload configuration
+ * @common: common/IPv4 configuration
+ * @remote_ipv6_addr: remote address to answer to (or zero if all)
+ * @solicited_node_ipv6_addr: broken -- solicited node address exists
+ * for each target address
+ * @target_ipv6_addr: our target addresses
+ * @ndp_mac_addr: neighbor solicitation response MAC address
+ */
+struct iwl_proto_offload_cmd_v2 {
+ struct iwl_proto_offload_cmd_common common;
+ u8 remote_ipv6_addr[16];
+ u8 solicited_node_ipv6_addr[16];
+ u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2][16];
+ u8 ndp_mac_addr[ETH_ALEN];
+ u8 numValidIPv6Addresses;
+ u8 reserved2[3];
+} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_2 */
+
/*
* WOWLAN_PATTERNS
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index a6da359a80c3..8e7ab41079ca 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -79,6 +79,10 @@
* '1' Driver enables PM (use rest of parameters)
* @POWER_FLAGS_SKIP_OVER_DTIM_MSK: '0' PM have to walk up every DTIM,
* '1' PM could sleep over DTIM till listen Interval.
+ * @POWER_FLAGS_SNOOZE_ENA_MSK: Enable snoozing only if uAPSD is enabled and all
+ * access categories are both delivery and trigger enabled.
+ * @POWER_FLAGS_BT_SCO_ENA: Enable BT SCO coex only if uAPSD and
+ * PBW Snoozing enabled
* @POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask
* @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable.
*/
@@ -86,6 +90,8 @@ enum iwl_power_flags {
POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0),
POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK = BIT(1),
POWER_FLAGS_SKIP_OVER_DTIM_MSK = BIT(2),
+ POWER_FLAGS_SNOOZE_ENA_MSK = BIT(5),
+ POWER_FLAGS_BT_SCO_ENA = BIT(8),
POWER_FLAGS_ADVANCE_PM_ENA_MSK = BIT(9),
POWER_FLAGS_LPRX_ENA_MSK = BIT(11),
};
@@ -93,7 +99,8 @@ enum iwl_power_flags {
#define IWL_POWER_VEC_SIZE 5
/**
- * struct iwl_powertable_cmd - Power Table Command
+ * struct iwl_powertable_cmd - legacy power command. Beside old API support this
+ * is used also with a new power API for device wide power settings.
* POWER_TABLE_CMD = 0x77 (command, has simple generic response)
*
* @flags: Power table command flags from POWER_FLAGS_*
@@ -125,6 +132,76 @@ struct iwl_powertable_cmd {
} __packed;
/**
+ * struct iwl_mac_power_cmd - New power command containing uAPSD support
+ * MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response)
+ * @id_and_color: MAC context identifier
+ * @flags: Power table command flags from POWER_FLAGS_*
+ * @keep_alive_seconds: Keep alive period in seconds. Default: 25 sec.
+ * Minimum allowed: 3 * DTIM. The keep alive period must be
+ * set regardless of power scheme or current power state.
+ * The FW uses this value even when PM is disabled.
+ * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to
+ * PSM transition - legacy PM
+ * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to
+ * PSM transition - legacy PM
+ * @sleep_interval: not in use
+ * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag
+ * is set. For example, if it is required to skip over
+ * one DTIM, this value needs to be set to 2 (DTIM periods).
+ * @rx_data_timeout_uapsd: Minimum time (usec) from last Rx packet for AM to
+ * PSM transition - uAPSD
+ * @tx_data_timeout_uapsd: Minimum time (usec) from last Tx packet for AM to
+ * PSM transition - uAPSD
+ * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
+ * Default: 80dbm
+ * @num_skip_dtim: Number of DTIMs to skip if Skip over DTIM flag is set
+ * @snooze_interval: Maximum time between attempts to retrieve buffered data
+ * from the AP [msec]
+ * @snooze_window: A window of time in which PBW snoozing ensures that
+ * all packets are received. It is also the minimum time from the
+ * last received unicast RX packet before the client stops snoozing
+ * for data. [msec]
+ * @snooze_step: TBD
+ * @qndp_tid: TID client shall use for uAPSD QNDP triggers
+ * @uapsd_ac_flags: Set trigger-enabled and delivery-enabled indication for
+ * each corresponding AC.
+ * Use IEEE80211_WMM_IE_STA_QOSINFO_AC* for correct values.
+ * @uapsd_max_sp: Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct
+ * values.
+ * @heavy_tx_thld_packets: TX threshold measured in number of packets
+ * @heavy_rx_thld_packets: RX threshold measured in number of packets
+ * @heavy_tx_thld_percentage: TX threshold measured as a percentage of load
+ * @heavy_rx_thld_percentage: RX threshold measured as a percentage of load
+ * @limited_ps_threshold:
+ */
+struct iwl_mac_power_cmd {
+ /* CONTEXT_DESC_API_T_VER_1 */
+ __le32 id_and_color;
+
+ /* CLIENT_PM_POWER_TABLE_S_VER_1 */
+ __le16 flags;
+ __le16 keep_alive_seconds;
+ __le32 rx_data_timeout;
+ __le32 tx_data_timeout;
+ __le32 rx_data_timeout_uapsd;
+ __le32 tx_data_timeout_uapsd;
+ u8 lprx_rssi_threshold;
+ u8 skip_dtim_periods;
+ __le16 snooze_interval;
+ __le16 snooze_window;
+ u8 snooze_step;
+ u8 qndp_tid;
+ u8 uapsd_ac_flags;
+ u8 uapsd_max_sp;
+ u8 heavy_tx_thld_packets;
+ u8 heavy_rx_thld_packets;
+ u8 heavy_tx_thld_percentage;
+ u8 heavy_rx_thld_percentage;
+ u8 limited_ps_threshold;
+ u8 reserved;
+} __packed;
+
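The snooze fields of the new command line up with the IWL_MVM_PS_SNOOZE_* constants added at the top of this series. How they might be wired together, as a sketch (the actual fill logic in the power code is not part of this hunk):

/* Sketch: plausible snooze configuration of the new command. */
static void fill_snooze(struct iwl_mac_power_cmd *cmd)
{
	cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
	cmd->snooze_interval = cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL);
	cmd->snooze_window = cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
}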
+/**
* struct iwl_beacon_filter_cmd
* REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
+ * @id_and_color: MAC context identifier
@@ -143,11 +220,21 @@ struct iwl_powertable_cmd {
* calculated for current beacon is less than the threshold, use
* Roaming Energy Delta Threshold, otherwise use normal Energy Delta
* Threshold. Typical energy threshold is -72dBm.
- * @bf_temperature_delta: Send Beacon to driver if delta in temperature values
- * calculated for this and the last passed beacon is greater than this
- * threshold. Zero value means that the temperature changeis ignored for
+ * @bf_temp_threshold: This threshold determines the type of temperature
+ * filtering (Slow or Fast) that is selected (units are in Celsius):
+ * if the current temperature is above this threshold, the Fast
+ * filter is used; if it is below this threshold, the Slow
+ * filter is used.
+ * @bf_temp_fast_filter: Send Beacon to driver if delta in temperature values
+ * calculated for this and the last passed beacon is greater than this
+ * threshold. Zero value means that the temperature change is ignored for
* beacon filtering; beacons will not be forced to be sent to driver
+ * regardless of whether the temperature has changed.
+ * @bf_temp_slow_filter: Send Beacon to driver if delta in temperature values
+ * calculated for this and the last passed beacon is greater than this
+ * threshold. Zero value means that the temperature change is ignored for
+ * beacon filtering; beacons will not be forced to be sent to driver
+ * regardless of whether the temperature has changed.
* @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled.
* @bf_filter_escape_timer: Send beacons to driver if no beacons were passed
* for a specific period of time. Units: Beacons.
@@ -156,17 +243,17 @@ struct iwl_powertable_cmd {
* @ba_enable_beacon_abort: 1, beacon abort is enabled; 0, disabled.
*/
struct iwl_beacon_filter_cmd {
- u8 bf_energy_delta;
- u8 bf_roaming_energy_delta;
- u8 bf_roaming_state;
- u8 bf_temperature_delta;
- u8 bf_enable_beacon_filter;
- u8 bf_debug_flag;
- __le16 reserved1;
+ __le32 bf_energy_delta;
+ __le32 bf_roaming_energy_delta;
+ __le32 bf_roaming_state;
+ __le32 bf_temp_threshold;
+ __le32 bf_temp_fast_filter;
+ __le32 bf_temp_slow_filter;
+ __le32 bf_enable_beacon_filter;
+ __le32 bf_debug_flag;
__le32 bf_escape_timer;
__le32 ba_escape_timer;
- u8 ba_enable_beacon_abort;
- u8 reserved2[3];
+ __le32 ba_enable_beacon_abort;
} __packed;
/* Beacon filtering and beacon abort */
@@ -182,9 +269,17 @@ struct iwl_beacon_filter_cmd {
#define IWL_BF_ROAMING_STATE_MAX 255
#define IWL_BF_ROAMING_STATE_MIN 0
-#define IWL_BF_TEMPERATURE_DELTA_DEFAULT 5
-#define IWL_BF_TEMPERATURE_DELTA_MAX 255
-#define IWL_BF_TEMPERATURE_DELTA_MIN 0
+#define IWL_BF_TEMP_THRESHOLD_DEFAULT 112
+#define IWL_BF_TEMP_THRESHOLD_MAX 255
+#define IWL_BF_TEMP_THRESHOLD_MIN 0
+
+#define IWL_BF_TEMP_FAST_FILTER_DEFAULT 1
+#define IWL_BF_TEMP_FAST_FILTER_MAX 255
+#define IWL_BF_TEMP_FAST_FILTER_MIN 0
+
+#define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5
+#define IWL_BF_TEMP_SLOW_FILTER_MAX 255
+#define IWL_BF_TEMP_SLOW_FILTER_MIN 0
#define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1
@@ -194,19 +289,23 @@ struct iwl_beacon_filter_cmd {
#define IWL_BF_ESCAPE_TIMER_MAX 1024
#define IWL_BF_ESCAPE_TIMER_MIN 0
-#define IWL_BA_ESCAPE_TIMER_DEFAULT 3
+#define IWL_BA_ESCAPE_TIMER_DEFAULT 6
+#define IWL_BA_ESCAPE_TIMER_D3 6
#define IWL_BA_ESCAPE_TIMER_MAX 1024
#define IWL_BA_ESCAPE_TIMER_MIN 0
#define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1
-#define IWL_BF_CMD_CONFIG_DEFAULTS \
- .bf_energy_delta = IWL_BF_ENERGY_DELTA_DEFAULT, \
- .bf_roaming_energy_delta = IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT, \
- .bf_roaming_state = IWL_BF_ROAMING_STATE_DEFAULT, \
- .bf_temperature_delta = IWL_BF_TEMPERATURE_DELTA_DEFAULT, \
- .bf_debug_flag = IWL_BF_DEBUG_FLAG_DEFAULT, \
- .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT), \
+#define IWL_BF_CMD_CONFIG_DEFAULTS \
+ .bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA_DEFAULT), \
+ .bf_roaming_energy_delta = \
+ cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT), \
+ .bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE_DEFAULT), \
+ .bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD_DEFAULT), \
+ .bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER_DEFAULT), \
+ .bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER_DEFAULT), \
+ .bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG_DEFAULT), \
+ .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT), \
.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_DEFAULT)
#endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 365095a0c3b3..83cb9b992ea4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -137,6 +137,8 @@ struct iwl_ssid_ie {
*@SCAN_FLAGS_DELAYED_SCAN_LOWBAND:
*@SCAN_FLAGS_DELAYED_SCAN_HIGHBAND:
*@SCAN_FLAGS_FRAGMENTED_SCAN:
+ *@SCAN_FLAGS_PASSIVE2ACTIVE: use active scan on channels that were active
+ * in the past hour, even if they are marked as passive.
*/
enum iwl_scan_flags {
SCAN_FLAGS_PERIODIC_SCAN = BIT(0),
@@ -144,6 +146,7 @@ enum iwl_scan_flags {
SCAN_FLAGS_DELAYED_SCAN_LOWBAND = BIT(2),
SCAN_FLAGS_DELAYED_SCAN_HIGHBAND = BIT(3),
SCAN_FLAGS_FRAGMENTED_SCAN = BIT(4),
+ SCAN_FLAGS_PASSIVE2ACTIVE = BIT(5),
};
/**
@@ -178,7 +181,7 @@ enum iwl_scan_type {
* @quiet_time: in msecs, dwell this time for active scan on quiet channels
* @quiet_plcp_th: quiet PLCP threshold (channel is quiet if less than
* this number of packets were received (typically 1)
- * @passive2active: is auto switching from passive to active allowed (0 or 1)
+ * @passive2active: is auto switching from passive to active during scan allowed
* @rxchain_sel_flags: RXON_RX_CHAIN_*
* @max_out_time: in usecs, max out of serving channel time
* @suspend_time: how long to pause scan when returning to service channel:
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
index 700cce731770..d606197bde8f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
@@ -91,7 +91,6 @@
* @TX_CMD_FLG_RESP_TO_DRV: zero this if the response should go only to FW
* @TX_CMD_FLG_CCMP_AGG: this frame uses CCMP for aggregation acceleration
* @TX_CMD_FLG_TKIP_MIC_DONE: FW already performed TKIP MIC calculation
- * @TX_CMD_FLG_CTS_ONLY: send CTS only, no data after that
* @TX_CMD_FLG_DUR: disable duration overwriting used in PS-Poll Assoc-id
* @TX_CMD_FLG_FW_DROP: FW should mark frame to be dropped
* @TX_CMD_FLG_EXEC_PAPD: execute PAPD
@@ -120,7 +119,6 @@ enum iwl_tx_flags {
TX_CMD_FLG_RESP_TO_DRV = BIT(21),
TX_CMD_FLG_CCMP_AGG = BIT(22),
TX_CMD_FLG_TKIP_MIC_DONE = BIT(23),
- TX_CMD_FLG_CTS_ONLY = BIT(24),
TX_CMD_FLG_DUR = BIT(25),
TX_CMD_FLG_FW_DROP = BIT(26),
TX_CMD_FLG_EXEC_PAPD = BIT(27),
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index cbfb3beae783..66264cc5a016 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -136,7 +136,7 @@ enum {
CALIB_RES_NOTIF_PHY_DB = 0x6b,
/* PHY_DB_CMD = 0x6c, */
- /* Power */
+ /* Power - legacy power table command */
POWER_TABLE_CMD = 0x77,
/* Thermal Throttling*/
@@ -159,6 +159,7 @@ enum {
TX_ANT_CONFIGURATION_CMD = 0x98,
BT_CONFIG = 0x9b,
STATISTICS_NOTIFICATION = 0x9d,
+ REDUCE_TX_POWER_CMD = 0x9f,
/* RF-KILL commands and notifications */
CARD_STATE_CMD = 0xa0,
@@ -166,6 +167,9 @@ enum {
MISSED_BEACONS_NOTIFICATION = 0xa2,
+ /* Power - new power table command */
+ MAC_PM_POWER_TABLE = 0xa9,
+
REPLY_RX_PHY_CMD = 0xc0,
REPLY_RX_MPDU_CMD = 0xc1,
BA_NOTIF = 0xc5,
@@ -223,6 +227,19 @@ struct iwl_tx_ant_cfg_cmd {
__le32 valid;
} __packed;
+/**
+ * struct iwl_reduce_tx_power_cmd - TX power reduction command
+ * REDUCE_TX_POWER_CMD = 0x9f
+ * @flags: (reserved for future implementation)
+ * @mac_context_id: id of the mac ctx for which we are reducing TX power.
+ * @pwr_restriction: TX power restriction in dBms.
+ */
+struct iwl_reduce_tx_power_cmd {
+ u8 flags;
+ u8 mac_context_id;
+ __le16 pwr_restriction;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_1 */
+
/*
* Calibration control struct.
* Sent as part of the phy configuration command.
@@ -482,71 +499,199 @@ enum iwl_time_event_type {
TE_MAX
}; /* MAC_EVENT_TYPE_API_E_VER_1 */
+
+
+/* Time event - defines for command API v1 */
+
+/*
+ * @TE_V1_FRAG_NONE: fragmentation of the time event is NOT allowed.
+ * @TE_V1_FRAG_SINGLE: fragmentation of the time event is allowed, but only
+ * the first fragment is scheduled.
+ * @TE_V1_FRAG_DUAL: fragmentation of the time event is allowed, but only
+ * the first 2 fragments are scheduled.
+ * @TE_V1_FRAG_ENDLESS: fragmentation of the time event is allowed, and any
+ * number of fragments are valid.
+ *
+ * Other than the constant defined above, specifying a fragmentation value 'x'
+ * means that the event can be fragmented but only the first 'x' will be
+ * scheduled.
+ */
+enum {
+ TE_V1_FRAG_NONE = 0,
+ TE_V1_FRAG_SINGLE = 1,
+ TE_V1_FRAG_DUAL = 2,
+ TE_V1_FRAG_ENDLESS = 0xffffffff
+};
+
+/* If a Time Event can be fragmented, this is the max number of fragments */
+#define TE_V1_FRAG_MAX_MSK 0x0fffffff
+/* Repeat the time event endlessly (until removed) */
+#define TE_V1_REPEAT_ENDLESS 0xffffffff
+/* If a Time Event has bounded repetitions, this is the maximal value */
+#define TE_V1_REPEAT_MAX_MSK_V1 0x0fffffff
+
/* Time Event dependencies: none, on another TE, or in a specific time */
enum {
- TE_INDEPENDENT = 0,
- TE_DEP_OTHER = 1,
- TE_DEP_TSF = 2,
- TE_EVENT_SOCIOPATHIC = 4,
+ TE_V1_INDEPENDENT = 0,
+ TE_V1_DEP_OTHER = BIT(0),
+ TE_V1_DEP_TSF = BIT(1),
+ TE_V1_EVENT_SOCIOPATHIC = BIT(2),
}; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */
+
/*
+ * @TE_V1_NOTIF_NONE: no notifications
+ * @TE_V1_NOTIF_HOST_EVENT_START: request/receive notification on event start
+ * @TE_V1_NOTIF_HOST_EVENT_END: request/receive notification on event end
+ * @TE_V1_NOTIF_INTERNAL_EVENT_START: internal FW use
+ * @TE_V1_NOTIF_INTERNAL_EVENT_END: internal FW use.
+ * @TE_V1_NOTIF_HOST_FRAG_START: request/receive notification on frag start
+ * @TE_V1_NOTIF_HOST_FRAG_END: request/receive notification on frag end
+ * @TE_V1_NOTIF_INTERNAL_FRAG_START: internal FW use.
+ * @TE_V1_NOTIF_INTERNAL_FRAG_END: internal FW use.
+ *
* Supported Time event notifications configuration.
* A notification (both event and fragment) includes a status indicating whether
* the FW was able to schedule the event or not. For fragment start/end
* notification the status is always success. There is no start/end fragment
* notification for monolithic events.
- *
- * @TE_NOTIF_NONE: no notifications
- * @TE_NOTIF_HOST_EVENT_START: request/receive notification on event start
- * @TE_NOTIF_HOST_EVENT_END:request/receive notification on event end
- * @TE_NOTIF_INTERNAL_EVENT_START: internal FW use
- * @TE_NOTIF_INTERNAL_EVENT_END: internal FW use.
- * @TE_NOTIF_HOST_FRAG_START: request/receive notification on frag start
- * @TE_NOTIF_HOST_FRAG_END:request/receive notification on frag end
- * @TE_NOTIF_INTERNAL_FRAG_START: internal FW use.
- * @TE_NOTIF_INTERNAL_FRAG_END: internal FW use.
*/
enum {
- TE_NOTIF_NONE = 0,
- TE_NOTIF_HOST_EVENT_START = 0x1,
- TE_NOTIF_HOST_EVENT_END = 0x2,
- TE_NOTIF_INTERNAL_EVENT_START = 0x4,
- TE_NOTIF_INTERNAL_EVENT_END = 0x8,
- TE_NOTIF_HOST_FRAG_START = 0x10,
- TE_NOTIF_HOST_FRAG_END = 0x20,
- TE_NOTIF_INTERNAL_FRAG_START = 0x40,
- TE_NOTIF_INTERNAL_FRAG_END = 0x80
+ TE_V1_NOTIF_NONE = 0,
+ TE_V1_NOTIF_HOST_EVENT_START = BIT(0),
+ TE_V1_NOTIF_HOST_EVENT_END = BIT(1),
+ TE_V1_NOTIF_INTERNAL_EVENT_START = BIT(2),
+ TE_V1_NOTIF_INTERNAL_EVENT_END = BIT(3),
+ TE_V1_NOTIF_HOST_FRAG_START = BIT(4),
+ TE_V1_NOTIF_HOST_FRAG_END = BIT(5),
+ TE_V1_NOTIF_INTERNAL_FRAG_START = BIT(6),
+ TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7),
}; /* MAC_EVENT_ACTION_API_E_VER_2 */
+
+/**
+ * struct iwl_time_event_cmd_v1 - configuring Time Events
+ * with struct MAC_TIME_EVENT_DATA_API_S_VER_1 (see also
+ * version 2; which version is used is determined by IWL_UCODE_TLV_FLAGS)
+ * ( TIME_EVENT_CMD = 0x29 )
+ * @id_and_color: ID and color of the relevant MAC
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @id: this field has two meanings, depending on the action:
+ * If the action is ADD, then it means the type of event to add.
+ * For all other actions it is the unique event ID assigned when the
+ * event was added by the FW.
+ * @apply_time: When to start the Time Event (in GP2)
+ * @max_delay: maximum delay to event's start (apply time), in TU
+ * @depends_on: the unique ID of the event we depend on (if any)
+ * @interval: interval between repetitions, in TU
+ * @interval_reciprocal: 2^32 / interval
+ * @duration: duration of event in TU
+ * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS
+ * @dep_policy: one of TE_V1_INDEPENDENT, TE_V1_DEP_OTHER, TE_V1_DEP_TSF
+ * and TE_V1_EVENT_SOCIOPATHIC
+ * @is_present: 0 or 1, are we present or absent during the Time Event
+ * @max_frags: maximal number of fragments the Time Event can be divided to
+ * @notify: notifications using TE_V1_NOTIF_* (whom to notify when)
+ */
+struct iwl_time_event_cmd_v1 {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ __le32 id_and_color;
+ __le32 action;
+ __le32 id;
+ /* MAC_TIME_EVENT_DATA_API_S_VER_1 */
+ __le32 apply_time;
+ __le32 max_delay;
+ __le32 dep_policy;
+ __le32 depends_on;
+ __le32 is_present;
+ __le32 max_frags;
+ __le32 interval;
+ __le32 interval_reciprocal;
+ __le32 duration;
+ __le32 repeat;
+ __le32 notify;
+} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_1 */
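The interval_reciprocal field lets the firmware replace a division by a multiplication; per the kernel-doc above it holds roughly 2^32 / interval. One plausible way to derive it while staying in 32-bit arithmetic, as a sketch (the driver's real helper is not shown in this hunk):

/* Sketch: ~2^32/interval without 64-bit math; 0 maps to 0. */
static __le32 te_interval_reciprocal(u32 interval)
{
	return cpu_to_le32(interval ? 0xFFFFFFFFu / interval : 0);
}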
+
+
+/* Time event - defines for command API v2 */
+
/*
- * @TE_FRAG_NONE: fragmentation of the time event is NOT allowed.
- * @TE_FRAG_SINGLE: fragmentation of the time event is allowed, but only
+ * @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed.
+ * @TE_V2_FRAG_SINGLE: fragmentation of the time event is allowed, but only
* the first fragment is scheduled.
- * @TE_FRAG_DUAL: fragmentation of the time event is allowed, but only
+ * @TE_V2_FRAG_DUAL: fragmentation of the time event is allowed, but only
* the first 2 fragments are scheduled.
- * @TE_FRAG_ENDLESS: fragmentation of the time event is allowed, and any number
- * of fragments are valid.
+ * @TE_V2_FRAG_ENDLESS: fragmentation of the time event is allowed, and any
+ * number of fragments are valid.
*
* Other than the constant defined above, specifying a fragmentation value 'x'
* means that the event can be fragmented but only the first 'x' will be
* scheduled.
*/
enum {
- TE_FRAG_NONE = 0,
- TE_FRAG_SINGLE = 1,
- TE_FRAG_DUAL = 2,
- TE_FRAG_ENDLESS = 0xffffffff
+ TE_V2_FRAG_NONE = 0,
+ TE_V2_FRAG_SINGLE = 1,
+ TE_V2_FRAG_DUAL = 2,
+ TE_V2_FRAG_MAX = 0xfe,
+ TE_V2_FRAG_ENDLESS = 0xff
};
/* Repeat the time event endlessly (until removed) */
-#define TE_REPEAT_ENDLESS (0xffffffff)
+#define TE_V2_REPEAT_ENDLESS 0xff
/* If a Time Event has bounded repetitions, this is the maximal value */
-#define TE_REPEAT_MAX_MSK (0x0fffffff)
-/* If a Time Event can be fragmented, this is the max number of fragments */
-#define TE_FRAG_MAX_MSK (0x0fffffff)
+#define TE_V2_REPEAT_MAX 0xfe
+
+#define TE_V2_PLACEMENT_POS 12
+#define TE_V2_ABSENCE_POS 15
+
+/* Time event policy values (for time event cmd api v2)
+ * A notification (both event and fragment) includes a status indicating whether
+ * the FW was able to schedule the event or not. For fragment start/end
+ * notification the status is always success. There is no start/end fragment
+ * notification for monolithic events.
+ *
+ * @TE_V2_DEFAULT_POLICY: independent, social, present, unnoticeable
+ * @TE_V2_NOTIF_HOST_EVENT_START: request/receive notification on event start
+ * @TE_V2_NOTIF_HOST_EVENT_END: request/receive notification on event end
+ * @TE_V2_NOTIF_INTERNAL_EVENT_START: internal FW use
+ * @TE_V2_NOTIF_INTERNAL_EVENT_END: internal FW use.
+ * @TE_V2_NOTIF_HOST_FRAG_START: request/receive notification on frag start
+ * @TE_V2_NOTIF_HOST_FRAG_END: request/receive notification on frag end
+ * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use.
+ * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use.
+ * @TE_V2_DEP_OTHER: depends on another time event
+ * @TE_V2_DEP_TSF: depends on a specific time
+ * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of the same MAC
+ * @TE_V2_ABSENCE: are we present or absent during the Time Event.
+ */
+enum {
+ TE_V2_DEFAULT_POLICY = 0x0,
+
+ /* notifications (event start/stop, fragment start/stop) */
+ TE_V2_NOTIF_HOST_EVENT_START = BIT(0),
+ TE_V2_NOTIF_HOST_EVENT_END = BIT(1),
+ TE_V2_NOTIF_INTERNAL_EVENT_START = BIT(2),
+ TE_V2_NOTIF_INTERNAL_EVENT_END = BIT(3),
+
+ TE_V2_NOTIF_HOST_FRAG_START = BIT(4),
+ TE_V2_NOTIF_HOST_FRAG_END = BIT(5),
+ TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6),
+ TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7),
+
+ TE_V2_NOTIF_MSK = 0xff,
+
+ /* placement characteristics */
+ TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS),
+ TE_V2_DEP_TSF = BIT(TE_V2_PLACEMENT_POS + 1),
+ TE_V2_EVENT_SOCIOPATHIC = BIT(TE_V2_PLACEMENT_POS + 2),
+
+ /* are we present or absent during the Time Event. */
+ TE_V2_ABSENCE = BIT(TE_V2_ABSENCE_POS),
+};
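A v2 policy word thus packs the notification requests into the low byte (TE_V2_NOTIF_MSK) and the placement and absence characteristics into bits 12 and up, all within the single __le16 policy field of the v2 command below. For example, a TSF-dependent event that wants host notifications on start and end could be encoded as in this sketch:

/* Sketch: composing a v2 time-event policy value. */
__le16 policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
			    TE_V2_NOTIF_HOST_EVENT_END |
			    TE_V2_DEP_TSF);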
/**
- * struct iwl_time_event_cmd - configuring Time Events
+ * struct iwl_time_event_cmd_v2 - configuring Time Events
+ * with struct MAC_TIME_EVENT_DATA_API_S_VER_2 (see also
+ * version 1; which version is used is determined by IWL_UCODE_TLV_FLAGS)
* ( TIME_EVENT_CMD = 0x29 )
* @id_and_color: ID and color of the relevant MAC
* @action: action to perform, one of FW_CTXT_ACTION_*
@@ -558,32 +703,30 @@ enum {
* @max_delay: maximum delay to event's start (apply time), in TU
* @depends_on: the unique ID of the event we depend on (if any)
* @interval: interval between repetitions, in TU
- * @interval_reciprocal: 2^32 / interval
* @duration: duration of event in TU
* @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS
- * @dep_policy: one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF
- * @is_present: 0 or 1, are we present or absent during the Time Event
* @max_frags: maximal number of fragments the Time Event can be divided to
- * @notify: notifications using TE_NOTIF_* (whom to notify when)
+ * @policy: defines whether uCode shall notify the host or other uCode modules
+ * on event and/or fragment start and/or end, and how the event is
+ * placed: built from the TE_V2_NOTIF_* notification bits, the
+ * placement bits TE_V2_DEP_OTHER, TE_V2_DEP_TSF and
+ * TE_V2_EVENT_SOCIOPATHIC, and TE_V2_ABSENCE
*/
-struct iwl_time_event_cmd {
+struct iwl_time_event_cmd_v2 {
/* COMMON_INDEX_HDR_API_S_VER_1 */
__le32 id_and_color;
__le32 action;
__le32 id;
- /* MAC_TIME_EVENT_DATA_API_S_VER_1 */
+ /* MAC_TIME_EVENT_DATA_API_S_VER_2 */
__le32 apply_time;
__le32 max_delay;
- __le32 dep_policy;
__le32 depends_on;
- __le32 is_present;
- __le32 max_frags;
__le32 interval;
- __le32 interval_reciprocal;
__le32 duration;
- __le32 repeat;
- __le32 notify;
-} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_1 */
+ u8 repeat;
+ u8 max_frags;
+ __le16 policy;
+} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_2 */
/**
* struct iwl_time_event_resp - response structure to iwl_time_event_cmd
@@ -765,6 +908,14 @@ struct iwl_phy_context_cmd {
} __packed; /* PHY_CONTEXT_CMD_API_VER_1 */
#define IWL_RX_INFO_PHY_CNT 8
+#define IWL_RX_INFO_ENERGY_ANT_ABC_IDX 1
+#define IWL_RX_INFO_ENERGY_ANT_A_MSK 0x000000ff
+#define IWL_RX_INFO_ENERGY_ANT_B_MSK 0x0000ff00
+#define IWL_RX_INFO_ENERGY_ANT_C_MSK 0x00ff0000
+#define IWL_RX_INFO_ENERGY_ANT_A_POS 0
+#define IWL_RX_INFO_ENERGY_ANT_B_POS 8
+#define IWL_RX_INFO_ENERGY_ANT_C_POS 16
+
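The new energy constants describe a packed layout in word 1 of the RX phy info: one byte per antenna, with antenna A in the low byte. Reading a single antenna's energy is then a mask and a shift, as in this sketch (the phy_info array access is an assumption about the surrounding RX code):

/* Sketch: pull antenna A's energy out of the packed phy-info word. */
static u8 rx_energy_ant_a(const __le32 *phy_info)
{
	u32 w = le32_to_cpu(phy_info[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]);

	return (w & IWL_RX_INFO_ENERGY_ANT_A_MSK) >>
	       IWL_RX_INFO_ENERGY_ANT_A_POS;
}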
#define IWL_RX_INFO_AGC_IDX 1
#define IWL_RX_INFO_RSSI_AB_IDX 2
#define IWL_OFDM_AGC_A_MSK 0x0000007f
@@ -1170,7 +1321,7 @@ struct mvm_statistics_general {
struct mvm_statistics_general_common common;
__le32 beacon_filtered;
__le32 missed_beacons;
- __s8 beacon_filter_everage_energy;
+ __s8 beacon_filter_average_energy;
__s8 beacon_filter_reason;
__s8 beacon_filter_current_energy;
__s8 beacon_filter_reserved;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index cd7c0032cc58..c76299a3a1e0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -78,22 +78,6 @@
#define UCODE_VALID_OK cpu_to_le32(0x1)
-/* Default calibration values for WkP - set to INIT image w/o running */
-static const u8 wkp_calib_values_rx_iq_skew[] = { 0x00, 0x00, 0x01, 0x00 };
-static const u8 wkp_calib_values_tx_iq_skew[] = { 0x01, 0x00, 0x00, 0x00 };
-
-struct iwl_calib_default_data {
- u16 size;
- void *data;
-};
-
-#define CALIB_SIZE_N_DATA(_buf) {.size = sizeof(_buf), .data = &_buf}
-
-static const struct iwl_calib_default_data wkp_calib_default_data[12] = {
- [9] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq_skew),
- [11] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq_skew),
-};
-
struct iwl_mvm_alive_data {
bool valid;
u32 scd_base_addr;
@@ -248,40 +232,6 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
-static int iwl_set_default_calibrations(struct iwl_mvm *mvm)
-{
- u8 cmd_raw[16]; /* holds the variable size commands */
- struct iwl_set_calib_default_cmd *cmd =
- (struct iwl_set_calib_default_cmd *)cmd_raw;
- int ret, i;
-
- /* Setting default values for calibrations we don't run */
- for (i = 0; i < ARRAY_SIZE(wkp_calib_default_data); i++) {
- u16 cmd_len;
-
- if (wkp_calib_default_data[i].size == 0)
- continue;
-
- memset(cmd_raw, 0, sizeof(cmd_raw));
- cmd_len = wkp_calib_default_data[i].size + sizeof(cmd);
- cmd->calib_index = cpu_to_le16(i);
- cmd->length = cpu_to_le16(wkp_calib_default_data[i].size);
- if (WARN_ONCE(cmd_len > sizeof(cmd_raw),
- "Need to enlarge cmd_raw to %d\n", cmd_len))
- break;
- memcpy(cmd->data, wkp_calib_default_data[i].data,
- wkp_calib_default_data[i].size);
- ret = iwl_mvm_send_cmd_pdu(mvm, SET_CALIB_DEFAULT_CMD, 0,
- sizeof(*cmd) +
- wkp_calib_default_data[i].size,
- cmd);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
struct iwl_notification_wait calib_wait;
@@ -342,11 +292,6 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
if (ret)
goto error;
- /* need to set default values */
- ret = iwl_set_default_calibrations(mvm);
- if (ret)
- goto error;
-
/*
* Send phy configurations command to init uCode
* to start the 16.0 uCode init image internal calibrations.
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 94aae9c8562c..5fe23a5ea9b6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -264,7 +264,8 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
return 0;
/* Therefore, in recovery, we can't get here */
- WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
+ if (WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
+ return -EBUSY;
mvmvif->id = find_first_bit(data.available_mac_ids,
NUM_MAC_INDEX_DRIVER);
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index f19baf0dea6b..9833cdf6177c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -153,7 +153,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
IEEE80211_HW_AMPDU_AGGREGATION |
IEEE80211_HW_TIMING_BEACON_ONLY |
- IEEE80211_HW_CONNECTION_MONITOR;
+ IEEE80211_HW_CONNECTION_MONITOR |
+ IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
+ IEEE80211_HW_SUPPORTS_STATIC_SMPS |
+ IEEE80211_HW_SUPPORTS_UAPSD;
hw->queues = IWL_MVM_FIRST_AGG_QUEUE;
hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
@@ -188,6 +191,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->max_remain_on_channel_duration = 10000;
hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
+ hw->uapsd_queues = IWL_UAPSD_AC_INFO;
+ hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
/* Extract MAC address */
memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
@@ -506,7 +511,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
- /* Allocate resources for the MAC context, and add it the the fw */
+ /* Allocate resources for the MAC context, and add it to the fw */
ret = iwl_mvm_mac_ctxt_init(mvm, vif);
if (ret)
goto out_unlock;
@@ -552,6 +557,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
goto out_release;
}
+ iwl_mvm_vif_dbgfs_register(mvm, vif);
goto out_unlock;
}
@@ -566,16 +572,18 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
iwl_mvm_power_update_mode(mvm, vif);
/* beacon filtering */
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+ if (ret)
+ goto out_remove_mac;
+
if (!mvm->bf_allowed_vif &&
- vif->type == NL80211_IFTYPE_STATION && !vif->p2p){
+ vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
+ mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED){
mvm->bf_allowed_vif = mvmvif;
- vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI;
}
- ret = iwl_mvm_disable_beacon_filter(mvm, vif);
- if (ret)
- goto out_release;
-
/*
* P2P_DEVICE interface does not have a channel context assigned to it,
* so a dedicated PHY context is allocated to it and the corresponding
@@ -586,7 +594,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
if (!mvmvif->phy_ctxt) {
ret = -ENOSPC;
- goto out_remove_mac;
+ goto out_free_bf;
}
iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
@@ -610,6 +618,12 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
iwl_mvm_binding_remove_vif(mvm, vif);
out_unref_phy:
iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
+ out_free_bf:
+ if (mvm->bf_allowed_vif == mvmvif) {
+ mvm->bf_allowed_vif = NULL;
+ vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI);
+ }
out_remove_mac:
mvmvif->phy_ctxt = NULL;
iwl_mvm_mac_ctxt_remove(mvm, vif);
@@ -674,7 +688,8 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
if (mvm->bf_allowed_vif == mvmvif) {
mvm->bf_allowed_vif = NULL;
- vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
+ vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI);
}
iwl_mvm_vif_dbgfs_clean(mvm, vif);
@@ -719,6 +734,20 @@ out_release:
mutex_unlock(&mvm->mutex);
}
+static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ s8 tx_power)
+{
+ /* FW is in charge of regulatory enforcement */
+ struct iwl_reduce_tx_power_cmd reduce_txpwr_cmd = {
+ .mac_context_id = iwl_mvm_vif_from_mac80211(vif)->id,
+ .pwr_restriction = cpu_to_le16(tx_power),
+ };
+
+ return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, CMD_SYNC,
+ sizeof(reduce_txpwr_cmd),
+ &reduce_txpwr_cmd);
+}
+
static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
{
return 0;
@@ -766,7 +795,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IWL_ERR(mvm, "failed to update quotas\n");
return;
}
- iwl_mvm_bt_coex_vif_assoc(mvm, vif);
iwl_mvm_configure_mcast_filter(mvm, vif);
} else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
/* remove AP station now that the MAC is unassoc */
@@ -779,9 +807,19 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
if (ret)
IWL_ERR(mvm, "failed to update quotas\n");
}
- ret = iwl_mvm_power_update_mode(mvm, vif);
- if (ret)
- IWL_ERR(mvm, "failed to update power mode\n");
+
+ /* reset rssi values */
+ mvmvif->bf_data.ave_beacon_signal = 0;
+
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD)) {
+ /* Workaround for FW bug, otherwise FW disables device
+ * power save upon disassociation
+ */
+ ret = iwl_mvm_power_update_mode(mvm, vif);
+ if (ret)
+ IWL_ERR(mvm, "failed to update power mode\n");
+ }
+ iwl_mvm_bt_coex_vif_assoc(mvm, vif);
} else if (changes & BSS_CHANGED_BEACON_INFO) {
/*
* We received a beacon _after_ association so
@@ -789,11 +827,25 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
*/
iwl_mvm_remove_time_event(mvm, mvmvif,
&mvmvif->time_event_data);
- } else if (changes & BSS_CHANGED_PS) {
+ } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_QOS)) {
ret = iwl_mvm_power_update_mode(mvm, vif);
if (ret)
IWL_ERR(mvm, "failed to update power mode\n");
}
+ if (changes & BSS_CHANGED_TXPOWER) {
+ IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
+ bss_conf->txpower);
+ iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
+ }
+
+ if (changes & BSS_CHANGED_CQM) {
+ IWL_DEBUG_MAC80211(mvm, "cqm info_changed");
+ /* reset cqm events tracking */
+ mvmvif->bf_data.last_cqm_event = 0;
+ ret = iwl_mvm_update_beacon_filter(mvm, vif);
+ if (ret)
+ IWL_ERR(mvm, "failed to update CQM thresholds\n");
+ }
}
static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
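
The reworked error paths in iwl_mvm_mac_add_interface() above are the classic
unwind ladder: once the vif claims mvm->bf_allowed_vif, any later failure must
release that claim before the MAC context is removed, hence the new
out_free_bf label between out_unref_phy and out_remove_mac. A self-contained
sketch of the pattern (all names illustrative):

    #include <stdio.h>

    struct ctx { int mac_up, bf_claimed; };

    static int  init_mac(struct ctx *c)   { c->mac_up = 1; return 0; }
    static void remove_mac(struct ctx *c) { c->mac_up = 0; }
    static int  claim_bf(struct ctx *c)   { c->bf_claimed = 1; return 0; }
    static void release_bf(struct ctx *c) { c->bf_claimed = 0; }
    static int  bind_phy(struct ctx *c)   { (void)c; return -1; /* force failure */ }

    /* Each label undoes exactly the steps that succeeded, in reverse
     * order; adding a resource means adding one label in the right spot. */
    static int add_interface(struct ctx *c)
    {
            int ret = init_mac(c);
            if (ret)
                    return ret;
            ret = claim_bf(c);
            if (ret)
                    goto out_remove_mac;
            ret = bind_phy(c);
            if (ret)
                    goto out_free_bf;
            return 0;
    out_free_bf:
            release_bf(c);
    out_remove_mac:
            remove_mac(c);
            return ret;
    }

    int main(void)
    {
            struct ctx c = { 0 };
            printf("add_interface: %d (bf_claimed=%d, mac_up=%d)\n",
                   add_interface(&c), c.bf_claimed, c.mac_up);
            return 0;
    }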
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 420e82d379d9..b0389279cc1e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -76,6 +76,7 @@
#include "iwl-trans.h"
#include "sta.h"
#include "fw-api.h"
+#include "constants.h"
#define IWL_INVALID_MAC80211_QUEUE 0xff
#define IWL_MVM_MAX_ADDRESSES 5
@@ -91,6 +92,9 @@ enum iwl_mvm_tx_fifo {
};
extern struct ieee80211_ops iwl_mvm_hw_ops;
+extern const struct iwl_mvm_power_ops pm_legacy_ops;
+extern const struct iwl_mvm_power_ops pm_mac_ops;
+
/**
* struct iwl_mvm_mod_params - module parameters for iwlmvm
* @init_dbg: if true, then the NIC won't be stopped if the INIT fw asserted.
@@ -149,6 +153,22 @@ enum iwl_power_scheme {
};
#define IWL_CONN_MAX_LISTEN_INTERVAL 70
+#define IWL_UAPSD_AC_INFO (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_2
+
+struct iwl_mvm_power_ops {
+ int (*power_update_mode)(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+ int (*power_disable)(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ int (*power_dbgfs_read)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ char *buf, int bufsz);
+#endif
+};
+
#ifdef CONFIG_IWLWIFI_DEBUGFS
enum iwl_dbgfs_pm_mask {
@@ -160,10 +180,11 @@ enum iwl_dbgfs_pm_mask {
MVM_DEBUGFS_PM_DISABLE_POWER_OFF = BIT(5),
MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
+ MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8),
};
struct iwl_dbgfs_pm {
- u8 keep_alive_seconds;
+ u16 keep_alive_seconds;
u32 rx_data_timeout;
u32 tx_data_timeout;
bool skip_over_dtim;
@@ -171,6 +192,7 @@ struct iwl_dbgfs_pm {
bool disable_power_off;
bool lprx_ena;
u32 lprx_rssi_threshold;
+ bool snooze_ena;
int mask;
};
@@ -180,24 +202,28 @@ enum iwl_dbgfs_bf_mask {
MVM_DEBUGFS_BF_ENERGY_DELTA = BIT(0),
MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA = BIT(1),
MVM_DEBUGFS_BF_ROAMING_STATE = BIT(2),
- MVM_DEBUGFS_BF_TEMPERATURE_DELTA = BIT(3),
- MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(4),
- MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(5),
- MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(6),
- MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(7),
- MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(8),
+ MVM_DEBUGFS_BF_TEMP_THRESHOLD = BIT(3),
+ MVM_DEBUGFS_BF_TEMP_FAST_FILTER = BIT(4),
+ MVM_DEBUGFS_BF_TEMP_SLOW_FILTER = BIT(5),
+ MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(6),
+ MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(7),
+ MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(8),
+ MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(9),
+ MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(10),
};
struct iwl_dbgfs_bf {
- u8 bf_energy_delta;
- u8 bf_roaming_energy_delta;
- u8 bf_roaming_state;
- u8 bf_temperature_delta;
- u8 bf_enable_beacon_filter;
- u8 bf_debug_flag;
+ u32 bf_energy_delta;
+ u32 bf_roaming_energy_delta;
+ u32 bf_roaming_state;
+ u32 bf_temp_threshold;
+ u32 bf_temp_fast_filter;
+ u32 bf_temp_slow_filter;
+ u32 bf_enable_beacon_filter;
+ u32 bf_debug_flag;
u32 bf_escape_timer;
u32 ba_escape_timer;
- u8 ba_enable_beacon_abort;
+ u32 ba_enable_beacon_abort;
int mask;
};
#endif
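
The iwl_dbgfs_bf fields widen from u8 to u32 above to match the firmware's
beacon filter command, whose members the power.c hunks below now treat as
32-bit little-endian; that is why every assignment and debug print there
gains a cpu_to_le32()/le32_to_cpu() conversion. A userspace sketch of the
boundary conversion (the kernel provides these helpers; the struct subset
here is illustrative):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t le32;  /* stand-in for the kernel's __le32 */

    /* Identity on little-endian hosts, byte swap on big-endian ones. */
    static le32 cpu_to_le32(uint32_t v)
    {
            const union { uint16_t u; uint8_t b[2]; } probe = { .u = 1 };

            if (probe.b[0])
                    return v;
            return (v >> 24) | ((v >> 8) & 0xff00) |
                   ((v << 8) & 0xff0000) | (v << 24);
    }

    struct beacon_filter_cmd {      /* illustrative subset */
            le32 bf_enable_beacon_filter;
            le32 bf_energy_delta;
    };

    int main(void)
    {
            struct beacon_filter_cmd cmd = {
                    .bf_enable_beacon_filter = cpu_to_le32(1),
                    .bf_energy_delta         = cpu_to_le32(5),
            };

            printf("0x%08x 0x%08x\n",
                   (unsigned)cmd.bf_enable_beacon_filter,
                   (unsigned)cmd.bf_energy_delta);
            return 0;
    }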
@@ -209,6 +235,21 @@ enum iwl_mvm_smps_type_request {
};
/**
+* struct iwl_mvm_vif_bf_data - beacon filtering related data
+* @bf_enabled: indicates if beacon filtering is enabled
+* @ba_enabled: indicates if beacon abort is enabled
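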
+* @last_beacon_signal: last beacon rssi signal in dbm
+* @ave_beacon_signal: average beacon signal
+* @last_cqm_event: rssi of the last cqm event
+*/
+struct iwl_mvm_vif_bf_data {
+ bool bf_enabled;
+ bool ba_enabled;
+ s8 ave_beacon_signal;
+ s8 last_cqm_event;
+};
+
+/**
* struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context
* @id: between 0 and 3
* @color: to solve races upon MAC addition and removal
@@ -233,8 +274,7 @@ struct iwl_mvm_vif {
bool uploaded;
bool ap_active;
bool monitor_active;
- /* indicate whether beacon filtering is enabled */
- bool bf_enabled;
+ struct iwl_mvm_vif_bf_data bf_data;
u32 ap_beacon_time;
@@ -268,7 +308,7 @@ struct iwl_mvm_vif {
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 addresses for WoWLAN */
- struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS];
+ struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX];
int num_target_ipv6_addrs;
#endif
#endif
@@ -402,6 +442,8 @@ struct iwl_mvm {
struct iwl_notif_wait_data notif_wait;
+ struct mvm_statistics_rx rx_stats;
+
unsigned long transport_queue_stop;
u8 queue_to_mac80211[IWL_MAX_HW_QUEUES];
atomic_t queue_stop_count[IWL_MAX_HW_QUEUES];
@@ -459,6 +501,9 @@ struct iwl_mvm {
*/
u8 vif_count;
+ /* -1 for always, 0 for never, >0 for that many times */
+ s8 restart_fw;
+
struct led_classdev led;
struct ieee80211_vif *p2p_device_vif;
@@ -482,6 +527,8 @@ struct iwl_mvm {
/* Thermal Throttling and CTkill */
struct iwl_mvm_tt_mgmt thermal_throttle;
s32 temperature; /* Celsius */
+
+ const struct iwl_mvm_power_ops *pm_ops;
};
/* Extract MVM priv from op_mode and _hw */
@@ -525,6 +572,7 @@ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
enum ieee80211_band band);
u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
+void iwl_mvm_dump_sram(struct iwl_mvm *mvm);
u8 first_antenna(u8 mask);
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
@@ -660,10 +708,26 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
u8 flags, bool init);
/* power management */
-int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
-int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
-void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct iwl_powertable_cmd *cmd);
+static inline int iwl_mvm_power_update_mode(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ return mvm->pm_ops->power_update_mode(mvm, vif);
+}
+
+static inline int iwl_mvm_power_disable(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ return mvm->pm_ops->power_disable(mvm, vif);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static inline int iwl_mvm_power_dbgfs_read(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ char *buf, int bufsz)
+{
+ return mvm->pm_ops->power_dbgfs_read(mvm, vif, buf, bufsz);
+}
+#endif
int iwl_mvm_leds_init(struct iwl_mvm *mvm);
void iwl_mvm_leds_exit(struct iwl_mvm *mvm);
@@ -707,6 +771,12 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
+int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
+ struct iwl_beacon_filter_cmd *cmd);
+int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, bool enable);
+int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
/* SMPS */
void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
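
The pm_legacy_ops/pm_mac_ops externs plus the static inline wrappers above
form a small const-vtable dispatch: ops.c (below) picks one table at start-up
based on the uAPSD ucode capability, and every call site stays branch-free. A
compact model of the pattern (all names illustrative):

    #include <stdio.h>

    struct mvm;

    struct power_ops {
            int (*update_mode)(struct mvm *mvm);
            int (*disable)(struct mvm *mvm);
    };

    struct mvm {
            const struct power_ops *pm_ops;
            int uapsd_capable;      /* stands in for the ucode TLV flag */
    };

    static int legacy_update(struct mvm *m)  { (void)m; puts("POWER_TABLE_CMD");    return 0; }
    static int legacy_disable(struct mvm *m) { (void)m; return 0; }
    static int mac_update(struct mvm *m)     { (void)m; puts("MAC_PM_POWER_TABLE"); return 0; }
    static int mac_disable(struct mvm *m)    { (void)m; return 0; }

    static const struct power_ops pm_legacy_ops = { legacy_update, legacy_disable };
    static const struct power_ops pm_mac_ops    = { mac_update,    mac_disable    };

    /* Thin wrapper, mirroring the static inlines in mvm.h. */
    static inline int power_update_mode(struct mvm *mvm)
    {
            return mvm->pm_ops->update_mode(mvm);
    }

    int main(void)
    {
            struct mvm mvm = { .uapsd_capable = 1 };

            /* mirrors the branch added to iwl_op_mode_mvm_start() */
            mvm.pm_ops = mvm.uapsd_capable ? &pm_mac_ops : &pm_legacy_ops;
            return power_update_mode(&mvm);
    }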
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index af79a14063a9..2fcc8ef88a68 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -275,6 +275,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(BEACON_NOTIFICATION),
CMD(BEACON_TEMPLATE_CMD),
CMD(STATISTICS_NOTIFICATION),
+ CMD(REDUCE_TX_POWER_CMD),
CMD(TX_ANT_CONFIGURATION_CMD),
CMD(D3_CONFIG_CMD),
CMD(PROT_OFFLOAD_CONFIG_CMD),
@@ -301,6 +302,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(MCAST_FILTER_CMD),
CMD(REPLY_BEACON_FILTERING_CMD),
CMD(REPLY_THERMAL_MNG_BACKOFF),
+ CMD(MAC_PM_POWER_TABLE),
};
#undef CMD
@@ -340,6 +342,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->fw = fw;
mvm->hw = hw;
+ mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0;
+
mutex_init(&mvm->mutex);
spin_lock_init(&mvm->async_handlers_lock);
INIT_LIST_HEAD(&mvm->time_event_list);
@@ -431,6 +435,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (err)
goto out_unregister;
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD)
+ mvm->pm_ops = &pm_mac_ops;
+ else
+ mvm->pm_ops = &pm_legacy_ops;
+
+ memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
+
return op_mode;
out_unregister:
@@ -638,6 +649,22 @@ static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
ieee80211_free_txskb(mvm->hw, skb);
}
+struct iwl_mvm_reprobe {
+ struct device *dev;
+ struct work_struct work;
+};
+
+static void iwl_mvm_reprobe_wk(struct work_struct *wk)
+{
+ struct iwl_mvm_reprobe *reprobe;
+
+ reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
+ if (device_reprobe(reprobe->dev))
+ dev_err(reprobe->dev, "reprobe failed!\n");
+ kfree(reprobe);
+ module_put(THIS_MODULE);
+}
+
static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
{
iwl_abort_notification_waits(&mvm->notif_wait);
@@ -649,9 +676,30 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
* can't recover this since we're already half suspended.
*/
if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
- IWL_ERR(mvm, "Firmware error during reconfiguration! Abort.\n");
- } else if (mvm->cur_ucode == IWL_UCODE_REGULAR &&
- iwlwifi_mod_params.restart_fw) {
+ struct iwl_mvm_reprobe *reprobe;
+
+ IWL_ERR(mvm,
+ "Firmware error during reconfiguration - reprobe!\n");
+
+ /*
+ * get a module reference to avoid doing this while unloading
+ * anyway and to avoid scheduling a work with code that's
+ * being removed.
+ */
+ if (!try_module_get(THIS_MODULE)) {
+ IWL_ERR(mvm, "Module is being unloaded - abort\n");
+ return;
+ }
+
+ reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC);
+ if (!reprobe) {
+ module_put(THIS_MODULE);
+ return;
+ }
+ reprobe->dev = mvm->trans->dev;
+ INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
+ schedule_work(&reprobe->work);
+ } else if (mvm->cur_ucode == IWL_UCODE_REGULAR && mvm->restart_fw) {
/*
* This is a bit racy, but worst case we tell mac80211 about
* a stopped/aborted (sched) scan when that was already done
@@ -669,6 +717,8 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
break;
}
+ if (mvm->restart_fw > 0)
+ mvm->restart_fw--;
ieee80211_restart_hw(mvm->hw);
}
}
@@ -678,6 +728,8 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
iwl_mvm_dump_nic_error_log(mvm);
+ if (!mvm->restart_fw)
+ iwl_mvm_dump_sram(mvm);
iwl_mvm_nic_restart(mvm);
}
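
The new mvm->restart_fw field (seeded from the old boolean module parameter
in ops.c above) encodes a three-way policy, as its comment says: -1 restarts
on every firmware error, 0 never restarts and instead dumps SRAM for
post-mortem analysis, and a positive value is a budget decremented on each
restart. A sketch of that countdown (illustrative):

    #include <stdio.h>

    static int restart_fw = 3;      /* e.g. taken from a module parameter */

    static int should_restart(void)
    {
            if (!restart_fw)
                    return 0;       /* 0: never restart */
            if (restart_fw > 0)
                    restart_fw--;   /* N > 0: budget of N restarts */
            return 1;               /* -1 falls through: always restart */
    }

    int main(void)
    {
            for (int i = 0; i < 5; i++)
                    printf("fw error %d -> restart=%d (budget now %d)\n",
                           i, should_restart(), restart_fw);
            return 0;
    }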
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index e7ca965a89b8..21407a353a3b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -75,8 +75,8 @@
#define POWER_KEEP_ALIVE_PERIOD_SEC 25
-static int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
- struct iwl_beacon_filter_cmd *cmd)
+int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
+ struct iwl_beacon_filter_cmd *cmd)
{
int ret;
@@ -85,69 +85,110 @@ static int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
if (!ret) {
IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n",
- cmd->ba_enable_beacon_abort);
+ le32_to_cpu(cmd->ba_enable_beacon_abort));
IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n",
- cmd->ba_escape_timer);
+ le32_to_cpu(cmd->ba_escape_timer));
IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n",
- cmd->bf_debug_flag);
+ le32_to_cpu(cmd->bf_debug_flag));
IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n",
- cmd->bf_enable_beacon_filter);
+ le32_to_cpu(cmd->bf_enable_beacon_filter));
IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n",
- cmd->bf_energy_delta);
+ le32_to_cpu(cmd->bf_energy_delta));
IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n",
- cmd->bf_escape_timer);
+ le32_to_cpu(cmd->bf_escape_timer));
IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n",
- cmd->bf_roaming_energy_delta);
+ le32_to_cpu(cmd->bf_roaming_energy_delta));
IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n",
- cmd->bf_roaming_state);
- IWL_DEBUG_POWER(mvm, "bf_temperature_delta is: %d\n",
- cmd->bf_temperature_delta);
+ le32_to_cpu(cmd->bf_roaming_state));
+ IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n",
+ le32_to_cpu(cmd->bf_temp_threshold));
+ IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n",
+ le32_to_cpu(cmd->bf_temp_fast_filter));
+ IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n",
+ le32_to_cpu(cmd->bf_temp_slow_filter));
}
return ret;
}
-static int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif, bool enable)
+static
+void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_beacon_filter_cmd *cmd)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif->bss_conf.cqm_rssi_thold) {
+ cmd->bf_energy_delta =
+ cpu_to_le32(vif->bss_conf.cqm_rssi_hyst);
+ /* fw uses an absolute value for this */
+ cmd->bf_roaming_state =
+ cpu_to_le32(-vif->bss_conf.cqm_rssi_thold);
+ }
+ cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->bf_data.ba_enabled);
+}
+
+int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, bool enable)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_beacon_filter_cmd cmd = {
IWL_BF_CMD_CONFIG_DEFAULTS,
- .bf_enable_beacon_filter = 1,
- .ba_enable_beacon_abort = enable,
+ .bf_enable_beacon_filter = cpu_to_le32(1),
+ .ba_enable_beacon_abort = cpu_to_le32(enable),
};
- if (!mvmvif->bf_enabled)
+ if (!mvmvif->bf_data.bf_enabled)
return 0;
+ if (mvm->cur_ucode == IWL_UCODE_WOWLAN)
+ cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
+
+ mvmvif->bf_data.ba_enabled = enable;
+ iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
}
static void iwl_mvm_power_log(struct iwl_mvm *mvm,
- struct iwl_powertable_cmd *cmd)
+ struct iwl_mac_power_cmd *cmd)
{
IWL_DEBUG_POWER(mvm,
- "Sending power table command for power level %d, flags = 0x%X\n",
- iwlmvm_mod_params.power_scheme,
+ "Sending power table command on mac id 0x%X for power level %d, flags = 0x%X\n",
+ cmd->id_and_color, iwlmvm_mod_params.power_scheme,
le16_to_cpu(cmd->flags));
- IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n", cmd->keep_alive_seconds);
-
- if (cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
- IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
- le32_to_cpu(cmd->rx_data_timeout));
- IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
- le32_to_cpu(cmd->tx_data_timeout));
- if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
- IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
- le32_to_cpu(cmd->skip_dtim_periods));
- if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
- IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
- le32_to_cpu(cmd->lprx_rssi_threshold));
+ IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n",
+ le16_to_cpu(cmd->keep_alive_seconds));
+
+ if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) {
+ IWL_DEBUG_POWER(mvm, "Disable power management\n");
+ return;
+ }
+
+ IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
+ le32_to_cpu(cmd->rx_data_timeout));
+ IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
+ le32_to_cpu(cmd->tx_data_timeout));
+ if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
+ IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
+ cmd->skip_dtim_periods);
+ if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
+ IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
+ cmd->lprx_rssi_threshold);
+ if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
+ IWL_DEBUG_POWER(mvm, "uAPSD enabled\n");
+ IWL_DEBUG_POWER(mvm, "Rx timeout (uAPSD) = %u usec\n",
+ le32_to_cpu(cmd->rx_data_timeout_uapsd));
+ IWL_DEBUG_POWER(mvm, "Tx timeout (uAPSD) = %u usec\n",
+ le32_to_cpu(cmd->tx_data_timeout_uapsd));
+ IWL_DEBUG_POWER(mvm, "QNDP TID = %d\n", cmd->qndp_tid);
+ IWL_DEBUG_POWER(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags);
+ IWL_DEBUG_POWER(mvm, "Max SP = %d\n", cmd->uapsd_max_sp);
}
}
-void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct iwl_powertable_cmd *cmd)
+static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_power_cmd *cmd)
{
struct ieee80211_hw *hw = mvm->hw;
struct ieee80211_chanctx_conf *chanctx_conf;
@@ -157,20 +198,29 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool radar_detect = false;
struct iwl_mvm_vif *mvmvif __maybe_unused =
iwl_mvm_vif_from_mac80211(vif);
+ enum ieee80211_ac_numbers ac;
+ bool tid_found = false;
+
+ cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color));
+ dtimper = hw->conf.ps_dtim_period ?: 1;
/*
* Regardless of power management state the driver must set
* keep alive period. FW will use it for sending keep alive NDPs
- * immediately after association.
+ * immediately after association. Check that keep alive period
+ * is at least 3 * DTIM
*/
- cmd->keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC;
+ dtimper_msec = dtimper * vif->bss_conf.beacon_int;
+ keep_alive = max_t(int, 3 * dtimper_msec,
+ MSEC_PER_SEC * POWER_KEEP_ALIVE_PERIOD_SEC);
+ keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
+ cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
return;
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
- if (!vif->bss_conf.assoc)
- cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
@@ -186,12 +236,9 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
(vif->bss_conf.beacon_rate->bitrate == 10 ||
vif->bss_conf.beacon_rate->bitrate == 60)) {
cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
- cmd->lprx_rssi_threshold =
- cpu_to_le32(POWER_LPRX_RSSI_THRESHOLD);
+ cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD;
}
- dtimper = hw->conf.ps_dtim_period ?: 1;
-
/* Check if radar detection is required on current channel */
rcu_read_lock();
chanctx_conf = rcu_dereference(vif->chanctx_conf);
@@ -207,27 +254,82 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
(iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
- cmd->skip_dtim_periods = cpu_to_le32(3);
+ cmd->skip_dtim_periods = 3;
}
- /* Check that keep alive period is at least 3 * DTIM */
- dtimper_msec = dtimper * vif->bss_conf.beacon_int;
- keep_alive = max_t(int, 3 * dtimper_msec,
- MSEC_PER_SEC * cmd->keep_alive_seconds);
- keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
- cmd->keep_alive_seconds = keep_alive;
-
if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
- cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
- cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
+ cmd->rx_data_timeout =
+ cpu_to_le32(IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout =
+ cpu_to_le32(IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT);
} else {
- cmd->rx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
- cmd->tx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
+ cmd->rx_data_timeout =
+ cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout =
+ cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
+ }
+
+ for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) {
+ if (!mvmvif->queue_params[ac].uapsd)
+ continue;
+
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
+ cmd->uapsd_ac_flags |= BIT(ac);
+
+ /* QNDP TID - the highest TID with no admission control */
+ if (!tid_found && !mvmvif->queue_params[ac].acm) {
+ tid_found = true;
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ cmd->qndp_tid = 6;
+ break;
+ case IEEE80211_AC_VI:
+ cmd->qndp_tid = 5;
+ break;
+ case IEEE80211_AC_BE:
+ cmd->qndp_tid = 0;
+ break;
+ case IEEE80211_AC_BK:
+ cmd->qndp_tid = 1;
+ break;
+ }
+ }
+ }
+
+ if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
+ cmd->rx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
+
+ if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
+ BIT(IEEE80211_AC_VI) |
+ BIT(IEEE80211_AC_BE) |
+ BIT(IEEE80211_AC_BK))) {
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
+ cmd->snooze_interval =
+ cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL);
+ cmd->snooze_window =
+ (mvm->cur_ucode == IWL_UCODE_WOWLAN) ?
+ cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) :
+ cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
+ }
+
+ cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP;
+ cmd->heavy_tx_thld_packets =
+ IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
+ cmd->heavy_rx_thld_packets =
+ IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
+ cmd->heavy_tx_thld_percentage =
+ IWL_MVM_PS_HEAVY_TX_THLD_PERCENT;
+ cmd->heavy_rx_thld_percentage =
+ IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
- cmd->keep_alive_seconds = mvmvif->dbgfs_pm.keep_alive_seconds;
+ cmd->keep_alive_seconds =
+ cpu_to_le16(mvmvif->dbgfs_pm.keep_alive_seconds);
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) {
if (mvmvif->dbgfs_pm.skip_over_dtim)
cmd->flags |=
@@ -243,8 +345,7 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cmd->tx_data_timeout =
cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout);
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS)
- cmd->skip_dtim_periods =
- cpu_to_le32(mvmvif->dbgfs_pm.skip_dtim_periods);
+ cmd->skip_dtim_periods = mvmvif->dbgfs_pm.skip_dtim_periods;
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) {
if (mvmvif->dbgfs_pm.lprx_ena)
cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
@@ -252,16 +353,24 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK);
}
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD)
- cmd->lprx_rssi_threshold =
- cpu_to_le32(mvmvif->dbgfs_pm.lprx_rssi_threshold);
+ cmd->lprx_rssi_threshold = mvmvif->dbgfs_pm.lprx_rssi_threshold;
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SNOOZE_ENABLE) {
+ if (mvmvif->dbgfs_pm.snooze_ena)
+ cmd->flags |=
+ cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
+ else
+ cmd->flags &=
+ cpu_to_le16(~POWER_FLAGS_SNOOZE_ENA_MSK);
+ }
#endif /* CONFIG_IWLWIFI_DEBUGFS */
}
-int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+static int iwl_mvm_power_mac_update_mode(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
{
int ret;
bool ba_enable;
- struct iwl_powertable_cmd cmd = {};
+ struct iwl_mac_power_cmd cmd = {};
if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
@@ -280,7 +389,7 @@ int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_power_build_cmd(mvm, vif, &cmd);
iwl_mvm_power_log(mvm, &cmd);
- ret = iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
+ ret = iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_SYNC,
sizeof(cmd), &cmd);
if (ret)
return ret;
@@ -291,15 +400,19 @@ int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable);
}
-int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+static int iwl_mvm_power_mac_disable(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
{
- struct iwl_powertable_cmd cmd = {};
+ struct iwl_mac_power_cmd cmd = {};
struct iwl_mvm_vif *mvmvif __maybe_unused =
iwl_mvm_vif_from_mac80211(vif);
if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
+ cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color));
+
if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
@@ -310,11 +423,98 @@ int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
#endif
iwl_mvm_power_log(mvm, &cmd);
- return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC,
+ return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_ASYNC,
sizeof(cmd), &cmd);
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
+static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, char *buf,
+ int bufsz)
+{
+ struct iwl_mac_power_cmd cmd = {};
+ int pos = 0;
+
+ iwl_mvm_power_build_cmd(mvm, vif, &cmd);
+
+ pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
+ (cmd.flags &
+ cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
+ 0 : 1);
+ pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
+ iwlmvm_mod_params.power_scheme);
+ pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
+ le16_to_cpu(cmd.flags));
+ pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
+ le16_to_cpu(cmd.keep_alive_seconds));
+
+ if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
+ pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
+ (cmd.flags &
+ cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ?
+ 1 : 0);
+ pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
+ cmd.skip_dtim_periods);
+ if (!(cmd.flags &
+ cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) {
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "rx_data_timeout = %d\n",
+ le32_to_cpu(cmd.rx_data_timeout));
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "tx_data_timeout = %d\n",
+ le32_to_cpu(cmd.tx_data_timeout));
+ }
+ if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "lprx_rssi_threshold = %d\n",
+ cmd.lprx_rssi_threshold);
+ if (cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
+ pos +=
+ scnprintf(buf+pos, bufsz-pos,
+ "rx_data_timeout_uapsd = %d\n",
+ le32_to_cpu(cmd.rx_data_timeout_uapsd));
+ pos +=
+ scnprintf(buf+pos, bufsz-pos,
+ "tx_data_timeout_uapsd = %d\n",
+ le32_to_cpu(cmd.tx_data_timeout_uapsd));
+ pos += scnprintf(buf+pos, bufsz-pos, "qndp_tid = %d\n",
+ cmd.qndp_tid);
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "uapsd_ac_flags = 0x%x\n",
+ cmd.uapsd_ac_flags);
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "uapsd_max_sp = %d\n",
+ cmd.uapsd_max_sp);
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "heavy_tx_thld_packets = %d\n",
+ cmd.heavy_tx_thld_packets);
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "heavy_rx_thld_packets = %d\n",
+ cmd.heavy_rx_thld_packets);
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "heavy_tx_thld_percentage = %d\n",
+ cmd.heavy_tx_thld_percentage);
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "heavy_rx_thld_percentage = %d\n",
+ cmd.heavy_rx_thld_percentage);
+ pos +=
+ scnprintf(buf+pos, bufsz-pos, "snooze_enable = %d\n",
+ (cmd.flags &
+ cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) ?
+ 1 : 0);
+ }
+ if (cmd.flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "snooze_interval = %d\n",
+ cmd.snooze_interval);
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "snooze_window = %d\n",
+ cmd.snooze_window);
+ }
+ }
+ return pos;
+}
+
void
iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
struct iwl_beacon_filter_cmd *cmd)
@@ -323,22 +523,30 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf;
if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ENERGY_DELTA)
- cmd->bf_energy_delta = dbgfs_bf->bf_energy_delta;
+ cmd->bf_energy_delta = cpu_to_le32(dbgfs_bf->bf_energy_delta);
if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA)
cmd->bf_roaming_energy_delta =
- dbgfs_bf->bf_roaming_energy_delta;
+ cpu_to_le32(dbgfs_bf->bf_roaming_energy_delta);
if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_STATE)
- cmd->bf_roaming_state = dbgfs_bf->bf_roaming_state;
- if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMPERATURE_DELTA)
- cmd->bf_temperature_delta = dbgfs_bf->bf_temperature_delta;
+ cmd->bf_roaming_state = cpu_to_le32(dbgfs_bf->bf_roaming_state);
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_THRESHOLD)
+ cmd->bf_temp_threshold =
+ cpu_to_le32(dbgfs_bf->bf_temp_threshold);
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_FAST_FILTER)
+ cmd->bf_temp_fast_filter =
+ cpu_to_le32(dbgfs_bf->bf_temp_fast_filter);
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_SLOW_FILTER)
+ cmd->bf_temp_slow_filter =
+ cpu_to_le32(dbgfs_bf->bf_temp_slow_filter);
if (dbgfs_bf->mask & MVM_DEBUGFS_BF_DEBUG_FLAG)
- cmd->bf_debug_flag = dbgfs_bf->bf_debug_flag;
+ cmd->bf_debug_flag = cpu_to_le32(dbgfs_bf->bf_debug_flag);
if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ESCAPE_TIMER)
cmd->bf_escape_timer = cpu_to_le32(dbgfs_bf->bf_escape_timer);
if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ESCAPE_TIMER)
cmd->ba_escape_timer = cpu_to_le32(dbgfs_bf->ba_escape_timer);
if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT)
- cmd->ba_enable_beacon_abort = dbgfs_bf->ba_enable_beacon_abort;
+ cmd->ba_enable_beacon_abort =
+ cpu_to_le32(dbgfs_bf->ba_enable_beacon_abort);
}
#endif
@@ -348,7 +556,7 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_beacon_filter_cmd cmd = {
IWL_BF_CMD_CONFIG_DEFAULTS,
- .bf_enable_beacon_filter = 1,
+ .bf_enable_beacon_filter = cpu_to_le32(1),
};
int ret;
@@ -356,11 +564,12 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
+ iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
if (!ret)
- mvmvif->bf_enabled = true;
+ mvmvif->bf_data.bf_enabled = true;
return ret;
}
@@ -372,13 +581,33 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
- if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED) ||
+ vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
if (!ret)
- mvmvif->bf_enabled = false;
+ mvmvif->bf_data.bf_enabled = false;
return ret;
}
+
+int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (!mvmvif->bf_data.bf_enabled)
+ return 0;
+
+ return iwl_mvm_enable_beacon_filter(mvm, vif);
+}
+
+const struct iwl_mvm_power_ops pm_mac_ops = {
+ .power_update_mode = iwl_mvm_power_mac_update_mode,
+ .power_disable = iwl_mvm_power_mac_disable,
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ .power_dbgfs_read = iwl_mvm_power_mac_dbgfs_read,
+#endif
+};
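
One detail worth noting in the power.c rework above: the keep-alive
computation now runs at the top of iwl_mvm_power_build_cmd(), folding the
"at least 3 * DTIM" floor into the initial value instead of adjusting it
later. A worked sketch of the arithmetic (the beacon interval is treated as
milliseconds here for simplicity):

    #include <stdio.h>

    #define MSEC_PER_SEC 1000
    #define POWER_KEEP_ALIVE_PERIOD_SEC 25
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    /* At least three DTIM intervals, never below the 25 s default,
     * rounded up to whole seconds. */
    static unsigned keep_alive_secs(unsigned dtim_period, unsigned beacon_int_ms)
    {
            unsigned dtimper_msec = dtim_period * beacon_int_ms;
            unsigned ka = MAX(3 * dtimper_msec,
                              MSEC_PER_SEC * POWER_KEEP_ALIVE_PERIOD_SEC);

            return DIV_ROUND_UP(ka, MSEC_PER_SEC);
    }

    int main(void)
    {
            printf("%u\n", keep_alive_secs(3, 102));   /* short DTIM -> 25 */
            printf("%u\n", keep_alive_secs(10, 1000)); /* long DTIM  -> 30 */
            return 0;
    }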
diff --git a/drivers/net/wireless/iwlwifi/mvm/power_legacy.c b/drivers/net/wireless/iwlwifi/mvm/power_legacy.c
new file mode 100644
index 000000000000..2ce79bad5845
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/power_legacy.c
@@ -0,0 +1,319 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+#include <net/mac80211.h>
+
+#include "iwl-debug.h"
+#include "mvm.h"
+#include "iwl-modparams.h"
+#include "fw-api-power.h"
+
+#define POWER_KEEP_ALIVE_PERIOD_SEC 25
+
+static void iwl_mvm_power_log(struct iwl_mvm *mvm,
+ struct iwl_powertable_cmd *cmd)
+{
+ IWL_DEBUG_POWER(mvm,
+ "Sending power table command for power level %d, flags = 0x%X\n",
+ iwlmvm_mod_params.power_scheme,
+ le16_to_cpu(cmd->flags));
+ IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n", cmd->keep_alive_seconds);
+
+ if (cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
+ IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
+ le32_to_cpu(cmd->rx_data_timeout));
+ IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
+ le32_to_cpu(cmd->tx_data_timeout));
+ if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
+ IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
+ le32_to_cpu(cmd->skip_dtim_periods));
+ if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
+ IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
+ le32_to_cpu(cmd->lprx_rssi_threshold));
+ }
+}
+
+static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_powertable_cmd *cmd)
+{
+ struct ieee80211_hw *hw = mvm->hw;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ struct ieee80211_channel *chan;
+ int dtimper, dtimper_msec;
+ int keep_alive;
+ bool radar_detect = false;
+ struct iwl_mvm_vif *mvmvif __maybe_unused =
+ iwl_mvm_vif_from_mac80211(vif);
+
+ /*
+ * Regardless of power management state the driver must set
+ * keep alive period. FW will use it for sending keep alive NDPs
+ * immediately after association.
+ */
+ cmd->keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC;
+
+ if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
+ return;
+
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
+ if (!vif->bss_conf.assoc)
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
+ mvmvif->dbgfs_pm.disable_power_off)
+ cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
+#endif
+ if (!vif->bss_conf.ps)
+ return;
+
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
+
+ if (vif->bss_conf.beacon_rate &&
+ (vif->bss_conf.beacon_rate->bitrate == 10 ||
+ vif->bss_conf.beacon_rate->bitrate == 60)) {
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
+ cmd->lprx_rssi_threshold =
+ cpu_to_le32(POWER_LPRX_RSSI_THRESHOLD);
+ }
+
+ dtimper = hw->conf.ps_dtim_period ?: 1;
+
+ /* Check if radar detection is required on current channel */
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ WARN_ON(!chanctx_conf);
+ if (chanctx_conf) {
+ chan = chanctx_conf->def.chan;
+ radar_detect = chan->flags & IEEE80211_CHAN_RADAR;
+ }
+ rcu_read_unlock();
+
+ /* Check skip over DTIM conditions */
+ if (!radar_detect && (dtimper <= 10) &&
+ (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
+ mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+ cmd->skip_dtim_periods = cpu_to_le32(3);
+ }
+
+ /* Check that keep alive period is at least 3 * DTIM */
+ dtimper_msec = dtimper * vif->bss_conf.beacon_int;
+ keep_alive = max_t(int, 3 * dtimper_msec,
+ MSEC_PER_SEC * cmd->keep_alive_seconds);
+ keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
+ cmd->keep_alive_seconds = keep_alive;
+
+ if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
+ cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
+ cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
+ } else {
+ cmd->rx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
+ cmd->tx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
+ }
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
+ cmd->keep_alive_seconds = mvmvif->dbgfs_pm.keep_alive_seconds;
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) {
+ if (mvmvif->dbgfs_pm.skip_over_dtim)
+ cmd->flags |=
+ cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+ else
+ cmd->flags &=
+ cpu_to_le16(~POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+ }
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_RX_DATA_TIMEOUT)
+ cmd->rx_data_timeout =
+ cpu_to_le32(mvmvif->dbgfs_pm.rx_data_timeout);
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_TX_DATA_TIMEOUT)
+ cmd->tx_data_timeout =
+ cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout);
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS)
+ cmd->skip_dtim_periods =
+ cpu_to_le32(mvmvif->dbgfs_pm.skip_dtim_periods);
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) {
+ if (mvmvif->dbgfs_pm.lprx_ena)
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
+ else
+ cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK);
+ }
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD)
+ cmd->lprx_rssi_threshold =
+ cpu_to_le32(mvmvif->dbgfs_pm.lprx_rssi_threshold);
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
+}
+
+static int iwl_mvm_power_legacy_update_mode(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ int ret;
+ bool ba_enable;
+ struct iwl_powertable_cmd cmd = {};
+
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ return 0;
+
+ /*
+	 * TODO: The following vif_count verification is a temporary condition.
+	 * Avoid power mode update if more than one interface is currently
+	 * active. Remove this condition when the FW supports power management
+ * on multiple MACs.
+ */
+ IWL_DEBUG_POWER(mvm, "Currently %d interfaces active\n",
+ mvm->vif_count);
+ if (mvm->vif_count > 1)
+ return 0;
+
+ iwl_mvm_power_build_cmd(mvm, vif, &cmd);
+ iwl_mvm_power_log(mvm, &cmd);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
+ sizeof(cmd), &cmd);
+ if (ret)
+ return ret;
+
+ ba_enable = !!(cmd.flags &
+ cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
+
+ return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable);
+}
+
+static int iwl_mvm_power_legacy_disable(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_powertable_cmd cmd = {};
+ struct iwl_mvm_vif *mvmvif __maybe_unused =
+ iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ return 0;
+
+ if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
+ cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
+ mvmvif->dbgfs_pm.disable_power_off)
+ cmd.flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
+#endif
+ iwl_mvm_power_log(mvm, &cmd);
+
+ return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC,
+ sizeof(cmd), &cmd);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static int iwl_mvm_power_legacy_dbgfs_read(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, char *buf,
+ int bufsz)
+{
+ struct iwl_powertable_cmd cmd = {};
+ int pos = 0;
+
+ iwl_mvm_power_build_cmd(mvm, vif, &cmd);
+
+ pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
+ (cmd.flags &
+ cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
+ 0 : 1);
+ pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
+ le32_to_cpu(cmd.skip_dtim_periods));
+ pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
+ iwlmvm_mod_params.power_scheme);
+ pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
+ le16_to_cpu(cmd.flags));
+ pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
+ cmd.keep_alive_seconds);
+
+ if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
+ pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
+ (cmd.flags &
+ cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ?
+ 1 : 0);
+ pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
+ le32_to_cpu(cmd.rx_data_timeout));
+ pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
+ le32_to_cpu(cmd.tx_data_timeout));
+ if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "lprx_rssi_threshold = %d\n",
+ le32_to_cpu(cmd.lprx_rssi_threshold));
+ }
+ return pos;
+}
+#endif
+
+const struct iwl_mvm_power_ops pm_legacy_ops = {
+ .power_update_mode = iwl_mvm_power_legacy_update_mode,
+ .power_disable = iwl_mvm_power_legacy_disable,
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ .power_dbgfs_read = iwl_mvm_power_legacy_dbgfs_read,
+#endif
+};
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
index 29d49cf0fdb2..5c6ae16ec52b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/iwlwifi/mvm/quota.c
@@ -131,23 +131,22 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
{
- struct iwl_time_quota_cmd cmd;
- int i, idx, ret, num_active_bindings, quota, quota_rem;
+ struct iwl_time_quota_cmd cmd = {};
+ int i, idx, ret, num_active_macs, quota, quota_rem;
struct iwl_mvm_quota_iterator_data data = {
.n_interfaces = {},
.colors = { -1, -1, -1, -1 },
.new_vif = newvif,
};
+ lockdep_assert_held(&mvm->mutex);
+
/* update all upon completion */
if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
return 0;
- BUILD_BUG_ON(data.colors[MAX_BINDINGS - 1] != -1);
-
- lockdep_assert_held(&mvm->mutex);
-
- memset(&cmd, 0, sizeof(cmd));
+ /* iterator data above must match */
+ BUILD_BUG_ON(MAX_BINDINGS != 4);
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
@@ -162,18 +161,17 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
* IWL_MVM_MAX_QUOTA fragments. Divide these fragments
* equally between all the bindings that require quota
*/
- num_active_bindings = 0;
+ num_active_macs = 0;
for (i = 0; i < MAX_BINDINGS; i++) {
cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
- if (data.n_interfaces[i] > 0)
- num_active_bindings++;
+ num_active_macs += data.n_interfaces[i];
}
quota = 0;
quota_rem = 0;
- if (num_active_bindings) {
- quota = IWL_MVM_MAX_QUOTA / num_active_bindings;
- quota_rem = IWL_MVM_MAX_QUOTA % num_active_bindings;
+ if (num_active_macs) {
+ quota = IWL_MVM_MAX_QUOTA / num_active_macs;
+ quota_rem = IWL_MVM_MAX_QUOTA % num_active_macs;
}
for (idx = 0, i = 0; i < MAX_BINDINGS; i++) {
@@ -187,7 +185,8 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
cmd.quotas[idx].quota = cpu_to_le32(0);
cmd.quotas[idx].max_duration = cpu_to_le32(0);
} else {
- cmd.quotas[idx].quota = cpu_to_le32(quota);
+ cmd.quotas[idx].quota =
+ cpu_to_le32(quota * data.n_interfaces[i]);
cmd.quotas[idx].max_duration =
cpu_to_le32(IWL_MVM_MAX_QUOTA);
}
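
The quota rework above changes the unit of fairness: the available fragments
are divided per active MAC rather than per binding, and each binding then
receives quota * n_interfaces[i], so a binding carrying two interfaces gets
twice the share. A standalone sketch of the split (constants illustrative):

    #include <stdio.h>

    #define MAX_BINDINGS 4
    #define MAX_QUOTA 128           /* illustrative stand-in */

    int main(void)
    {
            int n_interfaces[MAX_BINDINGS] = { 2, 1, 0, 0 };
            int num_active_macs = 0, quota = 0;

            for (int i = 0; i < MAX_BINDINGS; i++)
                    num_active_macs += n_interfaces[i];
            if (num_active_macs)
                    quota = MAX_QUOTA / num_active_macs;

            for (int i = 0; i < MAX_BINDINGS; i++)
                    if (n_interfaces[i])
                            printf("binding %d: quota %d\n",
                                   i, quota * n_interfaces[i]);
            return 0;
    }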
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index b328a988c130..4ffaa3fa153f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -56,61 +56,61 @@
#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ)
static u8 rs_ht_to_legacy[] = {
- IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
- IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
- IWL_RATE_6M_INDEX,
- IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
- IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
- IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
- IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
+ [IWL_RATE_1M_INDEX] = IWL_RATE_6M_INDEX,
+ [IWL_RATE_2M_INDEX] = IWL_RATE_6M_INDEX,
+ [IWL_RATE_5M_INDEX] = IWL_RATE_6M_INDEX,
+ [IWL_RATE_11M_INDEX] = IWL_RATE_6M_INDEX,
+ [IWL_RATE_6M_INDEX] = IWL_RATE_6M_INDEX,
+ [IWL_RATE_9M_INDEX] = IWL_RATE_6M_INDEX,
+ [IWL_RATE_12M_INDEX] = IWL_RATE_9M_INDEX,
+ [IWL_RATE_18M_INDEX] = IWL_RATE_12M_INDEX,
+ [IWL_RATE_24M_INDEX] = IWL_RATE_18M_INDEX,
+ [IWL_RATE_36M_INDEX] = IWL_RATE_24M_INDEX,
+ [IWL_RATE_48M_INDEX] = IWL_RATE_36M_INDEX,
+ [IWL_RATE_54M_INDEX] = IWL_RATE_48M_INDEX,
+ [IWL_RATE_60M_INDEX] = IWL_RATE_54M_INDEX,
};
static const u8 ant_toggle_lookup[] = {
- /*ANT_NONE -> */ ANT_NONE,
- /*ANT_A -> */ ANT_B,
- /*ANT_B -> */ ANT_C,
- /*ANT_AB -> */ ANT_BC,
- /*ANT_C -> */ ANT_A,
- /*ANT_AC -> */ ANT_AB,
- /*ANT_BC -> */ ANT_AC,
- /*ANT_ABC -> */ ANT_ABC,
+ [ANT_NONE] = ANT_NONE,
+ [ANT_A] = ANT_B,
+ [ANT_B] = ANT_C,
+ [ANT_AB] = ANT_BC,
+ [ANT_C] = ANT_A,
+ [ANT_AC] = ANT_AB,
+ [ANT_BC] = ANT_AC,
+ [ANT_ABC] = ANT_ABC,
};
-#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
+#define IWL_DECLARE_RATE_INFO(r, s, rp, rn) \
[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
IWL_RATE_SISO_##s##M_PLCP, \
IWL_RATE_MIMO2_##s##M_PLCP,\
- IWL_RATE_MIMO3_##s##M_PLCP,\
- IWL_RATE_##r##M_IEEE, \
- IWL_RATE_##ip##M_INDEX, \
- IWL_RATE_##in##M_INDEX, \
IWL_RATE_##rp##M_INDEX, \
- IWL_RATE_##rn##M_INDEX, \
- IWL_RATE_##pp##M_INDEX, \
- IWL_RATE_##np##M_INDEX }
+ IWL_RATE_##rn##M_INDEX }
/*
* Parameter order:
- * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
+ * rate, ht rate, prev rate, next rate
*
* If there isn't a valid next or previous rate then INV is used which
* maps to IWL_RATE_INVALID
*
*/
static const struct iwl_rs_rate_info iwl_rates[IWL_RATE_COUNT] = {
- IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
- IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
- IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
- IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
- IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
- IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
- IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
- IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
- IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
- IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
- IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
- IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
- IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
+ IWL_DECLARE_RATE_INFO(1, INV, INV, 2), /* 1mbps */
+ IWL_DECLARE_RATE_INFO(2, INV, 1, 5), /* 2mbps */
+ IWL_DECLARE_RATE_INFO(5, INV, 2, 11), /*5.5mbps */
+ IWL_DECLARE_RATE_INFO(11, INV, 9, 12), /* 11mbps */
+ IWL_DECLARE_RATE_INFO(6, 6, 5, 11), /* 6mbps */
+ IWL_DECLARE_RATE_INFO(9, 6, 6, 11), /* 9mbps */
+ IWL_DECLARE_RATE_INFO(12, 12, 11, 18), /* 12mbps */
+ IWL_DECLARE_RATE_INFO(18, 18, 12, 24), /* 18mbps */
+ IWL_DECLARE_RATE_INFO(24, 24, 18, 36), /* 24mbps */
+ IWL_DECLARE_RATE_INFO(36, 36, 24, 48), /* 36mbps */
+ IWL_DECLARE_RATE_INFO(48, 48, 36, 54), /* 48mbps */
+ IWL_DECLARE_RATE_INFO(54, 54, 48, INV), /* 54mbps */
+ IWL_DECLARE_RATE_INFO(60, 60, 48, INV), /* 60mbps */
/* FIXME:RS: ^^ should be INV (legacy) */
};
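
The rs_ht_to_legacy and ant_toggle_lookup rewrites above trade positional
entries for C99 designated initializers, so each mapping is pinned to the
index it serves instead of depending on the order of the rate enum. A tiny
illustration of the difference:

    #include <stdio.h>

    enum { RATE_6M, RATE_9M, RATE_12M, RATE_18M, NUM_RATES };

    /* Positional: silently wrong if the enum order ever changes. */
    static const int downgrade_pos[NUM_RATES] = {
            RATE_6M, RATE_6M, RATE_9M, RATE_12M,
    };

    /* Designated: each mapping names the index it belongs to. */
    static const int downgrade_named[NUM_RATES] = {
            [RATE_6M]  = RATE_6M,
            [RATE_9M]  = RATE_6M,
            [RATE_12M] = RATE_9M,
            [RATE_18M] = RATE_12M,
    };

    int main(void)
    {
            for (int i = 0; i < NUM_RATES; i++)
                    printf("%d -> pos %d / named %d\n",
                           i, downgrade_pos[i], downgrade_named[i]);
            return 0;
    }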
@@ -128,9 +128,8 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
if (rate_n_flags & RATE_MCS_HT_MSK) {
idx = rs_extract_rate(rate_n_flags);
- if (idx >= IWL_RATE_MIMO3_6M_PLCP)
- idx = idx - IWL_RATE_MIMO3_6M_PLCP;
- else if (idx >= IWL_RATE_MIMO2_6M_PLCP)
+ WARN_ON_ONCE(idx >= IWL_RATE_MIMO3_6M_PLCP);
+ if (idx >= IWL_RATE_MIMO2_6M_PLCP)
idx = idx - IWL_RATE_MIMO2_6M_PLCP;
idx += IWL_FIRST_OFDM_RATE;
@@ -162,10 +161,10 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
#ifdef CONFIG_MAC80211_DEBUGFS
static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
- u32 *rate_n_flags, int index);
+ u32 *rate_n_flags);
#else
static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
- u32 *rate_n_flags, int index)
+ u32 *rate_n_flags)
{}
#endif
@@ -212,20 +211,6 @@ static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
};
-static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 99, 0, 153, 186, 208, 239, 256, 263, 268}, /* Norm */
- {0, 0, 0, 0, 106, 0, 162, 194, 215, 246, 262, 268, 273}, /* SGI */
- {0, 0, 0, 0, 134, 0, 249, 346, 431, 574, 685, 732, 775}, /* AGG */
- {0, 0, 0, 0, 148, 0, 272, 376, 465, 614, 727, 775, 818}, /* AGG+SGI */
-};
-
-static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 152, 0, 211, 239, 255, 279, 290, 294, 297}, /* Norm */
- {0, 0, 0, 0, 160, 0, 219, 245, 261, 284, 294, 297, 300}, /* SGI */
- {0, 0, 0, 0, 254, 0, 443, 584, 695, 868, 984, 1030, 1070}, /* AGG */
- {0, 0, 0, 0, 277, 0, 478, 624, 737, 911, 1026, 1070, 1109}, /* AGG+SGI */
-};
-
/* mbps, mcs */
static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
{ "1", "BPSK DSSS"},
@@ -260,82 +245,6 @@ static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
return (ant_type & valid_antenna) == ant_type;
}
-/*
- * removes the old data from the statistics. All data that is older than
- * TID_MAX_TIME_DIFF, will be deleted.
- */
-static void rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
-{
- /* The oldest age we want to keep */
- u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
-
- while (tl->queue_count &&
- (tl->time_stamp < oldest_time)) {
- tl->total -= tl->packet_count[tl->head];
- tl->packet_count[tl->head] = 0;
- tl->time_stamp += TID_QUEUE_CELL_SPACING;
- tl->queue_count--;
- tl->head++;
- if (tl->head >= TID_QUEUE_MAX_SIZE)
- tl->head = 0;
- }
-}
-
-/*
- * increment traffic load value for tid and also remove
- * any old values if passed the certain time period
- */
-static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
- struct ieee80211_hdr *hdr)
-{
- u32 curr_time = jiffies_to_msecs(jiffies);
- u32 time_diff;
- s32 index;
- struct iwl_traffic_load *tl = NULL;
- u8 tid;
-
- if (ieee80211_is_data_qos(hdr->frame_control)) {
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & 0xf;
- } else {
- return IWL_MAX_TID_COUNT;
- }
-
- if (unlikely(tid >= IWL_MAX_TID_COUNT))
- return IWL_MAX_TID_COUNT;
-
- tl = &lq_data->load[tid];
-
- curr_time -= curr_time % TID_ROUND_VALUE;
-
- /* Happens only for the first packet. Initialize the data */
- if (!(tl->queue_count)) {
- tl->total = 1;
- tl->time_stamp = curr_time;
- tl->queue_count = 1;
- tl->head = 0;
- tl->packet_count[0] = 1;
- return IWL_MAX_TID_COUNT;
- }
-
- time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
- index = time_diff / TID_QUEUE_CELL_SPACING;
-
- /* The history is too long: remove data that is older than */
- /* TID_MAX_TIME_DIFF */
- if (index >= TID_QUEUE_MAX_SIZE)
- rs_tl_rm_old_stats(tl, curr_time);
-
- index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
- tl->packet_count[index] = tl->packet_count[index] + 1;
- tl->total = tl->total + 1;
-
- if ((index + 1) > tl->queue_count)
- tl->queue_count = index + 1;
-
- return tid;
-}
-
#ifdef CONFIG_MAC80211_DEBUGFS
/**
* Program the device to use fixed rate for frame transmit
@@ -349,7 +258,6 @@ static void rs_program_fix_rate(struct iwl_mvm *mvm,
lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
- lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n",
lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
@@ -361,45 +269,11 @@ static void rs_program_fix_rate(struct iwl_mvm *mvm,
}
#endif
-/*
- get the traffic load value for tid
-*/
-static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
-{
- u32 curr_time = jiffies_to_msecs(jiffies);
- u32 time_diff;
- s32 index;
- struct iwl_traffic_load *tl = NULL;
-
- if (tid >= IWL_MAX_TID_COUNT)
- return 0;
-
- tl = &(lq_data->load[tid]);
-
- curr_time -= curr_time % TID_ROUND_VALUE;
-
- if (!(tl->queue_count))
- return 0;
-
- time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
- index = time_diff / TID_QUEUE_CELL_SPACING;
-
- /* The history is too long: remove data that is older than */
- /* TID_MAX_TIME_DIFF */
- if (index >= TID_QUEUE_MAX_SIZE)
- rs_tl_rm_old_stats(tl, curr_time);
-
- return tl->total;
-}
-
static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
struct iwl_lq_sta *lq_data, u8 tid,
struct ieee80211_sta *sta)
{
int ret = -EAGAIN;
- u32 load;
-
- load = rs_tl_get_load(lq_data, tid);
/*
* Don't create TX aggregation sessions when in high
@@ -563,7 +437,7 @@ static u32 rate_n_flags_from_tbl(struct iwl_mvm *mvm,
else if (is_mimo2(tbl->lq_type))
rate_n_flags |= iwl_rates[index].plcp_mimo2;
else
- rate_n_flags |= iwl_rates[index].plcp_mimo3;
+ WARN_ON_ONCE(1);
} else {
IWL_ERR(mvm, "Invalid tbl->lq_type %d\n", tbl->lq_type);
}
@@ -601,7 +475,7 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags);
u8 mcs;
- memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
+ memset(tbl, 0, offsetof(struct iwl_scale_tbl_info, win));
*rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
if (*rate_idx == IWL_RATE_INVALID) {
@@ -640,12 +514,8 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
} else if (mcs <= IWL_RATE_MIMO2_60M_PLCP) {
if (num_of_ant == 2)
tbl->lq_type = LQ_MIMO2;
- /* MIMO3 */
} else {
- if (num_of_ant == 3) {
- tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
- tbl->lq_type = LQ_MIMO3;
- }
+ WARN_ON_ONCE(num_of_ant == 3);
}
}
return 0;
@@ -711,10 +581,10 @@ static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
} else {
if (is_siso(rate_type))
return lq_sta->active_siso_rate;
- else if (is_mimo2(rate_type))
+ else {
+ WARN_ON_ONCE(!is_mimo2(rate_type));
return lq_sta->active_mimo2_rate;
- else
- return lq_sta->active_mimo3_rate;
+ }
}
}
@@ -1089,7 +959,7 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
}
/* Choose among many HT tables depending on number of streams
- * (SISO/MIMO2/MIMO3), channel width (20/40), SGI, and aggregation
+ * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
* status */
if (is_siso(tbl->lq_type) && !tbl->is_ht40)
ht_tbl_pointer = expected_tpt_siso20MHz;
@@ -1097,12 +967,10 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
ht_tbl_pointer = expected_tpt_siso40MHz;
else if (is_mimo2(tbl->lq_type) && !tbl->is_ht40)
ht_tbl_pointer = expected_tpt_mimo2_20MHz;
- else if (is_mimo2(tbl->lq_type))
+ else {
+ WARN_ON_ONCE(!is_mimo2(tbl->lq_type));
ht_tbl_pointer = expected_tpt_mimo2_40MHz;
- else if (is_mimo3(tbl->lq_type) && !tbl->is_ht40)
- ht_tbl_pointer = expected_tpt_mimo3_20MHz;
- else /* if (is_mimo3(tbl->lq_type)) <-- must be true */
- ht_tbl_pointer = expected_tpt_mimo3_40MHz;
+ }
if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
tbl->expected_tpt = ht_tbl_pointer[0];
@@ -1274,58 +1142,6 @@ static int rs_switch_to_mimo2(struct iwl_mvm *mvm,
}
/*
- * Set up search table for MIMO3
- */
-static int rs_switch_to_mimo3(struct iwl_mvm *mvm,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_sta *sta,
- struct iwl_scale_tbl_info *tbl, int index)
-{
- u16 rate_mask;
- s32 rate;
- s8 is_green = lq_sta->is_green;
-
- if (!sta->ht_cap.ht_supported)
- return -1;
-
- if (sta->smps_mode == IEEE80211_SMPS_STATIC)
- return -1;
-
- /* Need both Tx chains/antennas to support MIMO */
- if (num_of_ant(iwl_fw_valid_tx_ant(mvm->fw)) < 3)
- return -1;
-
- IWL_DEBUG_RATE(mvm, "LQ: try to switch to MIMO3\n");
-
- tbl->lq_type = LQ_MIMO3;
- tbl->action = 0;
- tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
- rate_mask = lq_sta->active_mimo3_rate;
-
- if (iwl_is_ht40_tx_allowed(sta))
- tbl->is_ht40 = 1;
- else
- tbl->is_ht40 = 0;
-
- rs_set_expected_tpt_table(lq_sta, tbl);
-
- rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
-
- IWL_DEBUG_RATE(mvm, "LQ: MIMO3 best rate %d mask %X\n",
- rate, rate_mask);
- if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
- IWL_DEBUG_RATE(mvm, "Can't switch with index %d rate mask %x\n",
- rate, rate_mask);
- return -1;
- }
- tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green);
-
- IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n",
- tbl->current_rate, is_green);
- return 0;
-}
-
-/*
* Set up search table for SISO
*/
static int rs_switch_to_siso(struct iwl_mvm *mvm,
@@ -1434,21 +1250,14 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
}
break;
- case IWL_LEGACY_SWITCH_MIMO2_AB:
- case IWL_LEGACY_SWITCH_MIMO2_AC:
- case IWL_LEGACY_SWITCH_MIMO2_BC:
+ case IWL_LEGACY_SWITCH_MIMO2:
IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to MIMO2\n");
/* Set up search table to try MIMO */
memcpy(search_tbl, tbl, sz);
search_tbl->is_SGI = 0;
- if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
- search_tbl->ant_type = ANT_AB;
- else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
- search_tbl->ant_type = ANT_AC;
- else
- search_tbl->ant_type = ANT_BC;
+ search_tbl->ant_type = ANT_AB;
if (!rs_is_valid_ant(valid_tx_ant,
search_tbl->ant_type))
@@ -1461,30 +1270,11 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
goto out;
}
break;
-
- case IWL_LEGACY_SWITCH_MIMO3_ABC:
- IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to MIMO3\n");
-
- /* Set up search table to try MIMO3 */
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = 0;
-
- search_tbl->ant_type = ANT_ABC;
-
- if (!rs_is_valid_ant(valid_tx_ant,
- search_tbl->ant_type))
- break;
-
- ret = rs_switch_to_mimo3(mvm, lq_sta, sta,
- search_tbl, index);
- if (!ret) {
- lq_sta->action_counter = 0;
- goto out;
- }
- break;
+ default:
+ WARN_ON_ONCE(1);
}
tbl->action++;
- if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
+ if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
if (tbl->action == start_action)
@@ -1496,7 +1286,7 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
out:
lq_sta->search_better_tbl = 1;
tbl->action++;
- if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
+ if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
@@ -1531,7 +1321,7 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */
if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
- tbl->action = IWL_SISO_SWITCH_MIMO2_AB;
+ tbl->action = IWL_SISO_SWITCH_MIMO2;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
@@ -1573,19 +1363,12 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
goto out;
}
break;
- case IWL_SISO_SWITCH_MIMO2_AB:
- case IWL_SISO_SWITCH_MIMO2_AC:
- case IWL_SISO_SWITCH_MIMO2_BC:
+ case IWL_SISO_SWITCH_MIMO2:
IWL_DEBUG_RATE(mvm, "LQ: SISO switch to MIMO2\n");
memcpy(search_tbl, tbl, sz);
search_tbl->is_SGI = 0;
- if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
- search_tbl->ant_type = ANT_AB;
- else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
- search_tbl->ant_type = ANT_AC;
- else
- search_tbl->ant_type = ANT_BC;
+ search_tbl->ant_type = ANT_AB;
if (!rs_is_valid_ant(valid_tx_ant,
search_tbl->ant_type))
@@ -1626,24 +1409,11 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
index, is_green);
update_search_tbl_counter = 1;
goto out;
- case IWL_SISO_SWITCH_MIMO3_ABC:
- IWL_DEBUG_RATE(mvm, "LQ: SISO switch to MIMO3\n");
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = 0;
- search_tbl->ant_type = ANT_ABC;
-
- if (!rs_is_valid_ant(valid_tx_ant,
- search_tbl->ant_type))
- break;
-
- ret = rs_switch_to_mimo3(mvm, lq_sta, sta,
- search_tbl, index);
- if (!ret)
- goto out;
- break;
+ default:
+ WARN_ON_ONCE(1);
}
tbl->action++;
- if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
+ if (tbl->action > IWL_SISO_SWITCH_GI)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
if (tbl->action == start_action)
@@ -1655,7 +1425,7 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
out:
lq_sta->search_better_tbl = 1;
tbl->action++;
- if (tbl->action > IWL_SISO_SWITCH_MIMO3_ABC)
+ if (tbl->action > IWL_SISO_SWITCH_GI)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
@@ -1696,8 +1466,7 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */
- if (tbl->action == IWL_MIMO2_SWITCH_SISO_B ||
- tbl->action == IWL_MIMO2_SWITCH_SISO_C)
+ if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
tbl->action = IWL_MIMO2_SWITCH_SISO_A;
break;
default:
@@ -1730,7 +1499,6 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
break;
case IWL_MIMO2_SWITCH_SISO_A:
case IWL_MIMO2_SWITCH_SISO_B:
- case IWL_MIMO2_SWITCH_SISO_C:
IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to SISO\n");
/* Set up new search table for SISO */
@@ -1738,10 +1506,8 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
search_tbl->ant_type = ANT_A;
- else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
+ else /* tbl->action == IWL_MIMO2_SWITCH_SISO_B */
search_tbl->ant_type = ANT_B;
- else
- search_tbl->ant_type = ANT_C;
if (!rs_is_valid_ant(valid_tx_ant,
search_tbl->ant_type))
@@ -1784,26 +1550,11 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
index, is_green);
update_search_tbl_counter = 1;
goto out;
-
- case IWL_MIMO2_SWITCH_MIMO3_ABC:
- IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to MIMO3\n");
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = 0;
- search_tbl->ant_type = ANT_ABC;
-
- if (!rs_is_valid_ant(valid_tx_ant,
- search_tbl->ant_type))
- break;
-
- ret = rs_switch_to_mimo3(mvm, lq_sta, sta,
- search_tbl, index);
- if (!ret)
- goto out;
-
- break;
+ default:
+ WARN_ON_ONCE(1);
}
tbl->action++;
- if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC)
+ if (tbl->action > IWL_MIMO2_SWITCH_GI)
tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
if (tbl->action == start_action)
@@ -1814,7 +1565,7 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
out:
lq_sta->search_better_tbl = 1;
tbl->action++;
- if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC)
+ if (tbl->action > IWL_MIMO2_SWITCH_GI)
tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
@@ -1823,171 +1574,6 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
}
/*
- * Try to switch to new modulation mode from MIMO3
- */
-static int rs_move_mimo3_to_other(struct iwl_mvm *mvm,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_sta *sta, int index)
-{
- s8 is_green = lq_sta->is_green;
- struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- struct iwl_scale_tbl_info *search_tbl =
- &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- struct iwl_rate_scale_data *window = &(tbl->win[index]);
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- u32 sz = (sizeof(struct iwl_scale_tbl_info) -
- (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
- u8 start_action;
- u8 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
- u8 tx_chains_num = num_of_ant(valid_tx_ant);
- int ret;
- u8 update_search_tbl_counter = 0;
-
- switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
- case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
- /* nothing */
- break;
- case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
- case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
- /* avoid antenna B and MIMO */
- if (tbl->action != IWL_MIMO3_SWITCH_SISO_A)
- tbl->action = IWL_MIMO3_SWITCH_SISO_A;
- break;
- case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
- /* avoid antenna B unless MIMO */
- if (tbl->action == IWL_MIMO3_SWITCH_SISO_B ||
- tbl->action == IWL_MIMO3_SWITCH_SISO_C)
- tbl->action = IWL_MIMO3_SWITCH_SISO_A;
- break;
- default:
- IWL_ERR(mvm, "Invalid BT load %d",
- BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD));
- break;
- }
-
- start_action = tbl->action;
- while (1) {
- lq_sta->action_counter++;
- switch (tbl->action) {
- case IWL_MIMO3_SWITCH_ANTENNA1:
- case IWL_MIMO3_SWITCH_ANTENNA2:
- IWL_DEBUG_RATE(mvm, "LQ: MIMO3 toggle Antennas\n");
-
- if (tx_chains_num <= 3)
- break;
-
- if (window->success_ratio >= IWL_RS_GOOD_RATIO)
- break;
-
- memcpy(search_tbl, tbl, sz);
- if (rs_toggle_antenna(valid_tx_ant,
- &search_tbl->current_rate,
- search_tbl))
- goto out;
- break;
- case IWL_MIMO3_SWITCH_SISO_A:
- case IWL_MIMO3_SWITCH_SISO_B:
- case IWL_MIMO3_SWITCH_SISO_C:
- IWL_DEBUG_RATE(mvm, "LQ: MIMO3 switch to SISO\n");
-
- /* Set up new search table for SISO */
- memcpy(search_tbl, tbl, sz);
-
- if (tbl->action == IWL_MIMO3_SWITCH_SISO_A)
- search_tbl->ant_type = ANT_A;
- else if (tbl->action == IWL_MIMO3_SWITCH_SISO_B)
- search_tbl->ant_type = ANT_B;
- else
- search_tbl->ant_type = ANT_C;
-
- if (!rs_is_valid_ant(valid_tx_ant,
- search_tbl->ant_type))
- break;
-
- ret = rs_switch_to_siso(mvm, lq_sta, sta,
- search_tbl, index);
- if (!ret)
- goto out;
-
- break;
-
- case IWL_MIMO3_SWITCH_MIMO2_AB:
- case IWL_MIMO3_SWITCH_MIMO2_AC:
- case IWL_MIMO3_SWITCH_MIMO2_BC:
- IWL_DEBUG_RATE(mvm, "LQ: MIMO3 switch to MIMO2\n");
-
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = 0;
- if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AB)
- search_tbl->ant_type = ANT_AB;
- else if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AC)
- search_tbl->ant_type = ANT_AC;
- else
- search_tbl->ant_type = ANT_BC;
-
- if (!rs_is_valid_ant(valid_tx_ant,
- search_tbl->ant_type))
- break;
-
- ret = rs_switch_to_mimo2(mvm, lq_sta, sta,
- search_tbl, index);
- if (!ret)
- goto out;
-
- break;
-
- case IWL_MIMO3_SWITCH_GI:
- if (!tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_20))
- break;
- if (tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_40))
- break;
-
- IWL_DEBUG_RATE(mvm, "LQ: MIMO3 toggle SGI/NGI\n");
-
- /* Set up new search table for MIMO */
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = !tbl->is_SGI;
- rs_set_expected_tpt_table(lq_sta, search_tbl);
- /*
- * If active table already uses the fastest possible
- * modulation (dual stream with short guard interval),
- * and it's working well, there's no need to look
- * for a better type of modulation!
- */
- if (tbl->is_SGI) {
- s32 tpt = lq_sta->last_tpt / 100;
- if (tpt >= search_tbl->expected_tpt[index])
- break;
- }
- search_tbl->current_rate =
- rate_n_flags_from_tbl(mvm, search_tbl,
- index, is_green);
- update_search_tbl_counter = 1;
- goto out;
- }
- tbl->action++;
- if (tbl->action > IWL_MIMO3_SWITCH_GI)
- tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
-
- if (tbl->action == start_action)
- break;
- }
- search_tbl->lq_type = LQ_NONE;
- return 0;
- out:
- lq_sta->search_better_tbl = 1;
- tbl->action++;
- if (tbl->action > IWL_MIMO3_SWITCH_GI)
- tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
- if (update_search_tbl_counter)
- search_tbl->action = tbl->action;
-
- return 0;
-}
-
-/*
* Check whether we should continue using same modulation mode, or
* begin search for a new mode, based on:
* 1) # tx successes or failures while using this mode
@@ -2086,6 +1672,22 @@ static void rs_update_rate_tbl(struct iwl_mvm *mvm,
iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
}
+static u8 rs_get_tid(struct iwl_lq_sta *lq_data,
+ struct ieee80211_hdr *hdr)
+{
+ u8 tid = IWL_MAX_TID_COUNT;
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & 0xf;
+ }
+
+ if (unlikely(tid > IWL_MAX_TID_COUNT))
+ tid = IWL_MAX_TID_COUNT;
+
+ return tid;
+}
+
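/*
 * Editor's note: a self-contained sketch (not part of the patch) of the
 * same TID extraction as rs_get_tid() above. The TID sits in the low
 * four bits of the first QoS-control byte; non-QoS frames get the
 * sentinel. SKETCH_MAX_TID_COUNT stands in for IWL_MAX_TID_COUNT.
 */
#include <stdint.h>

#define SKETCH_MAX_TID_COUNT 8

static uint8_t sketch_get_tid(const uint8_t *qos_ctl, int is_data_qos)
{
	uint8_t tid = SKETCH_MAX_TID_COUNT;

	if (is_data_qos)
		tid = qos_ctl[0] & 0xf;	/* TID: bits 0-3 of QoS control */

	/* qc[0] & 0xf can yield values above the valid TID range (0-7) */
	return tid > SKETCH_MAX_TID_COUNT ? SKETCH_MAX_TID_COUNT : tid;
}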
/*
* Do rate scaling and search for new modulation mode.
*/
@@ -2129,7 +1731,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
- tid = rs_tl_add_packet(lq_sta, hdr);
+ tid = rs_get_tid(lq_sta, hdr);
if ((tid != IWL_MAX_TID_COUNT) &&
(lq_sta->tx_agg_tid_en & (1 << tid))) {
tid_data = &sta_priv->tid_data[tid];
@@ -2377,8 +1979,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
scale_action = 0;
if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >=
- IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
- (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
+ IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && (is_mimo(tbl->lq_type))) {
if (lq_sta->last_bt_traffic >
BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
/*
@@ -2395,8 +1996,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD);
if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >=
- IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
- (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
+ IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && is_mimo(tbl->lq_type)) {
/* search for a new modulation */
rs_stay_in_table(lq_sta, true);
goto lq_update;
@@ -2456,7 +2056,7 @@ lq_update:
else if (is_mimo2(tbl->lq_type))
rs_move_mimo2_to_other(mvm, lq_sta, sta, index);
else
- rs_move_mimo3_to_other(mvm, lq_sta, sta, index);
+ WARN_ON_ONCE(1);
/* If new "search" mode was selected, set up in uCode table */
if (lq_sta->search_better_tbl) {
@@ -2621,11 +2221,10 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
rate_idx -= IWL_FIRST_OFDM_RATE;
/* 6M and 9M shared same MCS index */
rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
+ WARN_ON_ONCE(rs_extract_rate(lq_sta->last_rate_n_flags) >=
+ IWL_RATE_MIMO3_6M_PLCP);
if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
- IWL_RATE_MIMO3_6M_PLCP)
- rate_idx = rate_idx + (2 * MCS_INDEX_PER_STREAM);
- else if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
- IWL_RATE_MIMO2_6M_PLCP)
+ IWL_RATE_MIMO2_6M_PLCP)
rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
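/*
 * Editor's note (not part of the patch): assuming MCS_INDEX_PER_STREAM
 * is 8, a SISO PLCP leaves rate_idx in MCS 0-7 and a MIMO2 PLCP shifts
 * it into MCS 8-15; the branch removed above used to shift MIMO3 rates
 * into MCS 16-23 via 2 * MCS_INDEX_PER_STREAM.
 */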
@@ -2688,9 +2287,6 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lq_sta->flush_timer = 0;
lq_sta->supp_rates = sta->supp_rates[sband->band];
- for (j = 0; j < LQ_SIZE; j++)
- for (i = 0; i < IWL_RATE_COUNT; i++)
- rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
IWL_DEBUG_RATE(mvm,
"LQ: *** rate scale station global init for station %d ***\n",
@@ -2727,16 +2323,10 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lq_sta->active_mimo2_rate &= ~((u16)0x2);
lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
- lq_sta->active_mimo3_rate = ht_cap->mcs.rx_mask[2] << 1;
- lq_sta->active_mimo3_rate |= ht_cap->mcs.rx_mask[2] & 0x1;
- lq_sta->active_mimo3_rate &= ~((u16)0x2);
- lq_sta->active_mimo3_rate <<= IWL_FIRST_OFDM_RATE;
-
IWL_DEBUG_RATE(mvm,
- "SISO-RATE=%X MIMO2-RATE=%X MIMO3-RATE=%X\n",
+ "SISO-RATE=%X MIMO2-RATE=%X\n",
lq_sta->active_siso_rate,
- lq_sta->active_mimo2_rate,
- lq_sta->active_mimo3_rate);
+ lq_sta->active_mimo2_rate);
/* These values will be overridden later */
lq_sta->lq.single_stream_ant_msk =
@@ -2780,7 +2370,7 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
/* Override starting rate (index 0) if needed for debug purposes */
- rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+ rs_dbgfs_set_mcs(lq_sta, &new_rate);
/* Interpret new_rate (rate_n_flags) */
rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
@@ -2827,7 +2417,7 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
}
/* Override next rate if needed for debug purposes */
- rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+ rs_dbgfs_set_mcs(lq_sta, &new_rate);
/* Fill next table entry */
lq_cmd->rs_table[index] =
@@ -2869,7 +2459,7 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
use_ht_possible = 0;
/* Override next rate if needed for debug purposes */
- rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+ rs_dbgfs_set_mcs(lq_sta, &new_rate);
/* Fill next table entry */
lq_cmd->rs_table[index] = cpu_to_le32(new_rate);
@@ -2914,7 +2504,7 @@ static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta,
#ifdef CONFIG_MAC80211_DEBUGFS
static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
- u32 *rate_n_flags, int index)
+ u32 *rate_n_flags)
{
struct iwl_mvm *mvm;
u8 valid_tx_ant;
@@ -2999,8 +2589,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
(is_legacy(tbl->lq_type)) ? "legacy" : "HT");
if (is_Ht(tbl->lq_type)) {
desc += sprintf(buff+desc, " %s",
- (is_siso(tbl->lq_type)) ? "SISO" :
- ((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3"));
+ (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
desc += sprintf(buff+desc, " %s",
(tbl->is_ht40) ? "40MHz" : "20MHz");
desc += sprintf(buff+desc, " %s %s %s\n",
@@ -3100,32 +2689,6 @@ static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
.llseek = default_llseek,
};
-static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file,
- char __user *user_buf, size_t count, loff_t *ppos)
-{
- struct iwl_lq_sta *lq_sta = file->private_data;
- struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
- char buff[120];
- int desc = 0;
-
- if (is_Ht(tbl->lq_type))
- desc += sprintf(buff+desc,
- "Bit Rate= %d Mb/s\n",
- tbl->expected_tpt[lq_sta->last_txrate_idx]);
- else
- desc += sprintf(buff+desc,
- "Bit Rate= %d Mb/s\n",
- iwl_rates[lq_sta->last_txrate_idx].ieee >> 1);
-
- return simple_read_from_buffer(user_buf, count, ppos, buff, desc);
-}
-
-static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
- .read = rs_sta_dbgfs_rate_scale_data_read,
- .open = simple_open,
- .llseek = default_llseek,
-};
-
static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
{
struct iwl_lq_sta *lq_sta = mvm_sta;
@@ -3135,9 +2698,6 @@ static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
lq_sta->rs_sta_dbgfs_stats_table_file =
debugfs_create_file("rate_stats_table", S_IRUSR, dir,
lq_sta, &rs_sta_dbgfs_stats_table_ops);
- lq_sta->rs_sta_dbgfs_rate_scale_data_file =
- debugfs_create_file("rate_scale_data", S_IRUSR, dir,
- lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
&lq_sta->tx_agg_tid_en);
@@ -3148,7 +2708,6 @@ static void rs_remove_debugfs(void *mvm, void *mvm_sta)
struct iwl_lq_sta *lq_sta = mvm_sta;
debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
- debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
}
#endif
@@ -3159,8 +2718,9 @@ static void rs_remove_debugfs(void *mvm, void *mvm_sta)
* station is added we ignore it.
*/
static void rs_rate_init_stub(void *mvm_r,
- struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *mvm_sta)
+ struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
+ struct ieee80211_sta *sta, void *mvm_sta)
{
}
static struct rate_control_ops rs_mvm_ops = {
@@ -3193,13 +2753,14 @@ void iwl_mvm_rate_control_unregister(void)
* iwl_mvm_tx_protection - Gets the LQ command, changes it to enable/disable
* Tx protection according to this request and previous requests,
* and send the LQ command.
- * @lq: The LQ command
* @mvmsta: The station
* @enable: Enable Tx protection?
*/
-int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
- struct iwl_mvm_sta *mvmsta, bool enable)
+int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+ bool enable)
{
+ struct iwl_lq_cmd *lq = &mvmsta->lq_sta.lq;
+
lockdep_assert_held(&mvm->mutex);
if (enable) {
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index cff4f6da7733..335cf1682902 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -38,14 +38,8 @@ struct iwl_rs_rate_info {
u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
- u8 plcp_mimo3; /* uCode API: IWL_RATE_MIMO3_6M_PLCP, etc. */
- u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
- u8 prev_ieee; /* previous rate in IEEE speeds */
- u8 next_ieee; /* next rate in IEEE speeds */
u8 prev_rs; /* previous rate used in rs algo */
u8 next_rs; /* next rate used in rs algo */
- u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
- u8 next_rs_tgg; /* next rate used in TGG rs algo */
};
#define IWL_RATE_60M_PLCP 3
@@ -120,23 +114,6 @@ enum {
IWL_RATE_MIMO3_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
};
-/* MAC header values for bit rates */
-enum {
- IWL_RATE_6M_IEEE = 12,
- IWL_RATE_9M_IEEE = 18,
- IWL_RATE_12M_IEEE = 24,
- IWL_RATE_18M_IEEE = 36,
- IWL_RATE_24M_IEEE = 48,
- IWL_RATE_36M_IEEE = 72,
- IWL_RATE_48M_IEEE = 96,
- IWL_RATE_54M_IEEE = 108,
- IWL_RATE_60M_IEEE = 120,
- IWL_RATE_1M_IEEE = 2,
- IWL_RATE_2M_IEEE = 4,
- IWL_RATE_5M_IEEE = 11,
- IWL_RATE_11M_IEEE = 22,
-};
-
#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
#define IWL_INVALID_VALUE -1
@@ -165,47 +142,22 @@ enum {
#define IWL_LEGACY_SWITCH_ANTENNA1 0
#define IWL_LEGACY_SWITCH_ANTENNA2 1
#define IWL_LEGACY_SWITCH_SISO 2
-#define IWL_LEGACY_SWITCH_MIMO2_AB 3
-#define IWL_LEGACY_SWITCH_MIMO2_AC 4
-#define IWL_LEGACY_SWITCH_MIMO2_BC 5
-#define IWL_LEGACY_SWITCH_MIMO3_ABC 6
+#define IWL_LEGACY_SWITCH_MIMO2 3
/* possible actions when in siso mode */
#define IWL_SISO_SWITCH_ANTENNA1 0
#define IWL_SISO_SWITCH_ANTENNA2 1
-#define IWL_SISO_SWITCH_MIMO2_AB 2
-#define IWL_SISO_SWITCH_MIMO2_AC 3
-#define IWL_SISO_SWITCH_MIMO2_BC 4
-#define IWL_SISO_SWITCH_GI 5
-#define IWL_SISO_SWITCH_MIMO3_ABC 6
-
+#define IWL_SISO_SWITCH_MIMO2 2
+#define IWL_SISO_SWITCH_GI 3
/* possible actions when in mimo mode */
#define IWL_MIMO2_SWITCH_ANTENNA1 0
#define IWL_MIMO2_SWITCH_ANTENNA2 1
#define IWL_MIMO2_SWITCH_SISO_A 2
#define IWL_MIMO2_SWITCH_SISO_B 3
-#define IWL_MIMO2_SWITCH_SISO_C 4
-#define IWL_MIMO2_SWITCH_GI 5
-#define IWL_MIMO2_SWITCH_MIMO3_ABC 6
-
+#define IWL_MIMO2_SWITCH_GI 4
-/* possible actions when in mimo3 mode */
-#define IWL_MIMO3_SWITCH_ANTENNA1 0
-#define IWL_MIMO3_SWITCH_ANTENNA2 1
-#define IWL_MIMO3_SWITCH_SISO_A 2
-#define IWL_MIMO3_SWITCH_SISO_B 3
-#define IWL_MIMO3_SWITCH_SISO_C 4
-#define IWL_MIMO3_SWITCH_MIMO2_AB 5
-#define IWL_MIMO3_SWITCH_MIMO2_AC 6
-#define IWL_MIMO3_SWITCH_MIMO2_BC 7
-#define IWL_MIMO3_SWITCH_GI 8
-
-
-#define IWL_MAX_11N_MIMO3_SEARCH IWL_MIMO3_SWITCH_GI
-#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_MIMO3_ABC
-
-/*FIXME:RS:add possible actions for MIMO3*/
+#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI
#define IWL_ACTION_LIMIT 3 /* # possible actions */
@@ -240,15 +192,13 @@ enum iwl_table_type {
LQ_A,
LQ_SISO, /* high-throughput types */
LQ_MIMO2,
- LQ_MIMO3,
LQ_MAX,
};
#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
#define is_siso(tbl) ((tbl) == LQ_SISO)
#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
-#define is_mimo3(tbl) ((tbl) == LQ_MIMO3)
-#define is_mimo(tbl) (is_mimo2(tbl) || is_mimo3(tbl))
+#define is_mimo(tbl) is_mimo2(tbl)
#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
#define is_a_band(tbl) ((tbl) == LQ_A)
#define is_g_and(tbl) ((tbl) == LQ_G)
@@ -290,17 +240,6 @@ struct iwl_scale_tbl_info {
struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
};
-struct iwl_traffic_load {
- unsigned long time_stamp; /* age of the oldest statistics */
- u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
- * slice */
- u32 total; /* total num of packets during the
- * last TID_MAX_TIME_DIFF */
- u8 queue_count; /* number of queues that has
- * been used since the last cleanup */
- u8 head; /* start of the circular buffer */
-};
-
/**
* struct iwl_lq_sta -- driver's rate scaling private structure
*
@@ -331,18 +270,15 @@ struct iwl_lq_sta {
u16 active_legacy_rate;
u16 active_siso_rate;
u16 active_mimo2_rate;
- u16 active_mimo3_rate;
s8 max_rate_idx; /* Max rate set by user */
u8 missed_rate_counter;
struct iwl_lq_cmd lq;
struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
- struct iwl_traffic_load load[IWL_MAX_TID_COUNT];
u8 tx_agg_tid_en;
#ifdef CONFIG_MAC80211_DEBUGFS
struct dentry *rs_sta_dbgfs_scale_table_file;
struct dentry *rs_sta_dbgfs_stats_table_file;
- struct dentry *rs_sta_dbgfs_rate_scale_data_file;
struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
u32 dbg_fixed_rate;
#endif
@@ -404,7 +340,7 @@ extern void iwl_mvm_rate_control_unregister(void);
struct iwl_mvm_sta;
-int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
- struct iwl_mvm_sta *mvmsta, bool enable);
+int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+ bool enable);
#endif /* __rs__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index e4930d5027d2..2a8cb5a60535 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -124,24 +124,15 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
ieee80211_rx_ni(mvm->hw, skb);
}
-/*
- * iwl_mvm_calc_rssi - calculate the rssi in dBm
- * @phy_info: the phy information for the coming packet
- */
-static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
- struct iwl_rx_phy_info *phy_info)
+static void iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
+ struct iwl_rx_phy_info *phy_info,
+ struct ieee80211_rx_status *rx_status)
{
int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
int rssi_all_band_a, rssi_all_band_b;
u32 agc_a, agc_b, max_agc;
u32 val;
- /* Find max rssi among 2 possible receivers.
- * These values are measured by the Digital Signal Processor (DSP).
- * They should stay fairly constant even as the signal strength varies,
- * if the radio's Automatic Gain Control (AGC) is working right.
- * AGC value (see below) will provide the "interesting" info.
- */
val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS;
agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS;
@@ -166,7 +157,51 @@ static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
- return max_rssi_dbm;
+ rx_status->signal = max_rssi_dbm;
+ rx_status->chains = (le16_to_cpu(phy_info->phy_flags) &
+ RX_RES_PHY_FLAGS_ANTENNA)
+ >> RX_RES_PHY_FLAGS_ANTENNA_POS;
+ rx_status->chain_signal[0] = rssi_a_dbm;
+ rx_status->chain_signal[1] = rssi_b_dbm;
+}
+
+/*
+ * iwl_mvm_get_signal_strength - use new rx PHY INFO API
+ * values are reported by the fw as positive values - need to negate
+ * to obtain their dBm. Account for missing antennas by replacing 0
+ * values by -256dBm: practically 0 power and a non-feasible 8-bit value.
+ */
+static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
+ struct iwl_rx_phy_info *phy_info,
+ struct ieee80211_rx_status *rx_status)
+{
+ int energy_a, energy_b, energy_c, max_energy;
+ u32 val;
+
+ val =
+ le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]);
+ energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >>
+ IWL_RX_INFO_ENERGY_ANT_A_POS;
+ energy_a = energy_a ? -energy_a : -256;
+ energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >>
+ IWL_RX_INFO_ENERGY_ANT_B_POS;
+ energy_b = energy_b ? -energy_b : -256;
+ energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >>
+ IWL_RX_INFO_ENERGY_ANT_C_POS;
+ energy_c = energy_c ? -energy_c : -256;
+ max_energy = max(energy_a, energy_b);
+ max_energy = max(max_energy, energy_c);
+
+ IWL_DEBUG_STATS(mvm, "energy In A %d B %d C %d , and max %d\n",
+ energy_a, energy_b, energy_c, max_energy);
+
+ rx_status->signal = max_energy;
+ rx_status->chains = (le16_to_cpu(phy_info->phy_flags) &
+ RX_RES_PHY_FLAGS_ANTENNA)
+ >> RX_RES_PHY_FLAGS_ANTENNA_POS;
+ rx_status->chain_signal[0] = energy_a;
+ rx_status->chain_signal[1] = energy_b;
+ rx_status->chain_signal[2] = energy_c;
}
/*
@@ -289,29 +324,14 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
*/
/*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
- /* Find max signal strength (dBm) among 3 antenna/receiver chains */
- rx_status.signal = iwl_mvm_calc_rssi(mvm, phy_info);
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_RX_ENERGY_API)
+ iwl_mvm_get_signal_strength(mvm, phy_info, &rx_status);
+ else
+ iwl_mvm_calc_rssi(mvm, phy_info, &rx_status);
IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status.signal,
(unsigned long long)rx_status.mactime);
- /*
- * "antenna number"
- *
- * It seems that the antenna field in the phy flags value
- * is actually a bit field. This is undefined by radiotap,
- * it wants an actual antenna number but I always get "7"
- * for most legacy frames I receive indicating that the
- * same frame was received on all three RX chains.
- *
- * I think this field should be removed in favor of a
- * new 802.11n radiotap field "RX chains" that is defined
- * as a bitmask.
- */
- rx_status.antenna = (le16_to_cpu(phy_info->phy_flags) &
- RX_RES_PHY_FLAGS_ANTENNA)
- >> RX_RES_PHY_FLAGS_ANTENNA_POS;
-
/* set the preamble flag if appropriate */
if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE))
rx_status.flag |= RX_FLAG_SHORTPRE;
@@ -364,11 +384,74 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
return 0;
}
+static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm,
+ struct iwl_notif_statistics *stats)
+{
+ /*
+ * NOTE: FW aggregates the statistics - BUT the statistics are cleared
+ * when the driver issues REPLY_STATISTICS_CMD 0x9c with CLEAR_STATS
+ * bit set.
+ */
+ lockdep_assert_held(&mvm->mutex);
+ memcpy(&mvm->rx_stats, &stats->rx, sizeof(struct mvm_statistics_rx));
+}
+
+struct iwl_mvm_stat_data {
+ struct iwl_notif_statistics *stats;
+ struct iwl_mvm *mvm;
+};
+
+static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_stat_data *data = _data;
+ struct iwl_notif_statistics *stats = data->stats;
+ struct iwl_mvm *mvm = data->mvm;
+ int sig = -stats->general.beacon_filter_average_energy;
+ int last_event;
+ int thold = vif->bss_conf.cqm_rssi_thold;
+ int hyst = vif->bss_conf.cqm_rssi_hyst;
+ u16 id = le32_to_cpu(stats->rx.general.mac_id);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (mvmvif->id != id)
+ return;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ mvmvif->bf_data.ave_beacon_signal = sig;
+
+ if (!(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
+ return;
+
+ /* CQM Notification */
+ last_event = mvmvif->bf_data.last_cqm_event;
+ if (thold && sig < thold && (last_event == 0 ||
+ sig < last_event - hyst)) {
+ mvmvif->bf_data.last_cqm_event = sig;
+ IWL_DEBUG_RX(mvm, "cqm_iterator cqm low %d\n",
+ sig);
+ ieee80211_cqm_rssi_notify(
+ vif,
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
+ GFP_KERNEL);
+ } else if (sig > thold &&
+ (last_event == 0 || sig > last_event + hyst)) {
+ mvmvif->bf_data.last_cqm_event = sig;
+ IWL_DEBUG_RX(mvm, "cqm_iterator cqm high %d\n",
+ sig);
+ ieee80211_cqm_rssi_notify(
+ vif,
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
+ GFP_KERNEL);
+ }
+}
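/*
 * Editor's note: a worked example (not part of the patch) of the CQM
 * hysteresis above, with cqm_rssi_thold = -70 dBm and cqm_rssi_hyst = 4:
 *
 *	sig = -72  ->  below -70, last_event == 0    ->  LOW event, last_event = -72
 *	sig = -69  ->  above -70, but -69 <= -72 + 4 ->  suppressed by hysteresis
 *	sig = -67  ->  above -70 and -67 > -68       ->  HIGH event, last_event = -67
 *
 * The hysteresis window keeps a signal hovering around the threshold
 * from generating an event storm.
 */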
+
/*
* iwl_mvm_rx_statistics - STATISTICS_NOTIFICATION handler
*
* TODO: This handler is implemented partially.
- * It only gets the NIC's temperature.
*/
int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
@@ -377,11 +460,20 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_notif_statistics *stats = (void *)&pkt->data;
struct mvm_statistics_general_common *common = &stats->general.common;
+ struct iwl_mvm_stat_data data = {
+ .stats = stats,
+ .mvm = mvm,
+ };
if (mvm->temperature != le32_to_cpu(common->temperature)) {
mvm->temperature = le32_to_cpu(common->temperature);
iwl_mvm_tt_handler(mvm);
}
+ iwl_mvm_update_rx_statistics(mvm, stats);
+ ieee80211_iterate_active_interfaces(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_stat_iterator,
+ &data);
return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index acdff6b67e04..9a7ab8495300 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -301,10 +301,12 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
*/
if (req->n_ssids > 0) {
cmd->passive2active = cpu_to_le16(1);
+ cmd->scan_flags |= SCAN_FLAGS_PASSIVE2ACTIVE;
ssid = req->ssids[0].ssid;
ssid_len = req->ssids[0].ssid_len;
} else {
cmd->passive2active = 0;
+ cmd->scan_flags &= ~SCAN_FLAGS_PASSIVE2ACTIVE;
}
iwl_mvm_scan_fill_ssids(cmd, req);
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 563f559b902d..44add291531b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -826,8 +826,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* method for HT traffic
* this function also sends the LQ command
*/
- return iwl_mvm_tx_protection(mvm, &mvmsta->lq_sta.lq,
- mvmsta, true);
+ return iwl_mvm_tx_protection(mvm, mvmsta, true);
/*
* TODO: remove the TLC_RTS flag when we tear down the last
* AGG session (agg_tids_count in DVM)
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index 7fd6fbfbc1b3..c17b74c31398 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -73,7 +73,6 @@
#include "iwl-prph.h"
/* A TimeUnit is 1024 microsecond */
-#define TU_TO_JIFFIES(_tu) (usecs_to_jiffies((_tu) * 1024))
#define MSEC_TO_TU(_msec) (_msec*1000/1024)
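/*
 * Editor's note: a worked example (not part of the patch). With a TU of
 * 1024 usec the conversion is integer division, so durations round down;
 * the sketch parenthesizes _msec, which the macro above leaves bare.
 */
#include <stdio.h>

#define SKETCH_MSEC_TO_TU(_msec) ((_msec) * 1000 / 1024)

int main(void)
{
	printf("%d\n", SKETCH_MSEC_TO_TU(500));	/* 488 TU ~= 499.7 msec */
	printf("%d\n", SKETCH_MSEC_TO_TU(1));	/* 0 TU: rounded away */
	return 0;
}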
/*
@@ -185,7 +184,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
}
}
- if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_END) {
+ if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
IWL_DEBUG_TE(mvm,
"TE ended - current time %lu, estimated end %lu\n",
jiffies, te_data->end_jiffies);
@@ -202,10 +201,9 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
iwl_mvm_te_check_disconnect(mvm, te_data->vif,
"No assocation and the time event is over already...");
iwl_mvm_te_clear_data(mvm, te_data);
- } else if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_START) {
+ } else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
te_data->running = true;
- te_data->end_jiffies = jiffies +
- TU_TO_JIFFIES(te_data->duration);
+ te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);
if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
@@ -270,10 +268,67 @@ static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
return true;
}
+/* used to convert from time event API v2 to v1 */
+#define TE_V2_DEP_POLICY_MSK (TE_V2_DEP_OTHER | TE_V2_DEP_TSF |\
+ TE_V2_EVENT_SOCIOPATHIC)
+static inline u16 te_v2_get_notify(__le16 policy)
+{
+ return le16_to_cpu(policy) & TE_V2_NOTIF_MSK;
+}
+
+static inline u16 te_v2_get_dep_policy(__le16 policy)
+{
+ return (le16_to_cpu(policy) & TE_V2_DEP_POLICY_MSK) >>
+ TE_V2_PLACEMENT_POS;
+}
+
+static inline u16 te_v2_get_absence(__le16 policy)
+{
+ return (le16_to_cpu(policy) & TE_V2_ABSENCE) >> TE_V2_ABSENCE_POS;
+}
+
+static void iwl_mvm_te_v2_to_v1(const struct iwl_time_event_cmd_v2 *cmd_v2,
+ struct iwl_time_event_cmd_v1 *cmd_v1)
+{
+ cmd_v1->id_and_color = cmd_v2->id_and_color;
+ cmd_v1->action = cmd_v2->action;
+ cmd_v1->id = cmd_v2->id;
+ cmd_v1->apply_time = cmd_v2->apply_time;
+ cmd_v1->max_delay = cmd_v2->max_delay;
+ cmd_v1->depends_on = cmd_v2->depends_on;
+ cmd_v1->interval = cmd_v2->interval;
+ cmd_v1->duration = cmd_v2->duration;
+ if (cmd_v2->repeat == TE_V2_REPEAT_ENDLESS)
+ cmd_v1->repeat = cpu_to_le32(TE_V1_REPEAT_ENDLESS);
+ else
+ cmd_v1->repeat = cpu_to_le32(cmd_v2->repeat);
+ cmd_v1->max_frags = cpu_to_le32(cmd_v2->max_frags);
+ cmd_v1->interval_reciprocal = 0; /* unused */
+
+ cmd_v1->dep_policy = cpu_to_le32(te_v2_get_dep_policy(cmd_v2->policy));
+ cmd_v1->is_present = cpu_to_le32(!te_v2_get_absence(cmd_v2->policy));
+ cmd_v1->notify = cpu_to_le32(te_v2_get_notify(cmd_v2->policy));
+}
+
+static int iwl_mvm_send_time_event_cmd(struct iwl_mvm *mvm,
+ const struct iwl_time_event_cmd_v2 *cmd)
+{
+ struct iwl_time_event_cmd_v1 cmd_v1;
+
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
+ return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
+ sizeof(*cmd), cmd);
+
+ iwl_mvm_te_v2_to_v1(cmd, &cmd_v1);
+ return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
+ sizeof(cmd_v1), &cmd_v1);
+}
+
+
static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mvm_time_event_data *te_data,
- struct iwl_time_event_cmd *te_cmd)
+ struct iwl_time_event_cmd_v2 *te_cmd)
{
static const u8 time_event_response[] = { TIME_EVENT_CMD };
struct iwl_notification_wait wait_time_event;
@@ -309,8 +364,7 @@ static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
ARRAY_SIZE(time_event_response),
iwl_mvm_time_event_response, te_data);
- ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
- sizeof(*te_cmd), te_cmd);
+ ret = iwl_mvm_send_time_event_cmd(mvm, te_cmd);
if (ret) {
IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
@@ -337,13 +391,12 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
- struct iwl_time_event_cmd time_cmd = {};
+ struct iwl_time_event_cmd_v2 time_cmd = {};
lockdep_assert_held(&mvm->mutex);
if (te_data->running &&
- time_after(te_data->end_jiffies,
- jiffies + TU_TO_JIFFIES(min_duration))) {
+ time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
jiffies_to_msecs(te_data->end_jiffies - jiffies));
return;
@@ -372,17 +425,14 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
time_cmd.apply_time =
cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG));
- time_cmd.dep_policy = TE_INDEPENDENT;
- time_cmd.is_present = cpu_to_le32(1);
- time_cmd.max_frags = cpu_to_le32(TE_FRAG_NONE);
+ time_cmd.max_frags = TE_V2_FRAG_NONE;
time_cmd.max_delay = cpu_to_le32(500);
/* TODO: why do we need to set interval = bi if it is not periodic? */
time_cmd.interval = cpu_to_le32(1);
- time_cmd.interval_reciprocal = cpu_to_le32(iwl_mvm_reciprocal(1));
time_cmd.duration = cpu_to_le32(duration);
- time_cmd.repeat = cpu_to_le32(1);
- time_cmd.notify = cpu_to_le32(TE_NOTIF_HOST_EVENT_START |
- TE_NOTIF_HOST_EVENT_END);
+ time_cmd.repeat = 1;
+ time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
+ TE_V2_NOTIF_HOST_EVENT_END);
iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}
@@ -396,7 +446,7 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif,
struct iwl_mvm_time_event_data *te_data)
{
- struct iwl_time_event_cmd time_cmd = {};
+ struct iwl_time_event_cmd_v2 time_cmd = {};
u32 id, uid;
int ret;
@@ -433,8 +483,7 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
- ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
- sizeof(time_cmd), &time_cmd);
+ ret = iwl_mvm_send_time_event_cmd(mvm, &time_cmd);
if (WARN_ON(ret))
return;
}
@@ -454,7 +503,7 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
- struct iwl_time_event_cmd time_cmd = {};
+ struct iwl_time_event_cmd_v2 time_cmd = {};
lockdep_assert_held(&mvm->mutex);
if (te_data->running) {
@@ -485,8 +534,6 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
}
time_cmd.apply_time = cpu_to_le32(0);
- time_cmd.dep_policy = cpu_to_le32(TE_INDEPENDENT);
- time_cmd.is_present = cpu_to_le32(1);
time_cmd.interval = cpu_to_le32(1);
/*
@@ -495,12 +542,12 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* scheduled. To improve the chances of it being scheduled, allow them
* to be fragmented, and in addition allow them to be delayed.
*/
- time_cmd.max_frags = cpu_to_le32(MSEC_TO_TU(duration)/20);
+ time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS);
time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
- time_cmd.repeat = cpu_to_le32(1);
- time_cmd.notify = cpu_to_le32(TE_NOTIF_HOST_EVENT_START |
- TE_NOTIF_HOST_EVENT_END);
+ time_cmd.repeat = 1;
+ time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
+ TE_V2_NOTIF_HOST_EVENT_END);
return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
index d6ae7f16ac11..1f3282dff513 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -391,8 +391,7 @@ static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
mvmsta = (void *)sta->drv_priv;
if (enable == mvmsta->tt_tx_protection)
continue;
- err = iwl_mvm_tx_protection(mvm, &mvmsta->lq_sta.lq,
- mvmsta, enable);
+ err = iwl_mvm_tx_protection(mvm, mvmsta, enable);
if (err) {
IWL_ERR(mvm, "Failed to %s Tx protection\n",
enable ? "enable" : "disable");
@@ -513,12 +512,39 @@ static const struct iwl_tt_params iwl7000_tt_params = {
.support_tx_backoff = true,
};
+static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
+ .ct_kill_entry = 118,
+ .ct_kill_exit = 96,
+ .ct_kill_duration = 5,
+ .dynamic_smps_entry = 114,
+ .dynamic_smps_exit = 110,
+ .tx_protection_entry = 114,
+ .tx_protection_exit = 108,
+ .tx_backoff = {
+ {.temperature = 112, .backoff = 300},
+ {.temperature = 113, .backoff = 800},
+ {.temperature = 114, .backoff = 1500},
+ {.temperature = 115, .backoff = 3000},
+ {.temperature = 116, .backoff = 5000},
+ {.temperature = 117, .backoff = 10000},
+ },
+ .support_ct_kill = true,
+ .support_dynamic_smps = true,
+ .support_tx_protection = true,
+ .support_tx_backoff = true,
+};
+
void iwl_mvm_tt_initialize(struct iwl_mvm *mvm)
{
struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
IWL_DEBUG_TEMP(mvm, "Initialize Thermal Throttling\n");
- tt->params = &iwl7000_tt_params;
+
+ if (mvm->cfg->high_temp)
+ tt->params = &iwl7000_high_temp_tt_params;
+ else
+ tt->params = &iwl7000_tt_params;
+
tt->throttle = false;
INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index f0e96a927407..e05440d90319 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -91,11 +91,10 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
/* High prio packet (wrt. BT coex) if it is EAPOL, MCAST or MGMT */
- if (info->band == IEEE80211_BAND_2GHZ &&
- (skb->protocol == cpu_to_be16(ETH_P_PAE) ||
- is_multicast_ether_addr(hdr->addr1) ||
- ieee80211_is_back_req(fc) ||
- ieee80211_is_mgmt(fc)))
+ if (info->band == IEEE80211_BAND_2GHZ &&
+ (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO ||
+ is_multicast_ether_addr(hdr->addr1) ||
+ ieee80211_is_back_req(fc) || ieee80211_is_mgmt(fc)))
tx_flags |= TX_CMD_FLG_BT_DIS;
if (ieee80211_has_morefrags(fc))
@@ -123,6 +122,8 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
* it
*/
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
+ } else if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
+ tx_cmd->pm_frame_timeout = cpu_to_le16(2);
} else {
tx_cmd->pm_frame_timeout = 0;
}
@@ -171,7 +172,7 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
}
/*
- * for data packets, rate info comes from the table inside he fw. This
+ * for data packets, rate info comes from the table inside the fw. This
* table is controlled by LINK_QUALITY commands
*/
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index 1e1332839e4a..a9c357491434 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -453,6 +453,29 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
}
+void iwl_mvm_dump_sram(struct iwl_mvm *mvm)
+{
+ const struct fw_img *img;
+ int ofs, len = 0;
+ u8 *buf;
+
+ if (!mvm->ucode_loaded)
+ return;
+
+ img = &mvm->fw->img[mvm->cur_ucode];
+ ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
+ len = img->sec[IWL_UCODE_SECTION_DATA].len;
+
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ iwl_trans_read_mem_bytes(mvm->trans, ofs, buf, len);
+ iwl_print_hex_error(mvm->trans, buf, len);
+
+ kfree(buf);
+}
+
/**
* iwl_mvm_send_lq_cmd() - Send link quality command
* @init: This command is sent as part of station initialization right
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index ff13458efc27..dc02cb9792af 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -273,9 +273,9 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)},
{IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)},
@@ -325,15 +325,15 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int ret;
iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg);
- if (iwl_trans == NULL)
- return -ENOMEM;
+ if (IS_ERR(iwl_trans))
+ return PTR_ERR(iwl_trans);
pci_set_drvdata(pdev, iwl_trans);
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
trans_pcie->drv = iwl_drv_start(iwl_trans, cfg);
- if (IS_ERR_OR_NULL(trans_pcie->drv)) {
+ if (IS_ERR(trans_pcie->drv)) {
ret = PTR_ERR(trans_pcie->drv);
goto out_free_trans;
}
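/*
 * Editor's note: a minimal sketch (not part of the patch) of the
 * ERR_PTR/IS_ERR/PTR_ERR idiom the probe path now uses. Returning NULL
 * loses the failure reason; encoding the errno in the pointer keeps it.
 * 'struct sketch_foo' and sketch_alloc() are hypothetical.
 */
#include <linux/err.h>
#include <linux/slab.h>

struct sketch_foo {
	int dummy;
};

static struct sketch_foo *sketch_alloc(void)
{
	struct sketch_foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return ERR_PTR(-ENOMEM);	/* errno travels in the pointer */
	return f;
}

/* caller side, mirroring iwl_pci_probe():
 *	f = sketch_alloc();
 *	if (IS_ERR(f))
 *		return PTR_ERR(f);
 */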
@@ -368,21 +368,19 @@ static void iwl_pci_remove(struct pci_dev *pdev)
static int iwl_pci_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_trans *iwl_trans = pci_get_drvdata(pdev);
-
/* Before you put code here, think about WoWLAN. You cannot check here
* whether WoWLAN is enabled or not, and your code will run even if
* WoWLAN is enabled - don't kill the NIC, someone may need it in Sx.
*/
- return iwl_trans_suspend(iwl_trans);
+ return 0;
}
static int iwl_pci_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_trans *iwl_trans = pci_get_drvdata(pdev);
+ struct iwl_trans *trans = pci_get_drvdata(pdev);
+ bool hw_rfkill;
/* Before you put code here, think about WoWLAN. You cannot check here
* whether WoWLAN is enabled or not, and your code will run even if
@@ -395,7 +393,15 @@ static int iwl_pci_resume(struct device *device)
*/
pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
- return iwl_trans_resume(iwl_trans);
+ if (!trans->op_mode)
+ return 0;
+
+ iwl_enable_rfkill_int(trans);
+
+ hw_rfkill = iwl_is_rfkill_set(trans);
+ iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+
+ return 0;
}
static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index b654dcdd048a..fa22639b63c9 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -392,7 +392,6 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
/*****************************************************
* Error handling
******************************************************/
-int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf);
void iwl_pcie_dump_csr(struct iwl_trans *trans);
/*****************************************************
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index fd848cd1583e..3f237b42eb36 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -112,15 +112,16 @@
*/
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
- int s = rxq->read - rxq->write;
-
- if (s <= 0)
- s += RX_QUEUE_SIZE;
- /* keep some buffer to not confuse full and empty queue */
- s -= 2;
- if (s < 0)
- s = 0;
- return s;
+ /* Make sure RX_QUEUE_SIZE is a power of 2 */
+ BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1));
+
+ /*
+ * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
+ * between empty and completely full queues.
+ * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
+ * defined for negative dividends.
+ */
+ return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1);
}
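/*
 * Editor's note: an illustrative, self-contained sketch (not part of the
 * patch) of why the mask above works for negative dividends. Unsigned
 * arithmetic in C wraps modulo 2^N, so (read - write - 1) & (SIZE - 1)
 * yields the free-slot count even when write is numerically ahead of
 * read, while always keeping one slot empty.
 */
#include <assert.h>

#define SKETCH_QUEUE_SIZE 256u	/* a power of 2, like RX_QUEUE_SIZE */

static unsigned int sketch_rxq_space(unsigned int read, unsigned int write)
{
	return (read - write - 1u) & (SKETCH_QUEUE_SIZE - 1u);
}

int main(void)
{
	assert(sketch_rxq_space(3, 250) == 8);	/* wrapped: -248 mod 256 */
	assert(sketch_rxq_space(250, 3) == 246);	/* plain case */
	assert(sketch_rxq_space(5, 5) == 255);	/* empty queue keeps one hole */
	return 0;
}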
/*
@@ -793,7 +794,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
}
iwl_pcie_dump_csr(trans);
- iwl_pcie_dump_fh(trans, NULL);
+ iwl_dump_fh(trans, NULL);
set_bit(STATUS_FW_ERROR, &trans_pcie->status);
clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
@@ -1120,6 +1121,7 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
struct iwl_trans *trans = data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 inta, inta_mask;
+ irqreturn_t ret = IRQ_NONE;
lockdep_assert_held(&trans_pcie->irq_lock);
@@ -1168,10 +1170,8 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
/* the thread will service interrupts and re-enable them */
if (likely(inta))
return IRQ_WAKE_THREAD;
- else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
- !trans_pcie->inta)
- iwl_enable_interrupts(trans);
- return IRQ_HANDLED;
+
+ ret = IRQ_HANDLED;
none:
/* re-enable interrupts here since we don't have anything to service. */
@@ -1180,7 +1180,7 @@ none:
!trans_pcie->inta)
iwl_enable_interrupts(trans);
- return IRQ_NONE;
+ return ret;
}
/* interrupt handler using ict table, with this interrupt driver will
@@ -1199,6 +1199,7 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
u32 val = 0;
u32 read;
unsigned long flags;
+ irqreturn_t ret = IRQ_NONE;
if (!trans)
return IRQ_NONE;
@@ -1211,7 +1212,7 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
* use legacy interrupt.
*/
if (unlikely(!trans_pcie->use_ict)) {
- irqreturn_t ret = iwl_pcie_isr(irq, data);
+ ret = iwl_pcie_isr(irq, data);
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
return ret;
}
@@ -1280,17 +1281,9 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
if (likely(inta)) {
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
return IRQ_WAKE_THREAD;
- } else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
- !trans_pcie->inta) {
- /* Allow interrupt if was disabled by this handler and
- * no tasklet was schedules, We should not enable interrupt,
- * tasklet will enable it.
- */
- iwl_enable_interrupts(trans);
}
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- return IRQ_HANDLED;
+ ret = IRQ_HANDLED;
none:
/* re-enable interrupts here since we don't have anything to service.
@@ -1301,5 +1294,5 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
iwl_enable_interrupts(trans);
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- return IRQ_NONE;
+ return ret;
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 390e2f058aff..bad95d28d50d 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -820,25 +820,6 @@ static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
}
-#ifdef CONFIG_PM_SLEEP
-static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
-{
- return 0;
-}
-
-static int iwl_trans_pcie_resume(struct iwl_trans *trans)
-{
- bool hw_rfkill;
-
- iwl_enable_rfkill_int(trans);
-
- hw_rfkill = iwl_is_rfkill_set(trans);
- iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
-
- return 0;
-}
-#endif /* CONFIG_PM_SLEEP */
-
static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
unsigned long *flags)
{
@@ -1038,71 +1019,6 @@ static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
-static const char *get_fh_string(int cmd)
-{
-#define IWL_CMD(x) case x: return #x
- switch (cmd) {
- IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
- IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
- IWL_CMD(FH_RSCSR_CHNL0_WPTR);
- IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
- IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
- IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
- IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
- IWL_CMD(FH_TSSR_TX_STATUS_REG);
- IWL_CMD(FH_TSSR_TX_ERROR_REG);
- default:
- return "UNKNOWN";
- }
-#undef IWL_CMD
-}
-
-int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf)
-{
- int i;
- static const u32 fh_tbl[] = {
- FH_RSCSR_CHNL0_STTS_WPTR_REG,
- FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- FH_RSCSR_CHNL0_WPTR,
- FH_MEM_RCSR_CHNL0_CONFIG_REG,
- FH_MEM_RSSR_SHARED_CTRL_REG,
- FH_MEM_RSSR_RX_STATUS_REG,
- FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
- FH_TSSR_TX_STATUS_REG,
- FH_TSSR_TX_ERROR_REG
- };
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (buf) {
- int pos = 0;
- size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
-
- *buf = kmalloc(bufsz, GFP_KERNEL);
- if (!*buf)
- return -ENOMEM;
-
- pos += scnprintf(*buf + pos, bufsz - pos,
- "FH register values:\n");
-
- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
- pos += scnprintf(*buf + pos, bufsz - pos,
- " %34s: 0X%08x\n",
- get_fh_string(fh_tbl[i]),
- iwl_read_direct32(trans, fh_tbl[i]));
-
- return pos;
- }
-#endif
-
- IWL_ERR(trans, "FH register values:\n");
- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
- IWL_ERR(trans, " %34s: 0X%08x\n",
- get_fh_string(fh_tbl[i]),
- iwl_read_direct32(trans, fh_tbl[i]));
-
- return 0;
-}
-
static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
@@ -1183,18 +1099,7 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans)
} while (0)
/* file operation */
-#define DEBUGFS_READ_FUNC(name) \
-static ssize_t iwl_dbgfs_##name##_read(struct file *file, \
- char __user *user_buf, \
- size_t count, loff_t *ppos);
-
-#define DEBUGFS_WRITE_FUNC(name) \
-static ssize_t iwl_dbgfs_##name##_write(struct file *file, \
- const char __user *user_buf, \
- size_t count, loff_t *ppos);
-
#define DEBUGFS_READ_FILE_OPS(name) \
- DEBUGFS_READ_FUNC(name); \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.read = iwl_dbgfs_##name##_read, \
.open = simple_open, \
@@ -1202,7 +1107,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
};
#define DEBUGFS_WRITE_FILE_OPS(name) \
- DEBUGFS_WRITE_FUNC(name); \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.write = iwl_dbgfs_##name##_write, \
.open = simple_open, \
@@ -1210,8 +1114,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
};
#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
- DEBUGFS_READ_FUNC(name); \
- DEBUGFS_WRITE_FUNC(name); \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.write = iwl_dbgfs_##name##_write, \
.read = iwl_dbgfs_##name##_read, \
@@ -1395,7 +1297,7 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
int pos = 0;
ssize_t ret = -EFAULT;
- ret = pos = iwl_pcie_dump_fh(trans, &buf);
+ ret = pos = iwl_dump_fh(trans, &buf);
if (buf) {
ret = simple_read_from_buffer(user_buf,
count, ppos, buf, pos);
@@ -1459,10 +1361,6 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
-#ifdef CONFIG_PM_SLEEP
- .suspend = iwl_trans_pcie_suspend,
- .resume = iwl_trans_pcie_resume,
-#endif
.write8 = iwl_trans_pcie_write8,
.write32 = iwl_trans_pcie_write32,
.read32 = iwl_trans_pcie_read32,
@@ -1488,9 +1386,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans = kzalloc(sizeof(struct iwl_trans) +
sizeof(struct iwl_trans_pcie), GFP_KERNEL);
-
- if (!trans)
- return NULL;
+ if (!trans) {
+ err = -ENOMEM;
+ goto out;
+ }
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1502,15 +1401,20 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
spin_lock_init(&trans_pcie->reg_lock);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
- if (pci_enable_device(pdev)) {
- err = -ENODEV;
- goto out_no_pci;
+ if (!cfg->base_params->pcie_l1_allowed) {
+ /*
+ * W/A - seems to solve weird behavior. We need to remove this
+ * if we don't want to stay in L1 all the time. This wastes a
+ * lot of power.
+ */
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
+ PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
}
- /* W/A - seems to solve weird behavior. We need to remove this if we
- * don't want to stay in L1 all the time. This wastes a lot of power */
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
- PCIE_LINK_STATE_CLKPM);
+ err = pci_enable_device(pdev);
+ if (err)
+ goto out_no_pci;
pci_set_master(pdev);
@@ -1579,17 +1483,20 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
SLAB_HWCACHE_ALIGN,
NULL);
- if (!trans->dev_cmd_pool)
+ if (!trans->dev_cmd_pool) {
+ err = -ENOMEM;
goto out_pci_disable_msi;
+ }
trans_pcie->inta_mask = CSR_INI_SET_MASK;
if (iwl_pcie_alloc_ict(trans))
goto out_free_cmd_pool;
- if (request_threaded_irq(pdev->irq, iwl_pcie_isr_ict,
- iwl_pcie_irq_handler,
- IRQF_SHARED, DRV_NAME, trans)) {
+ err = request_threaded_irq(pdev->irq, iwl_pcie_isr_ict,
+ iwl_pcie_irq_handler,
+ IRQF_SHARED, DRV_NAME, trans);
+ if (err) {
IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
goto out_free_ict;
}
@@ -1608,5 +1515,6 @@ out_pci_disable_device:
pci_disable_device(pdev);
out_no_pci:
kfree(trans);
- return NULL;
+out:
+ return ERR_PTR(err);
}
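
With the allocation path now returning ERR_PTR(err) instead of NULL, callers can distinguish -ENOMEM from -ENODEV. A self-contained userspace sketch of the err-pointer convention, simplified from what include/linux/err.h provides (alloc_transport() here is invented for illustration):

#include <stdio.h>

#define MAX_ERRNO	4095

/* Simplified stand-ins for the kernel's ERR_PTR/PTR_ERR/IS_ERR. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *alloc_transport(int simulate_oom)
{
	static char transport[16];

	return simulate_oom ? ERR_PTR(-12 /* ENOMEM */) : transport;
}

int main(void)
{
	void *trans = alloc_transport(1);

	if (IS_ERR(trans))
		printf("alloc failed: %ld\n", PTR_ERR(trans));
	return 0;
}
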
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index c47c92165aba..f45eb29c2ede 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -65,18 +65,30 @@
***************************************************/
static int iwl_queue_space(const struct iwl_queue *q)
{
- int s = q->read_ptr - q->write_ptr;
-
- if (q->read_ptr > q->write_ptr)
- s -= q->n_bd;
-
- if (s <= 0)
- s += q->n_window;
- /* keep some reserve to not confuse empty and full situations */
- s -= 2;
- if (s < 0)
- s = 0;
- return s;
+ unsigned int max;
+ unsigned int used;
+
+ /*
+ * To avoid ambiguity between empty and completely full queues, there
+ * should always be fewer than q->n_bd elements in the queue.
+ * If q->n_window is smaller than q->n_bd, there is no need to reserve
+ * any queue entries for this purpose.
+ */
+ if (q->n_window < q->n_bd)
+ max = q->n_window;
+ else
+ max = q->n_bd - 1;
+
+ /*
+ * q->n_bd is a power of 2, so the following is equivalent to modulo by
+ * q->n_bd and is well defined for negative dividends.
+ */
+ used = (q->write_ptr - q->read_ptr) & (q->n_bd - 1);
+
+ if (WARN_ON(used > max))
+ return 0;
+
+ return max - used;
}
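
The masking above deserves a note: because q->n_bd is a power of two, (write_ptr - read_ptr) & (n_bd - 1) gives the ring occupancy even after the write pointer wraps below the read pointer. A minimal standalone sketch, with hypothetical values:

#include <assert.h>

/* Hypothetical values, not driver code: n_bd must be a power of 2
 * for the mask to behave as a modulo on the wrapped difference.
 */
static unsigned int ring_used(unsigned int write_ptr, unsigned int read_ptr,
			      unsigned int n_bd)
{
	return (write_ptr - read_ptr) & (n_bd - 1);
}

int main(void)
{
	assert(ring_used(10, 4, 256) == 6);	/* no wrap */
	assert(ring_used(2, 250, 256) == 8);	/* write_ptr wrapped */
	return 0;
}
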
/*
@@ -451,13 +463,10 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
return -EINVAL;
}
- if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
+ if (WARN(addr & ~IWL_TX_DMA_MASK,
+ "Unaligned address = %llx\n", (unsigned long long)addr))
return -EINVAL;
- if (unlikely(addr & ~IWL_TX_DMA_MASK))
- IWL_ERR(trans, "Unaligned address = %llx\n",
- (unsigned long long)addr);
-
iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
return 0;
@@ -829,7 +838,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
sizeof(struct iwl_txq), GFP_KERNEL);
if (!trans_pcie->txq) {
IWL_ERR(trans, "Not enough memory for txq\n");
- ret = ENOMEM;
+ ret = -ENOMEM;
goto error;
}
@@ -1153,10 +1162,10 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
/*
* iwl_pcie_enqueue_hcmd - enqueue a uCode command
* @priv: device private data pointer
- * @cmd: a point to the ucode command structure
+ * @cmd: a pointer to the ucode command structure
*
- * The function returns < 0 values to indicate the operation is
- * failed. On success, it turns the index (> 0) of command in the
+ * The function returns < 0 values to indicate the operation
+ * failed. On success, it returns the index (>= 0) of the command in the
* command queue.
*/
static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
@@ -1619,10 +1628,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
txq = &trans_pcie->txq[txq_id];
q = &txq->q;
- if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
- WARN_ON_ONCE(1);
+ if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
+ "TX on unused queue %d\n", txq_id))
return -EINVAL;
- }
spin_lock(&txq->lock);
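
This rewrite works because WARN_ONCE(cond, ...) evaluates to cond, so the test and the one-shot diagnostic collapse into a single if. A rough userspace approximation, using the same GNU C statement-expression trick as the kernel macro:

#include <stdio.h>

/* Userspace approximation of WARN_ONCE(): evaluates to the condition,
 * prints the message only on the first hit (no taint or stack dump).
 */
#define WARN_ONCE(cond, ...) ({				\
	static int warned;				\
	int ___c = !!(cond);				\
	if (___c && !warned) {				\
		warned = 1;				\
		fprintf(stderr, __VA_ARGS__);		\
	}						\
	___c;						\
})

int main(void)
{
	int q;

	for (q = 0; q < 3; q++)
		if (WARN_ONCE(q == 1, "TX on unused queue %d\n", q))
			continue;	/* reject the bad queue, warn once */
	return 0;
}
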
@@ -1632,7 +1640,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
* Check here that the packets are in the right place on the ring.
*/
wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
- WARN_ONCE(trans_pcie->txq[txq_id].ampdu &&
+ WARN_ONCE(txq->ampdu &&
(wifi_seq & 0xff) != q->write_ptr,
"Q: %d WiFi Seq %d tfdNum %d",
txq_id, wifi_seq, q->write_ptr);
@@ -1664,7 +1672,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
*/
len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
- tb1_len = (len + 3) & ~3;
+ tb1_len = ALIGN(len, 4);
/* Tell NIC about any 2-byte padding after MAC header */
if (tb1_len != len)
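
ALIGN(len, 4) is bit-for-bit the old (len + 3) & ~3; the macro just names the intent. A quick standalone check of the equivalence:

#include <assert.h>

/* ALIGN_UP mirrors the kernel's ALIGN for power-of-two alignments. */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	assert(ALIGN_UP(5, 4) == 8);
	assert(ALIGN_UP(8, 4) == 8);
	assert(ALIGN_UP(5, 4) == ((5 + 3) & ~3));
	return 0;
}
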
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index efae07e05c80..6fef746345bc 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -1017,7 +1017,7 @@ static int lbs_add_mesh(struct lbs_private *priv)
mesh_dev->netdev_ops = &mesh_netdev_ops;
mesh_dev->ethtool_ops = &lbs_ethtool_ops;
- memcpy(mesh_dev->dev_addr, priv->dev->dev_addr, ETH_ALEN);
+ eth_hw_addr_inherit(mesh_dev, priv->dev);
SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index cb34c7895f2a..2cd3f54e1efa 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -867,7 +867,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
if (WARN_ON(skb->len < 10)) {
/* Should not happen; just a sanity check for addr1 use */
- dev_kfree_skb(skb);
+ ieee80211_free_txskb(hw, skb);
return;
}
@@ -884,13 +884,13 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
}
if (WARN(!channel, "TX w/o channel - queue = %d\n", txi->hw_queue)) {
- dev_kfree_skb(skb);
+ ieee80211_free_txskb(hw, skb);
return;
}
if (data->idle && !data->tmp_chan) {
wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n");
- dev_kfree_skb(skb);
+ ieee80211_free_txskb(hw, skb);
return;
}
@@ -1364,6 +1364,7 @@ static const struct nla_policy hwsim_testmode_policy[HWSIM_TM_ATTR_MAX + 1] = {
static int hwsim_fops_ps_write(void *dat, u64 val);
static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
void *data, int len)
{
struct mac80211_hwsim_data *hwsim = hw->priv;
@@ -2309,7 +2310,9 @@ static int __init init_mac80211_hwsim(void)
hw->flags |= IEEE80211_HW_SUPPORTS_RC_TABLE;
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
- WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+ WIPHY_FLAG_AP_UAPSD;
+ hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
/* ask mac80211 to reserve space for magic */
hw->vif_data_size = sizeof(struct hwsim_vif_priv);
@@ -2525,8 +2528,10 @@ static int __init init_mac80211_hwsim(void)
}
hwsim_mon = alloc_netdev(0, "hwsim%d", hwsim_mon_setup);
- if (hwsim_mon == NULL)
+ if (hwsim_mon == NULL) {
+ err = -ENOMEM;
goto failed;
+ }
rtnl_lock();
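
This hwsim fix follows a pattern repeated across the series (compare the iwl_trans_pcie_alloc() changes above): each failure path must set a meaningful error code before jumping to shared cleanup, or the function can report success on failure. A generic sketch, all names invented:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative only: every failure path sets err before the goto,
 * so the shared exit never returns a stale 0.
 */
static int init_thing(int fail)
{
	int err = 0;
	void *mem;

	mem = fail ? NULL : malloc(16);
	if (!mem) {
		err = -ENOMEM;
		goto out;
	}
	free(mem);
out:
	return err;
}

int main(void)
{
	printf("init_thing -> %d\n", init_thing(1));
	return 0;
}
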
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 41e9d25a2d8e..0b803c05cab3 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -292,6 +292,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
struct mwifiex_ie_types_extcap *ext_cap;
int ret_len = 0;
struct ieee80211_supported_band *sband;
+ struct ieee_types_header *hdr;
u8 radio_type;
if (!buffer || !*buffer)
@@ -388,17 +389,24 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
}
if (bss_desc->bcn_ext_cap) {
+ hdr = (void *)bss_desc->bcn_ext_cap;
ext_cap = (struct mwifiex_ie_types_extcap *) *buffer;
memset(ext_cap, 0, sizeof(struct mwifiex_ie_types_extcap));
ext_cap->header.type = cpu_to_le16(WLAN_EID_EXT_CAPABILITY);
- ext_cap->header.len = cpu_to_le16(sizeof(ext_cap->ext_cap));
+ ext_cap->header.len = cpu_to_le16(hdr->len);
- memcpy((u8 *)ext_cap + sizeof(struct mwifiex_ie_types_header),
+ memcpy((u8 *)ext_cap->ext_capab,
bss_desc->bcn_ext_cap + sizeof(struct ieee_types_header),
le16_to_cpu(ext_cap->header.len));
- *buffer += sizeof(struct mwifiex_ie_types_extcap);
- ret_len += sizeof(struct mwifiex_ie_types_extcap);
+ if (hdr->len > 3 &&
+ ext_cap->ext_capab[3] & WLAN_EXT_CAPA4_INTERWORKING_ENABLED)
+ priv->hs2_enabled = true;
+ else
+ priv->hs2_enabled = false;
+
+ *buffer += sizeof(struct mwifiex_ie_types_extcap) + hdr->len;
+ ret_len += sizeof(struct mwifiex_ie_types_extcap) + hdr->len;
}
return ret_len;
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index a78e0651409c..21c688264708 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -69,7 +69,8 @@ mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr,
memcpy(&tx_header->eth803_hdr, skb_src->data, dt_offset);
/* Copy SNAP header */
- snap.snap_type = *(u16 *) ((u8 *)skb_src->data + dt_offset);
+ snap.snap_type =
+ le16_to_cpu(*(__le16 *) ((u8 *)skb_src->data + dt_offset));
dt_offset += sizeof(u16);
memcpy(&tx_header->rfc1042_hdr, &snap, sizeof(struct rfc_1042_hdr));
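
This conversion, like the later ones in main.h and sdio.c, replaces a raw *(u16 *) load with an explicit little-endian accessor: the buffer's byte order is fixed by the wire format while the host's is not. A portable sketch of the underlying idea (not the kernel helpers themselves):

#include <stdint.h>
#include <stdio.h>

/* Portable assembly from bytes: correct on little- and big-endian
 * hosts alike, unlike dereferencing the buffer as a u16.
 */
static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	uint8_t wire[2] = { 0x34, 0x12 };	/* little-endian 0x1234 */

	printf("value = 0x%04x\n", get_le16(wire));
	return 0;
}
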
@@ -189,7 +190,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
skb_src = skb_dequeue(&pra_list->skb_head);
- pra_list->total_pkts_size -= skb_src->len;
+ pra_list->total_pkt_count--;
atomic_dec(&priv->wmm.tx_pkts_queued);
@@ -268,7 +269,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
skb_queue_tail(&pra_list->skb_head, skb_aggr);
- pra_list->total_pkts_size += skb_aggr->len;
+ pra_list->total_pkt_count++;
atomic_inc(&priv->wmm.tx_pkts_queued);
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 89459db4c53b..fbad00a5abc8 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -25,7 +25,9 @@ module_param(reg_alpha2, charp, 0);
static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = {
{
- .max = 2, .types = BIT(NL80211_IFTYPE_STATION),
+ .max = 2, .types = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT),
},
{
.max = 1, .types = BIT(NL80211_IFTYPE_AP),
@@ -189,6 +191,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct sk_buff *skb;
u16 pkt_len;
const struct ieee80211_mgmt *mgmt;
+ struct mwifiex_txinfo *tx_info;
struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
if (!buf || !len) {
@@ -216,6 +219,10 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
return -ENOMEM;
}
+ tx_info = MWIFIEX_SKB_TXCB(skb);
+ tx_info->bss_num = priv->bss_num;
+ tx_info->bss_type = priv->bss_type;
+
mwifiex_form_mgmt_frame(skb, buf, len);
mwifiex_queue_tx_pkt(priv, skb);
@@ -235,16 +242,20 @@ mwifiex_cfg80211_mgmt_frame_register(struct wiphy *wiphy,
u16 frame_type, bool reg)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
+ u32 mask;
if (reg)
- priv->mgmt_frame_mask |= BIT(frame_type >> 4);
+ mask = priv->mgmt_frame_mask | BIT(frame_type >> 4);
else
- priv->mgmt_frame_mask &= ~BIT(frame_type >> 4);
-
- mwifiex_send_cmd_async(priv, HostCmd_CMD_MGMT_FRAME_REG,
- HostCmd_ACT_GEN_SET, 0, &priv->mgmt_frame_mask);
+ mask = priv->mgmt_frame_mask & ~BIT(frame_type >> 4);
- wiphy_dbg(wiphy, "info: mgmt frame registered\n");
+ if (mask != priv->mgmt_frame_mask) {
+ priv->mgmt_frame_mask = mask;
+ mwifiex_send_cmd_async(priv, HostCmd_CMD_MGMT_FRAME_REG,
+ HostCmd_ACT_GEN_SET, 0,
+ &priv->mgmt_frame_mask);
+ wiphy_dbg(wiphy, "info: mgmt frame registered\n");
+ }
}
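
The BIT(frame_type >> 4) mapping relies on the IEEE 802.11 frame-control layout, where the subtype occupies bits 4-7, so each management subtype gets one mask bit; with this change the firmware command goes out only when the mask actually changes. A toy illustration with a hypothetical subtype value:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1U << (n))

int main(void)
{
	uint32_t mask = 0, old_mask;
	/* IEEE 802.11 frame control: subtype sits in bits 4-7;
	 * 0x0040 is a management frame, subtype 4 (probe request).
	 */
	uint16_t probe_req = 0x0040;

	old_mask = mask;
	mask |= BIT(probe_req >> 4);
	if (mask != old_mask)
		printf("mask now 0x%08x - send the firmware command\n", mask);
	return 0;
}
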
/*
@@ -1497,6 +1508,7 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
" reason code %d\n", priv->cfg_bssid, reason_code);
memset(priv->cfg_bssid, 0, ETH_ALEN);
+ priv->hs2_enabled = false;
return 0;
}
@@ -2296,10 +2308,9 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
}
EXPORT_SYMBOL_GPL(mwifiex_del_virtual_intf);
-#ifdef CONFIG_PM
static bool
-mwifiex_is_pattern_supported(struct cfg80211_wowlan_trig_pkt_pattern *pat,
- s8 *byte_seq)
+mwifiex_is_pattern_supported(struct cfg80211_pkt_pattern *pat, s8 *byte_seq,
+ u8 max_byte_seq)
{
int j, k, valid_byte_cnt = 0;
bool dont_care_byte = false;
@@ -2317,16 +2328,17 @@ mwifiex_is_pattern_supported(struct cfg80211_wowlan_trig_pkt_pattern *pat,
dont_care_byte = true;
}
- if (valid_byte_cnt > MAX_BYTESEQ)
+ if (valid_byte_cnt > max_byte_seq)
return false;
}
}
- byte_seq[MAX_BYTESEQ] = valid_byte_cnt;
+ byte_seq[max_byte_seq] = valid_byte_cnt;
return true;
}
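
The helper now takes the sequence size as a parameter but keeps the driver's convention that byte_seq is allocated one slot larger than the maximum, with the trailing slot carrying the count of valid pattern bytes. A hypothetical illustration of that convention:

#include <stdio.h>
#include <string.h>

#define SEQ_MAX	6	/* sized like MWIFIEX_MEF_MAX_BYTESEQ, for flavor */

/* Store the pattern and tag its length in the extra trailing slot. */
static void set_seq(unsigned char *seq, const void *data, unsigned char len)
{
	memcpy(seq, data, len);
	seq[SEQ_MAX] = len;
}

int main(void)
{
	unsigned char seq[SEQ_MAX + 1] = { 0 };

	set_seq(seq, "\x33\x33", 2);
	printf("valid bytes: %d\n", seq[SEQ_MAX]);
	return 0;
}
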
+#ifdef CONFIG_PM
static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
struct cfg80211_wowlan *wowlan)
{
@@ -2335,7 +2347,7 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
struct mwifiex_mef_entry *mef_entry;
int i, filt_num = 0, ret;
bool first_pat = true;
- u8 byte_seq[MAX_BYTESEQ + 1];
+ u8 byte_seq[MWIFIEX_MEF_MAX_BYTESEQ + 1];
const u8 ipv4_mc_mac[] = {0x33, 0x33};
const u8 ipv6_mc_mac[] = {0x01, 0x00, 0x5e};
struct mwifiex_private *priv =
@@ -2365,7 +2377,8 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
for (i = 0; i < wowlan->n_patterns; i++) {
memset(byte_seq, 0, sizeof(byte_seq));
if (!mwifiex_is_pattern_supported(&wowlan->patterns[i],
- byte_seq)) {
+ byte_seq,
+ MWIFIEX_MEF_MAX_BYTESEQ)) {
wiphy_err(wiphy, "Pattern not supported\n");
kfree(mef_entry);
return -EOPNOTSUPP;
@@ -2373,16 +2386,16 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
if (!wowlan->patterns[i].pkt_offset) {
if (!(byte_seq[0] & 0x01) &&
- (byte_seq[MAX_BYTESEQ] == 1)) {
+ (byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 1)) {
mef_cfg.criteria |= MWIFIEX_CRITERIA_UNICAST;
continue;
} else if (is_broadcast_ether_addr(byte_seq)) {
mef_cfg.criteria |= MWIFIEX_CRITERIA_BROADCAST;
continue;
} else if ((!memcmp(byte_seq, ipv4_mc_mac, 2) &&
- (byte_seq[MAX_BYTESEQ] == 2)) ||
+ (byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 2)) ||
(!memcmp(byte_seq, ipv6_mc_mac, 3) &&
- (byte_seq[MAX_BYTESEQ] == 3))) {
+ (byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 3))) {
mef_cfg.criteria |= MWIFIEX_CRITERIA_MULTICAST;
continue;
}
@@ -2408,7 +2421,8 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
mef_entry->filter[filt_num].repeat = 16;
memcpy(mef_entry->filter[filt_num].byte_seq, priv->curr_addr,
ETH_ALEN);
- mef_entry->filter[filt_num].byte_seq[MAX_BYTESEQ] = ETH_ALEN;
+ mef_entry->filter[filt_num].byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] =
+ ETH_ALEN;
mef_entry->filter[filt_num].offset = 14;
mef_entry->filter[filt_num].filt_type = TYPE_EQ;
if (filt_num)
@@ -2442,6 +2456,119 @@ static void mwifiex_cfg80211_set_wakeup(struct wiphy *wiphy,
}
#endif
+static int mwifiex_get_coalesce_pkt_type(u8 *byte_seq)
+{
+ const u8 ipv4_mc_mac[] = {0x33, 0x33};
+ const u8 ipv6_mc_mac[] = {0x01, 0x00, 0x5e};
+ const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff};
+
+ if ((byte_seq[0] & 0x01) &&
+ (byte_seq[MWIFIEX_COALESCE_MAX_BYTESEQ] == 1))
+ return PACKET_TYPE_UNICAST;
+ else if (!memcmp(byte_seq, bc_mac, 4))
+ return PACKET_TYPE_BROADCAST;
+ else if ((!memcmp(byte_seq, ipv4_mc_mac, 2) &&
+ byte_seq[MWIFIEX_COALESCE_MAX_BYTESEQ] == 2) ||
+ (!memcmp(byte_seq, ipv6_mc_mac, 3) &&
+ byte_seq[MWIFIEX_COALESCE_MAX_BYTESEQ] == 3))
+ return PACKET_TYPE_MULTICAST;
+
+ return 0;
+}
+
+static int
+mwifiex_fill_coalesce_rule_info(struct mwifiex_private *priv,
+ struct cfg80211_coalesce_rules *crule,
+ struct mwifiex_coalesce_rule *mrule)
+{
+ u8 byte_seq[MWIFIEX_COALESCE_MAX_BYTESEQ + 1];
+ struct filt_field_param *param;
+ int i;
+
+ mrule->max_coalescing_delay = crule->delay;
+
+ param = mrule->params;
+
+ for (i = 0; i < crule->n_patterns; i++) {
+ memset(byte_seq, 0, sizeof(byte_seq));
+ if (!mwifiex_is_pattern_supported(&crule->patterns[i],
+ byte_seq,
+ MWIFIEX_COALESCE_MAX_BYTESEQ)) {
+ dev_err(priv->adapter->dev, "Pattern not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!crule->patterns[i].pkt_offset) {
+ u8 pkt_type;
+
+ pkt_type = mwifiex_get_coalesce_pkt_type(byte_seq);
+ if (pkt_type && mrule->pkt_type) {
+ dev_err(priv->adapter->dev,
+ "Multiple packet types not allowed\n");
+ return -EOPNOTSUPP;
+ } else if (pkt_type) {
+ mrule->pkt_type = pkt_type;
+ continue;
+ }
+ }
+
+ if (crule->condition == NL80211_COALESCE_CONDITION_MATCH)
+ param->operation = RECV_FILTER_MATCH_TYPE_EQ;
+ else
+ param->operation = RECV_FILTER_MATCH_TYPE_NE;
+
+ param->operand_len = byte_seq[MWIFIEX_COALESCE_MAX_BYTESEQ];
+ memcpy(param->operand_byte_stream, byte_seq,
+ param->operand_len);
+ param->offset = crule->patterns[i].pkt_offset;
+ param++;
+
+ mrule->num_of_fields++;
+ }
+
+ if (!mrule->pkt_type) {
+ dev_err(priv->adapter->dev,
+ "Packet type can not be determined\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy,
+ struct cfg80211_coalesce *coalesce)
+{
+ struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
+ int i, ret;
+ struct mwifiex_ds_coalesce_cfg coalesce_cfg;
+ struct mwifiex_private *priv =
+ mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
+
+ memset(&coalesce_cfg, 0, sizeof(coalesce_cfg));
+ if (!coalesce) {
+ dev_dbg(adapter->dev,
+ "Disable coalesce and reset all previous rules\n");
+ return mwifiex_send_cmd_sync(priv, HostCmd_CMD_COALESCE_CFG,
+ HostCmd_ACT_GEN_SET, 0,
+ &coalesce_cfg);
+ }
+
+ coalesce_cfg.num_of_rules = coalesce->n_rules;
+ for (i = 0; i < coalesce->n_rules; i++) {
+ ret = mwifiex_fill_coalesce_rule_info(priv, &coalesce->rules[i],
+ &coalesce_cfg.rule[i]);
+ if (ret) {
+ dev_err(priv->adapter->dev,
+ "Recheck the patterns provided for rule %d\n",
+ i + 1);
+ return ret;
+ }
+ }
+
+ return mwifiex_send_cmd_sync(priv, HostCmd_CMD_COALESCE_CFG,
+ HostCmd_ACT_GEN_SET, 0, &coalesce_cfg);
+}
+
/* station cfg80211 operations */
static struct cfg80211_ops mwifiex_cfg80211_ops = {
.add_virtual_intf = mwifiex_add_virtual_intf,
@@ -2476,12 +2603,13 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
.resume = mwifiex_cfg80211_resume,
.set_wakeup = mwifiex_cfg80211_set_wakeup,
#endif
+ .set_coalesce = mwifiex_cfg80211_set_coalesce,
};
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support mwifiex_wowlan_support = {
.flags = WIPHY_WOWLAN_MAGIC_PKT,
- .n_patterns = MWIFIEX_MAX_FILTERS,
+ .n_patterns = MWIFIEX_MEF_MAX_FILTERS,
.pattern_min_len = 1,
.pattern_max_len = MWIFIEX_MAX_PATTERN_LEN,
.max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN,
@@ -2499,6 +2627,15 @@ static bool mwifiex_is_valid_alpha2(const char *alpha2)
return false;
}
+static const struct wiphy_coalesce_support mwifiex_coalesce_support = {
+ .n_rules = MWIFIEX_COALESCE_MAX_RULES,
+ .max_delay = MWIFIEX_MAX_COALESCING_DELAY,
+ .n_patterns = MWIFIEX_COALESCE_MAX_FILTERS,
+ .pattern_min_len = 1,
+ .pattern_max_len = MWIFIEX_MAX_PATTERN_LEN,
+ .max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN,
+};
+
/*
* This function registers the device with CFG802.11 subsystem.
*
@@ -2560,6 +2697,8 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wiphy->wowlan = &mwifiex_wowlan_support;
#endif
+ wiphy->coalesce = &mwifiex_coalesce_support;
+
wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index 5178c4630d89..9eefacbc844b 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -404,11 +404,43 @@ mwifiex_is_rate_auto(struct mwifiex_private *priv)
return false;
}
-/*
- * This function gets the supported data rates.
- *
- * The function works in both Ad-Hoc and infra mode by printing the
- * band and returning the data rates.
+/* This function gets the supported data rates from the bitmask inside
+ * cfg80211_scan_request.
+ */
+u32 mwifiex_get_rates_from_cfg80211(struct mwifiex_private *priv,
+ u8 *rates, u8 radio_type)
+{
+ struct wiphy *wiphy = priv->adapter->wiphy;
+ struct cfg80211_scan_request *request = priv->scan_request;
+ u32 num_rates, rate_mask;
+ struct ieee80211_supported_band *sband;
+ int i;
+
+ if (radio_type) {
+ sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+ if (WARN_ON_ONCE(!sband))
+ return 0;
+ rate_mask = request->rates[IEEE80211_BAND_5GHZ];
+ } else {
+ sband = wiphy->bands[IEEE80211_BAND_2GHZ];
+ if (WARN_ON_ONCE(!sband))
+ return 0;
+ rate_mask = request->rates[IEEE80211_BAND_2GHZ];
+ }
+
+ num_rates = 0;
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if ((BIT(i) & rate_mask) == 0)
+ continue; /* skip rate */
+ rates[num_rates++] = (u8)(sband->bitrates[i].bitrate / 5);
+ }
+
+ return num_rates;
+}
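
The bitrate / 5 above is a unit conversion rather than a heuristic: cfg80211 expresses bitrates in 100 kbps units while the 802.11 supported-rates element uses 500 kbps units. A one-line demonstration:

#include <stdio.h>

int main(void)
{
	int bitrate = 540;	/* cfg80211 unit: 100 kbps, i.e. 54 Mbps */

	/* 802.11 supported-rates octets use 500 kbps units */
	printf("rate octet = %d\n", bitrate / 5);	/* 108 */
	return 0;
}
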
+
+/* This function gets the supported data rates. The function works in
+ * both Ad-Hoc and infra mode by printing the band and returning the
+ * data rates.
*/
u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
{
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index 94cc09d48444..5c85d7803d00 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -26,6 +26,7 @@
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/ieee80211.h>
+#include <uapi/linux/if_arp.h>
#include <net/mac80211.h>
@@ -75,7 +76,8 @@
#define MWIFIEX_BUF_FLAG_REQUEUED_PKT BIT(0)
#define MWIFIEX_BUF_FLAG_BRIDGED_PKT BIT(1)
-#define MWIFIEX_BRIDGED_PKTS_THRESHOLD 1024
+#define MWIFIEX_BRIDGED_PKTS_THR_HIGH 1024
+#define MWIFIEX_BRIDGED_PKTS_THR_LOW 128
enum mwifiex_bss_type {
MWIFIEX_BSS_TYPE_STA = 0,
@@ -151,4 +153,12 @@ struct mwifiex_types_wmm_info {
u8 reserved;
struct ieee_types_wmm_ac_parameters ac_params[IEEE80211_NUM_ACS];
} __packed;
+
+struct mwifiex_arp_eth_header {
+ struct arphdr hdr;
+ u8 ar_sha[ETH_ALEN];
+ u8 ar_sip[4];
+ u8 ar_tha[ETH_ALEN];
+ u8 ar_tip[4];
+} __packed;
#endif /* !_MWIFIEX_DECL_H_ */
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 1b45aa533300..f80f30b6160e 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -85,9 +85,6 @@ enum KEY_TYPE_ID {
#define WAPI_KEY_LEN 50
#define MAX_POLL_TRIES 100
-
-#define MAX_MULTI_INTERFACE_POLL_TRIES 1000
-
#define MAX_FIRMWARE_POLL_TRIES 100
#define FIRMWARE_READY_SDIO 0xfedc
@@ -156,6 +153,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_UAP_PS_AO_TIMER (PROPRIETARY_TLV_BASE_ID + 123)
#define TLV_TYPE_PWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 145)
#define TLV_TYPE_GWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 146)
+#define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154)
#define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048
@@ -297,6 +295,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_CAU_REG_ACCESS 0x00ed
#define HostCmd_CMD_SET_BSS_MODE 0x00f7
#define HostCmd_CMD_PCIE_DESC_DETAILS 0x00fa
+#define HostCmd_CMD_COALESCE_CFG 0x010a
#define HostCmd_CMD_MGMT_FRAME_REG 0x010c
#define HostCmd_CMD_REMAIN_ON_CHAN 0x010d
#define HostCmd_CMD_11AC_CFG 0x0112
@@ -453,7 +452,7 @@ enum P2P_MODES {
(((event_cause) >> 24) & 0x00ff)
#define MWIFIEX_MAX_PATTERN_LEN 20
-#define MWIFIEX_MAX_OFFSET_LEN 50
+#define MWIFIEX_MAX_OFFSET_LEN 100
#define STACK_NBYTES 100
#define TYPE_DNUM 1
#define TYPE_BYTESEQ 2
@@ -1331,7 +1330,7 @@ struct mwifiex_ie_types_2040bssco {
struct mwifiex_ie_types_extcap {
struct mwifiex_ie_types_header header;
- u8 ext_cap;
+ u8 ext_capab[0];
} __packed;
struct host_cmd_ds_mac_reg_access {
@@ -1369,11 +1368,6 @@ struct host_cmd_ds_802_11_eeprom_access {
u8 value;
} __packed;
-struct host_cmd_tlv {
- __le16 type;
- __le16 len;
-} __packed;
-
struct mwifiex_assoc_event {
u8 sta_addr[ETH_ALEN];
__le16 type;
@@ -1399,99 +1393,99 @@ struct host_cmd_11ac_vht_cfg {
} __packed;
struct host_cmd_tlv_akmp {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
__le16 key_mgmt;
__le16 key_mgmt_operation;
} __packed;
struct host_cmd_tlv_pwk_cipher {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
__le16 proto;
u8 cipher;
u8 reserved;
} __packed;
struct host_cmd_tlv_gwk_cipher {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
u8 cipher;
u8 reserved;
} __packed;
struct host_cmd_tlv_passphrase {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
u8 passphrase[0];
} __packed;
struct host_cmd_tlv_wep_key {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
u8 key_index;
u8 is_default;
u8 key[1];
};
struct host_cmd_tlv_auth_type {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
u8 auth_type;
} __packed;
struct host_cmd_tlv_encrypt_protocol {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
__le16 proto;
} __packed;
struct host_cmd_tlv_ssid {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
u8 ssid[0];
} __packed;
struct host_cmd_tlv_rates {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
u8 rates[0];
} __packed;
struct host_cmd_tlv_bcast_ssid {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
u8 bcast_ctl;
} __packed;
struct host_cmd_tlv_beacon_period {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
__le16 period;
} __packed;
struct host_cmd_tlv_dtim_period {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
u8 period;
} __packed;
struct host_cmd_tlv_frag_threshold {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
__le16 frag_thr;
} __packed;
struct host_cmd_tlv_rts_threshold {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
__le16 rts_thr;
} __packed;
struct host_cmd_tlv_retry_limit {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
u8 limit;
} __packed;
struct host_cmd_tlv_mac_addr {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
u8 mac_addr[ETH_ALEN];
} __packed;
struct host_cmd_tlv_channel_band {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
u8 band_config;
u8 channel;
} __packed;
struct host_cmd_tlv_ageout_timer {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
__le32 sta_ao_timer;
} __packed;
@@ -1604,6 +1598,27 @@ struct host_cmd_ds_802_11_cfg_data {
__le16 data_len;
} __packed;
+struct coalesce_filt_field_param {
+ u8 operation;
+ u8 operand_len;
+ __le16 offset;
+ u8 operand_byte_stream[4];
+};
+
+struct coalesce_receive_filt_rule {
+ struct mwifiex_ie_types_header header;
+ u8 num_of_fields;
+ u8 pkt_type;
+ __le16 max_coalescing_delay;
+ struct coalesce_filt_field_param params[0];
+} __packed;
+
+struct host_cmd_ds_coalesce_cfg {
+ __le16 action;
+ __le16 num_of_rules;
+ struct coalesce_receive_filt_rule rule[0];
+} __packed;
+
struct host_cmd_ds_command {
__le16 command;
__le16 size;
@@ -1664,6 +1679,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_sta_deauth sta_deauth;
struct host_cmd_11ac_vht_cfg vht_cfg;
struct host_cmd_ds_802_11_cfg_data cfg_data;
+ struct host_cmd_ds_coalesce_cfg coalesce_cfg;
} params;
} __packed;
diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c
index e38342f86c51..220af4fe0fc6 100644
--- a/drivers/net/wireless/mwifiex/ie.c
+++ b/drivers/net/wireless/mwifiex/ie.c
@@ -87,7 +87,7 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv,
u8 *tmp;
input_len = le16_to_cpu(ie_list->len);
- travel_len = sizeof(struct host_cmd_tlv);
+ travel_len = sizeof(struct mwifiex_ie_types_header);
ie_list->len = 0;
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 2cf8b964e966..6499117fce43 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -135,6 +135,8 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
priv->csa_chan = 0;
priv->csa_expire_time = 0;
+ priv->del_list_idx = 0;
+ priv->hs2_enabled = false;
return mwifiex_add_bss_prio_tbl(priv);
}
@@ -377,18 +379,11 @@ static void mwifiex_free_lock_list(struct mwifiex_adapter *adapter)
static void
mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
{
- int i;
-
if (!adapter) {
pr_err("%s: adapter is NULL\n", __func__);
return;
}
- for (i = 0; i < adapter->priv_num; i++) {
- if (adapter->priv[i])
- del_timer_sync(&adapter->priv[i]->scan_delay_timer);
- }
-
mwifiex_cancel_all_pending_cmd(adapter);
/* Free lock variables */
@@ -398,13 +393,8 @@ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
dev_dbg(adapter->dev, "info: free cmd buffer\n");
mwifiex_free_cmd_buffer(adapter);
- del_timer(&adapter->cmd_timer);
-
dev_dbg(adapter->dev, "info: free scan table\n");
- if (adapter->if_ops.cleanup_if)
- adapter->if_ops.cleanup_if(adapter);
-
if (adapter->sleep_cfm)
dev_kfree_skb_any(adapter->sleep_cfm);
}
@@ -702,7 +692,6 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
if (!adapter->winner) {
dev_notice(adapter->dev,
"FW already running! Skip FW dnld\n");
- poll_num = MAX_MULTI_INTERFACE_POLL_TRIES;
goto poll_fw;
}
}
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index 7f27e45680b5..00a95f4c6a6c 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -362,13 +362,13 @@ struct mwifiex_ds_misc_subsc_evt {
struct subsc_evt_cfg bcn_h_rssi_cfg;
};
-#define MAX_BYTESEQ 6 /* non-adjustable */
-#define MWIFIEX_MAX_FILTERS 10
+#define MWIFIEX_MEF_MAX_BYTESEQ 6 /* non-adjustable */
+#define MWIFIEX_MEF_MAX_FILTERS 10
struct mwifiex_mef_filter {
u16 repeat;
u16 offset;
- s8 byte_seq[MAX_BYTESEQ + 1];
+ s8 byte_seq[MWIFIEX_MEF_MAX_BYTESEQ + 1];
u8 filt_type;
u8 filt_action;
};
@@ -376,7 +376,7 @@ struct mwifiex_mef_filter {
struct mwifiex_mef_entry {
u8 mode;
u8 action;
- struct mwifiex_mef_filter filter[MWIFIEX_MAX_FILTERS];
+ struct mwifiex_mef_filter filter[MWIFIEX_MEF_MAX_FILTERS];
};
struct mwifiex_ds_mef_cfg {
@@ -397,4 +397,39 @@ enum {
MWIFIEX_FUNC_SHUTDOWN,
};
+enum COALESCE_OPERATION {
+ RECV_FILTER_MATCH_TYPE_EQ = 0x80,
+ RECV_FILTER_MATCH_TYPE_NE,
+};
+
+enum COALESCE_PACKET_TYPE {
+ PACKET_TYPE_UNICAST = 1,
+ PACKET_TYPE_MULTICAST = 2,
+ PACKET_TYPE_BROADCAST = 3
+};
+
+#define MWIFIEX_COALESCE_MAX_RULES 8
+#define MWIFIEX_COALESCE_MAX_BYTESEQ 4 /* non-adjustable */
+#define MWIFIEX_COALESCE_MAX_FILTERS 4
+#define MWIFIEX_MAX_COALESCING_DELAY 100 /* in msecs */
+
+struct filt_field_param {
+ u8 operation;
+ u8 operand_len;
+ u16 offset;
+ u8 operand_byte_stream[MWIFIEX_COALESCE_MAX_BYTESEQ];
+};
+
+struct mwifiex_coalesce_rule {
+ u16 max_coalescing_delay;
+ u8 num_of_fields;
+ u8 pkt_type;
+ struct filt_field_param params[MWIFIEX_COALESCE_MAX_FILTERS];
+};
+
+struct mwifiex_ds_coalesce_cfg {
+ u16 num_of_rules;
+ struct mwifiex_coalesce_rule rule[MWIFIEX_COALESCE_MAX_RULES];
+};
+
#endif /* !_MWIFIEX_IOCTL_H_ */
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 12e778159ec5..9d7c0e6c4fc7 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -1427,6 +1427,7 @@ int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
switch (priv->bss_mode) {
case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
return mwifiex_deauthenticate_infra(priv, mac);
case NL80211_IFTYPE_ADHOC:
return mwifiex_send_cmd_sync(priv,
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 1753431de361..fd778337deee 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -191,12 +191,16 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter)
{
s32 i;
+ if (adapter->if_ops.cleanup_if)
+ adapter->if_ops.cleanup_if(adapter);
+
del_timer(&adapter->cmd_timer);
/* Free private structures */
for (i = 0; i < adapter->priv_num; i++) {
if (adapter->priv[i]) {
mwifiex_free_curr_bcn(adapter->priv[i]);
+ del_timer_sync(&adapter->priv[i]->scan_delay_timer);
kfree(adapter->priv[i]);
}
}
@@ -386,6 +390,17 @@ static void mwifiex_free_adapter(struct mwifiex_adapter *adapter)
}
/*
+ * This function flushes all pending work in the queue and destroys
+ * the main workqueue.
+ */
+static void mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter)
+{
+ flush_workqueue(adapter->workqueue);
+ destroy_workqueue(adapter->workqueue);
+ adapter->workqueue = NULL;
+}
+
+/*
* This function gets firmware and initializes it.
*
* The main initialization steps followed are -
@@ -394,16 +409,18 @@ static void mwifiex_free_adapter(struct mwifiex_adapter *adapter)
*/
static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
{
- int ret;
+ int ret, i;
char fmt[64];
struct mwifiex_private *priv;
struct mwifiex_adapter *adapter = context;
struct mwifiex_fw_image fw;
+ struct semaphore *sem = adapter->card_sem;
+ bool init_failed = false;
if (!firmware) {
dev_err(adapter->dev,
"Failed to get firmware %s\n", adapter->fw_name);
- goto done;
+ goto err_dnld_fw;
}
memset(&fw, 0, sizeof(struct mwifiex_fw_image));
@@ -416,7 +433,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
else
ret = mwifiex_dnld_fw(adapter, &fw);
if (ret == -1)
- goto done;
+ goto err_dnld_fw;
dev_notice(adapter->dev, "WLAN FW is active\n");
@@ -428,13 +445,15 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
}
/* enable host interrupt after fw dnld is successful */
- if (adapter->if_ops.enable_int)
- adapter->if_ops.enable_int(adapter);
+ if (adapter->if_ops.enable_int) {
+ if (adapter->if_ops.enable_int(adapter))
+ goto err_dnld_fw;
+ }
adapter->init_wait_q_woken = false;
ret = mwifiex_init_fw(adapter);
if (ret == -1) {
- goto done;
+ goto err_init_fw;
} else if (!ret) {
adapter->hw_status = MWIFIEX_HW_STATUS_READY;
goto done;
@@ -443,12 +462,12 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
wait_event_interruptible(adapter->init_wait_q,
adapter->init_wait_q_woken);
if (adapter->hw_status != MWIFIEX_HW_STATUS_READY)
- goto done;
+ goto err_init_fw;
priv = adapter->priv[MWIFIEX_BSS_ROLE_STA];
if (mwifiex_register_cfg80211(adapter)) {
dev_err(adapter->dev, "cannot register with cfg80211\n");
- goto err_init_fw;
+ goto err_register_cfg80211;
}
rtnl_lock();
@@ -458,20 +477,6 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
dev_err(adapter->dev, "cannot create default STA interface\n");
goto err_add_intf;
}
-
- /* Create AP interface by default */
- if (!mwifiex_add_virtual_intf(adapter->wiphy, "uap%d",
- NL80211_IFTYPE_AP, NULL, NULL)) {
- dev_err(adapter->dev, "cannot create default AP interface\n");
- goto err_add_intf;
- }
-
- /* Create P2P interface by default */
- if (!mwifiex_add_virtual_intf(adapter->wiphy, "p2p%d",
- NL80211_IFTYPE_P2P_CLIENT, NULL, NULL)) {
- dev_err(adapter->dev, "cannot create default P2P interface\n");
- goto err_add_intf;
- }
rtnl_unlock();
mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
@@ -479,20 +484,52 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
goto done;
err_add_intf:
- mwifiex_del_virtual_intf(adapter->wiphy, priv->wdev);
+ for (i = 0; i < adapter->priv_num; i++) {
+ priv = adapter->priv[i];
+
+ if (!priv)
+ continue;
+
+ if (priv->wdev && priv->netdev)
+ mwifiex_del_virtual_intf(adapter->wiphy, priv->wdev);
+ }
rtnl_unlock();
+err_register_cfg80211:
+ wiphy_unregister(adapter->wiphy);
+ wiphy_free(adapter->wiphy);
err_init_fw:
if (adapter->if_ops.disable_int)
adapter->if_ops.disable_int(adapter);
+err_dnld_fw:
pr_debug("info: %s: unregister device\n", __func__);
- adapter->if_ops.unregister_dev(adapter);
+ if (adapter->if_ops.unregister_dev)
+ adapter->if_ops.unregister_dev(adapter);
+
+ if ((adapter->hw_status == MWIFIEX_HW_STATUS_FW_READY) ||
+ (adapter->hw_status == MWIFIEX_HW_STATUS_READY)) {
+ pr_debug("info: %s: shutdown mwifiex\n", __func__);
+ adapter->init_wait_q_woken = false;
+
+ if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS)
+ wait_event_interruptible(adapter->init_wait_q,
+ adapter->init_wait_q_woken);
+ }
+ adapter->surprise_removed = true;
+ mwifiex_terminate_workqueue(adapter);
+ init_failed = true;
done:
if (adapter->cal_data) {
release_firmware(adapter->cal_data);
adapter->cal_data = NULL;
}
- release_firmware(adapter->firmware);
+ if (adapter->firmware) {
+ release_firmware(adapter->firmware);
+ adapter->firmware = NULL;
+ }
complete(&adapter->fw_load);
+ if (init_failed)
+ mwifiex_free_adapter(adapter);
+ up(sem);
return;
}
@@ -803,18 +840,6 @@ static void mwifiex_main_work_queue(struct work_struct *work)
}
/*
- * This function cancels all works in the queue and destroys
- * the main workqueue.
- */
-static void
-mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter)
-{
- flush_workqueue(adapter->workqueue);
- destroy_workqueue(adapter->workqueue);
- adapter->workqueue = NULL;
-}
-
-/*
* This function adds the card.
*
* This function follows the following major steps to set up the device -
@@ -842,6 +867,7 @@ mwifiex_add_card(void *card, struct semaphore *sem,
}
adapter->iface_type = iface_type;
+ adapter->card_sem = sem;
adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
adapter->surprise_removed = false;
@@ -872,17 +898,12 @@ mwifiex_add_card(void *card, struct semaphore *sem,
goto err_init_fw;
}
- up(sem);
return 0;
err_init_fw:
pr_debug("info: %s: unregister device\n", __func__);
if (adapter->if_ops.unregister_dev)
adapter->if_ops.unregister_dev(adapter);
-err_registerdev:
- adapter->surprise_removed = true;
- mwifiex_terminate_workqueue(adapter);
-err_kmalloc:
if ((adapter->hw_status == MWIFIEX_HW_STATUS_FW_READY) ||
(adapter->hw_status == MWIFIEX_HW_STATUS_READY)) {
pr_debug("info: %s: shutdown mwifiex\n", __func__);
@@ -892,7 +913,10 @@ err_kmalloc:
wait_event_interruptible(adapter->init_wait_q,
adapter->init_wait_q_woken);
}
-
+err_registerdev:
+ adapter->surprise_removed = true;
+ mwifiex_terminate_workqueue(adapter);
+err_kmalloc:
mwifiex_free_adapter(adapter);
err_init_sw:
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 253e0bd38e25..1d72f13adb9d 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -204,11 +204,11 @@ struct mwifiex_ra_list_tbl {
struct list_head list;
struct sk_buff_head skb_head;
u8 ra[ETH_ALEN];
- u32 total_pkts_size;
u32 is_11n_enabled;
u16 max_amsdu;
- u16 pkt_count;
+ u16 ba_pkt_count;
u8 ba_packet_thr;
+ u16 total_pkt_count;
};
struct mwifiex_tid_tbl {
@@ -515,6 +515,8 @@ struct mwifiex_private {
bool scan_aborting;
u8 csa_chan;
unsigned long csa_expire_time;
+ u8 del_list_idx;
+ bool hs2_enabled;
};
enum mwifiex_ba_status {
@@ -748,6 +750,7 @@ struct mwifiex_adapter {
atomic_t is_tx_received;
atomic_t pending_bridged_pkts;
+ struct semaphore *card_sem;
};
int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
@@ -900,6 +903,8 @@ int mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv, u16 vsie_mask,
u32 mwifiex_get_active_data_rates(struct mwifiex_private *priv,
u8 *rates);
u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates);
+u32 mwifiex_get_rates_from_cfg80211(struct mwifiex_private *priv,
+ u8 *rates, u8 radio_type);
u8 mwifiex_is_rate_auto(struct mwifiex_private *priv);
extern u16 region_code_index[MWIFIEX_MAX_REGION_CODE];
void mwifiex_save_curr_bcn(struct mwifiex_private *priv);
@@ -1021,7 +1026,7 @@ mwifiex_netdev_get_priv(struct net_device *dev)
*/
static inline bool mwifiex_is_skb_mgmt_frame(struct sk_buff *skb)
{
- return (*(u32 *)skb->data == PKT_TYPE_MGMT);
+ return (le32_to_cpu(*(__le32 *)skb->data) == PKT_TYPE_MGMT);
}
/* This function retrieves channel closed for operation by Channel
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 20c9c4c7b0b2..52da8ee7599a 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -76,7 +76,7 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter)
return false;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
/*
* Kernel needs to suspend all functions separately. Therefore all
* registered functions must have drivers with suspend and resume
@@ -85,11 +85,12 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter)
* If already not suspended, this function allocates and sends a host
* sleep activate request to the firmware and turns off the traffic.
*/
-static int mwifiex_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
+static int mwifiex_pcie_suspend(struct device *dev)
{
struct mwifiex_adapter *adapter;
struct pcie_service_card *card;
int hs_actived;
+ struct pci_dev *pdev = to_pci_dev(dev);
if (pdev) {
card = (struct pcie_service_card *) pci_get_drvdata(pdev);
@@ -120,10 +121,11 @@ static int mwifiex_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
* If not already resumed, this function turns on the traffic and
* sends a host sleep cancel request to the firmware.
*/
-static int mwifiex_pcie_resume(struct pci_dev *pdev)
+static int mwifiex_pcie_resume(struct device *dev)
{
struct mwifiex_adapter *adapter;
struct pcie_service_card *card;
+ struct pci_dev *pdev = to_pci_dev(dev);
if (pdev) {
card = (struct pcie_service_card *) pci_get_drvdata(pdev);
@@ -211,9 +213,9 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
wait_for_completion(&adapter->fw_load);
if (user_rmmod) {
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
if (adapter->is_suspended)
- mwifiex_pcie_resume(pdev);
+ mwifiex_pcie_resume(&pdev->dev);
#endif
for (i = 0; i < adapter->priv_num; i++)
@@ -233,6 +235,14 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
kfree(card);
}
+static void mwifiex_pcie_shutdown(struct pci_dev *pdev)
+{
+ user_rmmod = 1;
+ mwifiex_pcie_remove(pdev);
+
+ return;
+}
+
static DEFINE_PCI_DEVICE_TABLE(mwifiex_ids) = {
{
PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8766P,
@@ -249,17 +259,24 @@ static DEFINE_PCI_DEVICE_TABLE(mwifiex_ids) = {
MODULE_DEVICE_TABLE(pci, mwifiex_ids);
+#ifdef CONFIG_PM_SLEEP
+/* Power Management Hooks */
+static SIMPLE_DEV_PM_OPS(mwifiex_pcie_pm_ops, mwifiex_pcie_suspend,
+ mwifiex_pcie_resume);
+#endif
+
/* PCI Device Driver */
static struct pci_driver __refdata mwifiex_pcie = {
.name = "mwifiex_pcie",
.id_table = mwifiex_ids,
.probe = mwifiex_pcie_probe,
.remove = mwifiex_pcie_remove,
-#ifdef CONFIG_PM
- /* Power Management Hooks */
- .suspend = mwifiex_pcie_suspend,
- .resume = mwifiex_pcie_resume,
+#ifdef CONFIG_PM_SLEEP
+ .driver = {
+ .pm = &mwifiex_pcie_pm_ops,
+ },
#endif
+ .shutdown = mwifiex_pcie_shutdown,
};
/*
@@ -1925,7 +1942,7 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
ret = 0;
break;
} else {
- mdelay(100);
+ msleep(100);
ret = -1;
}
}
@@ -1937,12 +1954,10 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
else if (!winner_status) {
dev_err(adapter->dev, "PCI-E is the winner\n");
adapter->winner = 1;
- ret = -1;
} else {
dev_err(adapter->dev,
"PCI-E is not the winner <%#x,%d>, exit dnld\n",
ret, adapter->winner);
- ret = 0;
}
}
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index c447d9bd1aa9..8cf7d50a7603 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -543,6 +543,37 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
return chan_idx;
}
+/* This function appends rate TLV to scan config command. */
+static int
+mwifiex_append_rate_tlv(struct mwifiex_private *priv,
+ struct mwifiex_scan_cmd_config *scan_cfg_out,
+ u8 radio)
+{
+ struct mwifiex_ie_types_rates_param_set *rates_tlv;
+ u8 rates[MWIFIEX_SUPPORTED_RATES], *tlv_pos;
+ u32 rates_size;
+
+ memset(rates, 0, sizeof(rates));
+
+ tlv_pos = (u8 *)scan_cfg_out->tlv_buf + scan_cfg_out->tlv_buf_len;
+
+ if (priv->scan_request)
+ rates_size = mwifiex_get_rates_from_cfg80211(priv, rates,
+ radio);
+ else
+ rates_size = mwifiex_get_supported_rates(priv, rates);
+
+ dev_dbg(priv->adapter->dev, "info: SCAN_CMD: Rates size = %d\n",
+ rates_size);
+ rates_tlv = (struct mwifiex_ie_types_rates_param_set *)tlv_pos;
+ rates_tlv->header.type = cpu_to_le16(WLAN_EID_SUPP_RATES);
+ rates_tlv->header.len = cpu_to_le16((u16) rates_size);
+ memcpy(rates_tlv->rates, rates, rates_size);
+ scan_cfg_out->tlv_buf_len += sizeof(rates_tlv->header) + rates_size;
+
+ return rates_size;
+}
+
/*
* This function constructs and sends multiple scan config commands to
* the firmware.
@@ -564,9 +595,10 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
struct mwifiex_chan_scan_param_set *tmp_chan_list;
struct mwifiex_chan_scan_param_set *start_chan;
- u32 tlv_idx;
+ u32 tlv_idx, rates_size;
u32 total_scan_time;
u32 done_early;
+ u8 radio_type;
if (!scan_cfg_out || !chan_tlv_out || !scan_chan_list) {
dev_dbg(priv->adapter->dev,
@@ -591,6 +623,7 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
tlv_idx = 0;
total_scan_time = 0;
+ radio_type = 0;
chan_tlv_out->header.len = 0;
start_chan = tmp_chan_list;
done_early = false;
@@ -612,6 +645,7 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
continue;
}
+ radio_type = tmp_chan_list->radio_type;
dev_dbg(priv->adapter->dev,
"info: Scan: Chan(%3d), Radio(%d),"
" Mode(%d, %d), Dur(%d)\n",
@@ -692,6 +726,9 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
break;
}
+ rates_size = mwifiex_append_rate_tlv(priv, scan_cfg_out,
+ radio_type);
+
priv->adapter->scan_channels = start_chan;
/* Send the scan command to the firmware with the specified
@@ -699,6 +736,14 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SCAN,
HostCmd_ACT_GEN_SET, 0,
scan_cfg_out);
+
+ /* The rate IE is updated per scan command, but the same starting
+ * pointer is used each time so that the rate IE from the earlier
+ * scan_cfg_out->buf is overwritten with the new one.
+ */
+ scan_cfg_out->tlv_buf_len -=
+ sizeof(struct mwifiex_ie_types_header) + rates_size;
+
if (ret)
break;
}
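
The tlv_buf_len adjustment implements an append-then-rewind scheme: the per-iteration rates TLV is appended, the scan command is sent, and the length is rolled back so the next pass overwrites the TLV in place instead of accumulating stale copies. A toy sketch of the same bookkeeping, buffer contents hypothetical:

#include <stdio.h>
#include <string.h>

static char buf[64];
static size_t buf_len;

static size_t append(const char *tlv)
{
	size_t n = strlen(tlv);

	memcpy(buf + buf_len, tlv, n);
	buf_len += n;
	return n;
}

int main(void)
{
	const char *per_cmd_tlv[2] = { "RATES-2G", "RATES-5G" };
	int i;

	for (i = 0; i < 2; i++) {
		size_t n = append(per_cmd_tlv[i]);

		printf("send: %.*s\n", (int)buf_len, buf);
		buf_len -= n;	/* rewind: next TLV overwrites this one */
	}
	return 0;
}
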
@@ -741,7 +786,6 @@ mwifiex_config_scan(struct mwifiex_private *priv,
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_ie_types_num_probes *num_probes_tlv;
struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv;
- struct mwifiex_ie_types_rates_param_set *rates_tlv;
u8 *tlv_pos;
u32 num_probes;
u32 ssid_len;
@@ -753,8 +797,6 @@ mwifiex_config_scan(struct mwifiex_private *priv,
u8 radio_type;
int i;
u8 ssid_filter;
- u8 rates[MWIFIEX_SUPPORTED_RATES];
- u32 rates_size;
struct mwifiex_ie_types_htcap *ht_cap;
/* The tlv_buf_len is calculated for each scan command. The TLVs added
@@ -889,19 +931,6 @@ mwifiex_config_scan(struct mwifiex_private *priv,
}
- /* Append rates tlv */
- memset(rates, 0, sizeof(rates));
-
- rates_size = mwifiex_get_supported_rates(priv, rates);
-
- rates_tlv = (struct mwifiex_ie_types_rates_param_set *) tlv_pos;
- rates_tlv->header.type = cpu_to_le16(WLAN_EID_SUPP_RATES);
- rates_tlv->header.len = cpu_to_le16((u16) rates_size);
- memcpy(rates_tlv->rates, rates, rates_size);
- tlv_pos += sizeof(rates_tlv->header) + rates_size;
-
- dev_dbg(adapter->dev, "info: SCAN_CMD: Rates size = %d\n", rates_size);
-
if (ISSUPP_11NENABLED(priv->adapter->fw_cap_info) &&
(priv->adapter->config_bands & BAND_GN ||
priv->adapter->config_bands & BAND_AN)) {
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 09185c963248..1576104e3d95 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -50,9 +50,6 @@ static struct mwifiex_if_ops sdio_ops;
static struct semaphore add_remove_card_sem;
-static int mwifiex_sdio_resume(struct device *dev);
-static void mwifiex_sdio_interrupt(struct sdio_func *func);
-
/*
* SDIO probe.
*
@@ -113,6 +110,51 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
}
/*
+ * SDIO resume.
+ *
+ * Kernel needs to suspend all functions separately. Therefore all
+ * registered functions must have drivers with suspend and resume
+ * methods. Failing that, the kernel simply removes the whole card.
+ *
+ * If not already resumed, this function turns on the traffic and
+ * sends a host sleep cancel request to the firmware.
+ */
+static int mwifiex_sdio_resume(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct sdio_mmc_card *card;
+ struct mwifiex_adapter *adapter;
+ mmc_pm_flag_t pm_flag = 0;
+
+ if (func) {
+ pm_flag = sdio_get_host_pm_caps(func);
+ card = sdio_get_drvdata(func);
+ if (!card || !card->adapter) {
+ pr_err("resume: invalid card or adapter\n");
+ return 0;
+ }
+ } else {
+ pr_err("resume: sdio_func is not specified\n");
+ return 0;
+ }
+
+ adapter = card->adapter;
+
+ if (!adapter->is_suspended) {
+ dev_warn(adapter->dev, "device already resumed\n");
+ return 0;
+ }
+
+ adapter->is_suspended = false;
+
+ /* Disable Host Sleep */
+ mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
+ MWIFIEX_ASYNC_CMD);
+
+ return 0;
+}
+
+/*
* SDIO remove.
*
* This function removes the interface and frees up the card structure.
@@ -212,51 +254,6 @@ static int mwifiex_sdio_suspend(struct device *dev)
return ret;
}
-/*
- * SDIO resume.
- *
- * Kernel needs to suspend all functions separately. Therefore all
- * registered functions must have drivers with suspend and resume
- * methods. Failing that the kernel simply removes the whole card.
- *
- * If already not resumed, this function turns on the traffic and
- * sends a host sleep cancel request to the firmware.
- */
-static int mwifiex_sdio_resume(struct device *dev)
-{
- struct sdio_func *func = dev_to_sdio_func(dev);
- struct sdio_mmc_card *card;
- struct mwifiex_adapter *adapter;
- mmc_pm_flag_t pm_flag = 0;
-
- if (func) {
- pm_flag = sdio_get_host_pm_caps(func);
- card = sdio_get_drvdata(func);
- if (!card || !card->adapter) {
- pr_err("resume: invalid card or adapter\n");
- return 0;
- }
- } else {
- pr_err("resume: sdio_func is not specified\n");
- return 0;
- }
-
- adapter = card->adapter;
-
- if (!adapter->is_suspended) {
- dev_warn(adapter->dev, "device already resumed\n");
- return 0;
- }
-
- adapter->is_suspended = false;
-
- /* Disable Host Sleep */
- mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
- MWIFIEX_ASYNC_CMD);
-
- return 0;
-}
-
/* Device ID for SD8786 */
#define SDIO_DEVICE_ID_MARVELL_8786 (0x9116)
/* Device ID for SD8787 */
@@ -707,6 +704,65 @@ static void mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter)
}
/*
+ * This function reads the interrupt status from the card.
+ */
+static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
+{
+ struct sdio_mmc_card *card = adapter->card;
+ u8 sdio_ireg;
+ unsigned long flags;
+
+ if (mwifiex_read_data_sync(adapter, card->mp_regs,
+ card->reg->max_mp_regs,
+ REG_PORT | MWIFIEX_SDIO_BYTE_MODE_MASK, 0)) {
+ dev_err(adapter->dev, "read mp_regs failed\n");
+ return;
+ }
+
+ sdio_ireg = card->mp_regs[HOST_INTSTATUS_REG];
+ if (sdio_ireg) {
+ /*
+ * DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS
+ * For SDIO new mode CMD port interrupts
+ * DN_LD_CMD_PORT_HOST_INT_STATUS and/or
+ * UP_LD_CMD_PORT_HOST_INT_STATUS
+ * Clear the interrupt status register
+ */
+ dev_dbg(adapter->dev, "int: sdio_ireg = %#x\n", sdio_ireg);
+ spin_lock_irqsave(&adapter->int_lock, flags);
+ adapter->int_status |= sdio_ireg;
+ spin_unlock_irqrestore(&adapter->int_lock, flags);
+ }
+}
+
+/*
+ * SDIO interrupt handler.
+ *
+ * This function reads the interrupt status from the firmware and handles
+ * the interrupt in the current thread (ksdioirqd) right away.
+ */
+static void
+mwifiex_sdio_interrupt(struct sdio_func *func)
+{
+ struct mwifiex_adapter *adapter;
+ struct sdio_mmc_card *card;
+
+ card = sdio_get_drvdata(func);
+ if (!card || !card->adapter) {
+ pr_debug("int: func=%p card=%p adapter=%p\n",
+ func, card, card ? card->adapter : NULL);
+ return;
+ }
+ adapter = card->adapter;
+
+ if (!adapter->pps_uapsd_mode && adapter->ps_state == PS_STATE_SLEEP)
+ adapter->ps_state = PS_STATE_AWAKE;
+
+ mwifiex_interrupt_status(adapter);
+ mwifiex_main_process(adapter);
+}
+
+/*
* This function enables the host interrupt.
*
* The host interrupt enable mask is written to the card
@@ -944,7 +1000,7 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
ret = 0;
break;
} else {
- mdelay(100);
+ msleep(100);
ret = -1;
}
}
@@ -963,65 +1019,6 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
}
/*
- * This function reads the interrupt status from card.
- */
-static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
-{
- struct sdio_mmc_card *card = adapter->card;
- u8 sdio_ireg;
- unsigned long flags;
-
- if (mwifiex_read_data_sync(adapter, card->mp_regs,
- card->reg->max_mp_regs,
- REG_PORT | MWIFIEX_SDIO_BYTE_MODE_MASK, 0)) {
- dev_err(adapter->dev, "read mp_regs failed\n");
- return;
- }
-
- sdio_ireg = card->mp_regs[HOST_INTSTATUS_REG];
- if (sdio_ireg) {
- /*
- * DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS
- * For SDIO new mode CMD port interrupts
- * DN_LD_CMD_PORT_HOST_INT_STATUS and/or
- * UP_LD_CMD_PORT_HOST_INT_STATUS
- * Clear the interrupt status register
- */
- dev_dbg(adapter->dev, "int: sdio_ireg = %#x\n", sdio_ireg);
- spin_lock_irqsave(&adapter->int_lock, flags);
- adapter->int_status |= sdio_ireg;
- spin_unlock_irqrestore(&adapter->int_lock, flags);
- }
-}
-
-/*
- * SDIO interrupt handler.
- *
- * This function reads the interrupt status from firmware and handles
- * the interrupt in current thread (ksdioirqd) right away.
- */
-static void
-mwifiex_sdio_interrupt(struct sdio_func *func)
-{
- struct mwifiex_adapter *adapter;
- struct sdio_mmc_card *card;
-
- card = sdio_get_drvdata(func);
- if (!card || !card->adapter) {
- pr_debug("int: func=%p card=%p adapter=%p\n",
- func, card, card ? card->adapter : NULL);
- return;
- }
- adapter = card->adapter;
-
- if (!adapter->pps_uapsd_mode && adapter->ps_state == PS_STATE_SLEEP)
- adapter->ps_state = PS_STATE_AWAKE;
-
- mwifiex_interrupt_status(adapter);
- mwifiex_main_process(adapter);
-}
-
-/*
* This function decodes a received packet.
*
* Based on the type, the packet is treated as either a data, or
@@ -1065,7 +1062,7 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
case MWIFIEX_TYPE_EVENT:
dev_dbg(adapter->dev, "info: --- Rx: Event ---\n");
- adapter->event_cause = *(u32 *) skb->data;
+ adapter->event_cause = le32_to_cpu(*(__le32 *) skb->data);
if ((skb->len > 0) && (skb->len < MAX_EVENT_SIZE))
memcpy(adapter->event_body,
@@ -1210,8 +1207,8 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
for (pind = 0; pind < card->mpa_rx.pkt_cnt; pind++) {
/* get curr PKT len & type */
- pkt_len = *(u16 *) &curr_ptr[0];
- pkt_type = *(u16 *) &curr_ptr[2];
+ pkt_len = le16_to_cpu(*(__le16 *) &curr_ptr[0]);
+ pkt_type = le16_to_cpu(*(__le16 *) &curr_ptr[2]);
/* copy pkt to deaggr buf */
skb_deaggr = card->mpa_rx.skb_arr[pind];
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 8ece48580642..c0268b597748 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -707,8 +707,9 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
if (priv->bss_type == MWIFIEX_BSS_TYPE_UAP) {
tlv_mac = (void *)((u8 *)&key_material->key_param_set +
key_param_len);
- tlv_mac->tlv.type = cpu_to_le16(TLV_TYPE_STA_MAC_ADDR);
- tlv_mac->tlv.len = cpu_to_le16(ETH_ALEN);
+ tlv_mac->header.type =
+ cpu_to_le16(TLV_TYPE_STA_MAC_ADDR);
+ tlv_mac->header.len = cpu_to_le16(ETH_ALEN);
memcpy(tlv_mac->mac_addr, enc_key->mac_addr, ETH_ALEN);
cmd_size = key_param_len + S_DS_GEN +
sizeof(key_material->action) +
@@ -1069,7 +1070,7 @@ mwifiex_cmd_append_rpn_expression(struct mwifiex_private *priv,
int i, byte_len;
u8 *stack_ptr = *buffer;
- for (i = 0; i < MWIFIEX_MAX_FILTERS; i++) {
+ for (i = 0; i < MWIFIEX_MEF_MAX_FILTERS; i++) {
filter = &mef_entry->filter[i];
if (!filter->filt_type)
break;
@@ -1078,7 +1079,7 @@ mwifiex_cmd_append_rpn_expression(struct mwifiex_private *priv,
*stack_ptr = TYPE_DNUM;
stack_ptr += 1;
- byte_len = filter->byte_seq[MAX_BYTESEQ];
+ byte_len = filter->byte_seq[MWIFIEX_MEF_MAX_BYTESEQ];
memcpy(stack_ptr, filter->byte_seq, byte_len);
stack_ptr += byte_len;
*stack_ptr = byte_len;
@@ -1183,6 +1184,70 @@ static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv,
return 0;
}
+static int
+mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action, void *data_buf)
+{
+ struct host_cmd_ds_coalesce_cfg *coalesce_cfg =
+ &cmd->params.coalesce_cfg;
+ struct mwifiex_ds_coalesce_cfg *cfg = data_buf;
+ struct coalesce_filt_field_param *param;
+ u16 cnt, idx, length;
+ struct coalesce_receive_filt_rule *rule;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_COALESCE_CFG);
+ cmd->size = cpu_to_le16(S_DS_GEN);
+
+ coalesce_cfg->action = cpu_to_le16(cmd_action);
+ coalesce_cfg->num_of_rules = cpu_to_le16(cfg->num_of_rules);
+ rule = coalesce_cfg->rule;
+
+ for (cnt = 0; cnt < cfg->num_of_rules; cnt++) {
+ rule->header.type = cpu_to_le16(TLV_TYPE_COALESCE_RULE);
+ rule->max_coalescing_delay =
+ cpu_to_le16(cfg->rule[cnt].max_coalescing_delay);
+ rule->pkt_type = cfg->rule[cnt].pkt_type;
+ rule->num_of_fields = cfg->rule[cnt].num_of_fields;
+
+ length = 0;
+
+ param = rule->params;
+ for (idx = 0; idx < cfg->rule[cnt].num_of_fields; idx++) {
+ param->operation = cfg->rule[cnt].params[idx].operation;
+ param->operand_len =
+ cfg->rule[cnt].params[idx].operand_len;
+ param->offset =
+ cpu_to_le16(cfg->rule[cnt].params[idx].offset);
+ memcpy(param->operand_byte_stream,
+ cfg->rule[cnt].params[idx].operand_byte_stream,
+ param->operand_len);
+
+ length += sizeof(struct coalesce_filt_field_param);
+
+ param++;
+ }
+
+ /* Total rule length is the size of max_coalescing_delay (u16),
+ * num_of_fields (u8) and pkt_type (u8), plus the total length of
+ * all params
+ */
+ rule->header.len = cpu_to_le16(length + sizeof(u16) +
+ sizeof(u8) + sizeof(u8));
+
+ /* Add the rule length to the command size */
+ le16_add_cpu(&cmd->size, le16_to_cpu(rule->header.len) +
+ sizeof(struct mwifiex_ie_types_header));
+
+ rule = (void *)((u8 *)rule->params + length);
+ }
+
+ /* Add the sizes of action and num_of_rules to the total command length */
+ le16_add_cpu(&cmd->size, sizeof(u16) + sizeof(u16));
+
+ return 0;
+}
+
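
The new mwifiex_cmd_coalesce_cfg() handler accumulates each rule's TLV payload length (the fixed max_coalescing_delay/pkt_type/num_of_fields fields plus the variable parameter array) and folds it, together with the TLV header, into cmd->size. A compact user-space sketch of the same length bookkeeping, with illustrative sizes rather than the real firmware layout:

#include <stdint.h>
#include <stdio.h>

#define PARAM_SIZE	8	/* illustrative per-field parameter size */
#define TLV_HDR_SIZE	4	/* u16 type + u16 len */

/* Payload length of one coalesce rule TLV: the fixed fields plus all the
 * per-field parameter blocks, matching the length math in the handler. */
static uint16_t rule_tlv_len(unsigned int num_fields)
{
	/* max_coalescing_delay (u16) + pkt_type (u8) + num_of_fields (u8) */
	return sizeof(uint16_t) + 2 * sizeof(uint8_t) +
	       num_fields * PARAM_SIZE;
}

int main(void)
{
	uint16_t cmd_size = 4;			/* S_DS_GEN placeholder */
	unsigned int fields_per_rule[] = { 2, 1 };
	unsigned int i;

	cmd_size += 2 * sizeof(uint16_t);	/* action + num_of_rules */
	for (i = 0; i < 2; i++)
		cmd_size += TLV_HDR_SIZE + rule_tlv_len(fields_per_rule[i]);
	printf("cmd_size=%u\n", cmd_size);
	return 0;
}
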
/*
* This function prepares the commands before sending them to the firmware.
*
@@ -1406,6 +1471,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
case HostCmd_CMD_MEF_CFG:
ret = mwifiex_cmd_mef_cfg(priv, cmd_ptr, data_buf);
break;
+ case HostCmd_CMD_COALESCE_CFG:
+ ret = mwifiex_cmd_coalesce_cfg(priv, cmd_ptr, cmd_action,
+ data_buf);
+ break;
default:
dev_err(priv->adapter->dev,
"PREP_CMD: unknown cmd- %#x\n", cmd_no);
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index d85df158cc6c..58a6013712d2 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -280,7 +280,7 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv,
tlv_buf = ((u8 *)rate_cfg) +
sizeof(struct host_cmd_ds_tx_rate_cfg);
- tlv_buf_len = *(u16 *) (tlv_buf + sizeof(u16));
+ tlv_buf_len = le16_to_cpu(*(__le16 *) (tlv_buf + sizeof(u16)));
while (tlv_buf && tlv_buf_len > 0) {
tlv = (*tlv_buf);
@@ -997,6 +997,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
break;
case HostCmd_CMD_MEF_CFG:
break;
+ case HostCmd_CMD_COALESCE_CFG:
+ break;
default:
dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
resp->command);
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index ea265ec0e522..8b057524b252 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -201,6 +201,11 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_DEAUTHENTICATED:
dev_dbg(adapter->dev, "event: Deauthenticated\n");
+ if (priv->wps.session_enable) {
+ dev_dbg(adapter->dev,
+ "info: receive deauth event in wps session\n");
+ break;
+ }
adapter->dbg.num_event_deauth++;
if (priv->media_connected) {
reason_code =
@@ -211,6 +216,11 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_DISASSOCIATED:
dev_dbg(adapter->dev, "event: Disassociated\n");
+ if (priv->wps.session_enable) {
+ dev_dbg(adapter->dev,
+ "info: receive disassoc event in wps session\n");
+ break;
+ }
adapter->dbg.num_event_disassoc++;
if (priv->media_connected) {
reason_code =
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 8af97abf7108..f084412eee0b 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -797,15 +797,16 @@ static int mwifiex_set_wps_ie(struct mwifiex_private *priv,
u8 *ie_data_ptr, u16 ie_len)
{
if (ie_len) {
- priv->wps_ie = kzalloc(MWIFIEX_MAX_VSIE_LEN, GFP_KERNEL);
- if (!priv->wps_ie)
- return -ENOMEM;
- if (ie_len > sizeof(priv->wps_ie)) {
+ if (ie_len > MWIFIEX_MAX_VSIE_LEN) {
dev_dbg(priv->adapter->dev,
"info: failed to copy WPS IE, too big\n");
- kfree(priv->wps_ie);
return -1;
}
+
+ priv->wps_ie = kzalloc(MWIFIEX_MAX_VSIE_LEN, GFP_KERNEL);
+ if (!priv->wps_ie)
+ return -ENOMEM;
+
memcpy(priv->wps_ie, ie_data_ptr, ie_len);
priv->wps_ie_len = ie_len;
dev_dbg(priv->adapter->dev, "cmd: Set wps_ie_len=%d IE=%#x\n",
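
The sta_ioctl.c hunk reorders mwifiex_set_wps_ie() to validate ie_len against the buffer bound before calling kzalloc(): the original allocated first and had to kfree() on the oversize path, and compared ie_len against sizeof(priv->wps_ie), which is the size of the pointer rather than the buffer. A user-space sketch of the validate-before-allocate shape; MAX_VSIE_LEN is a stand-in for MWIFIEX_MAX_VSIE_LEN:

#include <stdlib.h>
#include <string.h>

#define MAX_VSIE_LEN	256	/* stand-in for MWIFIEX_MAX_VSIE_LEN */

/* Validate first, allocate second: the error path never owns memory, so
 * there is nothing to free on failure -- the shape of the fix above. */
static int set_ie(unsigned char **dst, const unsigned char *src, size_t len)
{
	if (len > MAX_VSIE_LEN)
		return -1;		/* reject before allocating */

	*dst = calloc(1, MAX_VSIE_LEN);
	if (!*dst)
		return -1;

	memcpy(*dst, src, len);
	return 0;
}

int main(void)
{
	unsigned char *ie = NULL;
	const unsigned char wps[] = { 0xdd, 0x05, 0x00, 0x50, 0xf2, 0x04, 0x10 };
	int ret = set_ie(&ie, wps, sizeof(wps));

	free(ie);
	return ret;
}
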
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index b5c109504393..bb22664923ef 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -17,6 +17,8 @@
* this warranty disclaimer.
*/
+#include <uapi/linux/ipv6.h>
+#include <net/ndisc.h>
#include "decl.h"
#include "ioctl.h"
#include "util.h"
@@ -25,6 +27,46 @@
#include "11n_aggr.h"
#include "11n_rxreorder.h"
+/* This function checks whether a frame is an IPv4 ARP or IPv6 Neighbour
+ * Advertisement frame. If the frame carries identical source and destination
+ * protocol addresses, it is gratuitous and the caller should drop it.
+ */
+static bool
+mwifiex_discard_gratuitous_arp(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ const struct mwifiex_arp_eth_header *arp;
+ struct ethhdr *eth_hdr;
+ struct ipv6hdr *ipv6;
+ struct icmp6hdr *icmpv6;
+
+ eth_hdr = (struct ethhdr *)skb->data;
+ switch (ntohs(eth_hdr->h_proto)) {
+ case ETH_P_ARP:
+ arp = (void *)(skb->data + sizeof(struct ethhdr));
+ if (arp->hdr.ar_op == htons(ARPOP_REPLY) ||
+ arp->hdr.ar_op == htons(ARPOP_REQUEST)) {
+ if (!memcmp(arp->ar_sip, arp->ar_tip, 4))
+ return true;
+ }
+ break;
+ case ETH_P_IPV6:
+ ipv6 = (void *)(skb->data + sizeof(struct ethhdr));
+ icmpv6 = (void *)(skb->data + sizeof(struct ethhdr) +
+ sizeof(struct ipv6hdr));
+ if (NDISC_NEIGHBOUR_ADVERTISEMENT == icmpv6->icmp6_type) {
+ if (!memcmp(&ipv6->saddr, &ipv6->daddr,
+ sizeof(struct in6_addr)))
+ return true;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return false;
+}
+
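
mwifiex_discard_gratuitous_arp() flags ARP requests/replies whose sender and target protocol addresses match, and IPv6 neighbour advertisements whose source and destination addresses match, so the hs2_enabled path above can drop such self-addressed frames. A self-contained sketch of the ARP half over a raw Ethernet frame; offsets follow the standard Ethernet/IPv4 ARP layout:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define ETH_HLEN	14
#define ETH_P_ARP	0x0806

/* Return true for a gratuitous ARP: a request/reply whose sender and target
 * protocol addresses are identical -- the same test as the driver's
 * memcmp(arp->ar_sip, arp->ar_tip, 4). */
static bool is_gratuitous_arp(const uint8_t *frame, size_t len)
{
	uint16_t ethertype, op;

	if (len < ETH_HLEN + 28)	/* Ethernet header + IPv4 ARP body */
		return false;

	ethertype = (uint16_t)((frame[12] << 8) | frame[13]);
	if (ethertype != ETH_P_ARP)
		return false;

	op = (uint16_t)((frame[ETH_HLEN + 6] << 8) | frame[ETH_HLEN + 7]);
	if (op != 1 && op != 2)		/* ARPOP_REQUEST / ARPOP_REPLY */
		return false;

	/* sender IP at ARP offset 14, target IP at offset 24 */
	return !memcmp(&frame[ETH_HLEN + 14], &frame[ETH_HLEN + 24], 4);
}

int main(void)
{
	uint8_t f[ETH_HLEN + 28] = { 0 };

	f[12] = 0x08; f[13] = 0x06;				/* EtherType: ARP */
	f[ETH_HLEN + 7] = 2;					/* ARPOP_REPLY */
	memcpy(&f[ETH_HLEN + 14], "\xc0\xa8\x01\x02", 4);	/* sender IP */
	memcpy(&f[ETH_HLEN + 24], "\xc0\xa8\x01\x02", 4);	/* target IP */
	return is_gratuitous_arp(f, sizeof(f)) ? 0 : 1;
}
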
/*
* This function processes the received packet and forwards it
* to kernel/upper layer.
@@ -90,6 +132,13 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
either the reconstructed EthII frame or the 802.2/llc/snap frame */
skb_pull(skb, hdr_chop);
+ if (priv->hs2_enabled &&
+ mwifiex_discard_gratuitous_arp(priv, skb)) {
+ dev_dbg(priv->adapter->dev, "Bypassed Gratuitous ARP\n");
+ dev_kfree_skb_any(skb);
+ return 0;
+ }
+
priv->rxpd_rate = local_rx_pd->rx_rate;
priv->rxpd_htinfo = local_rx_pd->ht_info;
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index 2de882dead0f..64424c81b44f 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -293,9 +293,9 @@ mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
u8 *tlv = *tlv_buf;
tlv_akmp = (struct host_cmd_tlv_akmp *)tlv;
- tlv_akmp->tlv.type = cpu_to_le16(TLV_TYPE_UAP_AKMP);
- tlv_akmp->tlv.len = cpu_to_le16(sizeof(struct host_cmd_tlv_akmp) -
- sizeof(struct host_cmd_tlv));
+ tlv_akmp->header.type = cpu_to_le16(TLV_TYPE_UAP_AKMP);
+ tlv_akmp->header.len = cpu_to_le16(sizeof(struct host_cmd_tlv_akmp) -
+ sizeof(struct mwifiex_ie_types_header));
tlv_akmp->key_mgmt_operation = cpu_to_le16(bss_cfg->key_mgmt_operation);
tlv_akmp->key_mgmt = cpu_to_le16(bss_cfg->key_mgmt);
cmd_size += sizeof(struct host_cmd_tlv_akmp);
@@ -303,10 +303,10 @@ mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
if (bss_cfg->wpa_cfg.pairwise_cipher_wpa & VALID_CIPHER_BITMAP) {
pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
- pwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
- pwk_cipher->tlv.len =
+ pwk_cipher->header.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
+ pwk_cipher->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_pwk_cipher) -
- sizeof(struct host_cmd_tlv));
+ sizeof(struct mwifiex_ie_types_header));
pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA);
pwk_cipher->cipher = bss_cfg->wpa_cfg.pairwise_cipher_wpa;
cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
@@ -315,10 +315,10 @@ mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
if (bss_cfg->wpa_cfg.pairwise_cipher_wpa2 & VALID_CIPHER_BITMAP) {
pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
- pwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
- pwk_cipher->tlv.len =
+ pwk_cipher->header.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
+ pwk_cipher->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_pwk_cipher) -
- sizeof(struct host_cmd_tlv));
+ sizeof(struct mwifiex_ie_types_header));
pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA2);
pwk_cipher->cipher = bss_cfg->wpa_cfg.pairwise_cipher_wpa2;
cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
@@ -327,10 +327,10 @@ mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
if (bss_cfg->wpa_cfg.group_cipher & VALID_CIPHER_BITMAP) {
gwk_cipher = (struct host_cmd_tlv_gwk_cipher *)tlv;
- gwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_GWK_CIPHER);
- gwk_cipher->tlv.len =
+ gwk_cipher->header.type = cpu_to_le16(TLV_TYPE_GWK_CIPHER);
+ gwk_cipher->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_gwk_cipher) -
- sizeof(struct host_cmd_tlv));
+ sizeof(struct mwifiex_ie_types_header));
gwk_cipher->cipher = bss_cfg->wpa_cfg.group_cipher;
cmd_size += sizeof(struct host_cmd_tlv_gwk_cipher);
tlv += sizeof(struct host_cmd_tlv_gwk_cipher);
@@ -338,13 +338,15 @@ mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
if (bss_cfg->wpa_cfg.length) {
passphrase = (struct host_cmd_tlv_passphrase *)tlv;
- passphrase->tlv.type = cpu_to_le16(TLV_TYPE_UAP_WPA_PASSPHRASE);
- passphrase->tlv.len = cpu_to_le16(bss_cfg->wpa_cfg.length);
+ passphrase->header.type =
+ cpu_to_le16(TLV_TYPE_UAP_WPA_PASSPHRASE);
+ passphrase->header.len = cpu_to_le16(bss_cfg->wpa_cfg.length);
memcpy(passphrase->passphrase, bss_cfg->wpa_cfg.passphrase,
bss_cfg->wpa_cfg.length);
- cmd_size += sizeof(struct host_cmd_tlv) +
+ cmd_size += sizeof(struct mwifiex_ie_types_header) +
bss_cfg->wpa_cfg.length;
- tlv += sizeof(struct host_cmd_tlv) + bss_cfg->wpa_cfg.length;
+ tlv += sizeof(struct mwifiex_ie_types_header) +
+ bss_cfg->wpa_cfg.length;
}
*param_size = cmd_size;
@@ -403,16 +405,17 @@ mwifiex_uap_bss_wep(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
(bss_cfg->wep_cfg[i].length == WLAN_KEY_LEN_WEP40 ||
bss_cfg->wep_cfg[i].length == WLAN_KEY_LEN_WEP104)) {
wep_key = (struct host_cmd_tlv_wep_key *)tlv;
- wep_key->tlv.type = cpu_to_le16(TLV_TYPE_UAP_WEP_KEY);
- wep_key->tlv.len =
+ wep_key->header.type =
+ cpu_to_le16(TLV_TYPE_UAP_WEP_KEY);
+ wep_key->header.len =
cpu_to_le16(bss_cfg->wep_cfg[i].length + 2);
wep_key->key_index = bss_cfg->wep_cfg[i].key_index;
wep_key->is_default = bss_cfg->wep_cfg[i].is_default;
memcpy(wep_key->key, bss_cfg->wep_cfg[i].key,
bss_cfg->wep_cfg[i].length);
- cmd_size += sizeof(struct host_cmd_tlv) + 2 +
+ cmd_size += sizeof(struct mwifiex_ie_types_header) + 2 +
bss_cfg->wep_cfg[i].length;
- tlv += sizeof(struct host_cmd_tlv) + 2 +
+ tlv += sizeof(struct mwifiex_ie_types_header) + 2 +
bss_cfg->wep_cfg[i].length;
}
}
@@ -449,16 +452,17 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
if (bss_cfg->ssid.ssid_len) {
ssid = (struct host_cmd_tlv_ssid *)tlv;
- ssid->tlv.type = cpu_to_le16(TLV_TYPE_UAP_SSID);
- ssid->tlv.len = cpu_to_le16((u16)bss_cfg->ssid.ssid_len);
+ ssid->header.type = cpu_to_le16(TLV_TYPE_UAP_SSID);
+ ssid->header.len = cpu_to_le16((u16)bss_cfg->ssid.ssid_len);
memcpy(ssid->ssid, bss_cfg->ssid.ssid, bss_cfg->ssid.ssid_len);
- cmd_size += sizeof(struct host_cmd_tlv) +
+ cmd_size += sizeof(struct mwifiex_ie_types_header) +
bss_cfg->ssid.ssid_len;
- tlv += sizeof(struct host_cmd_tlv) + bss_cfg->ssid.ssid_len;
+ tlv += sizeof(struct mwifiex_ie_types_header) +
+ bss_cfg->ssid.ssid_len;
bcast_ssid = (struct host_cmd_tlv_bcast_ssid *)tlv;
- bcast_ssid->tlv.type = cpu_to_le16(TLV_TYPE_UAP_BCAST_SSID);
- bcast_ssid->tlv.len =
+ bcast_ssid->header.type = cpu_to_le16(TLV_TYPE_UAP_BCAST_SSID);
+ bcast_ssid->header.len =
cpu_to_le16(sizeof(bcast_ssid->bcast_ctl));
bcast_ssid->bcast_ctl = bss_cfg->bcast_ssid_ctl;
cmd_size += sizeof(struct host_cmd_tlv_bcast_ssid);
@@ -466,13 +470,13 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
}
if (bss_cfg->rates[0]) {
tlv_rates = (struct host_cmd_tlv_rates *)tlv;
- tlv_rates->tlv.type = cpu_to_le16(TLV_TYPE_UAP_RATES);
+ tlv_rates->header.type = cpu_to_le16(TLV_TYPE_UAP_RATES);
for (i = 0; i < MWIFIEX_SUPPORTED_RATES && bss_cfg->rates[i];
i++)
tlv_rates->rates[i] = bss_cfg->rates[i];
- tlv_rates->tlv.len = cpu_to_le16(i);
+ tlv_rates->header.len = cpu_to_le16(i);
cmd_size += sizeof(struct host_cmd_tlv_rates) + i;
tlv += sizeof(struct host_cmd_tlv_rates) + i;
}
@@ -482,10 +486,10 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
(bss_cfg->band_cfg == BAND_CONFIG_A &&
bss_cfg->channel <= MAX_CHANNEL_BAND_A))) {
chan_band = (struct host_cmd_tlv_channel_band *)tlv;
- chan_band->tlv.type = cpu_to_le16(TLV_TYPE_CHANNELBANDLIST);
- chan_band->tlv.len =
+ chan_band->header.type = cpu_to_le16(TLV_TYPE_CHANNELBANDLIST);
+ chan_band->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_channel_band) -
- sizeof(struct host_cmd_tlv));
+ sizeof(struct mwifiex_ie_types_header));
chan_band->band_config = bss_cfg->band_cfg;
chan_band->channel = bss_cfg->channel;
cmd_size += sizeof(struct host_cmd_tlv_channel_band);
@@ -494,11 +498,11 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
if (bss_cfg->beacon_period >= MIN_BEACON_PERIOD &&
bss_cfg->beacon_period <= MAX_BEACON_PERIOD) {
beacon_period = (struct host_cmd_tlv_beacon_period *)tlv;
- beacon_period->tlv.type =
+ beacon_period->header.type =
cpu_to_le16(TLV_TYPE_UAP_BEACON_PERIOD);
- beacon_period->tlv.len =
+ beacon_period->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_beacon_period) -
- sizeof(struct host_cmd_tlv));
+ sizeof(struct mwifiex_ie_types_header));
beacon_period->period = cpu_to_le16(bss_cfg->beacon_period);
cmd_size += sizeof(struct host_cmd_tlv_beacon_period);
tlv += sizeof(struct host_cmd_tlv_beacon_period);
@@ -506,21 +510,22 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
if (bss_cfg->dtim_period >= MIN_DTIM_PERIOD &&
bss_cfg->dtim_period <= MAX_DTIM_PERIOD) {
dtim_period = (struct host_cmd_tlv_dtim_period *)tlv;
- dtim_period->tlv.type = cpu_to_le16(TLV_TYPE_UAP_DTIM_PERIOD);
- dtim_period->tlv.len =
+ dtim_period->header.type =
+ cpu_to_le16(TLV_TYPE_UAP_DTIM_PERIOD);
+ dtim_period->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_dtim_period) -
- sizeof(struct host_cmd_tlv));
+ sizeof(struct mwifiex_ie_types_header));
dtim_period->period = bss_cfg->dtim_period;
cmd_size += sizeof(struct host_cmd_tlv_dtim_period);
tlv += sizeof(struct host_cmd_tlv_dtim_period);
}
if (bss_cfg->rts_threshold <= MWIFIEX_RTS_MAX_VALUE) {
rts_threshold = (struct host_cmd_tlv_rts_threshold *)tlv;
- rts_threshold->tlv.type =
+ rts_threshold->header.type =
cpu_to_le16(TLV_TYPE_UAP_RTS_THRESHOLD);
- rts_threshold->tlv.len =
+ rts_threshold->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_rts_threshold) -
- sizeof(struct host_cmd_tlv));
+ sizeof(struct mwifiex_ie_types_header));
rts_threshold->rts_thr = cpu_to_le16(bss_cfg->rts_threshold);
cmd_size += sizeof(struct host_cmd_tlv_frag_threshold);
tlv += sizeof(struct host_cmd_tlv_frag_threshold);
@@ -528,21 +533,22 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
if ((bss_cfg->frag_threshold >= MWIFIEX_FRAG_MIN_VALUE) &&
(bss_cfg->frag_threshold <= MWIFIEX_FRAG_MAX_VALUE)) {
frag_threshold = (struct host_cmd_tlv_frag_threshold *)tlv;
- frag_threshold->tlv.type =
+ frag_threshold->header.type =
cpu_to_le16(TLV_TYPE_UAP_FRAG_THRESHOLD);
- frag_threshold->tlv.len =
+ frag_threshold->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_frag_threshold) -
- sizeof(struct host_cmd_tlv));
+ sizeof(struct mwifiex_ie_types_header));
frag_threshold->frag_thr = cpu_to_le16(bss_cfg->frag_threshold);
cmd_size += sizeof(struct host_cmd_tlv_frag_threshold);
tlv += sizeof(struct host_cmd_tlv_frag_threshold);
}
if (bss_cfg->retry_limit <= MWIFIEX_RETRY_LIMIT) {
retry_limit = (struct host_cmd_tlv_retry_limit *)tlv;
- retry_limit->tlv.type = cpu_to_le16(TLV_TYPE_UAP_RETRY_LIMIT);
- retry_limit->tlv.len =
+ retry_limit->header.type =
+ cpu_to_le16(TLV_TYPE_UAP_RETRY_LIMIT);
+ retry_limit->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_retry_limit) -
- sizeof(struct host_cmd_tlv));
+ sizeof(struct mwifiex_ie_types_header));
retry_limit->limit = (u8)bss_cfg->retry_limit;
cmd_size += sizeof(struct host_cmd_tlv_retry_limit);
tlv += sizeof(struct host_cmd_tlv_retry_limit);
@@ -557,21 +563,21 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
if ((bss_cfg->auth_mode <= WLAN_AUTH_SHARED_KEY) ||
(bss_cfg->auth_mode == MWIFIEX_AUTH_MODE_AUTO)) {
auth_type = (struct host_cmd_tlv_auth_type *)tlv;
- auth_type->tlv.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE);
- auth_type->tlv.len =
+ auth_type->header.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE);
+ auth_type->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_auth_type) -
- sizeof(struct host_cmd_tlv));
+ sizeof(struct mwifiex_ie_types_header));
auth_type->auth_type = (u8)bss_cfg->auth_mode;
cmd_size += sizeof(struct host_cmd_tlv_auth_type);
tlv += sizeof(struct host_cmd_tlv_auth_type);
}
if (bss_cfg->protocol) {
encrypt_protocol = (struct host_cmd_tlv_encrypt_protocol *)tlv;
- encrypt_protocol->tlv.type =
+ encrypt_protocol->header.type =
cpu_to_le16(TLV_TYPE_UAP_ENCRY_PROTOCOL);
- encrypt_protocol->tlv.len =
+ encrypt_protocol->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_encrypt_protocol)
- - sizeof(struct host_cmd_tlv));
+ - sizeof(struct mwifiex_ie_types_header));
encrypt_protocol->proto = cpu_to_le16(bss_cfg->protocol);
cmd_size += sizeof(struct host_cmd_tlv_encrypt_protocol);
tlv += sizeof(struct host_cmd_tlv_encrypt_protocol);
@@ -608,9 +614,9 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
if (bss_cfg->sta_ao_timer) {
ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv;
- ao_timer->tlv.type = cpu_to_le16(TLV_TYPE_UAP_AO_TIMER);
- ao_timer->tlv.len = cpu_to_le16(sizeof(*ao_timer) -
- sizeof(struct host_cmd_tlv));
+ ao_timer->header.type = cpu_to_le16(TLV_TYPE_UAP_AO_TIMER);
+ ao_timer->header.len = cpu_to_le16(sizeof(*ao_timer) -
+ sizeof(struct mwifiex_ie_types_header));
ao_timer->sta_ao_timer = cpu_to_le32(bss_cfg->sta_ao_timer);
cmd_size += sizeof(*ao_timer);
tlv += sizeof(*ao_timer);
@@ -618,9 +624,10 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
if (bss_cfg->ps_sta_ao_timer) {
ps_ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv;
- ps_ao_timer->tlv.type = cpu_to_le16(TLV_TYPE_UAP_PS_AO_TIMER);
- ps_ao_timer->tlv.len = cpu_to_le16(sizeof(*ps_ao_timer) -
- sizeof(struct host_cmd_tlv));
+ ps_ao_timer->header.type =
+ cpu_to_le16(TLV_TYPE_UAP_PS_AO_TIMER);
+ ps_ao_timer->header.len = cpu_to_le16(sizeof(*ps_ao_timer) -
+ sizeof(struct mwifiex_ie_types_header));
ps_ao_timer->sta_ao_timer =
cpu_to_le32(bss_cfg->ps_sta_ao_timer);
cmd_size += sizeof(*ps_ao_timer);
@@ -636,16 +643,17 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
static int mwifiex_uap_custom_ie_prepare(u8 *tlv, void *cmd_buf, u16 *ie_size)
{
struct mwifiex_ie_list *ap_ie = cmd_buf;
- struct host_cmd_tlv *tlv_ie = (struct host_cmd_tlv *)tlv;
+ struct mwifiex_ie_types_header *tlv_ie = (void *)tlv;
if (!ap_ie || !ap_ie->len || !ap_ie->ie_list)
return -1;
- *ie_size += le16_to_cpu(ap_ie->len) + sizeof(struct host_cmd_tlv);
+ *ie_size += le16_to_cpu(ap_ie->len) +
+ sizeof(struct mwifiex_ie_types_header);
tlv_ie->type = cpu_to_le16(TLV_TYPE_MGMT_IE);
tlv_ie->len = ap_ie->len;
- tlv += sizeof(struct host_cmd_tlv);
+ tlv += sizeof(struct mwifiex_ie_types_header);
memcpy(tlv, ap_ie->ie_list, le16_to_cpu(ap_ie->len));
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
index a018e42d117e..1cfe5a738c47 100644
--- a/drivers/net/wireless/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -24,6 +24,69 @@
#include "11n_aggr.h"
#include "11n_rxreorder.h"
+/* This function walks the given RA lists and deletes queued bridged packets
+ * until the pending bridged-packet count drops to the low bridge packet
+ * threshold. It returns true if at least one packet was deleted, false
+ * otherwise.
+ */
+static bool
+mwifiex_uap_del_tx_pkts_in_ralist(struct mwifiex_private *priv,
+ struct list_head *ra_list_head)
+{
+ struct mwifiex_ra_list_tbl *ra_list;
+ struct sk_buff *skb, *tmp;
+ bool pkt_deleted = false;
+ struct mwifiex_txinfo *tx_info;
+ struct mwifiex_adapter *adapter = priv->adapter;
+
+ list_for_each_entry(ra_list, ra_list_head, list) {
+ if (skb_queue_empty(&ra_list->skb_head))
+ continue;
+
+ skb_queue_walk_safe(&ra_list->skb_head, skb, tmp) {
+ tx_info = MWIFIEX_SKB_TXCB(skb);
+ if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT) {
+ __skb_unlink(skb, &ra_list->skb_head);
+ mwifiex_write_data_complete(adapter, skb, 0,
+ -1);
+ atomic_dec(&priv->wmm.tx_pkts_queued);
+ pkt_deleted = true;
+ }
+ if ((atomic_read(&adapter->pending_bridged_pkts) <=
+ MWIFIEX_BRIDGED_PKTS_THR_LOW))
+ break;
+ }
+ }
+
+ return pkt_deleted;
+}
+
+/* This function deletes packets from a particular RA list. The index of the
+ * RA list that was trimmed is preserved, so a subsequent call deletes packets
+ * from the next RA list, maintaining fairness across lists.
+ */
+static void mwifiex_uap_cleanup_tx_queues(struct mwifiex_private *priv)
+{
+ unsigned long flags;
+ struct list_head *ra_list;
+ int i;
+
+ spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+
+ for (i = 0; i < MAX_NUM_TID; i++, priv->del_list_idx++) {
+ if (priv->del_list_idx == MAX_NUM_TID)
+ priv->del_list_idx = 0;
+ ra_list = &priv->wmm.tid_tbl_ptr[priv->del_list_idx].ra_list;
+ if (mwifiex_uap_del_tx_pkts_in_ralist(priv, ra_list)) {
+ priv->del_list_idx++;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+}
+
+
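
mwifiex_uap_cleanup_tx_queues() resumes scanning from priv->del_list_idx, which is saved across invocations, so successive high-watermark events trim different RA lists instead of always penalizing the first one. A runnable sketch of the persisted-cursor round-robin; reclaim_list() is a hypothetical per-list reclaim:

#include <stdbool.h>
#include <stdio.h>

#define NUM_LISTS	8	/* stand-in for MAX_NUM_TID */

static unsigned int del_idx;	/* persists across calls, like priv->del_list_idx */

/* Hypothetical per-list reclaim; returns true if it freed something. */
static bool reclaim_list(unsigned int idx)
{
	return idx % 3 == 0;
}

/* Scan at most one full round, starting where the previous call stopped,
 * and advance past the list that was trimmed so the next call begins on
 * its successor -- the fairness scheme of mwifiex_uap_cleanup_tx_queues(). */
static void cleanup_round(void)
{
	unsigned int i;

	for (i = 0; i < NUM_LISTS; i++, del_idx++) {
		if (del_idx == NUM_LISTS)
			del_idx = 0;
		if (reclaim_list(del_idx)) {
			del_idx++;
			break;
		}
	}
}

int main(void)
{
	cleanup_round();
	cleanup_round();
	printf("next start index: %u\n", del_idx % NUM_LISTS);
	return 0;
}
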
static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
struct sk_buff *skb)
{
@@ -40,10 +103,11 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
if ((atomic_read(&adapter->pending_bridged_pkts) >=
- MWIFIEX_BRIDGED_PKTS_THRESHOLD)) {
+ MWIFIEX_BRIDGED_PKTS_THR_HIGH)) {
dev_err(priv->adapter->dev,
"Tx: Bridge packet limit reached. Drop packet!\n");
kfree_skb(skb);
+ mwifiex_uap_cleanup_tx_queues(priv);
return;
}
@@ -95,10 +159,6 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
atomic_inc(&adapter->tx_pending);
atomic_inc(&adapter->pending_bridged_pkts);
- if ((atomic_read(&adapter->tx_pending) >= MAX_TX_PENDING)) {
- mwifiex_set_trans_start(priv->netdev);
- mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
- }
return;
}
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index f90fe21e5bfd..2472d4b7f00e 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -24,9 +24,9 @@
static const char usbdriver_name[] = "usb8797";
-static u8 user_rmmod;
static struct mwifiex_if_ops usb_ops;
static struct semaphore add_remove_card_sem;
+static struct usb_card_rec *usb_card;
static struct usb_device_id mwifiex_usb_table[] = {
{USB_DEVICE(USB8797_VID, USB8797_PID_1)},
@@ -350,6 +350,7 @@ static int mwifiex_usb_probe(struct usb_interface *intf,
card->udev = udev;
card->intf = intf;
+ usb_card = card;
pr_debug("info: bcdUSB=%#x Device Class=%#x SubClass=%#x Protocol=%#x\n",
udev->descriptor.bcdUSB, udev->descriptor.bDeviceClass,
@@ -532,7 +533,6 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf)
{
struct usb_card_rec *card = usb_get_intfdata(intf);
struct mwifiex_adapter *adapter;
- int i;
if (!card || !card->adapter) {
pr_err("%s: card or card->adapter is NULL\n", __func__);
@@ -543,27 +543,6 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf)
if (!adapter->priv_num)
return;
- /* In case driver is removed when asynchronous FW downloading is
- * in progress
- */
- wait_for_completion(&adapter->fw_load);
-
- if (user_rmmod) {
-#ifdef CONFIG_PM
- if (adapter->is_suspended)
- mwifiex_usb_resume(intf);
-#endif
- for (i = 0; i < adapter->priv_num; i++)
- if ((GET_BSS_ROLE(adapter->priv[i]) ==
- MWIFIEX_BSS_ROLE_STA) &&
- adapter->priv[i]->media_connected)
- mwifiex_deauthenticate(adapter->priv[i], NULL);
-
- mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
- MWIFIEX_BSS_ROLE_ANY),
- MWIFIEX_FUNC_SHUTDOWN);
- }
-
mwifiex_usb_free(card);
dev_dbg(adapter->dev, "%s: removing card\n", __func__);
@@ -786,6 +765,13 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
return 0;
}
+static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
+{
+ struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
+
+ usb_set_intfdata(card->intf, NULL);
+}
+
static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
struct mwifiex_fw_image *fw)
{
@@ -978,6 +964,7 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
static struct mwifiex_if_ops usb_ops = {
.register_dev = mwifiex_register_dev,
+ .unregister_dev = mwifiex_unregister_dev,
.wakeup = mwifiex_pm_wakeup_card,
.wakeup_complete = mwifiex_pm_wakeup_card_complete,
@@ -1024,8 +1011,29 @@ static void mwifiex_usb_cleanup_module(void)
if (!down_interruptible(&add_remove_card_sem))
up(&add_remove_card_sem);
- /* set the flag as user is removing this module */
- user_rmmod = 1;
+ if (usb_card) {
+ struct mwifiex_adapter *adapter = usb_card->adapter;
+ int i;
+
+ /* In case the driver is removed while asynchronous FW download
+ * is in progress
+ */
+ wait_for_completion(&adapter->fw_load);
+
+#ifdef CONFIG_PM
+ if (adapter->is_suspended)
+ mwifiex_usb_resume(usb_card->intf);
+#endif
+ for (i = 0; i < adapter->priv_num; i++)
+ if ((GET_BSS_ROLE(adapter->priv[i]) ==
+ MWIFIEX_BSS_ROLE_STA) &&
+ adapter->priv[i]->media_connected)
+ mwifiex_deauthenticate(adapter->priv[i], NULL);
+
+ mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
+ MWIFIEX_BSS_ROLE_ANY),
+ MWIFIEX_FUNC_SHUTDOWN);
+ }
usb_deregister(&mwifiex_usb_driver);
}
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index e57ac0dd3ab5..5d9e150f4111 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -171,8 +171,8 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
rx_pd->rx_pkt_length = cpu_to_le16(pkt_len);
cfg80211_rx_mgmt(priv->wdev, priv->roc_cfg.chan.center_freq,
- CAL_RSSI(rx_pd->snr, rx_pd->nf),
- skb->data, pkt_len, GFP_ATOMIC);
+ CAL_RSSI(rx_pd->snr, rx_pd->nf), skb->data, pkt_len,
+ 0, GFP_ATOMIC);
return 0;
}
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 944e8846f6fc..2e8f9cdea54d 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -120,7 +120,7 @@ mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, u8 *ra)
memcpy(ra_list->ra, ra, ETH_ALEN);
- ra_list->total_pkts_size = 0;
+ ra_list->total_pkt_count = 0;
dev_dbg(adapter->dev, "info: allocated ra_list %p\n", ra_list);
@@ -188,7 +188,7 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
ra_list, ra_list->is_11n_enabled);
if (ra_list->is_11n_enabled) {
- ra_list->pkt_count = 0;
+ ra_list->ba_pkt_count = 0;
ra_list->ba_packet_thr =
mwifiex_get_random_ba_threshold();
}
@@ -679,8 +679,8 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
skb_queue_tail(&ra_list->skb_head, skb);
- ra_list->total_pkts_size += skb->len;
- ra_list->pkt_count++;
+ ra_list->ba_pkt_count++;
+ ra_list->total_pkt_count++;
if (atomic_read(&priv->wmm.highest_queued_prio) <
tos_to_tid_inv[tid_down])
@@ -1037,7 +1037,7 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
tx_info = MWIFIEX_SKB_TXCB(skb);
dev_dbg(adapter->dev, "data: dequeuing the packet %p %p\n", ptr, skb);
- ptr->total_pkts_size -= skb->len;
+ ptr->total_pkt_count--;
if (!skb_queue_empty(&ptr->skb_head))
skb_next = skb_peek(&ptr->skb_head);
@@ -1062,8 +1062,8 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
skb_queue_tail(&ptr->skb_head, skb);
- ptr->total_pkts_size += skb->len;
- ptr->pkt_count++;
+ ptr->total_pkt_count++;
+ ptr->ba_pkt_count++;
tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
ra_list_flags);
@@ -1224,7 +1224,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
mwifiex_send_single_packet() */
} else {
if (mwifiex_is_ampdu_allowed(priv, tid) &&
- ptr->pkt_count > ptr->ba_packet_thr) {
+ ptr->ba_pkt_count > ptr->ba_packet_thr) {
if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
mwifiex_create_ba_tbl(priv, ptr->ra, tid,
BA_SETUP_INPROGRESS);
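
The wmm.c rename splits the old pkt_count into ba_pkt_count, which is compared against ba_packet_thr to trigger block-ack setup, and total_pkt_count, which tracks queue occupancy. A small runnable sketch of that threshold check; the struct is illustrative, not the driver's:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative per-RA state mirroring the renamed counters: ba_pkt_count
 * feeds the block-ack trigger, total_pkt_count tracks queue occupancy. */
struct ra_state {
	unsigned int ba_pkt_count;
	unsigned int total_pkt_count;
	unsigned int ba_packet_thr;
	bool ba_in_progress;
};

/* Queue one packet and decide whether to start block-ack negotiation --
 * the ba_pkt_count > ba_packet_thr check performed when dequeuing. */
static bool queue_pkt(struct ra_state *ra)
{
	ra->ba_pkt_count++;
	ra->total_pkt_count++;
	if (!ra->ba_in_progress && ra->ba_pkt_count > ra->ba_packet_thr) {
		ra->ba_in_progress = true;	/* BA_SETUP_INPROGRESS analogue */
		return true;
	}
	return false;
}

int main(void)
{
	struct ra_state ra = { 0, 0, 3, false };
	unsigned int i;

	for (i = 0; i < 6; i++)
		if (queue_pkt(&ra))
			printf("BA setup after %u packets\n", ra.ba_pkt_count);
	return 0;
}
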
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 3e60a31582f8..68dbbb9c6d12 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -166,6 +166,12 @@ config RT2800USB_RT35XX
rt2800usb driver.
Supported chips: RT3572
+config RT2800USB_RT3573
+ bool "rt2800usb - Include support for rt3573 devices (EXPERIMENTAL)"
+ ---help---
+ This enables support for RT3573 chipset based wireless USB devices
+ in the rt2800usb driver.
+
config RT2800USB_RT53XX
bool "rt2800usb - Include support for rt53xx devices (EXPERIMENTAL)"
---help---
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index d78c495a86a0..fa33b5edf931 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -88,6 +88,7 @@
#define REV_RT3071E 0x0211
#define REV_RT3090E 0x0211
#define REV_RT3390E 0x0211
+#define REV_RT3593E 0x0211
#define REV_RT5390F 0x0502
#define REV_RT5390R 0x1502
#define REV_RT5592C 0x0221
@@ -1082,6 +1083,15 @@
#define TX_PWR_CFG_0_9MBS FIELD32(0x00f00000)
#define TX_PWR_CFG_0_12MBS FIELD32(0x0f000000)
#define TX_PWR_CFG_0_18MBS FIELD32(0xf0000000)
+/* bits for 3T devices */
+#define TX_PWR_CFG_0_CCK1_CH0 FIELD32(0x0000000f)
+#define TX_PWR_CFG_0_CCK1_CH1 FIELD32(0x000000f0)
+#define TX_PWR_CFG_0_CCK5_CH0 FIELD32(0x00000f00)
+#define TX_PWR_CFG_0_CCK5_CH1 FIELD32(0x0000f000)
+#define TX_PWR_CFG_0_OFDM6_CH0 FIELD32(0x000f0000)
+#define TX_PWR_CFG_0_OFDM6_CH1 FIELD32(0x00f00000)
+#define TX_PWR_CFG_0_OFDM12_CH0 FIELD32(0x0f000000)
+#define TX_PWR_CFG_0_OFDM12_CH1 FIELD32(0xf0000000)
/*
* TX_PWR_CFG_1:
@@ -1095,6 +1105,15 @@
#define TX_PWR_CFG_1_MCS1 FIELD32(0x00f00000)
#define TX_PWR_CFG_1_MCS2 FIELD32(0x0f000000)
#define TX_PWR_CFG_1_MCS3 FIELD32(0xf0000000)
+/* bits for 3T devices */
+#define TX_PWR_CFG_1_OFDM24_CH0 FIELD32(0x0000000f)
+#define TX_PWR_CFG_1_OFDM24_CH1 FIELD32(0x000000f0)
+#define TX_PWR_CFG_1_OFDM48_CH0 FIELD32(0x00000f00)
+#define TX_PWR_CFG_1_OFDM48_CH1 FIELD32(0x0000f000)
+#define TX_PWR_CFG_1_MCS0_CH0 FIELD32(0x000f0000)
+#define TX_PWR_CFG_1_MCS0_CH1 FIELD32(0x00f00000)
+#define TX_PWR_CFG_1_MCS2_CH0 FIELD32(0x0f000000)
+#define TX_PWR_CFG_1_MCS2_CH1 FIELD32(0xf0000000)
/*
* TX_PWR_CFG_2:
@@ -1108,6 +1127,15 @@
#define TX_PWR_CFG_2_MCS9 FIELD32(0x00f00000)
#define TX_PWR_CFG_2_MCS10 FIELD32(0x0f000000)
#define TX_PWR_CFG_2_MCS11 FIELD32(0xf0000000)
+/* bits for 3T devices */
+#define TX_PWR_CFG_2_MCS4_CH0 FIELD32(0x0000000f)
+#define TX_PWR_CFG_2_MCS4_CH1 FIELD32(0x000000f0)
+#define TX_PWR_CFG_2_MCS6_CH0 FIELD32(0x00000f00)
+#define TX_PWR_CFG_2_MCS6_CH1 FIELD32(0x0000f000)
+#define TX_PWR_CFG_2_MCS8_CH0 FIELD32(0x000f0000)
+#define TX_PWR_CFG_2_MCS8_CH1 FIELD32(0x00f00000)
+#define TX_PWR_CFG_2_MCS10_CH0 FIELD32(0x0f000000)
+#define TX_PWR_CFG_2_MCS10_CH1 FIELD32(0xf0000000)
/*
* TX_PWR_CFG_3:
@@ -1121,6 +1149,15 @@
#define TX_PWR_CFG_3_UKNOWN2 FIELD32(0x00f00000)
#define TX_PWR_CFG_3_UKNOWN3 FIELD32(0x0f000000)
#define TX_PWR_CFG_3_UKNOWN4 FIELD32(0xf0000000)
+/* bits for 3T devices */
+#define TX_PWR_CFG_3_MCS12_CH0 FIELD32(0x0000000f)
+#define TX_PWR_CFG_3_MCS12_CH1 FIELD32(0x000000f0)
+#define TX_PWR_CFG_3_MCS14_CH0 FIELD32(0x00000f00)
+#define TX_PWR_CFG_3_MCS14_CH1 FIELD32(0x0000f000)
+#define TX_PWR_CFG_3_STBC0_CH0 FIELD32(0x000f0000)
+#define TX_PWR_CFG_3_STBC0_CH1 FIELD32(0x00f00000)
+#define TX_PWR_CFG_3_STBC2_CH0 FIELD32(0x0f000000)
+#define TX_PWR_CFG_3_STBC2_CH1 FIELD32(0xf0000000)
/*
* TX_PWR_CFG_4:
@@ -1130,6 +1167,11 @@
#define TX_PWR_CFG_4_UKNOWN6 FIELD32(0x000000f0)
#define TX_PWR_CFG_4_UKNOWN7 FIELD32(0x00000f00)
#define TX_PWR_CFG_4_UKNOWN8 FIELD32(0x0000f000)
+/* bits for 3T devices */
+#define TX_PWR_CFG_3_STBC4_CH0 FIELD32(0x0000000f)
+#define TX_PWR_CFG_3_STBC4_CH1 FIELD32(0x000000f0)
+#define TX_PWR_CFG_3_STBC6_CH0 FIELD32(0x00000f00)
+#define TX_PWR_CFG_3_STBC6_CH1 FIELD32(0x0000f000)
/*
* TX_PIN_CFG:
@@ -1451,6 +1493,81 @@
*/
#define EXP_ACK_TIME 0x1380
+/* TX_PWR_CFG_5 */
+#define TX_PWR_CFG_5 0x1384
+#define TX_PWR_CFG_5_MCS16_CH0 FIELD32(0x0000000f)
+#define TX_PWR_CFG_5_MCS16_CH1 FIELD32(0x000000f0)
+#define TX_PWR_CFG_5_MCS16_CH2 FIELD32(0x00000f00)
+#define TX_PWR_CFG_5_MCS18_CH0 FIELD32(0x000f0000)
+#define TX_PWR_CFG_5_MCS18_CH1 FIELD32(0x00f00000)
+#define TX_PWR_CFG_5_MCS18_CH2 FIELD32(0x0f000000)
+
+/* TX_PWR_CFG_6 */
+#define TX_PWR_CFG_6 0x1388
+#define TX_PWR_CFG_6_MCS20_CH0 FIELD32(0x0000000f)
+#define TX_PWR_CFG_6_MCS20_CH1 FIELD32(0x000000f0)
+#define TX_PWR_CFG_6_MCS20_CH2 FIELD32(0x00000f00)
+#define TX_PWR_CFG_6_MCS22_CH0 FIELD32(0x000f0000)
+#define TX_PWR_CFG_6_MCS22_CH1 FIELD32(0x00f00000)
+#define TX_PWR_CFG_6_MCS22_CH2 FIELD32(0x0f000000)
+
+/* TX_PWR_CFG_0_EXT */
+#define TX_PWR_CFG_0_EXT 0x1390
+#define TX_PWR_CFG_0_EXT_CCK1_CH2 FIELD32(0x0000000f)
+#define TX_PWR_CFG_0_EXT_CCK5_CH2 FIELD32(0x00000f00)
+#define TX_PWR_CFG_0_EXT_OFDM6_CH2 FIELD32(0x000f0000)
+#define TX_PWR_CFG_0_EXT_OFDM12_CH2 FIELD32(0x0f000000)
+
+/* TX_PWR_CFG_1_EXT */
+#define TX_PWR_CFG_1_EXT 0x1394
+#define TX_PWR_CFG_1_EXT_OFDM24_CH2 FIELD32(0x0000000f)
+#define TX_PWR_CFG_1_EXT_OFDM48_CH2 FIELD32(0x00000f00)
+#define TX_PWR_CFG_1_EXT_MCS0_CH2 FIELD32(0x000f0000)
+#define TX_PWR_CFG_1_EXT_MCS2_CH2 FIELD32(0x0f000000)
+
+/* TX_PWR_CFG_2_EXT */
+#define TX_PWR_CFG_2_EXT 0x1398
+#define TX_PWR_CFG_2_EXT_MCS4_CH2 FIELD32(0x0000000f)
+#define TX_PWR_CFG_2_EXT_MCS6_CH2 FIELD32(0x00000f00)
+#define TX_PWR_CFG_2_EXT_MCS8_CH2 FIELD32(0x000f0000)
+#define TX_PWR_CFG_2_EXT_MCS10_CH2 FIELD32(0x0f000000)
+
+/* TX_PWR_CFG_3_EXT */
+#define TX_PWR_CFG_3_EXT 0x139c
+#define TX_PWR_CFG_3_EXT_MCS12_CH2 FIELD32(0x0000000f)
+#define TX_PWR_CFG_3_EXT_MCS14_CH2 FIELD32(0x00000f00)
+#define TX_PWR_CFG_3_EXT_STBC0_CH2 FIELD32(0x000f0000)
+#define TX_PWR_CFG_3_EXT_STBC2_CH2 FIELD32(0x0f000000)
+
+/* TX_PWR_CFG_4_EXT */
+#define TX_PWR_CFG_4_EXT 0x13a0
+#define TX_PWR_CFG_4_EXT_STBC4_CH2 FIELD32(0x0000000f)
+#define TX_PWR_CFG_4_EXT_STBC6_CH2 FIELD32(0x00000f00)
+
+/* TX_PWR_CFG_7 */
+#define TX_PWR_CFG_7 0x13d4
+#define TX_PWR_CFG_7_OFDM54_CH0 FIELD32(0x0000000f)
+#define TX_PWR_CFG_7_OFDM54_CH1 FIELD32(0x000000f0)
+#define TX_PWR_CFG_7_OFDM54_CH2 FIELD32(0x00000f00)
+#define TX_PWR_CFG_7_MCS7_CH0 FIELD32(0x000f0000)
+#define TX_PWR_CFG_7_MCS7_CH1 FIELD32(0x00f00000)
+#define TX_PWR_CFG_7_MCS7_CH2 FIELD32(0x0f000000)
+
+/* TX_PWR_CFG_8 */
+#define TX_PWR_CFG_8 0x13d8
+#define TX_PWR_CFG_8_MCS15_CH0 FIELD32(0x0000000f)
+#define TX_PWR_CFG_8_MCS15_CH1 FIELD32(0x000000f0)
+#define TX_PWR_CFG_8_MCS15_CH2 FIELD32(0x00000f00)
+#define TX_PWR_CFG_8_MCS23_CH0 FIELD32(0x000f0000)
+#define TX_PWR_CFG_8_MCS23_CH1 FIELD32(0x00f00000)
+#define TX_PWR_CFG_8_MCS23_CH2 FIELD32(0x0f000000)
+
+/* TX_PWR_CFG_9 */
+#define TX_PWR_CFG_9 0x13dc
+#define TX_PWR_CFG_9_STBC7_CH0 FIELD32(0x0000000f)
+#define TX_PWR_CFG_9_STBC7_CH1 FIELD32(0x000000f0)
+#define TX_PWR_CFG_9_STBC7_CH2 FIELD32(0x00000f00)
+
/*
* RX_FILTER_CFG: RX configuration register.
*/
@@ -1902,11 +2019,13 @@ struct mac_iveiv_entry {
#define HW_BEACON_BASE6 0x5dc0
#define HW_BEACON_BASE7 0x5bc0
-#define HW_BEACON_OFFSET(__index) \
+#define HW_BEACON_BASE(__index) \
(((__index) < 4) ? (HW_BEACON_BASE0 + (__index * 0x0200)) : \
(((__index) < 6) ? (HW_BEACON_BASE4 + ((__index - 4) * 0x0200)) : \
(HW_BEACON_BASE6 - ((__index - 6) * 0x0200))))
+#define BEACON_BASE_TO_OFFSET(_base) (((_base) - 0x4000) / 64)
+
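
HW_BEACON_BASE() maps a beacon slot index onto three non-contiguous register banks, and the new BEACON_BASE_TO_OFFSET() converts a base address back into the 64-byte-granular offset token. A runnable sketch of the offset conversion, using the two base values visible above:

#include <stdio.h>

#define HW_BEACON_BASE6	0x5dc0
#define HW_BEACON_BASE7	0x5bc0

/* Convert a beacon base register address back into a 64-byte-granular
 * offset, as BEACON_BASE_TO_OFFSET() does above. */
static unsigned int base_to_offset(unsigned int base)
{
	return (base - 0x4000) / 64;
}

int main(void)
{
	printf("base6 -> %u, base7 -> %u\n",
	       base_to_offset(HW_BEACON_BASE6),
	       base_to_offset(HW_BEACON_BASE7));
	return 0;
}
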
/*
* BBP registers.
* The wordsize of the BBP is 8 bits.
@@ -1975,6 +2094,10 @@ struct mac_iveiv_entry {
#define BBP109_TX0_POWER FIELD8(0x0f)
#define BBP109_TX1_POWER FIELD8(0xf0)
+/* BBP 110 */
+#define BBP110_TX2_POWER FIELD8(0x0f)
+
+
/*
* BBP 138: Unknown
*/
@@ -2024,6 +2147,12 @@ struct mac_iveiv_entry {
#define RFCSR3_PA2_CASCODE_BIAS_CCKK FIELD8(0x80)
/* Bits for RF3290/RF5360/RF5370/RF5372/RF5390/RF5392 */
#define RFCSR3_VCOCAL_EN FIELD8(0x80)
+/* Bits for RF3050 */
+#define RFCSR3_BIT1 FIELD8(0x02)
+#define RFCSR3_BIT2 FIELD8(0x04)
+#define RFCSR3_BIT3 FIELD8(0x08)
+#define RFCSR3_BIT4 FIELD8(0x10)
+#define RFCSR3_BIT5 FIELD8(0x20)
/*
* FRCSR 5:
@@ -2036,6 +2165,8 @@ struct mac_iveiv_entry {
#define RFCSR6_R1 FIELD8(0x03)
#define RFCSR6_R2 FIELD8(0x40)
#define RFCSR6_TXDIV FIELD8(0x0c)
+/* bits for RF3053 */
+#define RFCSR6_VCO_IC FIELD8(0xc0)
/*
* RFCSR 7:
@@ -2060,7 +2191,12 @@ struct mac_iveiv_entry {
* RFCSR 11:
*/
#define RFCSR11_R FIELD8(0x03)
+#define RFCSR11_PLL_MOD FIELD8(0x0c)
#define RFCSR11_MOD FIELD8(0xc0)
+/* bits for RF3053 */
+/* TODO: verify RFCSR11_MOD usage on other chips */
+#define RFCSR11_PLL_IDOH FIELD8(0x40)
+
/*
* RFCSR 12:
@@ -2092,6 +2228,10 @@ struct mac_iveiv_entry {
#define RFCSR17_R FIELD8(0x20)
#define RFCSR17_CODE FIELD8(0x7f)
+/* RFCSR 18 */
+#define RFCSR18_XO_TUNE_BYPASS FIELD8(0x40)
+
+
/*
* RFCSR 20:
*/
@@ -2152,6 +2292,12 @@ struct mac_iveiv_entry {
#define RFCSR31_RX_H20M FIELD8(0x20)
#define RFCSR31_RX_CALIB FIELD8(0x7f)
+/* RFCSR 32 bits for RF3053 */
+#define RFCSR32_TX_AGC_FC FIELD8(0xf8)
+
+/* RFCSR 36 bits for RF3053 */
+#define RFCSR36_RF_BS FIELD8(0x80)
+
/*
* RFCSR 38:
*/
@@ -2160,6 +2306,7 @@ struct mac_iveiv_entry {
/*
* RFCSR 39:
*/
+#define RFCSR39_RX_DIV FIELD8(0x40)
#define RFCSR39_RX_LO2_EN FIELD8(0x80)
/*
@@ -2167,12 +2314,36 @@ struct mac_iveiv_entry {
*/
#define RFCSR49_TX FIELD8(0x3f)
#define RFCSR49_EP FIELD8(0xc0)
+/* bits for RT3593 */
+#define RFCSR49_TX_LO1_IC FIELD8(0x1c)
+#define RFCSR49_TX_DIV FIELD8(0x20)
/*
* RFCSR 50:
*/
#define RFCSR50_TX FIELD8(0x3f)
#define RFCSR50_EP FIELD8(0xc0)
+/* bits for RT3593 */
+#define RFCSR50_TX_LO1_EN FIELD8(0x20)
+#define RFCSR50_TX_LO2_EN FIELD8(0x10)
+
+/* RFCSR 51 */
+/* bits for RT3593 */
+#define RFCSR51_BITS01 FIELD8(0x03)
+#define RFCSR51_BITS24 FIELD8(0x1c)
+#define RFCSR51_BITS57 FIELD8(0xe0)
+
+#define RFCSR53_TX_POWER FIELD8(0x3f)
+#define RFCSR53_UNKNOWN FIELD8(0xc0)
+
+#define RFCSR54_TX_POWER FIELD8(0x3f)
+#define RFCSR54_UNKNOWN FIELD8(0xc0)
+
+#define RFCSR55_TX_POWER FIELD8(0x3f)
+#define RFCSR55_UNKNOWN FIELD8(0xc0)
+
+#define RFCSR57_DRV_CC FIELD8(0xfc)
+
/*
* RF registers
@@ -2206,28 +2377,67 @@ struct mac_iveiv_entry {
* The wordsize of the EEPROM is 16 bits.
*/
-/*
- * Chip ID
- */
-#define EEPROM_CHIP_ID 0x0000
+enum rt2800_eeprom_word {
+ EEPROM_CHIP_ID = 0,
+ EEPROM_VERSION,
+ EEPROM_MAC_ADDR_0,
+ EEPROM_MAC_ADDR_1,
+ EEPROM_MAC_ADDR_2,
+ EEPROM_NIC_CONF0,
+ EEPROM_NIC_CONF1,
+ EEPROM_FREQ,
+ EEPROM_LED_AG_CONF,
+ EEPROM_LED_ACT_CONF,
+ EEPROM_LED_POLARITY,
+ EEPROM_NIC_CONF2,
+ EEPROM_LNA,
+ EEPROM_RSSI_BG,
+ EEPROM_RSSI_BG2,
+ EEPROM_TXMIXER_GAIN_BG,
+ EEPROM_RSSI_A,
+ EEPROM_RSSI_A2,
+ EEPROM_TXMIXER_GAIN_A,
+ EEPROM_EIRP_MAX_TX_POWER,
+ EEPROM_TXPOWER_DELTA,
+ EEPROM_TXPOWER_BG1,
+ EEPROM_TXPOWER_BG2,
+ EEPROM_TSSI_BOUND_BG1,
+ EEPROM_TSSI_BOUND_BG2,
+ EEPROM_TSSI_BOUND_BG3,
+ EEPROM_TSSI_BOUND_BG4,
+ EEPROM_TSSI_BOUND_BG5,
+ EEPROM_TXPOWER_A1,
+ EEPROM_TXPOWER_A2,
+ EEPROM_TSSI_BOUND_A1,
+ EEPROM_TSSI_BOUND_A2,
+ EEPROM_TSSI_BOUND_A3,
+ EEPROM_TSSI_BOUND_A4,
+ EEPROM_TSSI_BOUND_A5,
+ EEPROM_TXPOWER_BYRATE,
+ EEPROM_BBP_START,
+
+ /* IDs for extended EEPROM format used by three-chain devices */
+ EEPROM_EXT_LNA2,
+ EEPROM_EXT_TXPOWER_BG3,
+ EEPROM_EXT_TXPOWER_A3,
+
+ /* New values must be added before this */
+ EEPROM_WORD_COUNT
+};
/*
* EEPROM Version
*/
-#define EEPROM_VERSION 0x0001
#define EEPROM_VERSION_FAE FIELD16(0x00ff)
#define EEPROM_VERSION_VERSION FIELD16(0xff00)
/*
* HW MAC address.
*/
-#define EEPROM_MAC_ADDR_0 0x0002
#define EEPROM_MAC_ADDR_BYTE0 FIELD16(0x00ff)
#define EEPROM_MAC_ADDR_BYTE1 FIELD16(0xff00)
-#define EEPROM_MAC_ADDR_1 0x0003
#define EEPROM_MAC_ADDR_BYTE2 FIELD16(0x00ff)
#define EEPROM_MAC_ADDR_BYTE3 FIELD16(0xff00)
-#define EEPROM_MAC_ADDR_2 0x0004
#define EEPROM_MAC_ADDR_BYTE4 FIELD16(0x00ff)
#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00)
@@ -2237,7 +2447,6 @@ struct mac_iveiv_entry {
* TXPATH: 1: 1T, 2: 2T, 3: 3T
* RF_TYPE: RFIC type
*/
-#define EEPROM_NIC_CONF0 0x001a
#define EEPROM_NIC_CONF0_RXPATH FIELD16(0x000f)
#define EEPROM_NIC_CONF0_TXPATH FIELD16(0x00f0)
#define EEPROM_NIC_CONF0_RF_TYPE FIELD16(0x0f00)
@@ -2261,7 +2470,6 @@ struct mac_iveiv_entry {
* BT_COEXIST: 0: disable, 1: enable
* DAC_TEST: 0: disable, 1: enable
*/
-#define EEPROM_NIC_CONF1 0x001b
#define EEPROM_NIC_CONF1_HW_RADIO FIELD16(0x0001)
#define EEPROM_NIC_CONF1_EXTERNAL_TX_ALC FIELD16(0x0002)
#define EEPROM_NIC_CONF1_EXTERNAL_LNA_2G FIELD16(0x0004)
@@ -2281,7 +2489,6 @@ struct mac_iveiv_entry {
/*
* EEPROM frequency
*/
-#define EEPROM_FREQ 0x001d
#define EEPROM_FREQ_OFFSET FIELD16(0x00ff)
#define EEPROM_FREQ_LED_MODE FIELD16(0x7f00)
#define EEPROM_FREQ_LED_POLARITY FIELD16(0x1000)
@@ -2298,9 +2505,6 @@ struct mac_iveiv_entry {
* POLARITY_GPIO_4: Polarity GPIO4 setting.
* LED_MODE: Led mode.
*/
-#define EEPROM_LED_AG_CONF 0x001e
-#define EEPROM_LED_ACT_CONF 0x001f
-#define EEPROM_LED_POLARITY 0x0020
#define EEPROM_LED_POLARITY_RDY_BG FIELD16(0x0001)
#define EEPROM_LED_POLARITY_RDY_A FIELD16(0x0002)
#define EEPROM_LED_POLARITY_ACT FIELD16(0x0004)
@@ -2317,7 +2521,6 @@ struct mac_iveiv_entry {
* TX_STREAM: 0: Reserved, 1: 1 Stream, 2: 2 Stream
* CRYSTAL: 00: Reserved, 01: One crystal, 10: Two crystal, 11: Reserved
*/
-#define EEPROM_NIC_CONF2 0x0021
#define EEPROM_NIC_CONF2_RX_STREAM FIELD16(0x000f)
#define EEPROM_NIC_CONF2_TX_STREAM FIELD16(0x00f0)
#define EEPROM_NIC_CONF2_CRYSTAL FIELD16(0x0600)
@@ -2325,54 +2528,46 @@ struct mac_iveiv_entry {
/*
* EEPROM LNA
*/
-#define EEPROM_LNA 0x0022
#define EEPROM_LNA_BG FIELD16(0x00ff)
#define EEPROM_LNA_A0 FIELD16(0xff00)
/*
* EEPROM RSSI BG offset
*/
-#define EEPROM_RSSI_BG 0x0023
#define EEPROM_RSSI_BG_OFFSET0 FIELD16(0x00ff)
#define EEPROM_RSSI_BG_OFFSET1 FIELD16(0xff00)
/*
* EEPROM RSSI BG2 offset
*/
-#define EEPROM_RSSI_BG2 0x0024
#define EEPROM_RSSI_BG2_OFFSET2 FIELD16(0x00ff)
#define EEPROM_RSSI_BG2_LNA_A1 FIELD16(0xff00)
/*
* EEPROM TXMIXER GAIN BG offset (note overlaps with EEPROM RSSI BG2).
*/
-#define EEPROM_TXMIXER_GAIN_BG 0x0024
#define EEPROM_TXMIXER_GAIN_BG_VAL FIELD16(0x0007)
/*
* EEPROM RSSI A offset
*/
-#define EEPROM_RSSI_A 0x0025
#define EEPROM_RSSI_A_OFFSET0 FIELD16(0x00ff)
#define EEPROM_RSSI_A_OFFSET1 FIELD16(0xff00)
/*
* EEPROM RSSI A2 offset
*/
-#define EEPROM_RSSI_A2 0x0026
#define EEPROM_RSSI_A2_OFFSET2 FIELD16(0x00ff)
#define EEPROM_RSSI_A2_LNA_A2 FIELD16(0xff00)
/*
* EEPROM TXMIXER GAIN A offset (note overlaps with EEPROM RSSI A2).
*/
-#define EEPROM_TXMIXER_GAIN_A 0x0026
#define EEPROM_TXMIXER_GAIN_A_VAL FIELD16(0x0007)
/*
* EEPROM EIRP Maximum TX power values(unit: dbm)
*/
-#define EEPROM_EIRP_MAX_TX_POWER 0x0027
#define EEPROM_EIRP_MAX_TX_POWER_2GHZ FIELD16(0x00ff)
#define EEPROM_EIRP_MAX_TX_POWER_5GHZ FIELD16(0xff00)
@@ -2383,7 +2578,6 @@ struct mac_iveiv_entry {
* TYPE: 1: Plus the delta value, 0: minus the delta value
* ENABLE: enable tx power compensation for 40BW
*/
-#define EEPROM_TXPOWER_DELTA 0x0028
#define EEPROM_TXPOWER_DELTA_VALUE_2G FIELD16(0x003f)
#define EEPROM_TXPOWER_DELTA_TYPE_2G FIELD16(0x0040)
#define EEPROM_TXPOWER_DELTA_ENABLE_2G FIELD16(0x0080)
@@ -2394,8 +2588,6 @@ struct mac_iveiv_entry {
/*
* EEPROM TXPOWER 802.11BG
*/
-#define EEPROM_TXPOWER_BG1 0x0029
-#define EEPROM_TXPOWER_BG2 0x0030
#define EEPROM_TXPOWER_BG_SIZE 7
#define EEPROM_TXPOWER_BG_1 FIELD16(0x00ff)
#define EEPROM_TXPOWER_BG_2 FIELD16(0xff00)
@@ -2407,7 +2599,6 @@ struct mac_iveiv_entry {
* MINUS3: If the actual TSSI is below this boundary, tx power needs to be
* reduced by (agc_step * -3)
*/
-#define EEPROM_TSSI_BOUND_BG1 0x0037
#define EEPROM_TSSI_BOUND_BG1_MINUS4 FIELD16(0x00ff)
#define EEPROM_TSSI_BOUND_BG1_MINUS3 FIELD16(0xff00)
@@ -2418,7 +2609,6 @@ struct mac_iveiv_entry {
* MINUS1: If the actual TSSI is below this boundary, tx power needs to be
* reduced by (agc_step * -1)
*/
-#define EEPROM_TSSI_BOUND_BG2 0x0038
#define EEPROM_TSSI_BOUND_BG2_MINUS2 FIELD16(0x00ff)
#define EEPROM_TSSI_BOUND_BG2_MINUS1 FIELD16(0xff00)
@@ -2428,7 +2618,6 @@ struct mac_iveiv_entry {
* PLUS1: If the actual TSSI is above this boundary, tx power needs to be
* increased by (agc_step * 1)
*/
-#define EEPROM_TSSI_BOUND_BG3 0x0039
#define EEPROM_TSSI_BOUND_BG3_REF FIELD16(0x00ff)
#define EEPROM_TSSI_BOUND_BG3_PLUS1 FIELD16(0xff00)
@@ -2439,7 +2628,6 @@ struct mac_iveiv_entry {
* PLUS3: If the actual TSSI is above this boundary, tx power needs to be
* increased by (agc_step * 3)
*/
-#define EEPROM_TSSI_BOUND_BG4 0x003a
#define EEPROM_TSSI_BOUND_BG4_PLUS2 FIELD16(0x00ff)
#define EEPROM_TSSI_BOUND_BG4_PLUS3 FIELD16(0xff00)
@@ -2449,19 +2637,20 @@ struct mac_iveiv_entry {
* increased by (agc_step * 4)
* AGC_STEP: Temperature compensation step.
*/
-#define EEPROM_TSSI_BOUND_BG5 0x003b
#define EEPROM_TSSI_BOUND_BG5_PLUS4 FIELD16(0x00ff)
#define EEPROM_TSSI_BOUND_BG5_AGC_STEP FIELD16(0xff00)
/*
* EEPROM TXPOWER 802.11A
*/
-#define EEPROM_TXPOWER_A1 0x003c
-#define EEPROM_TXPOWER_A2 0x0053
#define EEPROM_TXPOWER_A_SIZE 6
#define EEPROM_TXPOWER_A_1 FIELD16(0x00ff)
#define EEPROM_TXPOWER_A_2 FIELD16(0xff00)
+/* EEPROM_TXPOWER_{A,G} fields for RT3593 */
+#define EEPROM_TXPOWER_ALC FIELD8(0x1f)
+#define EEPROM_TXPOWER_FINE_CTRL FIELD8(0xe0)
+
/*
* EEPROM temperature compensation boundaries 802.11A
* MINUS4: If the actual TSSI is below this boundary, tx power needs to be
@@ -2469,7 +2658,6 @@ struct mac_iveiv_entry {
* MINUS3: If the actual TSSI is below this boundary, tx power needs to be
* reduced by (agc_step * -3)
*/
-#define EEPROM_TSSI_BOUND_A1 0x006a
#define EEPROM_TSSI_BOUND_A1_MINUS4 FIELD16(0x00ff)
#define EEPROM_TSSI_BOUND_A1_MINUS3 FIELD16(0xff00)
@@ -2480,7 +2668,6 @@ struct mac_iveiv_entry {
* MINUS1: If the actual TSSI is below this boundary, tx power needs to be
* reduced by (agc_step * -1)
*/
-#define EEPROM_TSSI_BOUND_A2 0x006b
#define EEPROM_TSSI_BOUND_A2_MINUS2 FIELD16(0x00ff)
#define EEPROM_TSSI_BOUND_A2_MINUS1 FIELD16(0xff00)
@@ -2490,7 +2677,6 @@ struct mac_iveiv_entry {
* PLUS1: If the actual TSSI is above this boundary, tx power needs to be
* increased by (agc_step * 1)
*/
-#define EEPROM_TSSI_BOUND_A3 0x006c
#define EEPROM_TSSI_BOUND_A3_REF FIELD16(0x00ff)
#define EEPROM_TSSI_BOUND_A3_PLUS1 FIELD16(0xff00)
@@ -2501,7 +2687,6 @@ struct mac_iveiv_entry {
* PLUS3: If the actual TSSI is above this boundary, tx power needs to be
* increased by (agc_step * 3)
*/
-#define EEPROM_TSSI_BOUND_A4 0x006d
#define EEPROM_TSSI_BOUND_A4_PLUS2 FIELD16(0x00ff)
#define EEPROM_TSSI_BOUND_A4_PLUS3 FIELD16(0xff00)
@@ -2511,14 +2696,12 @@ struct mac_iveiv_entry {
* increased by (agc_step * 4)
* AGC_STEP: Temperature compensation step.
*/
-#define EEPROM_TSSI_BOUND_A5 0x006e
#define EEPROM_TSSI_BOUND_A5_PLUS4 FIELD16(0x00ff)
#define EEPROM_TSSI_BOUND_A5_AGC_STEP FIELD16(0xff00)
/*
* EEPROM TXPOWER by rate: tx power per tx rate for HT20 mode
*/
-#define EEPROM_TXPOWER_BYRATE 0x006f
#define EEPROM_TXPOWER_BYRATE_SIZE 9
#define EEPROM_TXPOWER_BYRATE_RATE0 FIELD16(0x000f)
@@ -2529,11 +2712,14 @@ struct mac_iveiv_entry {
/*
* EEPROM BBP.
*/
-#define EEPROM_BBP_START 0x0078
#define EEPROM_BBP_SIZE 16
#define EEPROM_BBP_VALUE FIELD16(0x00ff)
#define EEPROM_BBP_REG_ID FIELD16(0xff00)
+/* EEPROM_EXT_LNA2 */
+#define EEPROM_EXT_LNA2_A1 FIELD16(0x00ff)
+#define EEPROM_EXT_LNA2_A2 FIELD16(0xff00)
+
/*
* EEPROM IQ Calibration, unlike other entries those are byte addresses.
*/
@@ -2610,6 +2796,7 @@ struct mac_iveiv_entry {
#define MCU_RADAR 0x60
#define MCU_BOOT_SIGNAL 0x72
#define MCU_ANT_SELECT 0X73
+#define MCU_FREQ_OFFSET 0x74
#define MCU_BBP_SIGNAL 0x80
#define MCU_POWER_SAVE 0x83
#define MCU_BAND_SELECT 0x91
@@ -2630,6 +2817,7 @@ struct mac_iveiv_entry {
#define TXWI_DESC_SIZE_5WORDS (5 * sizeof(__le32))
#define RXWI_DESC_SIZE_4WORDS (4 * sizeof(__le32))
+#define RXWI_DESC_SIZE_5WORDS (5 * sizeof(__le32))
#define RXWI_DESC_SIZE_6WORDS (6 * sizeof(__le32))
/*
@@ -2750,18 +2938,15 @@ struct mac_iveiv_entry {
#define MAX_A_TXPOWER 15
#define DEFAULT_TXPOWER 5
+#define MIN_A_TXPOWER_3593 0
+#define MAX_A_TXPOWER_3593 31
+
#define TXPOWER_G_FROM_DEV(__txpower) \
((__txpower) > MAX_G_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
-#define TXPOWER_G_TO_DEV(__txpower) \
- clamp_t(char, __txpower, MIN_G_TXPOWER, MAX_G_TXPOWER)
-
#define TXPOWER_A_FROM_DEV(__txpower) \
((__txpower) > MAX_A_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
-#define TXPOWER_A_TO_DEV(__txpower) \
- clamp_t(char, __txpower, MIN_A_TXPOWER, MAX_A_TXPOWER)
-
/*
 * Board's maximum TX power limitation
*/
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 1b41c8eda12d..95e6e61c3de0 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -221,6 +221,157 @@ static void rt2800_rf_write(struct rt2x00_dev *rt2x00dev,
mutex_unlock(&rt2x00dev->csr_mutex);
}
+static const unsigned int rt2800_eeprom_map[EEPROM_WORD_COUNT] = {
+ [EEPROM_CHIP_ID] = 0x0000,
+ [EEPROM_VERSION] = 0x0001,
+ [EEPROM_MAC_ADDR_0] = 0x0002,
+ [EEPROM_MAC_ADDR_1] = 0x0003,
+ [EEPROM_MAC_ADDR_2] = 0x0004,
+ [EEPROM_NIC_CONF0] = 0x001a,
+ [EEPROM_NIC_CONF1] = 0x001b,
+ [EEPROM_FREQ] = 0x001d,
+ [EEPROM_LED_AG_CONF] = 0x001e,
+ [EEPROM_LED_ACT_CONF] = 0x001f,
+ [EEPROM_LED_POLARITY] = 0x0020,
+ [EEPROM_NIC_CONF2] = 0x0021,
+ [EEPROM_LNA] = 0x0022,
+ [EEPROM_RSSI_BG] = 0x0023,
+ [EEPROM_RSSI_BG2] = 0x0024,
+ [EEPROM_TXMIXER_GAIN_BG] = 0x0024, /* overlaps with RSSI_BG2 */
+ [EEPROM_RSSI_A] = 0x0025,
+ [EEPROM_RSSI_A2] = 0x0026,
+ [EEPROM_TXMIXER_GAIN_A] = 0x0026, /* overlaps with RSSI_A2 */
+ [EEPROM_EIRP_MAX_TX_POWER] = 0x0027,
+ [EEPROM_TXPOWER_DELTA] = 0x0028,
+ [EEPROM_TXPOWER_BG1] = 0x0029,
+ [EEPROM_TXPOWER_BG2] = 0x0030,
+ [EEPROM_TSSI_BOUND_BG1] = 0x0037,
+ [EEPROM_TSSI_BOUND_BG2] = 0x0038,
+ [EEPROM_TSSI_BOUND_BG3] = 0x0039,
+ [EEPROM_TSSI_BOUND_BG4] = 0x003a,
+ [EEPROM_TSSI_BOUND_BG5] = 0x003b,
+ [EEPROM_TXPOWER_A1] = 0x003c,
+ [EEPROM_TXPOWER_A2] = 0x0053,
+ [EEPROM_TSSI_BOUND_A1] = 0x006a,
+ [EEPROM_TSSI_BOUND_A2] = 0x006b,
+ [EEPROM_TSSI_BOUND_A3] = 0x006c,
+ [EEPROM_TSSI_BOUND_A4] = 0x006d,
+ [EEPROM_TSSI_BOUND_A5] = 0x006e,
+ [EEPROM_TXPOWER_BYRATE] = 0x006f,
+ [EEPROM_BBP_START] = 0x0078,
+};
+
+static const unsigned int rt2800_eeprom_map_ext[EEPROM_WORD_COUNT] = {
+ [EEPROM_CHIP_ID] = 0x0000,
+ [EEPROM_VERSION] = 0x0001,
+ [EEPROM_MAC_ADDR_0] = 0x0002,
+ [EEPROM_MAC_ADDR_1] = 0x0003,
+ [EEPROM_MAC_ADDR_2] = 0x0004,
+ [EEPROM_NIC_CONF0] = 0x001a,
+ [EEPROM_NIC_CONF1] = 0x001b,
+ [EEPROM_NIC_CONF2] = 0x001c,
+ [EEPROM_EIRP_MAX_TX_POWER] = 0x0020,
+ [EEPROM_FREQ] = 0x0022,
+ [EEPROM_LED_AG_CONF] = 0x0023,
+ [EEPROM_LED_ACT_CONF] = 0x0024,
+ [EEPROM_LED_POLARITY] = 0x0025,
+ [EEPROM_LNA] = 0x0026,
+ [EEPROM_EXT_LNA2] = 0x0027,
+ [EEPROM_RSSI_BG] = 0x0028,
+ [EEPROM_TXPOWER_DELTA] = 0x0028, /* Overlaps with RSSI_BG */
+ [EEPROM_RSSI_BG2] = 0x0029,
+ [EEPROM_TXMIXER_GAIN_BG] = 0x0029, /* Overlaps with RSSI_BG2 */
+ [EEPROM_RSSI_A] = 0x002a,
+ [EEPROM_RSSI_A2] = 0x002b,
+ [EEPROM_TXMIXER_GAIN_A] = 0x002b, /* Overlaps with RSSI_A2 */
+ [EEPROM_TXPOWER_BG1] = 0x0030,
+ [EEPROM_TXPOWER_BG2] = 0x0037,
+ [EEPROM_EXT_TXPOWER_BG3] = 0x003e,
+ [EEPROM_TSSI_BOUND_BG1] = 0x0045,
+ [EEPROM_TSSI_BOUND_BG2] = 0x0046,
+ [EEPROM_TSSI_BOUND_BG3] = 0x0047,
+ [EEPROM_TSSI_BOUND_BG4] = 0x0048,
+ [EEPROM_TSSI_BOUND_BG5] = 0x0049,
+ [EEPROM_TXPOWER_A1] = 0x004b,
+ [EEPROM_TXPOWER_A2] = 0x0065,
+ [EEPROM_EXT_TXPOWER_A3] = 0x007f,
+ [EEPROM_TSSI_BOUND_A1] = 0x009a,
+ [EEPROM_TSSI_BOUND_A2] = 0x009b,
+ [EEPROM_TSSI_BOUND_A3] = 0x009c,
+ [EEPROM_TSSI_BOUND_A4] = 0x009d,
+ [EEPROM_TSSI_BOUND_A5] = 0x009e,
+ [EEPROM_TXPOWER_BYRATE] = 0x00a0,
+};
+
+static unsigned int rt2800_eeprom_word_index(struct rt2x00_dev *rt2x00dev,
+ const enum rt2800_eeprom_word word)
+{
+ const unsigned int *map;
+ unsigned int index;
+
+ if (WARN_ONCE(word >= EEPROM_WORD_COUNT,
+ "%s: invalid EEPROM word %d\n",
+ wiphy_name(rt2x00dev->hw->wiphy), word))
+ return 0;
+
+ if (rt2x00_rt(rt2x00dev, RT3593))
+ map = rt2800_eeprom_map_ext;
+ else
+ map = rt2800_eeprom_map;
+
+ index = map[word];
+
+ /* Index 0 is valid only for EEPROM_CHIP_ID.
+ * Otherwise it means that the offset of the
+ * given word is not initialized in the map,
+ * or that the field is not usable on the
+ * current chipset.
+ */
+ WARN_ONCE(word != EEPROM_CHIP_ID && index == 0,
+ "%s: invalid access of EEPROM word %d\n",
+ wiphy_name(rt2x00dev->hw->wiphy), word);
+
+ return index;
+}
+
+static void *rt2800_eeprom_addr(struct rt2x00_dev *rt2x00dev,
+ const enum rt2800_eeprom_word word)
+{
+ unsigned int index;
+
+ index = rt2800_eeprom_word_index(rt2x00dev, word);
+ return rt2x00_eeprom_addr(rt2x00dev, index);
+}
+
+static void rt2800_eeprom_read(struct rt2x00_dev *rt2x00dev,
+ const enum rt2800_eeprom_word word, u16 *data)
+{
+ unsigned int index;
+
+ index = rt2800_eeprom_word_index(rt2x00dev, word);
+ rt2x00_eeprom_read(rt2x00dev, index, data);
+}
+
+static void rt2800_eeprom_write(struct rt2x00_dev *rt2x00dev,
+ const enum rt2800_eeprom_word word, u16 data)
+{
+ unsigned int index;
+
+ index = rt2800_eeprom_word_index(rt2x00dev, word);
+ rt2x00_eeprom_write(rt2x00dev, index, data);
+}
+
+static void rt2800_eeprom_read_from_array(struct rt2x00_dev *rt2x00dev,
+ const enum rt2800_eeprom_word array,
+ unsigned int offset,
+ u16 *data)
+{
+ unsigned int index;
+
+ index = rt2800_eeprom_word_index(rt2x00dev, array);
+ rt2x00_eeprom_read(rt2x00dev, index + offset, data);
+}
+
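
The block above replaces fixed EEPROM offsets with a logical enum indexed through a per-chip table, so the RT3593's extended layout and the legacy layout can share one driver. A user-space sketch of the indirection; the word list is truncated, and the two EEPROM_FREQ offsets are taken from the maps above:

#include <stdio.h>

enum eeprom_word { W_CHIP_ID, W_VERSION, W_FREQ, W_COUNT };

/* Two layouts keyed by the same logical IDs; the driver picks
 * rt2800_eeprom_map or rt2800_eeprom_map_ext by chipset. The FREQ
 * offsets below are the ones visible in the maps above. */
static const unsigned int map_std[W_COUNT] = {
	[W_CHIP_ID] = 0x0000, [W_VERSION] = 0x0001, [W_FREQ] = 0x001d,
};
static const unsigned int map_ext[W_COUNT] = {
	[W_CHIP_ID] = 0x0000, [W_VERSION] = 0x0001, [W_FREQ] = 0x0022,
};

/* Callers name the word; only this helper knows physical offsets. */
static unsigned int word_index(int is_ext, enum eeprom_word w)
{
	return (is_ext ? map_ext : map_std)[w];
}

int main(void)
{
	printf("FREQ: std @ 0x%04x, ext @ 0x%04x\n",
	       word_index(0, W_FREQ), word_index(1, W_FREQ));
	return 0;
}
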
static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
@@ -370,6 +521,29 @@ void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev)
}
EXPORT_SYMBOL_GPL(rt2800_disable_wpdma);
+void rt2800_get_txwi_rxwi_size(struct rt2x00_dev *rt2x00dev,
+ unsigned short *txwi_size,
+ unsigned short *rxwi_size)
+{
+ switch (rt2x00dev->chip.rt) {
+ case RT3593:
+ *txwi_size = TXWI_DESC_SIZE_4WORDS;
+ *rxwi_size = RXWI_DESC_SIZE_5WORDS;
+ break;
+
+ case RT5592:
+ *txwi_size = TXWI_DESC_SIZE_5WORDS;
+ *rxwi_size = RXWI_DESC_SIZE_6WORDS;
+ break;
+
+ default:
+ *txwi_size = TXWI_DESC_SIZE_4WORDS;
+ *rxwi_size = RXWI_DESC_SIZE_4WORDS;
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rt2800_get_txwi_rxwi_size);
+
static bool rt2800_check_firmware_crc(const u8 *data, const size_t len)
{
u16 fw_crc;
@@ -609,16 +783,16 @@ static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, u32 rxwi_w2)
u8 offset2;
if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &eeprom);
offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET0);
offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET1);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom);
offset2 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG2_OFFSET2);
} else {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &eeprom);
offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A_OFFSET0);
offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A_OFFSET1);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom);
offset2 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A2_OFFSET2);
}
@@ -766,6 +940,18 @@ void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi)
}
EXPORT_SYMBOL_GPL(rt2800_txdone_entry);
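+/* Resolve beacon base addresses through a helper so that any
+ * chipset-specific beacon memory layout can be handled in one place.
+ */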
+static unsigned int rt2800_hw_beacon_base(struct rt2x00_dev *rt2x00dev,
+ unsigned int index)
+{
+ return HW_BEACON_BASE(index);
+}
+
+static inline u8 rt2800_get_beacon_offset(struct rt2x00_dev *rt2x00dev,
+ unsigned int index)
+{
+ return BEACON_BASE_TO_OFFSET(rt2800_hw_beacon_base(rt2x00dev, index));
+}
+
void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
@@ -818,7 +1004,8 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
return;
}
- beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
+ beacon_base = rt2800_hw_beacon_base(rt2x00dev, entry->entry_idx);
+
rt2800_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data,
entry->skb->len + padding_len);
@@ -837,10 +1024,13 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
EXPORT_SYMBOL_GPL(rt2800_write_beacon);
static inline void rt2800_clear_beacon_register(struct rt2x00_dev *rt2x00dev,
- unsigned int beacon_base)
+ unsigned int index)
{
int i;
const int txwi_desc_size = rt2x00dev->bcn->winfo_size;
+ unsigned int beacon_base;
+
+ beacon_base = rt2800_hw_beacon_base(rt2x00dev, index);
/*
* For the Beacon base registers we only need to clear
@@ -867,8 +1057,7 @@ void rt2800_clear_beacon(struct queue_entry *entry)
/*
* Clear beacon.
*/
- rt2800_clear_beacon_register(rt2x00dev,
- HW_BEACON_OFFSET(entry->entry_idx));
+ rt2800_clear_beacon_register(rt2x00dev, entry->entry_idx);
/*
* Enable beaconing again.
@@ -890,6 +1079,9 @@ const struct rt2x00debug rt2800_rt2x00debug = {
.word_count = CSR_REG_SIZE / sizeof(u32),
},
.eeprom = {
+ /* NOTE: The local EEPROM access functions can't
+ * be used here; use the generic versions instead.
+ */
.read = rt2x00_eeprom_read,
.write = rt2x00_eeprom_write,
.word_base = EEPROM_BASE,
@@ -1547,7 +1739,7 @@ static void rt2800_config_3572bt_ant(struct rt2x00_dev *rt2x00dev)
led_r_mode = rt2x00_get_field32(reg, LED_CFG_LED_POLAR) ? 0 : 3;
if (led_g_mode != rt2x00_get_field32(reg, LED_CFG_G_LED_MODE) ||
led_r_mode != rt2x00_get_field32(reg, LED_CFG_R_LED_MODE)) {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
led_ctrl = rt2x00_get_field16(eeprom, EEPROM_FREQ_LED_MODE);
if (led_ctrl == 0 || led_ctrl > 0x40) {
rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, led_g_mode);
@@ -1609,7 +1801,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
break;
case 3:
- rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
+ rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
break;
}
@@ -1622,7 +1814,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
rt2x00_rt(rt2x00dev, RT3090) ||
rt2x00_rt(rt2x00dev, RT3352) ||
rt2x00_rt(rt2x00dev, RT3390)) {
- rt2x00_eeprom_read(rt2x00dev,
+ rt2800_eeprom_read(rt2x00dev,
EEPROM_NIC_CONF1, &eeprom);
if (rt2x00_get_field16(eeprom,
EEPROM_NIC_CONF1_ANT_DIVERSITY))
@@ -1649,6 +1841,13 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
rt2800_bbp_write(rt2x00dev, 3, r3);
rt2800_bbp_write(rt2x00dev, 1, r1);
+
+ if (rt2x00_rt(rt2x00dev, RT3593)) {
+ if (ant->rx_chain_num == 1)
+ rt2800_bbp_write(rt2x00dev, 86, 0x00);
+ else
+ rt2800_bbp_write(rt2x00dev, 86, 0x46);
+ }
}
EXPORT_SYMBOL_GPL(rt2800_config_ant);
@@ -1659,22 +1858,73 @@ static void rt2800_config_lna_gain(struct rt2x00_dev *rt2x00dev,
short lna_gain;
if (libconf->rf.channel <= 14) {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_BG);
} else if (libconf->rf.channel <= 64) {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_A0);
} else if (libconf->rf.channel <= 128) {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom);
- lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG2_LNA_A1);
+ if (rt2x00_rt(rt2x00dev, RT3593)) {
+ rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2, &eeprom);
+ lna_gain = rt2x00_get_field16(eeprom,
+ EEPROM_EXT_LNA2_A1);
+ } else {
+ rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom);
+ lna_gain = rt2x00_get_field16(eeprom,
+ EEPROM_RSSI_BG2_LNA_A1);
+ }
} else {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom);
- lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_A2_LNA_A2);
+ if (rt2x00_rt(rt2x00dev, RT3593)) {
+ rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2, &eeprom);
+ lna_gain = rt2x00_get_field16(eeprom,
+ EEPROM_EXT_LNA2_A2);
+ } else {
+ rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom);
+ lna_gain = rt2x00_get_field16(eeprom,
+ EEPROM_RSSI_A2_LNA_A2);
+ }
}
rt2x00dev->lna_gain = lna_gain;
}
+#define FREQ_OFFSET_BOUND 0x5f
+
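+/* Clamp the frequency offset to FREQ_OFFSET_BOUND and program it into
+ * RFCSR 17. USB devices hand the update over to the MCU; otherwise
+ * the code is stepped towards the target one unit at a time with a
+ * sleep in between, presumably to avoid glitching the oscillator.
+ */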
+static void rt2800_adjust_freq_offset(struct rt2x00_dev *rt2x00dev)
+{
+ u8 freq_offset, prev_freq_offset;
+ u8 rfcsr, prev_rfcsr;
+
+ freq_offset = rt2x00_get_field8(rt2x00dev->freq_offset, RFCSR17_CODE);
+ freq_offset = min_t(u8, freq_offset, FREQ_OFFSET_BOUND);
+
+ rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
+ prev_rfcsr = rfcsr;
+
+ rt2x00_set_field8(&rfcsr, RFCSR17_CODE, freq_offset);
+ if (rfcsr == prev_rfcsr)
+ return;
+
+ if (rt2x00_is_usb(rt2x00dev)) {
+ rt2800_mcu_request(rt2x00dev, MCU_FREQ_OFFSET, 0xff,
+ freq_offset, prev_rfcsr);
+ return;
+ }
+
+ prev_freq_offset = rt2x00_get_field8(prev_rfcsr, RFCSR17_CODE);
+ while (prev_freq_offset != freq_offset) {
+ if (prev_freq_offset < freq_offset)
+ prev_freq_offset++;
+ else
+ prev_freq_offset--;
+
+ rt2x00_set_field8(&rfcsr, RFCSR17_CODE, prev_freq_offset);
+ rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
+
+ usleep_range(1000, 1500);
+ }
+}
+
static void rt2800_config_channel_rf2xxx(struct rt2x00_dev *rt2x00dev,
struct ieee80211_conf *conf,
struct rf_channel *rf,
@@ -1993,22 +2243,306 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
}
-#define POWER_BOUND 0x27
-#define POWER_BOUND_5G 0x2b
-#define FREQ_OFFSET_BOUND 0x5f
-
-static void rt2800_adjust_freq_offset(struct rt2x00_dev *rt2x00dev)
+static void rt2800_config_channel_rf3053(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_conf *conf,
+ struct rf_channel *rf,
+ struct channel_info *info)
{
+ struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
+ u8 txrx_agc_fc;
+ u8 txrx_h20m;
u8 rfcsr;
+ u8 bbp;
+ const bool txbf_enabled = false; /* TODO */
- rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
- if (rt2x00dev->freq_offset > FREQ_OFFSET_BOUND)
- rt2x00_set_field8(&rfcsr, RFCSR17_CODE, FREQ_OFFSET_BOUND);
+ /* TODO: use TX{0,1,2}FinePowerControl values from EEPROM */
+ rt2800_bbp_read(rt2x00dev, 109, &bbp);
+ rt2x00_set_field8(&bbp, BBP109_TX0_POWER, 0);
+ rt2x00_set_field8(&bbp, BBP109_TX1_POWER, 0);
+ rt2800_bbp_write(rt2x00dev, 109, bbp);
+
+ rt2800_bbp_read(rt2x00dev, 110, &bbp);
+ rt2x00_set_field8(&bbp, BBP110_TX2_POWER, 0);
+ rt2800_bbp_write(rt2x00dev, 110, bbp);
+
+ if (rf->channel <= 14) {
+ /* Restore BBP 25 & 26 for 2.4 GHz */
+ rt2800_bbp_write(rt2x00dev, 25, drv_data->bbp25);
+ rt2800_bbp_write(rt2x00dev, 26, drv_data->bbp26);
+ } else {
+ /* Hardcode BBP 25 & 26 for 5 GHz */
+
+ /* Enable IQ Phase correction */
+ rt2800_bbp_write(rt2x00dev, 25, 0x09);
+ /* Setup IQ Phase correction value */
+ rt2800_bbp_write(rt2x00dev, 26, 0xff);
+ }
+
+ rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
+ rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3 & 0xf);
+
+ rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR11_R, (rf->rf2 & 0x3));
+ rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR11_PLL_IDOH, 1);
+ if (rf->channel <= 14)
+ rt2x00_set_field8(&rfcsr, RFCSR11_PLL_MOD, 1);
else
- rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
- rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR11_PLL_MOD, 2);
+ rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 53, &rfcsr);
+ if (rf->channel <= 14) {
+ rfcsr = 0;
+ rt2x00_set_field8(&rfcsr, RFCSR53_TX_POWER,
+ info->default_power1 & 0x1f);
+ } else {
+ if (rt2x00_is_usb(rt2x00dev))
+ rfcsr = 0x40;
+
+ rt2x00_set_field8(&rfcsr, RFCSR53_TX_POWER,
+ ((info->default_power1 & 0x18) << 1) |
+ (info->default_power1 & 7));
+ }
+ rt2800_rfcsr_write(rt2x00dev, 53, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 55, &rfcsr);
+ if (rf->channel <= 14) {
+ rfcsr = 0;
+ rt2x00_set_field8(&rfcsr, RFCSR55_TX_POWER,
+ info->default_power2 & 0x1f);
+ } else {
+ if (rt2x00_is_usb(rt2x00dev))
+ rfcsr = 0x40;
+
+ rt2x00_set_field8(&rfcsr, RFCSR55_TX_POWER,
+ ((info->default_power2 & 0x18) << 1) |
+ (info->default_power2 & 7));
+ }
+ rt2800_rfcsr_write(rt2x00dev, 55, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 54, &rfcsr);
+ if (rf->channel <= 14) {
+ rfcsr = 0;
+ rt2x00_set_field8(&rfcsr, RFCSR54_TX_POWER,
+ info->default_power3 & 0x1f);
+ } else {
+ if (rt2x00_is_usb(rt2x00dev))
+ rfcsr = 0x40;
+
+ rt2x00_set_field8(&rfcsr, RFCSR54_TX_POWER,
+ ((info->default_power3 & 0x18) << 1) |
+ (info->default_power3 & 7));
+ }
+ rt2800_rfcsr_write(rt2x00dev, 54, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);
+
+ switch (rt2x00dev->default_ant.tx_chain_num) {
+ case 3:
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1);
+ /* fallthrough */
+ case 2:
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
+ /* fallthrough */
+ case 1:
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
+ break;
+ }
+
+ switch (rt2x00dev->default_ant.rx_chain_num) {
+ case 3:
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1);
+ /* fallthrough */
+ case 2:
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
+ /* fallthrough */
+ case 1:
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
+ break;
+ }
+ rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
+
+ rt2800_adjust_freq_offset(rt2x00dev);
+
+ if (conf_is_ht40(conf)) {
+ txrx_agc_fc = rt2x00_get_field8(drv_data->calibration_bw40,
+ RFCSR24_TX_AGC_FC);
+ txrx_h20m = rt2x00_get_field8(drv_data->calibration_bw40,
+ RFCSR24_TX_H20M);
+ } else {
+ txrx_agc_fc = rt2x00_get_field8(drv_data->calibration_bw20,
+ RFCSR24_TX_AGC_FC);
+ txrx_h20m = rt2x00_get_field8(drv_data->calibration_bw20,
+ RFCSR24_TX_H20M);
+ }
+
+ /* NOTE: the reference driver does not write the new value
+ * back to RFCSR 32
+ */
+ rt2800_rfcsr_read(rt2x00dev, 32, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR32_TX_AGC_FC, txrx_agc_fc);
+
+ if (rf->channel <= 14)
+ rfcsr = 0xa0;
+ else
+ rfcsr = 0x80;
+ rt2800_rfcsr_write(rt2x00dev, 31, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, txrx_h20m);
+ rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, txrx_h20m);
+ rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+
+ /* Band selection */
+ rt2800_rfcsr_read(rt2x00dev, 36, &rfcsr);
+ if (rf->channel <= 14)
+ rt2x00_set_field8(&rfcsr, RFCSR36_RF_BS, 1);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR36_RF_BS, 0);
+ rt2800_rfcsr_write(rt2x00dev, 36, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 34, &rfcsr);
+ if (rf->channel <= 14)
+ rfcsr = 0x3c;
+ else
+ rfcsr = 0x20;
+ rt2800_rfcsr_write(rt2x00dev, 34, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr);
+ if (rf->channel <= 14)
+ rfcsr = 0x1a;
+ else
+ rfcsr = 0x12;
+ rt2800_rfcsr_write(rt2x00dev, 12, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
+ if (rf->channel >= 1 && rf->channel <= 14)
+ rt2x00_set_field8(&rfcsr, RFCSR6_VCO_IC, 1);
+ else if (rf->channel >= 36 && rf->channel <= 64)
+ rt2x00_set_field8(&rfcsr, RFCSR6_VCO_IC, 2);
+ else if (rf->channel >= 100 && rf->channel <= 128)
+ rt2x00_set_field8(&rfcsr, RFCSR6_VCO_IC, 2);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR6_VCO_IC, 1);
+ rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR30_RX_VCM, 2);
+ rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+
+ rt2800_rfcsr_write(rt2x00dev, 46, 0x60);
+
+ if (rf->channel <= 14) {
+ rt2800_rfcsr_write(rt2x00dev, 10, 0xd3);
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x12);
+ } else {
+ rt2800_rfcsr_write(rt2x00dev, 10, 0xd8);
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x23);
+ }
+
+ rt2800_rfcsr_read(rt2x00dev, 51, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR51_BITS01, 1);
+ rt2800_rfcsr_write(rt2x00dev, 51, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 51, &rfcsr);
+ if (rf->channel <= 14) {
+ rt2x00_set_field8(&rfcsr, RFCSR51_BITS24, 5);
+ rt2x00_set_field8(&rfcsr, RFCSR51_BITS57, 3);
+ } else {
+ rt2x00_set_field8(&rfcsr, RFCSR51_BITS24, 4);
+ rt2x00_set_field8(&rfcsr, RFCSR51_BITS57, 2);
+ }
+ rt2800_rfcsr_write(rt2x00dev, 51, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr);
+ if (rf->channel <= 14)
+ rt2x00_set_field8(&rfcsr, RFCSR49_TX_LO1_IC, 3);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR49_TX_LO1_IC, 2);
+
+ if (txbf_enabled)
+ rt2x00_set_field8(&rfcsr, RFCSR49_TX_DIV, 1);
+
+ rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR50_TX_LO1_EN, 0);
+ rt2800_rfcsr_write(rt2x00dev, 50, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 57, &rfcsr);
+ if (rf->channel <= 14)
+ rt2x00_set_field8(&rfcsr, RFCSR57_DRV_CC, 0x1b);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR57_DRV_CC, 0x0f);
+ rt2800_rfcsr_write(rt2x00dev, 57, rfcsr);
+
+ if (rf->channel <= 14) {
+ rt2800_rfcsr_write(rt2x00dev, 44, 0x93);
+ rt2800_rfcsr_write(rt2x00dev, 52, 0x45);
+ } else {
+ rt2800_rfcsr_write(rt2x00dev, 44, 0x9b);
+ rt2800_rfcsr_write(rt2x00dev, 52, 0x05);
+ }
+
+ /* Initiate VCO calibration */
+ rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
+ if (rf->channel <= 14) {
+ rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1);
+ } else {
+ rt2x00_set_field8(&rfcsr, RFCSR3_BIT1, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR3_BIT2, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR3_BIT3, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR3_BIT4, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR3_BIT5, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1);
+ }
+ rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
+
+ if (rf->channel >= 1 && rf->channel <= 14) {
+ rfcsr = 0x23;
+ if (txbf_enabled)
+ rt2x00_set_field8(&rfcsr, RFCSR39_RX_DIV, 1);
+ rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
+
+ rt2800_rfcsr_write(rt2x00dev, 45, 0xbb);
+ } else if (rf->channel >= 36 && rf->channel <= 64) {
+ rfcsr = 0x36;
+ if (txbf_enabled)
+ rt2x00_set_field8(&rfcsr, RFCSR39_RX_DIV, 1);
+ rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
+
+ rt2800_rfcsr_write(rt2x00dev, 45, 0xeb);
+ } else if (rf->channel >= 100 && rf->channel <= 128) {
+ rfcsr = 0x32;
+ if (txbf_enabled)
+ rt2x00_set_field8(&rfcsr, RFCSR39_RX_DIV, 1);
+ rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
+
+ rt2800_rfcsr_write(rt2x00dev, 45, 0xb3);
+ } else {
+ rfcsr = 0x30;
+ if (txbf_enabled)
+ rt2x00_set_field8(&rfcsr, RFCSR39_RX_DIV, 1);
+ rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
+
+ rt2800_rfcsr_write(rt2x00dev, 45, 0x9b);
+ }
}
+#define POWER_BOUND 0x27
+#define POWER_BOUND_5G 0x2b
+
static void rt2800_config_channel_rf3290(struct rt2x00_dev *rt2x00dev,
struct ieee80211_conf *conf,
struct rf_channel *rf,
@@ -2563,6 +3097,23 @@ static void rt2800_iq_calibrate(struct rt2x00_dev *rt2x00dev, int channel)
rt2800_bbp_write(rt2x00dev, 159, cal != 0xff ? cal : 0);
}
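+/* Clamp a TX power value to the chipset's valid range. RT3593 stores
+ * the power in the ALC field of the EEPROM byte and uses its own
+ * clamp limits on the 5 GHz band.
+ */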
+static char rt2800_txpower_to_dev(struct rt2x00_dev *rt2x00dev,
+ unsigned int channel,
+ char txpower)
+{
+ if (rt2x00_rt(rt2x00dev, RT3593))
+ txpower = rt2x00_get_field8(txpower, EEPROM_TXPOWER_ALC);
+
+ if (channel <= 14)
+ return clamp_t(char, txpower, MIN_G_TXPOWER, MAX_G_TXPOWER);
+
+ if (rt2x00_rt(rt2x00dev, RT3593))
+ return clamp_t(char, txpower, MIN_A_TXPOWER_3593,
+ MAX_A_TXPOWER_3593);
+ else
+ return clamp_t(char, txpower, MIN_A_TXPOWER, MAX_A_TXPOWER);
+}
+
static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
struct ieee80211_conf *conf,
struct rf_channel *rf,
@@ -2572,13 +3123,14 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
unsigned int tx_pin;
u8 bbp, rfcsr;
- if (rf->channel <= 14) {
- info->default_power1 = TXPOWER_G_TO_DEV(info->default_power1);
- info->default_power2 = TXPOWER_G_TO_DEV(info->default_power2);
- } else {
- info->default_power1 = TXPOWER_A_TO_DEV(info->default_power1);
- info->default_power2 = TXPOWER_A_TO_DEV(info->default_power2);
- }
+ info->default_power1 = rt2800_txpower_to_dev(rt2x00dev, rf->channel,
+ info->default_power1);
+ info->default_power2 = rt2800_txpower_to_dev(rt2x00dev, rf->channel,
+ info->default_power2);
+ if (rt2x00dev->default_ant.tx_chain_num > 2)
+ info->default_power3 =
+ rt2800_txpower_to_dev(rt2x00dev, rf->channel,
+ info->default_power3);
switch (rt2x00dev->chip.rf) {
case RF2020:
@@ -2591,6 +3143,9 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
case RF3052:
rt2800_config_channel_rf3052(rt2x00dev, conf, rf, info);
break;
+ case RF3053:
+ rt2800_config_channel_rf3053(rt2x00dev, conf, rf, info);
+ break;
case RF3290:
rt2800_config_channel_rf3290(rt2x00dev, conf, rf, info);
break;
@@ -2636,6 +3191,23 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain);
rt2800_bbp_write(rt2x00dev, 27, 0x20);
rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain);
+ } else if (rt2x00_rt(rt2x00dev, RT3593)) {
+ if (rf->channel > 14) {
+ /* Disable CCK Packet detection on 5GHz */
+ rt2800_bbp_write(rt2x00dev, 70, 0x00);
+ } else {
+ rt2800_bbp_write(rt2x00dev, 70, 0x0a);
+ }
+
+ if (conf_is_ht40(conf))
+ rt2800_bbp_write(rt2x00dev, 105, 0x04);
+ else
+ rt2800_bbp_write(rt2x00dev, 105, 0x34);
+
+ rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
+ rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
+ rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
+ rt2800_bbp_write(rt2x00dev, 77, 0x98);
} else {
rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
@@ -2651,16 +3223,27 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_bbp_write(rt2x00dev, 82, 0x62);
rt2800_bbp_write(rt2x00dev, 75, 0x46);
} else {
- rt2800_bbp_write(rt2x00dev, 82, 0x84);
+ if (rt2x00_rt(rt2x00dev, RT3593))
+ rt2800_bbp_write(rt2x00dev, 82, 0x62);
+ else
+ rt2800_bbp_write(rt2x00dev, 82, 0x84);
rt2800_bbp_write(rt2x00dev, 75, 0x50);
}
+ if (rt2x00_rt(rt2x00dev, RT3593))
+ rt2800_bbp_write(rt2x00dev, 83, 0x8a);
}
} else {
if (rt2x00_rt(rt2x00dev, RT3572))
rt2800_bbp_write(rt2x00dev, 82, 0x94);
+ else if (rt2x00_rt(rt2x00dev, RT3593))
+ rt2800_bbp_write(rt2x00dev, 82, 0x82);
else
rt2800_bbp_write(rt2x00dev, 82, 0xf2);
+ if (rt2x00_rt(rt2x00dev, RT3593))
+ rt2800_bbp_write(rt2x00dev, 83, 0x9a);
+
if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags))
rt2800_bbp_write(rt2x00dev, 75, 0x46);
else
@@ -2731,6 +3314,41 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
if (rt2x00_rt(rt2x00dev, RT3572))
rt2800_rfcsr_write(rt2x00dev, 8, 0x80);
+ if (rt2x00_rt(rt2x00dev, RT3593)) {
+ if (rt2x00_is_usb(rt2x00dev)) {
+ rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+
+ /* Band selection. GPIO #8 controls all paths */
+ rt2x00_set_field32(&reg, GPIO_CTRL_DIR8, 0);
+ if (rf->channel <= 14)
+ rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 1);
+ else
+ rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 0);
+
+ rt2x00_set_field32(&reg, GPIO_CTRL_DIR4, 0);
+ rt2x00_set_field32(&reg, GPIO_CTRL_DIR7, 0);
+
+ /* LNA PE control.
+ * GPIO #4 controls PE0 and PE1,
+ * GPIO #7 controls PE2
+ */
+ rt2x00_set_field32(&reg, GPIO_CTRL_VAL4, 1);
+ rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 1);
+
+ rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
+ }
+
+ /* AGC init */
+ if (rf->channel <= 14)
+ reg = 0x1c + 2 * rt2x00dev->lna_gain;
+ else
+ reg = 0x22 + ((rt2x00dev->lna_gain * 5) / 3);
+
+ rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg);
+
+ usleep_range(1000, 1500);
+ }
+
if (rt2x00_rt(rt2x00dev, RT5592)) {
rt2800_bbp_write(rt2x00dev, 195, 141);
rt2800_bbp_write(rt2x00dev, 196, conf_is_ht40(conf) ? 0x10 : 0x1a);
@@ -2790,6 +3408,13 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
int i;
/*
+ * First check if temperature compensation is supported.
+ */
+ rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ if (!rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_EXTERNAL_TX_ALC))
+ return 0;
+
+ /*
* Read TSSI boundaries for temperature compensation from
* the EEPROM.
*
@@ -2798,62 +3423,62 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
* Example TSSI bounds 0xF0 0xD0 0xB5 0xA0 0x88 0x45 0x25 0x15 0x00
*/
if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG1, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG1, &eeprom);
tssi_bounds[0] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG1_MINUS4);
tssi_bounds[1] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG1_MINUS3);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG2, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG2, &eeprom);
tssi_bounds[2] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG2_MINUS2);
tssi_bounds[3] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG2_MINUS1);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG3, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG3, &eeprom);
tssi_bounds[4] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG3_REF);
tssi_bounds[5] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG3_PLUS1);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG4, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG4, &eeprom);
tssi_bounds[6] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG4_PLUS2);
tssi_bounds[7] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG4_PLUS3);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG5, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG5, &eeprom);
tssi_bounds[8] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG5_PLUS4);
step = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG5_AGC_STEP);
} else {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A1, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A1, &eeprom);
tssi_bounds[0] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_A1_MINUS4);
tssi_bounds[1] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_A1_MINUS3);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A2, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A2, &eeprom);
tssi_bounds[2] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_A2_MINUS2);
tssi_bounds[3] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_A2_MINUS1);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A3, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A3, &eeprom);
tssi_bounds[4] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_A3_REF);
tssi_bounds[5] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_A3_PLUS1);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A4, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A4, &eeprom);
tssi_bounds[6] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_A4_PLUS2);
tssi_bounds[7] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_A4_PLUS3);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A5, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A5, &eeprom);
tssi_bounds[8] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_A5_PLUS4);
@@ -2899,7 +3524,7 @@ static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev,
u8 comp_type;
int comp_value = 0;
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_DELTA, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_TXPOWER_DELTA, &eeprom);
/*
* HT40 compensation not required.
@@ -2966,6 +3591,9 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
u8 eirp_txpower_criterion;
u8 reg_limit;
+ if (rt2x00_rt(rt2x00dev, RT3593))
+ return min_t(u8, txpower, 0xc);
+
if (test_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags)) {
/*
* Check if eirp txpower exceed txpower_limit.
@@ -2974,12 +3602,12 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
* .11b data rate need add additional 4dbm
* when calculating eirp txpower.
*/
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + 1,
- &eeprom);
+ rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ 1, &eeprom);
criterion = rt2x00_get_field16(eeprom,
EEPROM_TXPOWER_BYRATE_RATE0);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER,
+ rt2800_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER,
&eeprom);
if (band == IEEE80211_BAND_2GHZ)
@@ -3001,6 +3629,412 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
return min_t(u8, txpower, 0xc);
}
+
+enum {
+ TX_PWR_CFG_0_IDX,
+ TX_PWR_CFG_1_IDX,
+ TX_PWR_CFG_2_IDX,
+ TX_PWR_CFG_3_IDX,
+ TX_PWR_CFG_4_IDX,
+ TX_PWR_CFG_5_IDX,
+ TX_PWR_CFG_6_IDX,
+ TX_PWR_CFG_7_IDX,
+ TX_PWR_CFG_8_IDX,
+ TX_PWR_CFG_9_IDX,
+ TX_PWR_CFG_0_EXT_IDX,
+ TX_PWR_CFG_1_EXT_IDX,
+ TX_PWR_CFG_2_EXT_IDX,
+ TX_PWR_CFG_3_EXT_IDX,
+ TX_PWR_CFG_4_EXT_IDX,
+ TX_PWR_CFG_IDX_COUNT,
+};
+
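+/* Build all TX_PWR_CFG_{0..9} registers, plus the _EXT companions
+ * that carry the third-chain fields, from the per-rate EEPROM table,
+ * applying the temperature compensation delta to every rate.
+ */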
+static void rt2800_config_txpower_rt3593(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_channel *chan,
+ int power_level)
+{
+ u8 txpower;
+ u16 eeprom;
+ u32 regs[TX_PWR_CFG_IDX_COUNT];
+ unsigned int offset;
+ enum ieee80211_band band = chan->band;
+ int delta;
+ int i;
+
+ memset(regs, 0, sizeof(regs));
+
+ /* TODO: adapt TX power reduction from the rt28xx code */
+
+ /* calculate temperature compensation delta */
+ delta = rt2800_get_gain_calibration_delta(rt2x00dev);
+
+ if (band == IEEE80211_BAND_5GHZ)
+ offset = 16;
+ else
+ offset = 0;
+
+ if (test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
+ offset += 8;
+
+ /* read the next four txpower values */
+ rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ offset, &eeprom);
+
+ /* CCK 1MBS,2MBS */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 1, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
+ TX_PWR_CFG_0_CCK1_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
+ TX_PWR_CFG_0_CCK1_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_0_EXT_IDX],
+ TX_PWR_CFG_0_EXT_CCK1_CH2, txpower);
+
+ /* CCK 5.5MBS,11MBS */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 1, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
+ TX_PWR_CFG_0_CCK5_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
+ TX_PWR_CFG_0_CCK5_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_0_EXT_IDX],
+ TX_PWR_CFG_0_EXT_CCK5_CH2, txpower);
+
+ /* OFDM 6MBS,9MBS */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
+ TX_PWR_CFG_0_OFDM6_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
+ TX_PWR_CFG_0_OFDM6_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_0_EXT_IDX],
+ TX_PWR_CFG_0_EXT_OFDM6_CH2, txpower);
+
+ /* OFDM 12MBS,18MBS */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
+ TX_PWR_CFG_0_OFDM12_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
+ TX_PWR_CFG_0_OFDM12_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_0_EXT_IDX],
+ TX_PWR_CFG_0_EXT_OFDM12_CH2, txpower);
+
+ /* read the next four txpower values */
+ rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ offset + 1, &eeprom);
+
+ /* OFDM 24MBS,36MBS */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
+ TX_PWR_CFG_1_OFDM24_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
+ TX_PWR_CFG_1_OFDM24_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_1_EXT_IDX],
+ TX_PWR_CFG_1_EXT_OFDM24_CH2, txpower);
+
+ /* OFDM 48MBS */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
+ TX_PWR_CFG_1_OFDM48_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
+ TX_PWR_CFG_1_OFDM48_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_1_EXT_IDX],
+ TX_PWR_CFG_1_EXT_OFDM48_CH2, txpower);
+
+ /* OFDM 54MBS */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_7_IDX],
+ TX_PWR_CFG_7_OFDM54_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_7_IDX],
+ TX_PWR_CFG_7_OFDM54_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_7_IDX],
+ TX_PWR_CFG_7_OFDM54_CH2, txpower);
+
+ /* read the next four txpower values */
+ rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ offset + 2, &eeprom);
+
+ /* MCS 0,1 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
+ TX_PWR_CFG_1_MCS0_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
+ TX_PWR_CFG_1_MCS0_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_1_EXT_IDX],
+ TX_PWR_CFG_1_EXT_MCS0_CH2, txpower);
+
+ /* MCS 2,3 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
+ TX_PWR_CFG_1_MCS2_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
+ TX_PWR_CFG_1_MCS2_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_1_EXT_IDX],
+ TX_PWR_CFG_1_EXT_MCS2_CH2, txpower);
+
+ /* MCS 4,5 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
+ TX_PWR_CFG_2_MCS4_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
+ TX_PWR_CFG_2_MCS4_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_2_EXT_IDX],
+ TX_PWR_CFG_2_EXT_MCS4_CH2, txpower);
+
+ /* MCS 6 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
+ TX_PWR_CFG_2_MCS6_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
+ TX_PWR_CFG_2_MCS6_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_2_EXT_IDX],
+ TX_PWR_CFG_2_EXT_MCS6_CH2, txpower);
+
+ /* read the next four txpower values */
+ rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ offset + 3, &eeprom);
+
+ /* MCS 7 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_7_IDX],
+ TX_PWR_CFG_7_MCS7_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_7_IDX],
+ TX_PWR_CFG_7_MCS7_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_7_IDX],
+ TX_PWR_CFG_7_MCS7_CH2, txpower);
+
+ /* MCS 8,9 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
+ TX_PWR_CFG_2_MCS8_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
+ TX_PWR_CFG_2_MCS8_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_2_EXT_IDX],
+ TX_PWR_CFG_2_EXT_MCS8_CH2, txpower);
+
+ /* MCS 10,11 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
+ TX_PWR_CFG_2_MCS10_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
+ TX_PWR_CFG_2_MCS10_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_2_EXT_IDX],
+ TX_PWR_CFG_2_EXT_MCS10_CH2, txpower);
+
+ /* MCS 12,13 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
+ TX_PWR_CFG_3_MCS12_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
+ TX_PWR_CFG_3_MCS12_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_3_EXT_IDX],
+ TX_PWR_CFG_3_EXT_MCS12_CH2, txpower);
+
+ /* read the next four txpower values */
+ rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ offset + 4, &eeprom);
+
+ /* MCS 14 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
+ TX_PWR_CFG_3_MCS14_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
+ TX_PWR_CFG_3_MCS14_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_3_EXT_IDX],
+ TX_PWR_CFG_3_EXT_MCS14_CH2, txpower);
+
+ /* MCS 15 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_8_IDX],
+ TX_PWR_CFG_8_MCS15_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_8_IDX],
+ TX_PWR_CFG_8_MCS15_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_8_IDX],
+ TX_PWR_CFG_8_MCS15_CH2, txpower);
+
+ /* MCS 16,17 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_5_IDX],
+ TX_PWR_CFG_5_MCS16_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_5_IDX],
+ TX_PWR_CFG_5_MCS16_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_5_IDX],
+ TX_PWR_CFG_5_MCS16_CH2, txpower);
+
+ /* MCS 18,19 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_5_IDX],
+ TX_PWR_CFG_5_MCS18_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_5_IDX],
+ TX_PWR_CFG_5_MCS18_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_5_IDX],
+ TX_PWR_CFG_5_MCS18_CH2, txpower);
+
+ /* read the next four txpower values */
+ rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ offset + 5, &eeprom);
+
+ /* MCS 20,21 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_6_IDX],
+ TX_PWR_CFG_6_MCS20_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_6_IDX],
+ TX_PWR_CFG_6_MCS20_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_6_IDX],
+ TX_PWR_CFG_6_MCS20_CH2, txpower);
+
+ /* MCS 22 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_6_IDX],
+ TX_PWR_CFG_6_MCS22_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_6_IDX],
+ TX_PWR_CFG_6_MCS22_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_6_IDX],
+ TX_PWR_CFG_6_MCS22_CH2, txpower);
+
+ /* MCS 23 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_8_IDX],
+ TX_PWR_CFG_8_MCS23_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_8_IDX],
+ TX_PWR_CFG_8_MCS23_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_8_IDX],
+ TX_PWR_CFG_8_MCS23_CH2, txpower);
+
+ /* read the next four txpower values */
+ rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ offset + 6, &eeprom);
+
+ /* STBC, MCS 0,1 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
+ TX_PWR_CFG_3_STBC0_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
+ TX_PWR_CFG_3_STBC0_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_3_EXT_IDX],
+ TX_PWR_CFG_3_EXT_STBC0_CH2, txpower);
+
+ /* STBC, MCS 2,3 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
+ TX_PWR_CFG_3_STBC2_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
+ TX_PWR_CFG_3_STBC2_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_3_EXT_IDX],
+ TX_PWR_CFG_3_EXT_STBC2_CH2, txpower);
+
+ /* STBC, MCS 4,5 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_4_IDX], TX_PWR_CFG_RATE0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_4_IDX], TX_PWR_CFG_RATE1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_4_EXT_IDX], TX_PWR_CFG_RATE0,
+ txpower);
+
+ /* STBC, MCS 6 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_4_IDX], TX_PWR_CFG_RATE2, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_4_IDX], TX_PWR_CFG_RATE3, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_4_EXT_IDX], TX_PWR_CFG_RATE2,
+ txpower);
+
+ /* read the next four txpower values */
+ rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ offset + 7, &eeprom);
+
+ /* STBC, MCS 7 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_9_IDX],
+ TX_PWR_CFG_9_STBC7_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_9_IDX],
+ TX_PWR_CFG_9_STBC7_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_9_IDX],
+ TX_PWR_CFG_9_STBC7_CH2, txpower);
+
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_0, regs[TX_PWR_CFG_0_IDX]);
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_1, regs[TX_PWR_CFG_1_IDX]);
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_2, regs[TX_PWR_CFG_2_IDX]);
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_3, regs[TX_PWR_CFG_3_IDX]);
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_4, regs[TX_PWR_CFG_4_IDX]);
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_5, regs[TX_PWR_CFG_5_IDX]);
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_6, regs[TX_PWR_CFG_6_IDX]);
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_7, regs[TX_PWR_CFG_7_IDX]);
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_8, regs[TX_PWR_CFG_8_IDX]);
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_9, regs[TX_PWR_CFG_9_IDX]);
+
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_0_EXT,
+ regs[TX_PWR_CFG_0_EXT_IDX]);
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_1_EXT,
+ regs[TX_PWR_CFG_1_EXT_IDX]);
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_2_EXT,
+ regs[TX_PWR_CFG_2_EXT_IDX]);
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_3_EXT,
+ regs[TX_PWR_CFG_3_EXT_IDX]);
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_4_EXT,
+ regs[TX_PWR_CFG_4_EXT_IDX]);
+
+ for (i = 0; i < TX_PWR_CFG_IDX_COUNT; i++)
+ rt2x00_dbg(rt2x00dev,
+ "band:%cGHz, BW:%c0MHz, TX_PWR_CFG_%d%s = %08lx\n",
+ (band == IEEE80211_BAND_5GHZ) ? '5' : '2',
+ (test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)) ?
+ '4' : '2',
+ (i > TX_PWR_CFG_9_IDX) ?
+ (i - TX_PWR_CFG_9_IDX - 1) : i,
+ (i > TX_PWR_CFG_9_IDX) ? "_EXT" : "",
+ (unsigned long) regs[i]);
+}
+
/*
* We configure transmit power using MAC TX_PWR_CFG_{0,...,N} registers and
* BBP R1 register. TX_PWR_CFG_X allow to configure per rate TX power values,
@@ -3010,9 +4044,9 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
* EEPROM_TXPOWER_BYRATE offset. We adjust them and BBP R1 settings according to
* current conditions (i.e. band, bandwidth, temperature, user settings).
*/
-static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
- struct ieee80211_channel *chan,
- int power_level)
+static void rt2800_config_txpower_rt28xx(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_channel *chan,
+ int power_level)
{
u8 txpower, r1;
u16 eeprom;
@@ -3080,8 +4114,8 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
rt2800_register_read(rt2x00dev, offset, &reg);
/* read the next four txpower values */
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i,
- &eeprom);
+ rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ i, &eeprom);
is_rate_b = i ? 0 : 1;
/*
@@ -3129,8 +4163,8 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&reg, TX_PWR_CFG_RATE3, txpower);
/* read the next four txpower values */
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i + 1,
- &eeprom);
+ rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ i + 1, &eeprom);
is_rate_b = 0;
/*
@@ -3184,6 +4218,16 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
}
}
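+/* RT3593 gets the three-chain TX power setup; every other chipset
+ * keeps using the original rt28xx path.
+ */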
+static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_channel *chan,
+ int power_level)
+{
+ if (rt2x00_rt(rt2x00dev, RT3593))
+ rt2800_config_txpower_rt3593(rt2x00dev, chan, power_level);
+ else
+ rt2800_config_txpower_rt28xx(rt2x00dev, chan, power_level);
+}
+
void rt2800_gain_calibration(struct rt2x00_dev *rt2x00dev)
{
rt2800_config_txpower(rt2x00dev, rt2x00dev->hw->conf.chandef.chan,
@@ -3219,6 +4263,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
break;
+ case RF3053:
case RF3290:
case RF5360:
case RF5370:
@@ -3442,17 +4487,25 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
return ret;
rt2800_register_read(rt2x00dev, BCN_OFFSET0, &reg);
- rt2x00_set_field32(&reg, BCN_OFFSET0_BCN0, 0xe0); /* 0x3800 */
- rt2x00_set_field32(&reg, BCN_OFFSET0_BCN1, 0xe8); /* 0x3a00 */
- rt2x00_set_field32(&reg, BCN_OFFSET0_BCN2, 0xf0); /* 0x3c00 */
- rt2x00_set_field32(&reg, BCN_OFFSET0_BCN3, 0xf8); /* 0x3e00 */
+ rt2x00_set_field32(&reg, BCN_OFFSET0_BCN0,
+ rt2800_get_beacon_offset(rt2x00dev, 0));
+ rt2x00_set_field32(&reg, BCN_OFFSET0_BCN1,
+ rt2800_get_beacon_offset(rt2x00dev, 1));
+ rt2x00_set_field32(&reg, BCN_OFFSET0_BCN2,
+ rt2800_get_beacon_offset(rt2x00dev, 2));
+ rt2x00_set_field32(&reg, BCN_OFFSET0_BCN3,
+ rt2800_get_beacon_offset(rt2x00dev, 3));
rt2800_register_write(rt2x00dev, BCN_OFFSET0, reg);
rt2800_register_read(rt2x00dev, BCN_OFFSET1, &reg);
- rt2x00_set_field32(&reg, BCN_OFFSET1_BCN4, 0xc8); /* 0x3200 */
- rt2x00_set_field32(&reg, BCN_OFFSET1_BCN5, 0xd0); /* 0x3400 */
- rt2x00_set_field32(&reg, BCN_OFFSET1_BCN6, 0x77); /* 0x1dc0 */
- rt2x00_set_field32(&reg, BCN_OFFSET1_BCN7, 0x6f); /* 0x1bc0 */
+ rt2x00_set_field32(&reg, BCN_OFFSET1_BCN4,
+ rt2800_get_beacon_offset(rt2x00dev, 4));
+ rt2x00_set_field32(&reg, BCN_OFFSET1_BCN5,
+ rt2800_get_beacon_offset(rt2x00dev, 5));
+ rt2x00_set_field32(&reg, BCN_OFFSET1_BCN6,
+ rt2800_get_beacon_offset(rt2x00dev, 6));
+ rt2x00_set_field32(&reg, BCN_OFFSET1_BCN7,
+ rt2800_get_beacon_offset(rt2x00dev, 7));
rt2800_register_write(rt2x00dev, BCN_OFFSET1, reg);
rt2800_register_write(rt2x00dev, LEGACY_BASIC_RATE, 0x0000013f);
@@ -3528,7 +4581,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1,
+ &eeprom);
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_DAC_TEST))
rt2800_register_write(rt2x00dev, TX_SW_CFG2,
0x0000002c);
@@ -3559,6 +4613,23 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
} else if (rt2x00_rt(rt2x00dev, RT3572)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
+ } else if (rt2x00_rt(rt2x00dev, RT3593)) {
+ rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000402);
+ rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
+ if (rt2x00_rt_rev_lt(rt2x00dev, RT3593, REV_RT3593E)) {
+ rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1,
+ &eeprom);
+ if (rt2x00_get_field16(eeprom,
+ EEPROM_NIC_CONF1_DAC_TEST))
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2,
+ 0x0000001f);
+ else
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2,
+ 0x0000000f);
+ } else {
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2,
+ 0x00000000);
+ }
} else if (rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392) ||
rt2x00_rt(rt2x00dev, RT5592)) {
@@ -3786,14 +4857,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
/*
* Clear all beacons
*/
- rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE0);
- rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE1);
- rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE2);
- rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE3);
- rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE4);
- rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE5);
- rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE6);
- rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE7);
+ for (i = 0; i < 8; i++)
+ rt2800_clear_beacon_register(rt2x00dev, i);
if (rt2x00_is_usb(rt2x00dev)) {
rt2800_register_read(rt2x00dev, US_CYC_CNT, &reg);
@@ -3989,7 +5054,7 @@ static void rt2800_disable_unused_dac_adc(struct rt2x00_dev *rt2x00dev)
u8 value;
rt2800_bbp_read(rt2x00dev, 138, &value);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
value |= 0x20;
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
@@ -4332,6 +5397,22 @@ static void rt2800_init_bbp_3572(struct rt2x00_dev *rt2x00dev)
rt2800_disable_unused_dac_adc(rt2x00dev);
}
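+/* RT3593-specific BBP initialization; the magic values presumably
+ * come from the vendor reference driver.
+ */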
+static void rt2800_init_bbp_3593(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_init_bbp_early(rt2x00dev);
+
+ rt2800_bbp_write(rt2x00dev, 79, 0x13);
+ rt2800_bbp_write(rt2x00dev, 80, 0x05);
+ rt2800_bbp_write(rt2x00dev, 81, 0x33);
+ rt2800_bbp_write(rt2x00dev, 137, 0x0f);
+
+ rt2800_bbp_write(rt2x00dev, 84, 0x19);
+
+ /* Enable DC filter */
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT3593, REV_RT3593E))
+ rt2800_bbp_write(rt2x00dev, 103, 0xc0);
+}
+
static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
{
int ant, div_mode;
@@ -4402,7 +5483,7 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
rt2800_disable_unused_dac_adc(rt2x00dev);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
div_mode = rt2x00_get_field16(eeprom,
EEPROM_NIC_CONF1_ANT_DIVERSITY);
ant = (div_mode == 3) ? 1 : 0;
@@ -4488,7 +5569,7 @@ static void rt2800_init_bbp_5592(struct rt2x00_dev *rt2x00dev)
rt2800_bbp4_mac_if_ctrl(rt2x00dev);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY);
ant = (div_mode == 3) ? 1 : 0;
rt2800_bbp_read(rt2x00dev, 152, &value);
@@ -4547,6 +5628,9 @@ static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
case RT3572:
rt2800_init_bbp_3572(rt2x00dev);
break;
+ case RT3593:
+ rt2800_init_bbp_3593(rt2x00dev);
+ return;
case RT5390:
case RT5392:
rt2800_init_bbp_53xx(rt2x00dev);
@@ -4557,7 +5641,8 @@ static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
}
for (i = 0; i < EEPROM_BBP_SIZE; i++) {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
+ rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_BBP_START, i,
+ &eeprom);
if (eeprom != 0xffff && eeprom != 0x0000) {
reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID);
@@ -4728,7 +5813,7 @@ static void rt2800_normal_mode_setup_3xxx(struct rt2x00_dev *rt2x00dev)
if (rt2x00_rt(rt2x00dev, RT3090)) {
/* Turn off unused DAC1 and ADC1 to reduce power consumption */
rt2800_bbp_read(rt2x00dev, 138, &bbp);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
rt2x00_set_field8(&bbp, BBP138_RX_ADC1, 0);
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
@@ -4771,6 +5856,42 @@ static void rt2800_normal_mode_setup_3xxx(struct rt2x00_dev *rt2x00dev)
}
}
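+/* Post-calibration RF setup for RT3593: switch off the TX LO2 and
+ * RX LO1/LO2 paths and program the 2.4 GHz TX mixer gain read from
+ * the EEPROM.
+ */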
+static void rt2800_normal_mode_setup_3593(struct rt2x00_dev *rt2x00dev)
+{
+ struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
+ u8 rfcsr;
+ u8 tx_gain;
+
+ rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR50_TX_LO2_EN, 0);
+ rt2800_rfcsr_write(rt2x00dev, 50, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 51, &rfcsr);
+ tx_gain = rt2x00_get_field8(drv_data->txmixer_gain_24g,
+ RFCSR17_TXMIXER_GAIN);
+ rt2x00_set_field8(&rfcsr, RFCSR51_BITS24, tx_gain);
+ rt2800_rfcsr_write(rt2x00dev, 51, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR38_RX_LO1_EN, 0);
+ rt2800_rfcsr_write(rt2x00dev, 38, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 39, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR39_RX_LO2_EN, 0);
+ rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);
+ rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR30_RX_VCM, 2);
+ rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+
+ /* TODO: enable stream mode */
+}
+
static void rt2800_normal_mode_setup_5xxx(struct rt2x00_dev *rt2x00dev)
{
u8 reg;
@@ -4778,7 +5899,7 @@ static void rt2800_normal_mode_setup_5xxx(struct rt2x00_dev *rt2x00dev)
/* Turn off unused DAC1 and ADC1 to reduce power consumption */
rt2800_bbp_read(rt2x00dev, 138, &reg);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
rt2x00_set_field8(&reg, BBP138_RX_ADC1, 0);
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
@@ -4884,7 +6005,8 @@ static void rt2800_init_rfcsr_30xx(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E)) {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1,
+ &eeprom);
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_DAC_TEST))
rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
else
@@ -5152,6 +6274,136 @@ static void rt2800_init_rfcsr_3572(struct rt2x00_dev *rt2x00dev)
rt2800_normal_mode_setup_3xxx(rt2x00dev);
}
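+/* BBP fix-ups applied on top of the generic BBP init; the SNR mapping
+ * and AGC values are presumably taken from the reference driver.
+ */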
+static void rt3593_post_bbp_init(struct rt2x00_dev *rt2x00dev)
+{
+ u8 bbp;
+ bool txbf_enabled = false; /* FIXME */
+
+ rt2800_bbp_read(rt2x00dev, 105, &bbp);
+ if (rt2x00dev->default_ant.rx_chain_num == 1)
+ rt2x00_set_field8(&bbp, BBP105_MLD, 0);
+ else
+ rt2x00_set_field8(&bbp, BBP105_MLD, 1);
+ rt2800_bbp_write(rt2x00dev, 105, bbp);
+
+ rt2800_bbp4_mac_if_ctrl(rt2x00dev);
+
+ rt2800_bbp_write(rt2x00dev, 92, 0x02);
+ rt2800_bbp_write(rt2x00dev, 82, 0x82);
+ rt2800_bbp_write(rt2x00dev, 106, 0x05);
+ rt2800_bbp_write(rt2x00dev, 104, 0x92);
+ rt2800_bbp_write(rt2x00dev, 88, 0x90);
+ rt2800_bbp_write(rt2x00dev, 148, 0xc8);
+ rt2800_bbp_write(rt2x00dev, 47, 0x48);
+ rt2800_bbp_write(rt2x00dev, 120, 0x50);
+
+ if (txbf_enabled)
+ rt2800_bbp_write(rt2x00dev, 163, 0xbd);
+ else
+ rt2800_bbp_write(rt2x00dev, 163, 0x9d);
+
+ /* SNR mapping */
+ rt2800_bbp_write(rt2x00dev, 142, 6);
+ rt2800_bbp_write(rt2x00dev, 143, 160);
+ rt2800_bbp_write(rt2x00dev, 142, 7);
+ rt2800_bbp_write(rt2x00dev, 143, 161);
+ rt2800_bbp_write(rt2x00dev, 142, 8);
+ rt2800_bbp_write(rt2x00dev, 143, 162);
+
+ /* ADC/DAC control */
+ rt2800_bbp_write(rt2x00dev, 31, 0x08);
+
+ /* RX AGC energy lower bound in log2 */
+ rt2800_bbp_write(rt2x00dev, 68, 0x0b);
+
+ /* FIXME: BBP 105 overwrite? */
+ rt2800_bbp_write(rt2x00dev, 105, 0x04);
+}
+
+static void rt2800_init_rfcsr_3593(struct rt2x00_dev *rt2x00dev)
+{
+ struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
+ u32 reg;
+ u8 rfcsr;
+
+ /* Disable GPIO #4 and #7 function for LNA PE control */
+ rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
+ rt2x00_set_field32(&reg, GPIO_SWITCH_4, 0);
+ rt2x00_set_field32(&reg, GPIO_SWITCH_7, 0);
+ rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);
+
+ /* Initialize default register values */
+ rt2800_rfcsr_write(rt2x00dev, 1, 0x03);
+ rt2800_rfcsr_write(rt2x00dev, 3, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 5, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 6, 0x40);
+ rt2800_rfcsr_write(rt2x00dev, 8, 0xf1);
+ rt2800_rfcsr_write(rt2x00dev, 9, 0x02);
+ rt2800_rfcsr_write(rt2x00dev, 10, 0xd3);
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x40);
+ rt2800_rfcsr_write(rt2x00dev, 12, 0x4e);
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x12);
+ rt2800_rfcsr_write(rt2x00dev, 18, 0x40);
+ rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 32, 0x78);
+ rt2800_rfcsr_write(rt2x00dev, 33, 0x3b);
+ rt2800_rfcsr_write(rt2x00dev, 34, 0x3c);
+ rt2800_rfcsr_write(rt2x00dev, 35, 0xe0);
+ rt2800_rfcsr_write(rt2x00dev, 38, 0x86);
+ rt2800_rfcsr_write(rt2x00dev, 39, 0x23);
+ rt2800_rfcsr_write(rt2x00dev, 44, 0xd3);
+ rt2800_rfcsr_write(rt2x00dev, 45, 0xbb);
+ rt2800_rfcsr_write(rt2x00dev, 46, 0x60);
+ rt2800_rfcsr_write(rt2x00dev, 49, 0x8e);
+ rt2800_rfcsr_write(rt2x00dev, 50, 0x86);
+ rt2800_rfcsr_write(rt2x00dev, 51, 0x75);
+ rt2800_rfcsr_write(rt2x00dev, 52, 0x45);
+ rt2800_rfcsr_write(rt2x00dev, 53, 0x18);
+ rt2800_rfcsr_write(rt2x00dev, 54, 0x18);
+ rt2800_rfcsr_write(rt2x00dev, 55, 0x18);
+ rt2800_rfcsr_write(rt2x00dev, 56, 0xdb);
+ rt2800_rfcsr_write(rt2x00dev, 57, 0x6e);
+
+ /* Initiate calibration */
+ /* TODO: use rt2800_rf_init_calibration? */
+ rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1);
+ rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
+
+ rt2800_adjust_freq_offset(rt2x00dev);
+
+ rt2800_rfcsr_read(rt2x00dev, 18, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR18_XO_TUNE_BYPASS, 1);
+ rt2800_rfcsr_write(rt2x00dev, 18, rfcsr);
+
+ rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
+ rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
+ rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
+ rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
+ usleep_range(1000, 1500);
+ rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
+ rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0);
+ rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
+
+ /* Set initial values for RX filter calibration */
+ drv_data->calibration_bw20 = 0x1f;
+ drv_data->calibration_bw40 = 0x2f;
+
+ /* Save BBP 25 & 26 values for later use in channel switching */
+ rt2800_bbp_read(rt2x00dev, 25, &drv_data->bbp25);
+ rt2800_bbp_read(rt2x00dev, 26, &drv_data->bbp26);
+
+ rt2800_led_open_drain_enable(rt2x00dev);
+ rt2800_normal_mode_setup_3593(rt2x00dev);
+
+ rt3593_post_bbp_init(rt2x00dev);
+
+ /* TODO: enable stream mode support */
+}
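The TODO above refers to rt2800_rf_init_calibration(), which the RT5390 path below already uses. A hedged sketch of the read-modify-write it would replace, assuming it simply wraps the RFCSR 2 sequence shown above (the name below is hypothetical; the real helper may also clear RESCAL_EN again after a delay):

static void rt3593_init_calibration(struct rt2x00_dev *rt2x00dev)
{
	u8 rfcsr;

	/* mirrors the open-coded RESCAL enable sequence above */
	rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1);
	rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
}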
+
static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
{
rt2800_rf_init_calibration(rt2x00dev, 2);
@@ -5380,6 +6632,9 @@ static void rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
case RT3572:
rt2800_init_rfcsr_3572(rt2x00dev);
break;
+ case RT3593:
+ rt2800_init_rfcsr_3593(rt2x00dev);
+ break;
case RT5390:
rt2800_init_rfcsr_5390(rt2x00dev);
break;
@@ -5456,15 +6711,15 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
/*
* Initialize LED control
*/
- rt2x00_eeprom_read(rt2x00dev, EEPROM_LED_AG_CONF, &word);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_LED_AG_CONF, &word);
rt2800_mcu_request(rt2x00dev, MCU_LED_AG_CONF, 0xff,
word & 0xff, (word >> 8) & 0xff);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_LED_ACT_CONF, &word);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_LED_ACT_CONF, &word);
rt2800_mcu_request(rt2x00dev, MCU_LED_ACT_CONF, 0xff,
word & 0xff, (word >> 8) & 0xff);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_LED_POLARITY, &word);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_LED_POLARITY, &word);
rt2800_mcu_request(rt2x00dev, MCU_LED_LED_POLARITY, 0xff,
word & 0xff, (word >> 8) & 0xff);
@@ -5560,6 +6815,34 @@ int rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
}
EXPORT_SYMBOL_GPL(rt2800_read_eeprom_efuse);
+static u8 rt2800_get_txmixer_gain_24g(struct rt2x00_dev *rt2x00dev)
+{
+ u16 word;
+
+ if (rt2x00_rt(rt2x00dev, RT3593))
+ return 0;
+
+ rt2800_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &word);
+ if ((word & 0x00ff) != 0x00ff)
+ return rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_BG_VAL);
+
+ return 0;
+}
+
+static u8 rt2800_get_txmixer_gain_5g(struct rt2x00_dev *rt2x00dev)
+{
+ u16 word;
+
+ if (rt2x00_rt(rt2x00dev, RT3593))
+ return 0;
+
+ rt2800_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_A, &word);
+ if ((word & 0x00ff) != 0x00ff)
+ return rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_A_VAL);
+
+ return 0;
+}
+
static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
{
struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
@@ -5578,18 +6861,18 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
/*
* Start validation of the data that has been read.
*/
- mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
+ mac = rt2800_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
if (!is_valid_ether_addr(mac)) {
eth_random_addr(mac);
rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", mac);
}
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &word);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &word);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RXPATH, 2);
rt2x00_set_field16(&word, EEPROM_NIC_CONF0_TXPATH, 1);
rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RF_TYPE, RF2820);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word);
+ rt2800_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word);
rt2x00_eeprom_dbg(rt2x00dev, "Antenna: 0x%04x\n", word);
} else if (rt2x00_rt(rt2x00dev, RT2860) ||
rt2x00_rt(rt2x00dev, RT2872)) {
@@ -5598,10 +6881,10 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
*/
if (rt2x00_get_field16(word, EEPROM_NIC_CONF0_RXPATH) > 2)
rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RXPATH, 2);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word);
+ rt2800_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word);
}
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &word);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &word);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_NIC_CONF1_HW_RADIO, 0);
rt2x00_set_field16(&word, EEPROM_NIC_CONF1_EXTERNAL_TX_ALC, 0);
@@ -5618,24 +6901,24 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field16(&word, EEPROM_NIC_CONF1_INTERNAL_TX_ALC, 0);
rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BT_COEXIST, 0);
rt2x00_set_field16(&word, EEPROM_NIC_CONF1_DAC_TEST, 0);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC_CONF1, word);
+ rt2800_eeprom_write(rt2x00dev, EEPROM_NIC_CONF1, word);
rt2x00_eeprom_dbg(rt2x00dev, "NIC: 0x%04x\n", word);
}
- rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_FREQ, &word);
if ((word & 0x00ff) == 0x00ff) {
rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
+ rt2800_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
rt2x00_eeprom_dbg(rt2x00dev, "Freq: 0x%04x\n", word);
}
if ((word & 0xff00) == 0xff00) {
rt2x00_set_field16(&word, EEPROM_FREQ_LED_MODE,
LED_MODE_TXRX_ACTIVITY);
rt2x00_set_field16(&word, EEPROM_FREQ_LED_POLARITY, 0);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_LED_AG_CONF, 0x5555);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_LED_ACT_CONF, 0x2221);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_LED_POLARITY, 0xa9f8);
+ rt2800_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
+ rt2800_eeprom_write(rt2x00dev, EEPROM_LED_AG_CONF, 0x5555);
+ rt2800_eeprom_write(rt2x00dev, EEPROM_LED_ACT_CONF, 0x2221);
+ rt2800_eeprom_write(rt2x00dev, EEPROM_LED_POLARITY, 0xa9f8);
rt2x00_eeprom_dbg(rt2x00dev, "Led Mode: 0x%04x\n", word);
}
@@ -5644,56 +6927,61 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
* lna0 as correct value. Note that EEPROM_LNA
* is never validated.
*/
- rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &word);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_LNA, &word);
default_lna_gain = rt2x00_get_field16(word, EEPROM_LNA_A0);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &word);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &word);
if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET0)) > 10)
rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET0, 0);
if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET1)) > 10)
rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET1, 0);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG, word);
+ rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_BG, word);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &word);
- if ((word & 0x00ff) != 0x00ff) {
- drv_data->txmixer_gain_24g =
- rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_BG_VAL);
- } else {
- drv_data->txmixer_gain_24g = 0;
- }
+ drv_data->txmixer_gain_24g = rt2800_get_txmixer_gain_24g(rt2x00dev);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &word);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &word);
if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG2_OFFSET2)) > 10)
rt2x00_set_field16(&word, EEPROM_RSSI_BG2_OFFSET2, 0);
- if (rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0x00 ||
- rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0xff)
- rt2x00_set_field16(&word, EEPROM_RSSI_BG2_LNA_A1,
- default_lna_gain);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG2, word);
-
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_A, &word);
- if ((word & 0x00ff) != 0x00ff) {
- drv_data->txmixer_gain_5g =
- rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_A_VAL);
- } else {
- drv_data->txmixer_gain_5g = 0;
+ if (!rt2x00_rt(rt2x00dev, RT3593)) {
+ if (rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0x00 ||
+ rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0xff)
+ rt2x00_set_field16(&word, EEPROM_RSSI_BG2_LNA_A1,
+ default_lna_gain);
}
+ rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_BG2, word);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &word);
+ drv_data->txmixer_gain_5g = rt2800_get_txmixer_gain_5g(rt2x00dev);
+
+ rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &word);
if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET0)) > 10)
rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET0, 0);
if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET1)) > 10)
rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET1, 0);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A, word);
+ rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_A, word);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &word);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &word);
if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A2_OFFSET2)) > 10)
rt2x00_set_field16(&word, EEPROM_RSSI_A2_OFFSET2, 0);
- if (rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0x00 ||
- rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0xff)
- rt2x00_set_field16(&word, EEPROM_RSSI_A2_LNA_A2,
- default_lna_gain);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word);
+ if (!rt2x00_rt(rt2x00dev, RT3593)) {
+ if (rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0x00 ||
+ rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0xff)
+ rt2x00_set_field16(&word, EEPROM_RSSI_A2_LNA_A2,
+ default_lna_gain);
+ }
+ rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word);
+
+ if (rt2x00_rt(rt2x00dev, RT3593)) {
+ rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2, &word);
+ if (rt2x00_get_field16(word, EEPROM_EXT_LNA2_A1) == 0x00 ||
+ rt2x00_get_field16(word, EEPROM_EXT_LNA2_A1) == 0xff)
+ rt2x00_set_field16(&word, EEPROM_EXT_LNA2_A1,
+ default_lna_gain);
+ if (rt2x00_get_field16(word, EEPROM_EXT_LNA2_A2) == 0x00 ||
+ rt2x00_get_field16(word, EEPROM_EXT_LNA2_A2) == 0xff)
+ rt2x00_set_field16(&word, EEPROM_EXT_LNA2_A2,
+ default_lna_gain);
+ rt2800_eeprom_write(rt2x00dev, EEPROM_EXT_LNA2, word);
+ }
return 0;
}
@@ -5707,7 +6995,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
/*
* Read EEPROM word for configuration.
*/
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
/*
* Identify RF chipset by EEPROM value
@@ -5717,7 +7005,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
if (rt2x00_rt(rt2x00dev, RT3290) ||
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392))
- rt2x00_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &rf);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &rf);
else
rf = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
@@ -5731,6 +7019,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
case RF3021:
case RF3022:
case RF3052:
+ case RF3053:
case RF3290:
case RF3320:
case RF3322:
@@ -5757,7 +7046,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00dev->default_ant.rx_chain_num =
rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
if (rt2x00_rt(rt2x00dev, RT3070) ||
rt2x00_rt(rt2x00dev, RT3090) ||
@@ -5810,7 +7099,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
/*
* Read frequency offset and RF programming sequence.
*/
- rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET);
/*
@@ -5827,7 +7116,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
/*
* Check if support EIRP tx power limit feature.
*/
- rt2x00_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER, &eeprom);
if (rt2x00_get_field16(eeprom, EEPROM_EIRP_MAX_TX_POWER_2GHZ) <
EIRP_MAX_TX_POWER_LIMIT)
@@ -6109,12 +7398,79 @@ static const struct rf_channel rf_vals_5592_xtal40[] = {
{196, 83, 0, 12, 1},
};
+static const struct rf_channel rf_vals_3053[] = {
+ /* Channel, N, R, K */
+ {1, 241, 2, 2},
+ {2, 241, 2, 7},
+ {3, 242, 2, 2},
+ {4, 242, 2, 7},
+ {5, 243, 2, 2},
+ {6, 243, 2, 7},
+ {7, 244, 2, 2},
+ {8, 244, 2, 7},
+ {9, 245, 2, 2},
+ {10, 245, 2, 7},
+ {11, 246, 2, 2},
+ {12, 246, 2, 7},
+ {13, 247, 2, 2},
+ {14, 248, 2, 4},
+
+ {36, 0x56, 0, 4},
+ {38, 0x56, 0, 6},
+ {40, 0x56, 0, 8},
+ {44, 0x57, 0, 0},
+ {46, 0x57, 0, 2},
+ {48, 0x57, 0, 4},
+ {52, 0x57, 0, 8},
+ {54, 0x57, 0, 10},
+ {56, 0x58, 0, 0},
+ {60, 0x58, 0, 4},
+ {62, 0x58, 0, 6},
+ {64, 0x58, 0, 8},
+
+ {100, 0x5B, 0, 8},
+ {102, 0x5B, 0, 10},
+ {104, 0x5C, 0, 0},
+ {108, 0x5C, 0, 4},
+ {110, 0x5C, 0, 6},
+ {112, 0x5C, 0, 8},
+
+ /* NOTE: Channel 114 has been removed intentionally.
+ * The EEPROM contains no TX power values for it,
+ * and it is disabled in the vendor driver as well.
+ */
+
+ {116, 0x5D, 0, 0},
+ {118, 0x5D, 0, 2},
+ {120, 0x5D, 0, 4},
+ {124, 0x5D, 0, 8},
+ {126, 0x5D, 0, 10},
+ {128, 0x5E, 0, 0},
+ {132, 0x5E, 0, 4},
+ {134, 0x5E, 0, 6},
+ {136, 0x5E, 0, 8},
+ {140, 0x5F, 0, 0},
+
+ {149, 0x5F, 0, 9},
+ {151, 0x5F, 0, 11},
+ {153, 0x60, 0, 1},
+ {157, 0x60, 0, 5},
+ {159, 0x60, 0, 7},
+ {161, 0x60, 0, 9},
+ {165, 0x61, 0, 1},
+ {167, 0x61, 0, 3},
+ {169, 0x61, 0, 5},
+ {171, 0x61, 0, 7},
+ {173, 0x61, 0, 9},
+};
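The N/R/K triples in rf_vals_3053 encode the channel center frequency. Empirically (this relation is inferred from the table itself, not from an RF3053 datasheet): the 2.4 GHz rows (R = 2) satisfy f_MHz = 10*N + K, and the 5 GHz rows (R = 0) satisfy f_MHz = 60*N + 5*K. A standalone check of a few entries:

#include <assert.h>

static unsigned int rf3053_freq_mhz(unsigned int n, unsigned int r,
				    unsigned int k)
{
	/* empirical relation read off the table above */
	return (r == 2) ? 10 * n + k : 60 * n + 5 * k;
}

int main(void)
{
	assert(rf3053_freq_mhz(241, 2, 2) == 2412);  /* channel 1 */
	assert(rf3053_freq_mhz(248, 2, 4) == 2484);  /* channel 14 */
	assert(rf3053_freq_mhz(0x56, 0, 4) == 5180); /* channel 36 */
	assert(rf3053_freq_mhz(0x61, 0, 9) == 5865); /* channel 173 */
	return 0;
}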
+
static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
{
struct hw_mode_spec *spec = &rt2x00dev->spec;
struct channel_info *info;
char *default_power1;
char *default_power2;
+ char *default_power3;
unsigned int i;
u16 eeprom;
u32 reg;
@@ -6149,7 +7505,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
- rt2x00_eeprom_addr(rt2x00dev,
+ rt2800_eeprom_addr(rt2x00dev,
EEPROM_MAC_ADDR_0));
/*
@@ -6165,7 +7521,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
rt2x00dev->hw->max_report_rates = 7;
rt2x00dev->hw->max_rate_tries = 1;
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
/*
* Initialize hw_mode information.
@@ -6200,6 +7556,10 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals_3x);
spec->channels = rf_vals_3x;
+ } else if (rt2x00_rf(rt2x00dev, RF3053)) {
+ spec->supported_bands |= SUPPORT_BAND_5GHZ;
+ spec->num_channels = ARRAY_SIZE(rf_vals_3053);
+ spec->channels = rf_vals_3053;
} else if (rt2x00_rf(rt2x00dev, RF5592)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
@@ -6265,21 +7625,40 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->channels_info = info;
- default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
- default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2);
+ default_power1 = rt2800_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
+ default_power2 = rt2800_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2);
+
+ if (rt2x00dev->default_ant.tx_chain_num > 2)
+ default_power3 = rt2800_eeprom_addr(rt2x00dev,
+ EEPROM_EXT_TXPOWER_BG3);
+ else
+ default_power3 = NULL;
for (i = 0; i < 14; i++) {
info[i].default_power1 = default_power1[i];
info[i].default_power2 = default_power2[i];
+ if (default_power3)
+ info[i].default_power3 = default_power3[i];
}
if (spec->num_channels > 14) {
- default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1);
- default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);
+ default_power1 = rt2800_eeprom_addr(rt2x00dev,
+ EEPROM_TXPOWER_A1);
+ default_power2 = rt2800_eeprom_addr(rt2x00dev,
+ EEPROM_TXPOWER_A2);
+
+ if (rt2x00dev->default_ant.tx_chain_num > 2)
+ default_power3 =
+ rt2800_eeprom_addr(rt2x00dev,
+ EEPROM_EXT_TXPOWER_A3);
+ else
+ default_power3 = NULL;
for (i = 14; i < spec->num_channels; i++) {
info[i].default_power1 = default_power1[i - 14];
info[i].default_power2 = default_power2[i - 14];
+ if (default_power3)
+ info[i].default_power3 = default_power3[i - 14];
}
}
@@ -6290,6 +7669,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
case RF3022:
case RF3320:
case RF3052:
+ case RF3053:
case RF3290:
case RF5360:
case RF5370:
@@ -6328,6 +7708,7 @@ static int rt2800_probe_rt(struct rt2x00_dev *rt2x00dev)
case RT3352:
case RT3390:
case RT3572:
+ case RT3593:
case RT5390:
case RT5392:
case RT5592:
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 6ec739466db4..a94ba447e63c 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -226,4 +226,8 @@ int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey);
void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev);
+void rt2800_get_txwi_rxwi_size(struct rt2x00_dev *rt2x00dev,
+ unsigned short *txwi_size,
+ unsigned short *rxwi_size);
+
#endif /* RT2800LIB_H */
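The helper declared above centralizes the per-chipset TX/RX descriptor window sizes. A sketch of what it plausibly returns, based on the RT5592 branch it replaces in rt2800usb_queue_init() further down; the RT3593 sizes are an assumption, not shown in any hunk here:

void rt2800_get_txwi_rxwi_size(struct rt2x00_dev *rt2x00dev,
			       unsigned short *txwi_size,
			       unsigned short *rxwi_size)
{
	if (rt2x00_rt(rt2x00dev, RT5592)) {
		/* taken from the branch removed in rt2800usb.c below */
		*txwi_size = TXWI_DESC_SIZE_5WORDS;
		*rxwi_size = RXWI_DESC_SIZE_6WORDS;
	} else if (rt2x00_rt(rt2x00dev, RT3593)) {
		*txwi_size = TXWI_DESC_SIZE_4WORDS; /* assumed */
		*rxwi_size = RXWI_DESC_SIZE_5WORDS; /* assumed */
	} else {
		*txwi_size = TXWI_DESC_SIZE_4WORDS;
		*rxwi_size = RXWI_DESC_SIZE_4WORDS;
	}
}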
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 00055627eb8d..f8f2abbfbb65 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -507,9 +507,13 @@ static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
if (rt2x00_is_pcie(rt2x00dev) &&
- (rt2x00_rt(rt2x00dev, RT3572) ||
+ (rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3390) ||
+ rt2x00_rt(rt2x00dev, RT3572) ||
+ rt2x00_rt(rt2x00dev, RT3593) ||
rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392))) {
+ rt2x00_rt(rt2x00dev, RT5392) ||
+ rt2x00_rt(rt2x00dev, RT5592))) {
rt2x00mmio_register_read(rt2x00dev, AUX_CTRL, &reg);
rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
@@ -1189,12 +1193,17 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
static void rt2800pci_queue_init(struct data_queue *queue)
{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ unsigned short txwi_size, rxwi_size;
+
+ rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);
+
switch (queue->qid) {
case QID_RX:
queue->limit = 128;
queue->data_size = AGGREGATION_SIZE;
queue->desc_size = RXD_DESC_SIZE;
- queue->winfo_size = RXWI_DESC_SIZE_4WORDS;
+ queue->winfo_size = rxwi_size;
queue->priv_size = sizeof(struct queue_entry_priv_mmio);
break;
@@ -1205,7 +1214,7 @@ static void rt2800pci_queue_init(struct data_queue *queue)
queue->limit = 64;
queue->data_size = AGGREGATION_SIZE;
queue->desc_size = TXD_DESC_SIZE;
- queue->winfo_size = TXWI_DESC_SIZE_4WORDS;
+ queue->winfo_size = txwi_size;
queue->priv_size = sizeof(struct queue_entry_priv_mmio);
break;
@@ -1213,7 +1222,7 @@ static void rt2800pci_queue_init(struct data_queue *queue)
queue->limit = 8;
queue->data_size = 0; /* No DMA required for beacons */
queue->desc_size = TXD_DESC_SIZE;
- queue->winfo_size = TXWI_DESC_SIZE_4WORDS;
+ queue->winfo_size = txwi_size;
queue->priv_size = sizeof(struct queue_entry_priv_mmio);
break;
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 840833b26bfa..96961b9a395c 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -854,13 +854,7 @@ static void rt2800usb_queue_init(struct data_queue *queue)
struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
unsigned short txwi_size, rxwi_size;
- if (rt2x00_rt(rt2x00dev, RT5592)) {
- txwi_size = TXWI_DESC_SIZE_5WORDS;
- rxwi_size = RXWI_DESC_SIZE_6WORDS;
- } else {
- txwi_size = TXWI_DESC_SIZE_4WORDS;
- rxwi_size = RXWI_DESC_SIZE_4WORDS;
- }
+ rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);
switch (queue->qid) {
case QID_RX:
@@ -977,6 +971,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0411, 0x016f) },
{ USB_DEVICE(0x0411, 0x01a2) },
{ USB_DEVICE(0x0411, 0x01ee) },
+ { USB_DEVICE(0x0411, 0x01a8) },
/* Corega */
{ USB_DEVICE(0x07aa, 0x002f) },
{ USB_DEVICE(0x07aa, 0x003c) },
@@ -1194,6 +1189,40 @@ static struct usb_device_id rt2800usb_device_table[] = {
/* Zinwell */
{ USB_DEVICE(0x5a57, 0x0284) },
#endif
+#ifdef CONFIG_RT2800USB_RT3573
+ /* AirLive */
+ { USB_DEVICE(0x1b75, 0x7733) },
+ /* ASUS */
+ { USB_DEVICE(0x0b05, 0x17bc) },
+ { USB_DEVICE(0x0b05, 0x17ad) },
+ /* Belkin */
+ { USB_DEVICE(0x050d, 0x1103) },
+ /* Cameo */
+ { USB_DEVICE(0x148f, 0xf301) },
+ /* Edimax */
+ { USB_DEVICE(0x7392, 0x7733) },
+ /* Hawking */
+ { USB_DEVICE(0x0e66, 0x0020) },
+ { USB_DEVICE(0x0e66, 0x0021) },
+ /* I-O DATA */
+ { USB_DEVICE(0x04bb, 0x094e) },
+ /* Linksys */
+ { USB_DEVICE(0x13b1, 0x003b) },
+ /* Logitec */
+ { USB_DEVICE(0x0789, 0x016b) },
+ /* NETGEAR */
+ { USB_DEVICE(0x0846, 0x9012) },
+ { USB_DEVICE(0x0846, 0x9019) },
+ /* Planex */
+ { USB_DEVICE(0x2019, 0xed19) },
+ /* Ralink */
+ { USB_DEVICE(0x148f, 0x3573) },
+ /* Sitecom */
+ { USB_DEVICE(0x0df6, 0x0067) },
+ { USB_DEVICE(0x0df6, 0x006a) },
+ /* ZyXEL */
+ { USB_DEVICE(0x0586, 0x3421) },
+#endif
#ifdef CONFIG_RT2800USB_RT53XX
/* Arcadyan */
{ USB_DEVICE(0x043e, 0x7a12) },
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index ee3fc570b11d..fe4c572db52c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -211,6 +211,7 @@ struct channel_info {
short max_power;
short default_power1;
short default_power2;
+ short default_power3;
};
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index b16521e6bf4a..712eea9d398f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -566,10 +566,10 @@ static void rt2x00lib_rxdone_check_ba(struct rt2x00_dev *rt2x00dev,
#undef TID_CHECK
- if (compare_ether_addr(ba->ra, entry->ta))
+ if (!ether_addr_equal(ba->ra, entry->ta))
continue;
- if (compare_ether_addr(ba->ta, entry->ra))
+ if (!ether_addr_equal(ba->ta, entry->ra))
continue;
/* Mark BAR since we received the according BA */
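These conversions flip the test because the two helpers have opposite conventions: compare_ether_addr() was memcmp-like (0 on match), while ether_addr_equal() returns true on match. A standalone illustration of the equivalence (demo helpers only, not the kernel implementations):

#include <assert.h>
#include <string.h>

#define ETH_ALEN 6

/* memcmp-like: 0 on match, like the old compare_ether_addr() */
static int compare_addr(const unsigned char *a, const unsigned char *b)
{
	return memcmp(a, b, ETH_ALEN);
}

/* boolean: nonzero on match, like ether_addr_equal() */
static int addr_equal(const unsigned char *a, const unsigned char *b)
{
	return memcmp(a, b, ETH_ALEN) == 0;
}

int main(void)
{
	unsigned char a[ETH_ALEN] = {0}, b[ETH_ALEN] = {0};

	/* 'if (compare_addr(a, b))' is 'if (!addr_equal(a, b))' */
	assert(!!compare_addr(a, b) == !addr_equal(a, b));
	b[5] = 1;
	assert(!!compare_addr(a, b) == !addr_equal(a, b));
	return 0;
}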
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index aa95c6cf3545..6c8a33b6ee22 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -936,7 +936,7 @@ void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
-void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
+static void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
{
switch (queue->qid) {
case QID_AC_VO:
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 91a04e2b8ece..fc207b268e4f 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -3,10 +3,10 @@
* Linux device driver for RTL8180 / RTL8185
*
* Copyright 2007 Michael Wu <flamingice@sourmilk.net>
- * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
*
* Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
*
* Thanks to Realtek for their support!
*
@@ -32,7 +32,7 @@
#include "grf5101.h"
MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
-MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>");
+MODULE_AUTHOR("Andrea Merello <andrea.merello@gmail.com>");
MODULE_DESCRIPTION("RTL8180 / RTL8185 PCI wireless driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/rtl818x/rtl8180/grf5101.c b/drivers/net/wireless/rtl818x/rtl8180/grf5101.c
index 077ff92cc139..dc845693f321 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/grf5101.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/grf5101.c
@@ -2,7 +2,7 @@
/*
* Radio tuning for GCT GRF5101 on RTL8180
*
- * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
*
* Code from the BSD driver and the rtl8181 project have been
* very useful to understand certain things
diff --git a/drivers/net/wireless/rtl818x/rtl8180/grf5101.h b/drivers/net/wireless/rtl818x/rtl8180/grf5101.h
index 76647111bcff..4d80a2785123 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/grf5101.h
+++ b/drivers/net/wireless/rtl818x/rtl8180/grf5101.h
@@ -4,7 +4,7 @@
/*
* Radio tuning for GCT GRF5101 on RTL8180
*
- * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
*
* Code from the BSD driver and the rtl8181 project have been
* very useful to understand certain things
diff --git a/drivers/net/wireless/rtl818x/rtl8180/max2820.c b/drivers/net/wireless/rtl818x/rtl8180/max2820.c
index 4715000c94dd..a63c443c3c6f 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/max2820.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/max2820.c
@@ -1,7 +1,7 @@
/*
* Radio tuning for Maxim max2820 on RTL8180
*
- * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
*
* Code from the BSD driver and the rtl8181 project have been
* very useful to understand certain things
diff --git a/drivers/net/wireless/rtl818x/rtl8180/max2820.h b/drivers/net/wireless/rtl818x/rtl8180/max2820.h
index 61cf6d1e7d57..8e982b72b690 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/max2820.h
+++ b/drivers/net/wireless/rtl818x/rtl8180/max2820.h
@@ -4,7 +4,7 @@
/*
* Radio tuning for Maxim max2820 on RTL8180
*
- * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
*
* Code from the BSD driver and the rtl8181 project have been
* very useful to understand certain things
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c b/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
index cc2a5412c1f0..ee638d0749d6 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
@@ -3,10 +3,10 @@
* Radio tuning for RTL8225 on RTL8180
*
* Copyright 2007 Michael Wu <flamingice@sourmilk.net>
- * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
*
* Based on the r8180 driver, which is:
- * Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2005 Andrea Merello <andrea.merello@gmail.com>, et al.
*
* Thanks to Realtek for their support!
*
diff --git a/drivers/net/wireless/rtl818x/rtl8180/sa2400.c b/drivers/net/wireless/rtl818x/rtl8180/sa2400.c
index b3ec40f6bd23..7614d9ccc729 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/sa2400.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/sa2400.c
@@ -2,7 +2,7 @@
/*
* Radio tuning for Philips SA2400 on RTL8180
*
- * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
*
* Code from the BSD driver and the rtl8181 project have been
* very useful to understand certain things
diff --git a/drivers/net/wireless/rtl818x/rtl8180/sa2400.h b/drivers/net/wireless/rtl818x/rtl8180/sa2400.h
index a4aaa0d413f1..fb0093f35148 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/sa2400.h
+++ b/drivers/net/wireless/rtl818x/rtl8180/sa2400.h
@@ -4,7 +4,7 @@
/*
* Radio tuning for Philips SA2400 on RTL8180
*
- * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
*
* Code from the BSD driver and the rtl8181 project have been
* very useful to understand certain things
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index f49220e234b0..841fb9dfc9da 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -2,10 +2,10 @@
* Linux device driver for RTL8187
*
* Copyright 2007 Michael Wu <flamingice@sourmilk.net>
- * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
*
* Based on the r8187 driver, which is:
- * Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2005 Andrea Merello <andrea.merello@gmail.com>, et al.
*
* The driver was extended to the RTL8187B in 2008 by:
* Herton Ronaldo Krzesinski <herton@mandriva.com.br>
@@ -37,7 +37,7 @@
#include "rfkill.h"
MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
-MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>");
+MODULE_AUTHOR("Andrea Merello <andrea.merello@gmail.com>");
MODULE_AUTHOR("Herton Ronaldo Krzesinski <herton@mandriva.com.br>");
MODULE_AUTHOR("Hin-Tak Leung <htl10@users.sourceforge.net>");
MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
index e19a20a8e955..56aee067f324 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
@@ -2,10 +2,10 @@
* Definitions for RTL8187 hardware
*
* Copyright 2007 Michael Wu <flamingice@sourmilk.net>
- * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
*
* Based on the r8187 driver, which is:
- * Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2005 Andrea Merello <andrea.merello@gmail.com>, et al.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c
index f0bf35fedbaf..a26193a04447 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c
@@ -2,10 +2,10 @@
* Radio tuning for RTL8225 on RTL8187
*
* Copyright 2007 Michael Wu <flamingice@sourmilk.net>
- * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
*
* Based on the r8187 driver, which is:
- * Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2005 Andrea Merello <andrea.merello@gmail.com>, et al.
*
* Magic delays, register offsets, and phy value tables below are
* taken from the original r8187 driver sources. Thanks to Realtek
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.h
index 20c5b6ead0f6..141afb09a5b4 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.h
+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.h
@@ -2,10 +2,10 @@
* Radio tuning definitions for RTL8225 on RTL8187
*
* Copyright 2007 Michael Wu <flamingice@sourmilk.net>
- * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
*
* Based on the r8187 driver, which is:
- * Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2005 Andrea Merello <andrea.merello@gmail.com>, et al.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/net/wireless/rtl818x/rtl818x.h b/drivers/net/wireless/rtl818x/rtl818x.h
index 1615f63b02f6..ce23dfd42381 100644
--- a/drivers/net/wireless/rtl818x/rtl818x.h
+++ b/drivers/net/wireless/rtl818x/rtl818x.h
@@ -2,10 +2,10 @@
* Definitions for RTL818x hardware
*
* Copyright 2007 Michael Wu <flamingice@sourmilk.net>
- * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
*
* Based on the r8187 driver, which is:
- * Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2005 Andrea Merello <andrea.merello@gmail.com>, et al.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 7651f5acc14b..8bb4a9a01a18 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -1304,7 +1304,7 @@ void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb)
return;
/* and only beacons from the associated BSSID, please */
- if (compare_ether_addr(hdr->addr3, rtlpriv->mac80211.bssid))
+ if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
return;
rtlpriv->link_info.bcn_rx_inperiod++;
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index 298b615964e8..0d81f766fd0f 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -688,7 +688,7 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
find_p2p_ie = true;
/*to find noa ie*/
while (ie + 1 < end) {
- noa_len = READEF2BYTE(&ie[1]);
+ noa_len = READEF2BYTE((__le16 *)&ie[1]);
if (ie + 3 + ie[1] > end)
return;
@@ -717,13 +717,13 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
READEF1BYTE(ie+index);
index += 1;
p2pinfo->noa_duration[i] =
- READEF4BYTE(ie+index);
+ READEF4BYTE((__le32 *)(ie + index));
index += 4;
p2pinfo->noa_interval[i] =
- READEF4BYTE(ie+index);
+ READEF4BYTE((__le32 *)(ie + index));
index += 4;
p2pinfo->noa_start_time[i] =
- READEF4BYTE(ie+index);
+ READEF4BYTE((__le32 *)(ie + index));
index += 4;
}
@@ -780,7 +780,7 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "action frame find P2P IE.\n");
/*to find noa ie*/
while (ie + 1 < end) {
- noa_len = READEF2BYTE(&ie[1]);
+ noa_len = READEF2BYTE((__le16 *)&ie[1]);
if (ie + 3 + ie[1] > end)
return;
@@ -809,13 +809,13 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
READEF1BYTE(ie+index);
index += 1;
p2pinfo->noa_duration[i] =
- READEF4BYTE(ie+index);
+ READEF4BYTE((__le32 *)(ie + index));
index += 4;
p2pinfo->noa_interval[i] =
- READEF4BYTE(ie+index);
+ READEF4BYTE((__le32 *)(ie + index));
index += 4;
p2pinfo->noa_start_time[i] =
- READEF4BYTE(ie+index);
+ READEF4BYTE((__le32 *)(ie + index));
index += 4;
}
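Note the parenthesization above: a cast binds tighter than '+', so '(__le32 *)(ie + index)' advances by index bytes (matching the original 'READEF4BYTE(ie+index)'), whereas '(__le32 *)ie + index' would advance by 4*index bytes. A standalone demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t buf[64];
	int index = 8;

	/* byte offset first, then reinterpret: buf + 8 bytes */
	const uint32_t *right = (const uint32_t *)(buf + index);
	/* reinterpret first, then element offset: buf + 32 bytes */
	const uint32_t *wrong = (const uint32_t *)buf + index;

	printf("%td %td\n", (const uint8_t *)right - buf,
	       (const uint8_t *)wrong - buf); /* prints: 8 32 */
	return 0;
}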
@@ -923,7 +923,7 @@ void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len)
return;
/* and only beacons from the associated BSSID, please */
- if (compare_ether_addr(hdr->addr3, rtlpriv->mac80211.bssid))
+ if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
return;
/* check if this really is a beacon */
diff --git a/drivers/net/wireless/rtlwifi/rc.c b/drivers/net/wireless/rtlwifi/rc.c
index f9f059dadb73..a98acefb8c06 100644
--- a/drivers/net/wireless/rtlwifi/rc.c
+++ b/drivers/net/wireless/rtlwifi/rc.c
@@ -218,6 +218,7 @@ static void rtl_tx_status(void *ppriv,
static void rtl_rate_init(void *ppriv,
struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *priv_sta)
{
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
index a8871d66d56a..68685a898257 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
@@ -305,13 +305,14 @@ static void _rtl88ee_translate_rx_signal_stuff(struct ieee80211_hw *hw,
psaddr = ieee80211_get_SA(hdr);
memcpy(pstatus->psaddr, psaddr, ETH_ALEN);
- addr = (!compare_ether_addr(mac->bssid, (ufc & IEEE80211_FCTL_TODS) ?
- hdr->addr1 : (ufc & IEEE80211_FCTL_FROMDS) ?
- hdr->addr2 : hdr->addr3));
+ addr = ether_addr_equal(mac->bssid,
+ (ufc & IEEE80211_FCTL_TODS) ? hdr->addr1 :
+ (ufc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 :
+ hdr->addr3);
match_bssid = ((IEEE80211_FTYPE_CTL != type) && (!pstatus->hwerror) &&
(!pstatus->crc) && (!pstatus->icv)) && addr;
- addr = (!compare_ether_addr(praddr, rtlefuse->dev_addr));
+ addr = ether_addr_equal(praddr, rtlefuse->dev_addr);
packet_toself = match_bssid && addr;
if (ieee80211_is_beacon(fc))
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
index 8e3ec1e25644..0f7812e0c8aa 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
@@ -109,5 +109,8 @@ void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
u8 element_id, u32 cmd_len, u8 *p_cmdbuffer);
bool rtl92cu_phy_mac_config(struct ieee80211_hw *hw);
+void rtl92cu_update_hal_rate_tbl(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u8 rssi_level);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
index 262e1e4c6e5b..a1310abd0d54 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
@@ -49,8 +49,5 @@ bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 regaddr, u32 bitmask);
void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
-void rtl92cu_update_hal_rate_tbl(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta,
- u8 rssi_level);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
index c72758d8f4ed..bcd82a1020a5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
@@ -255,16 +255,16 @@ static void _rtl8723ae_translate_rx_signal_stuff(struct ieee80211_hw *hw,
type = WLAN_FC_GET_TYPE(fc);
praddr = hdr->addr1;
- packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) &&
- (!compare_ether_addr(mac->bssid,
- (le16_to_cpu(fc) & IEEE80211_FCTL_TODS) ?
- hdr->addr1 : (le16_to_cpu(fc) &
- IEEE80211_FCTL_FROMDS) ?
- hdr->addr2 : hdr->addr3)) && (!pstatus->hwerror) &&
- (!pstatus->crc) && (!pstatus->icv));
-
- packet_toself = packet_matchbssid &&
- (!compare_ether_addr(praddr, rtlefuse->dev_addr));
+ packet_matchbssid =
+ ((IEEE80211_FTYPE_CTL != type) &&
+ ether_addr_equal(mac->bssid,
+ (le16_to_cpu(fc) & IEEE80211_FCTL_TODS) ? hdr->addr1 :
+ (le16_to_cpu(fc) & IEEE80211_FCTL_FROMDS) ? hdr->addr2 :
+ hdr->addr3) &&
+ (!pstatus->hwerror) && (!pstatus->crc) && (!pstatus->icv));
+
+ packet_toself = (packet_matchbssid &&
+ ether_addr_equal(praddr, rtlefuse->dev_addr));
if (ieee80211_is_beacon(fc))
packet_beacon = true;
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index b8db55c868c7..38995f90040d 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -1315,7 +1315,7 @@ static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
#ifdef CONFIG_PM
static int
-wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern *p)
+wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
{
int num_fields = 0, in_field = 0, fields_size = 0;
int i, pattern_len = 0;
@@ -1458,9 +1458,9 @@ void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
* Allocates an RX filter returned through f
* which needs to be freed using rx_filter_free()
*/
-static int wl1271_convert_wowlan_pattern_to_rx_filter(
- struct cfg80211_wowlan_trig_pkt_pattern *p,
- struct wl12xx_rx_filter **f)
+static int
+wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
+ struct wl12xx_rx_filter **f)
{
int i, j, ret = 0;
struct wl12xx_rx_filter *filter;
@@ -1562,7 +1562,7 @@ static int wl1271_configure_wowlan(struct wl1271 *wl,
/* Translate WoWLAN patterns into filters */
for (i = 0; i < wow->n_patterns; i++) {
- struct cfg80211_wowlan_trig_pkt_pattern *p;
+ struct cfg80211_pkt_pattern *p;
struct wl12xx_rx_filter *filter = NULL;
p = &wow->patterns[i];
@@ -5623,7 +5623,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
wl->hw->wiphy->max_remain_on_channel_duration = 5000;
wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
- WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+ WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
/* make sure all our channels fit in the scanned_ch bitmask */
BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index f3442762d884..527590f2adfb 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -356,7 +356,8 @@ out:
return ret;
}
-int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len)
+int wl1271_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len)
{
struct wl1271 *wl = hw->priv;
struct nlattr *tb[WL1271_TM_ATTR_MAX + 1];
diff --git a/drivers/net/wireless/ti/wlcore/testmode.h b/drivers/net/wireless/ti/wlcore/testmode.h
index 8071654259ea..61d8434d859a 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.h
+++ b/drivers/net/wireless/ti/wlcore/testmode.h
@@ -26,6 +26,7 @@
#include <net/mac80211.h>
-int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len);
+int wl1271_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len);
#endif /* __WL1271_TESTMODE_H__ */
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index b8ba1f925e75..d39c4178c33a 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -75,8 +75,10 @@ static int zd1201_fw_upload(struct usb_device *dev, int apfw)
len = fw_entry->size;
buf = kmalloc(1024, GFP_ATOMIC);
- if (!buf)
+ if (!buf) {
+ err = -ENOMEM;
goto exit;
+ }
while (len > 0) {
int translen = (len > 1024) ? 1024 : len;
@@ -1764,8 +1766,10 @@ static int zd1201_probe(struct usb_interface *interface,
zd->endp_out2 = 2;
zd->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
zd->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!zd->rx_urb || !zd->tx_urb)
+ if (!zd->rx_urb || !zd->tx_urb) {
+ err = -ENOMEM;
goto err_zd;
+ }
mdelay(100);
err = zd1201_drvr_start(zd);
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 8a4d77ee9c5b..a1977430ddfb 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -45,31 +45,109 @@
#include <xen/grant_table.h>
#include <xen/xenbus.h>
-struct xen_netbk;
+typedef unsigned int pending_ring_idx_t;
+#define INVALID_PENDING_RING_IDX (~0U)
+
+/* The head field in pending_tx_info indicates whether this entry is
+ * the head of one or more coalesced tx requests.
+ *
+ * When head != INVALID_PENDING_RING_IDX, it marks the start of a new
+ * group of tx requests and the end of the previous one.
+ *
+ * An example sequence of head fields (I = INVALID_PENDING_RING_IDX):
+ *
+ * ...|0 I I I|5 I|9 I I I|...
+ * -->|<-INUSE----------------
+ *
+ * After consuming the first slot(s) we have:
+ *
+ * ...|V V V V|5 I|9 I I I|...
+ * -----FREE->|<-INUSE--------
+ *
+ * where V stands for "valid pending ring index": such entries are
+ * considered free and may hold any value other than
+ * INVALID_PENDING_RING_IDX (in practice we use 0).
+ *
+ * An in-use head value (0, 5 and 9 in the example above) is the
+ * index into the pending_tx_info and mmap_pages arrays.
+ */
+struct pending_tx_info {
+ struct xen_netif_tx_request req; /* coalesced tx request */
+ pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
+ * if it is head of one or more tx
+ * reqs
+ */
+};
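The convention above reduces to a single comparison; the netback.c changes further down introduce exactly this predicate (reproduced here for reference):

static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx)
{
	/* a valid head index marks the first slot of a coalesced group */
	return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
}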
+
+#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
+#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
+
+struct xenvif_rx_meta {
+ int id;
+ int size;
+ int gso_size;
+};
+
+/* Discriminate from any valid pending_idx value. */
+#define INVALID_PENDING_IDX 0xFFFF
+
+#define MAX_BUFFER_OFFSET PAGE_SIZE
+
+#define MAX_PENDING_REQS 256
struct xenvif {
/* Unique identifier for this interface. */
domid_t domid;
unsigned int handle;
- /* Reference to netback processing backend. */
- struct xen_netbk *netbk;
+ /* Use NAPI for guest TX */
+ struct napi_struct napi;
+ /* When feature-split-event-channels = 0, tx_irq = rx_irq. */
+ unsigned int tx_irq;
+ /* Only used when feature-split-event-channels = 1 */
+ char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
+ struct xen_netif_tx_back_ring tx;
+ struct sk_buff_head tx_queue;
+ struct page *mmap_pages[MAX_PENDING_REQS];
+ pending_ring_idx_t pending_prod;
+ pending_ring_idx_t pending_cons;
+ u16 pending_ring[MAX_PENDING_REQS];
+ struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
+
+ /* Coalescing tx requests before copying makes the number of grant
+ * copy ops greater than or equal to the number of slots required. In
+ * the worst case a tx request consumes 2 gnttab_copy operations.
+ */
+ struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];
- u8 fe_dev_addr[6];
+ /* Use kthread for guest RX */
+ struct task_struct *task;
+ wait_queue_head_t wq;
/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
- unsigned int tx_irq;
unsigned int rx_irq;
/* Only used when feature-split-event-channels = 1 */
- char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
+ struct xen_netif_rx_back_ring rx;
+ struct sk_buff_head rx_queue;
- /* List of frontends to notify after a batch of frames sent. */
- struct list_head notify_list;
+ /* Allow xenvif_start_xmit() to peek ahead in the rx request
+ * ring. This is a prediction of what rx_req_cons will be
+ * once all queued skbs are put on the ring.
+ */
+ RING_IDX rx_req_cons_peek;
+
+ /* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
+ * head/fragment page uses 2 copy operations because it
+ * straddles two buffers in the frontend.
+ */
+ struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
+ struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
- /* The shared rings and indexes. */
- struct xen_netif_tx_back_ring tx;
- struct xen_netif_rx_back_ring rx;
+
+ u8 fe_dev_addr[6];
/* Frontend feature information. */
u8 can_sg:1;
@@ -80,13 +158,6 @@ struct xenvif {
/* Internal feature information. */
u8 can_queue:1; /* can queue packets for receiver? */
- /*
- * Allow xenvif_start_xmit() to peek ahead in the rx request
- * ring. This is a prediction of what rx_req_cons will be
- * once all queued skbs are put on the ring.
- */
- RING_IDX rx_req_cons_peek;
-
/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
unsigned long credit_bytes;
unsigned long credit_usec;
@@ -97,11 +168,7 @@ struct xenvif {
unsigned long rx_gso_checksum_fixup;
/* Miscellaneous private stuff. */
- struct list_head schedule_list;
- atomic_t refcnt;
struct net_device *dev;
-
- wait_queue_head_t waiting_to_free;
};
static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
@@ -109,9 +176,6 @@ static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
return to_xenbus_device(vif->dev->dev.parent);
}
-#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
-#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
-
struct xenvif *xenvif_alloc(struct device *parent,
domid_t domid,
unsigned int handle);
@@ -121,39 +185,26 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
unsigned int rx_evtchn);
void xenvif_disconnect(struct xenvif *vif);
-void xenvif_get(struct xenvif *vif);
-void xenvif_put(struct xenvif *vif);
-
int xenvif_xenbus_init(void);
void xenvif_xenbus_fini(void);
int xenvif_schedulable(struct xenvif *vif);
-int xen_netbk_rx_ring_full(struct xenvif *vif);
+int xenvif_rx_ring_full(struct xenvif *vif);
-int xen_netbk_must_stop_queue(struct xenvif *vif);
+int xenvif_must_stop_queue(struct xenvif *vif);
/* (Un)Map communication rings. */
-void xen_netbk_unmap_frontend_rings(struct xenvif *vif);
-int xen_netbk_map_frontend_rings(struct xenvif *vif,
- grant_ref_t tx_ring_ref,
- grant_ref_t rx_ring_ref);
-
-/* (De)Register a xenvif with the netback backend. */
-void xen_netbk_add_xenvif(struct xenvif *vif);
-void xen_netbk_remove_xenvif(struct xenvif *vif);
-
-/* (De)Schedule backend processing for a xenvif */
-void xen_netbk_schedule_xenvif(struct xenvif *vif);
-void xen_netbk_deschedule_xenvif(struct xenvif *vif);
+void xenvif_unmap_frontend_rings(struct xenvif *vif);
+int xenvif_map_frontend_rings(struct xenvif *vif,
+ grant_ref_t tx_ring_ref,
+ grant_ref_t rx_ring_ref);
/* Check for SKBs from frontend and schedule backend processing */
-void xen_netbk_check_rx_xenvif(struct xenvif *vif);
-/* Receive an SKB from the frontend */
-void xenvif_receive_skb(struct xenvif *vif, struct sk_buff *skb);
+void xenvif_check_rx_xenvif(struct xenvif *vif);
/* Queue an SKB for transmission to the frontend */
-void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
+void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
/* Notify xenvif that ring now has space to send an skb to the frontend */
void xenvif_notify_tx_completion(struct xenvif *vif);
@@ -161,7 +212,12 @@ void xenvif_notify_tx_completion(struct xenvif *vif);
void xenvif_carrier_off(struct xenvif *vif);
/* Returns number of ring slots required to send an skb to the frontend */
-unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
+unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
+
+int xenvif_tx_action(struct xenvif *vif, int budget);
+void xenvif_rx_action(struct xenvif *vif);
+
+int xenvif_kthread(void *data);
extern bool separate_tx_rx_irq;
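xenvif_kthread() declared above is the per-vif RX worker that replaces the shared xen_netbk thread. A hedged sketch of its loop, assuming it waits on vif->wq (initialized in the interface.c hunk below) and drains via xenvif_rx_action(); the real body in netback.c may differ in detail:

int xenvif_kthread(void *data)
{
	struct xenvif *vif = data;

	while (!kthread_should_stop()) {
		/* sleep until there is RX work or we are asked to stop */
		wait_event_interruptible(vif->wq,
					 rx_work_todo(vif) ||
					 kthread_should_stop());
		if (kthread_should_stop())
			break;

		if (rx_work_todo(vif))
			xenvif_rx_action(vif);

		cond_resched();
	}

	return 0;
}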
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 087d2db0389d..625c6f49cfba 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -30,6 +30,7 @@
#include "common.h"
+#include <linux/kthread.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
@@ -38,17 +39,7 @@
#include <asm/xen/hypercall.h>
#define XENVIF_QUEUE_LENGTH 32
-
-void xenvif_get(struct xenvif *vif)
-{
- atomic_inc(&vif->refcnt);
-}
-
-void xenvif_put(struct xenvif *vif)
-{
- if (atomic_dec_and_test(&vif->refcnt))
- wake_up(&vif->waiting_to_free);
-}
+#define XENVIF_NAPI_WEIGHT 64
int xenvif_schedulable(struct xenvif *vif)
{
@@ -57,28 +48,62 @@ int xenvif_schedulable(struct xenvif *vif)
static int xenvif_rx_schedulable(struct xenvif *vif)
{
- return xenvif_schedulable(vif) && !xen_netbk_rx_ring_full(vif);
+ return xenvif_schedulable(vif) && !xenvif_rx_ring_full(vif);
}
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
struct xenvif *vif = dev_id;
- if (vif->netbk == NULL)
- return IRQ_HANDLED;
-
- xen_netbk_schedule_xenvif(vif);
+ if (RING_HAS_UNCONSUMED_REQUESTS(&vif->tx))
+ napi_schedule(&vif->napi);
return IRQ_HANDLED;
}
+static int xenvif_poll(struct napi_struct *napi, int budget)
+{
+ struct xenvif *vif = container_of(napi, struct xenvif, napi);
+ int work_done;
+
+ work_done = xenvif_tx_action(vif, budget);
+
+ if (work_done < budget) {
+ int more_to_do = 0;
+ unsigned long flags;
+
+ /* It is necessary to disable IRQs before calling
+ * RING_HAS_UNCONSUMED_REQUESTS, otherwise we might
+ * lose an event from the frontend.
+ *
+ * Consider:
+ * RING_HAS_UNCONSUMED_REQUESTS
+ * <frontend generates event to trigger napi_schedule>
+ * __napi_complete
+ *
+ * This handler is still in the scheduled state, so the
+ * event has no effect at all. After __napi_complete
+ * the handler is descheduled and cannot be
+ * scheduled again: the event is lost and the ring
+ * stalls completely.
+ */
+
+ local_irq_save(flags);
+
+ RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
+ if (!more_to_do)
+ __napi_complete(napi);
+
+ local_irq_restore(flags);
+ }
+
+ return work_done;
+}
+
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
struct xenvif *vif = dev_id;
- if (vif->netbk == NULL)
- return IRQ_HANDLED;
-
if (xenvif_rx_schedulable(vif))
netif_wake_queue(vif->dev);
@@ -99,7 +124,8 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
BUG_ON(skb->dev != dev);
- if (vif->netbk == NULL)
+ /* Drop the packet if vif is not ready */
+ if (vif->task == NULL)
goto drop;
/* Drop the packet if the target domain has no receive buffers. */
@@ -107,13 +133,12 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
/* Reserve ring slots for the worst-case number of fragments. */
- vif->rx_req_cons_peek += xen_netbk_count_skb_slots(vif, skb);
- xenvif_get(vif);
+ vif->rx_req_cons_peek += xenvif_count_skb_slots(vif, skb);
- if (vif->can_queue && xen_netbk_must_stop_queue(vif))
+ if (vif->can_queue && xenvif_must_stop_queue(vif))
netif_stop_queue(dev);
- xen_netbk_queue_tx_skb(vif, skb);
+ xenvif_queue_tx_skb(vif, skb);
return NETDEV_TX_OK;
@@ -123,11 +148,6 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-void xenvif_receive_skb(struct xenvif *vif, struct sk_buff *skb)
-{
- netif_rx_ni(skb);
-}
-
void xenvif_notify_tx_completion(struct xenvif *vif)
{
if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif))
@@ -142,21 +162,20 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
static void xenvif_up(struct xenvif *vif)
{
- xen_netbk_add_xenvif(vif);
+ napi_enable(&vif->napi);
enable_irq(vif->tx_irq);
if (vif->tx_irq != vif->rx_irq)
enable_irq(vif->rx_irq);
- xen_netbk_check_rx_xenvif(vif);
+ xenvif_check_rx_xenvif(vif);
}
static void xenvif_down(struct xenvif *vif)
{
+ napi_disable(&vif->napi);
disable_irq(vif->tx_irq);
if (vif->tx_irq != vif->rx_irq)
disable_irq(vif->rx_irq);
del_timer_sync(&vif->credit_timeout);
- xen_netbk_deschedule_xenvif(vif);
- xen_netbk_remove_xenvif(vif);
}
static int xenvif_open(struct net_device *dev)
@@ -272,11 +291,12 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
struct net_device *dev;
struct xenvif *vif;
char name[IFNAMSIZ] = {};
+ int i;
snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup);
if (dev == NULL) {
- pr_warn("Could not allocate netdev\n");
+ pr_warn("Could not allocate netdev for %s\n", name);
return ERR_PTR(-ENOMEM);
}
@@ -285,14 +305,9 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
vif = netdev_priv(dev);
vif->domid = domid;
vif->handle = handle;
- vif->netbk = NULL;
vif->can_sg = 1;
vif->csum = 1;
- atomic_set(&vif->refcnt, 1);
- init_waitqueue_head(&vif->waiting_to_free);
vif->dev = dev;
- INIT_LIST_HEAD(&vif->schedule_list);
- INIT_LIST_HEAD(&vif->notify_list);
vif->credit_bytes = vif->remaining_credit = ~0UL;
vif->credit_usec = 0UL;
@@ -307,6 +322,16 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
+ skb_queue_head_init(&vif->rx_queue);
+ skb_queue_head_init(&vif->tx_queue);
+
+ vif->pending_cons = 0;
+ vif->pending_prod = MAX_PENDING_REQS;
+ for (i = 0; i < MAX_PENDING_REQS; i++)
+ vif->pending_ring[i] = i;
+ for (i = 0; i < MAX_PENDING_REQS; i++)
+ vif->mmap_pages[i] = NULL;
+
/*
* Initialise a dummy MAC address. We choose the numerically
* largest non-broadcast address to prevent the address getting
@@ -316,6 +341,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
memset(dev->dev_addr, 0xFF, ETH_ALEN);
dev->dev_addr[0] &= ~0x01;
+ netif_napi_add(dev, &vif->napi, xenvif_poll, XENVIF_NAPI_WEIGHT);
+
netif_carrier_off(dev);
err = register_netdev(dev);
@@ -341,7 +368,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
__module_get(THIS_MODULE);
- err = xen_netbk_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
+ err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
if (err < 0)
goto err;
@@ -377,7 +404,14 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
disable_irq(vif->rx_irq);
}
- xenvif_get(vif);
+ init_waitqueue_head(&vif->wq);
+ vif->task = kthread_create(xenvif_kthread,
+ (void *)vif, vif->dev->name);
+ if (IS_ERR(vif->task)) {
+ pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
+ err = PTR_ERR(vif->task);
+ goto err_rx_unbind;
+ }
rtnl_lock();
if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
@@ -388,12 +422,18 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
xenvif_up(vif);
rtnl_unlock();
+ wake_up_process(vif->task);
+
return 0;
+
+err_rx_unbind:
+ unbind_from_irqhandler(vif->rx_irq, vif);
+ vif->rx_irq = 0;
err_tx_unbind:
unbind_from_irqhandler(vif->tx_irq, vif);
vif->tx_irq = 0;
err_unmap:
- xen_netbk_unmap_frontend_rings(vif);
+ xenvif_unmap_frontend_rings(vif);
err:
module_put(THIS_MODULE);
return err;
@@ -408,7 +448,6 @@ void xenvif_carrier_off(struct xenvif *vif)
if (netif_running(dev))
xenvif_down(vif);
rtnl_unlock();
- xenvif_put(vif);
}
void xenvif_disconnect(struct xenvif *vif)
@@ -422,9 +461,6 @@ void xenvif_disconnect(struct xenvif *vif)
if (netif_carrier_ok(vif->dev))
xenvif_carrier_off(vif);
- atomic_dec(&vif->refcnt);
- wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
-
if (vif->tx_irq) {
if (vif->tx_irq == vif->rx_irq)
unbind_from_irqhandler(vif->tx_irq, vif);
@@ -438,9 +474,14 @@ void xenvif_disconnect(struct xenvif *vif)
need_module_put = 1;
}
+ if (vif->task)
+ kthread_stop(vif->task);
+
+ netif_napi_del(&vif->napi);
+
unregister_netdev(vif->dev);
- xen_netbk_unmap_frontend_rings(vif);
+ xenvif_unmap_frontend_rings(vif);
free_netdev(vif->dev);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 64828de25d9a..956130c70036 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -70,131 +70,26 @@ module_param(fatal_skb_slots, uint, 0444);
*/
#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
-typedef unsigned int pending_ring_idx_t;
-#define INVALID_PENDING_RING_IDX (~0U)
-
-struct pending_tx_info {
- struct xen_netif_tx_request req; /* coalesced tx request */
- struct xenvif *vif;
- pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
- * if it is head of one or more tx
- * reqs
- */
-};
-
-struct netbk_rx_meta {
- int id;
- int size;
- int gso_size;
-};
-
-#define MAX_PENDING_REQS 256
-
-/* Discriminate from any valid pending_idx value. */
-#define INVALID_PENDING_IDX 0xFFFF
-
-#define MAX_BUFFER_OFFSET PAGE_SIZE
-
-/* extra field used in struct page */
-union page_ext {
- struct {
-#if BITS_PER_LONG < 64
-#define IDX_WIDTH 8
-#define GROUP_WIDTH (BITS_PER_LONG - IDX_WIDTH)
- unsigned int group:GROUP_WIDTH;
- unsigned int idx:IDX_WIDTH;
-#else
- unsigned int group, idx;
-#endif
- } e;
- void *mapping;
-};
-
-struct xen_netbk {
- wait_queue_head_t wq;
- struct task_struct *task;
-
- struct sk_buff_head rx_queue;
- struct sk_buff_head tx_queue;
-
- struct timer_list net_timer;
-
- struct page *mmap_pages[MAX_PENDING_REQS];
-
- pending_ring_idx_t pending_prod;
- pending_ring_idx_t pending_cons;
- struct list_head net_schedule_list;
-
- /* Protect the net_schedule_list in netif. */
- spinlock_t net_schedule_list_lock;
-
- atomic_t netfront_count;
-
- struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
- /* Coalescing tx requests before copying makes number of grant
- * copy ops greater or equal to number of slots required. In
- * worst case a tx request consumes 2 gnttab_copy.
- */
- struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];
-
- u16 pending_ring[MAX_PENDING_REQS];
-
- /*
- * Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
- * head/fragment page uses 2 copy operations because it
- * straddles two buffers in the frontend.
- */
- struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
- struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
-};
-
-static struct xen_netbk *xen_netbk;
-static int xen_netbk_group_nr;
-
/*
* If head != INVALID_PENDING_RING_IDX, it means this tx request is head of
* one or more merged tx requests, otherwise it is the continuation of
* previous tx request.
*/
-static inline int pending_tx_is_head(struct xen_netbk *netbk, RING_IDX idx)
-{
- return netbk->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
-}
-
-void xen_netbk_add_xenvif(struct xenvif *vif)
+static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx)
{
- int i;
- int min_netfront_count;
- int min_group = 0;
- struct xen_netbk *netbk;
-
- min_netfront_count = atomic_read(&xen_netbk[0].netfront_count);
- for (i = 0; i < xen_netbk_group_nr; i++) {
- int netfront_count = atomic_read(&xen_netbk[i].netfront_count);
- if (netfront_count < min_netfront_count) {
- min_group = i;
- min_netfront_count = netfront_count;
- }
- }
-
- netbk = &xen_netbk[min_group];
-
- vif->netbk = netbk;
- atomic_inc(&netbk->netfront_count);
+ return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
}
-void xen_netbk_remove_xenvif(struct xenvif *vif)
-{
- struct xen_netbk *netbk = vif->netbk;
- vif->netbk = NULL;
- atomic_dec(&netbk->netfront_count);
-}
+static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
+ u8 status);
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
- u8 status);
static void make_tx_response(struct xenvif *vif,
struct xen_netif_tx_request *txp,
s8 st);
+
+static inline int tx_work_todo(struct xenvif *vif);
+static inline int rx_work_todo(struct xenvif *vif);
+
static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
u16 id,
s8 st,
@@ -202,55 +97,16 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
u16 size,
u16 flags);
-static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
+static inline unsigned long idx_to_pfn(struct xenvif *vif,
u16 idx)
{
- return page_to_pfn(netbk->mmap_pages[idx]);
+ return page_to_pfn(vif->mmap_pages[idx]);
}
-static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
+static inline unsigned long idx_to_kaddr(struct xenvif *vif,
u16 idx)
{
- return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
-}
-
-/* extra field used in struct page */
-static inline void set_page_ext(struct page *pg, struct xen_netbk *netbk,
- unsigned int idx)
-{
- unsigned int group = netbk - xen_netbk;
- union page_ext ext = { .e = { .group = group + 1, .idx = idx } };
-
- BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping));
- pg->mapping = ext.mapping;
-}
-
-static int get_page_ext(struct page *pg,
- unsigned int *pgroup, unsigned int *pidx)
-{
- union page_ext ext = { .mapping = pg->mapping };
- struct xen_netbk *netbk;
- unsigned int group, idx;
-
- group = ext.e.group - 1;
-
- if (group < 0 || group >= xen_netbk_group_nr)
- return 0;
-
- netbk = &xen_netbk[group];
-
- idx = ext.e.idx;
-
- if ((idx < 0) || (idx >= MAX_PENDING_REQS))
- return 0;
-
- if (netbk->mmap_pages[idx] != pg)
- return 0;
-
- *pgroup = group;
- *pidx = idx;
-
- return 1;
+ return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
}
/*
@@ -278,15 +134,10 @@ static inline pending_ring_idx_t pending_index(unsigned i)
return i & (MAX_PENDING_REQS-1);
}
-static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk)
+static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
{
return MAX_PENDING_REQS -
- netbk->pending_prod + netbk->pending_cons;
-}
-
-static void xen_netbk_kick_thread(struct xen_netbk *netbk)
-{
- wake_up(&netbk->wq);
+ vif->pending_prod + vif->pending_cons;
}
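
With the pending state moved onto the vif, the ring arithmetic is worth spelling out: the indices are free-running counters and pending_index() masks them into a power-of-two ring, so nr_pending_reqs() is plain producer/consumer arithmetic. A small self-contained sketch (assuming only that MAX_PENDING_REQS stays 256, a power of two; illustrative, not part of the commit):

    #include <stdio.h>

    #define MAX_PENDING_REQS 256    /* must remain a power of two for the mask */

    static unsigned pending_index(unsigned i)
    {
            return i & (MAX_PENDING_REQS - 1);
    }

    int main(void)
    {
            unsigned pending_prod = 266;    /* counters run free, wrap via the mask */
            unsigned pending_cons = 260;

            printf("slot=%u\n", pending_index(pending_prod));           /* 10 */
            printf("in use=%u\n",
                   MAX_PENDING_REQS - pending_prod + pending_cons);     /* 250 */
            return 0;
    }
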
static int max_required_rx_slots(struct xenvif *vif)
@@ -300,7 +151,7 @@ static int max_required_rx_slots(struct xenvif *vif)
return max;
}
-int xen_netbk_rx_ring_full(struct xenvif *vif)
+int xenvif_rx_ring_full(struct xenvif *vif)
{
RING_IDX peek = vif->rx_req_cons_peek;
RING_IDX needed = max_required_rx_slots(vif);
@@ -309,16 +160,16 @@ int xen_netbk_rx_ring_full(struct xenvif *vif)
((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
}
-int xen_netbk_must_stop_queue(struct xenvif *vif)
+int xenvif_must_stop_queue(struct xenvif *vif)
{
- if (!xen_netbk_rx_ring_full(vif))
+ if (!xenvif_rx_ring_full(vif))
return 0;
vif->rx.sring->req_event = vif->rx_req_cons_peek +
max_required_rx_slots(vif);
mb(); /* request notification /then/ check the queue */
- return xen_netbk_rx_ring_full(vif);
+ return xenvif_rx_ring_full(vif);
}
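
The must-stop check re-tests ring fullness after writing req_event, closing the race with the frontend posting new requests between the two tests. The fullness test itself is counter arithmetic; a worked instance (XEN_NETIF_RX_RING_SIZE is assumed to be 256 here, illustrative only):

    #include <stdio.h>

    #define RING_SIZE 256   /* assumed value of XEN_NETIF_RX_RING_SIZE */

    int main(void)
    {
            unsigned rsp_prod_pvt = 1000;   /* responses produced so far */
            unsigned peek = 1250;           /* requests already spoken for */
            unsigned needed = 10;           /* worst-case slots for the next skb */

            /* Slots still available once `peek` requests are consumed. */
            unsigned room = rsp_prod_pvt + RING_SIZE - peek;

            printf("room=%u full=%d\n", room, room < needed);   /* room=6 full=1 */
            return 0;
    }
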
/*
@@ -364,9 +215,9 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
/*
* Figure out how many ring slots we're going to need to send @skb to
* the guest. This function is essentially a dry run of
- * netbk_gop_frag_copy.
+ * xenvif_gop_frag_copy.
*/
-unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
+unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
{
unsigned int count;
int i, copy_off;
@@ -418,15 +269,15 @@ struct netrx_pending_operations {
unsigned copy_prod, copy_cons;
unsigned meta_prod, meta_cons;
struct gnttab_copy *copy;
- struct netbk_rx_meta *meta;
+ struct xenvif_rx_meta *meta;
int copy_off;
grant_ref_t copy_gref;
};
-static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif,
- struct netrx_pending_operations *npo)
+static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
+ struct netrx_pending_operations *npo)
{
- struct netbk_rx_meta *meta;
+ struct xenvif_rx_meta *meta;
struct xen_netif_rx_request *req;
req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
@@ -446,19 +297,13 @@ static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif,
* Set up the grant operations for this fragment. If it's a flipping
* interface, we also set up the unmap request from here.
*/
-static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
- struct netrx_pending_operations *npo,
- struct page *page, unsigned long size,
- unsigned long offset, int *head)
+static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
+ struct netrx_pending_operations *npo,
+ struct page *page, unsigned long size,
+ unsigned long offset, int *head)
{
struct gnttab_copy *copy_gop;
- struct netbk_rx_meta *meta;
- /*
- * These variables are used iff get_page_ext returns true,
- * in which case they are guaranteed to be initialized.
- */
- unsigned int uninitialized_var(group), uninitialized_var(idx);
- int foreign = get_page_ext(page, &group, &idx);
+ struct xenvif_rx_meta *meta;
unsigned long bytes;
/* Data must not cross a page boundary. */
@@ -494,26 +339,15 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
copy_gop = npo->copy + npo->copy_prod++;
copy_gop->flags = GNTCOPY_dest_gref;
- if (foreign) {
- struct xen_netbk *netbk = &xen_netbk[group];
- struct pending_tx_info *src_pend;
-
- src_pend = &netbk->pending_tx_info[idx];
+ copy_gop->len = bytes;
- copy_gop->source.domid = src_pend->vif->domid;
- copy_gop->source.u.ref = src_pend->req.gref;
- copy_gop->flags |= GNTCOPY_source_gref;
- } else {
- void *vaddr = page_address(page);
- copy_gop->source.domid = DOMID_SELF;
- copy_gop->source.u.gmfn = virt_to_mfn(vaddr);
- }
+ copy_gop->source.domid = DOMID_SELF;
+ copy_gop->source.u.gmfn = virt_to_mfn(page_address(page));
copy_gop->source.offset = offset;
- copy_gop->dest.domid = vif->domid;
+ copy_gop->dest.domid = vif->domid;
copy_gop->dest.offset = npo->copy_off;
copy_gop->dest.u.ref = npo->copy_gref;
- copy_gop->len = bytes;
npo->copy_off += bytes;
meta->size += bytes;
@@ -549,14 +383,14 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
* zero GSO descriptors (for non-GSO packets) or one descriptor (for
* frontend-side LRO).
*/
-static int netbk_gop_skb(struct sk_buff *skb,
- struct netrx_pending_operations *npo)
+static int xenvif_gop_skb(struct sk_buff *skb,
+ struct netrx_pending_operations *npo)
{
struct xenvif *vif = netdev_priv(skb->dev);
int nr_frags = skb_shinfo(skb)->nr_frags;
int i;
struct xen_netif_rx_request *req;
- struct netbk_rx_meta *meta;
+ struct xenvif_rx_meta *meta;
unsigned char *data;
int head = 1;
int old_meta_prod;
@@ -593,30 +427,30 @@ static int netbk_gop_skb(struct sk_buff *skb,
if (data + len > skb_tail_pointer(skb))
len = skb_tail_pointer(skb) - data;
- netbk_gop_frag_copy(vif, skb, npo,
- virt_to_page(data), len, offset, &head);
+ xenvif_gop_frag_copy(vif, skb, npo,
+ virt_to_page(data), len, offset, &head);
data += len;
}
for (i = 0; i < nr_frags; i++) {
- netbk_gop_frag_copy(vif, skb, npo,
- skb_frag_page(&skb_shinfo(skb)->frags[i]),
- skb_frag_size(&skb_shinfo(skb)->frags[i]),
- skb_shinfo(skb)->frags[i].page_offset,
- &head);
+ xenvif_gop_frag_copy(vif, skb, npo,
+ skb_frag_page(&skb_shinfo(skb)->frags[i]),
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
+ skb_shinfo(skb)->frags[i].page_offset,
+ &head);
}
return npo->meta_prod - old_meta_prod;
}
/*
- * This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
+ * This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was
* used to set up the operations on the top of
* netrx_pending_operations, which have since been done. Check that
* they didn't give any errors and advance over them.
*/
-static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
- struct netrx_pending_operations *npo)
+static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
+ struct netrx_pending_operations *npo)
{
struct gnttab_copy *copy_op;
int status = XEN_NETIF_RSP_OKAY;
@@ -635,9 +469,9 @@ static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
return status;
}
-static void netbk_add_frag_responses(struct xenvif *vif, int status,
- struct netbk_rx_meta *meta,
- int nr_meta_slots)
+static void xenvif_add_frag_responses(struct xenvif *vif, int status,
+ struct xenvif_rx_meta *meta,
+ int nr_meta_slots)
{
int i;
unsigned long offset;
@@ -665,9 +499,13 @@ struct skb_cb_overlay {
int meta_slots_used;
};
-static void xen_netbk_rx_action(struct xen_netbk *netbk)
+static void xenvif_kick_thread(struct xenvif *vif)
+{
+ wake_up(&vif->wq);
+}
+
+void xenvif_rx_action(struct xenvif *vif)
{
- struct xenvif *vif = NULL, *tmp;
s8 status;
u16 flags;
struct xen_netif_rx_response *resp;
@@ -679,22 +517,23 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
int count;
unsigned long offset;
struct skb_cb_overlay *sco;
+ int need_to_notify = 0;
struct netrx_pending_operations npo = {
- .copy = netbk->grant_copy_op,
- .meta = netbk->meta,
+ .copy = vif->grant_copy_op,
+ .meta = vif->meta,
};
skb_queue_head_init(&rxq);
count = 0;
- while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
+ while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
vif = netdev_priv(skb->dev);
nr_frags = skb_shinfo(skb)->nr_frags;
sco = (struct skb_cb_overlay *)skb->cb;
- sco->meta_slots_used = netbk_gop_skb(skb, &npo);
+ sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
count += nr_frags + 1;
@@ -706,27 +545,27 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
break;
}
- BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));
+ BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
if (!npo.copy_prod)
return;
- BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
- gnttab_batch_copy(netbk->grant_copy_op, npo.copy_prod);
+ BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op));
+ gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
while ((skb = __skb_dequeue(&rxq)) != NULL) {
sco = (struct skb_cb_overlay *)skb->cb;
vif = netdev_priv(skb->dev);
- if (netbk->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
+ if (vif->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
resp = RING_GET_RESPONSE(&vif->rx,
- vif->rx.rsp_prod_pvt++);
+ vif->rx.rsp_prod_pvt++);
resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
- resp->offset = netbk->meta[npo.meta_cons].gso_size;
- resp->id = netbk->meta[npo.meta_cons].id;
+ resp->offset = vif->meta[npo.meta_cons].gso_size;
+ resp->id = vif->meta[npo.meta_cons].id;
resp->status = sco->meta_slots_used;
npo.meta_cons++;
@@ -737,7 +576,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
vif->dev->stats.tx_bytes += skb->len;
vif->dev->stats.tx_packets++;
- status = netbk_check_gop(vif, sco->meta_slots_used, &npo);
+ status = xenvif_check_gop(vif, sco->meta_slots_used, &npo);
if (sco->meta_slots_used == 1)
flags = 0;
@@ -751,12 +590,12 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
flags |= XEN_NETRXF_data_validated;
offset = 0;
- resp = make_rx_response(vif, netbk->meta[npo.meta_cons].id,
+ resp = make_rx_response(vif, vif->meta[npo.meta_cons].id,
status, offset,
- netbk->meta[npo.meta_cons].size,
+ vif->meta[npo.meta_cons].size,
flags);
- if (netbk->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
+ if (vif->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
struct xen_netif_extra_info *gso =
(struct xen_netif_extra_info *)
RING_GET_RESPONSE(&vif->rx,
@@ -764,7 +603,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
resp->flags |= XEN_NETRXF_extra_info;
- gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size;
+ gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
gso->u.gso.pad = 0;
gso->u.gso.features = 0;
@@ -773,123 +612,44 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
gso->flags = 0;
}
- netbk_add_frag_responses(vif, status,
- netbk->meta + npo.meta_cons + 1,
- sco->meta_slots_used);
+ xenvif_add_frag_responses(vif, status,
+ vif->meta + npo.meta_cons + 1,
+ sco->meta_slots_used);
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
+ if (ret)
+ need_to_notify = 1;
+
xenvif_notify_tx_completion(vif);
- if (ret && list_empty(&vif->notify_list))
- list_add_tail(&vif->notify_list, &notify);
- else
- xenvif_put(vif);
npo.meta_cons += sco->meta_slots_used;
dev_kfree_skb(skb);
}
- list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
+ if (need_to_notify)
notify_remote_via_irq(vif->rx_irq);
- list_del_init(&vif->notify_list);
- xenvif_put(vif);
- }
/* More work to do? */
- if (!skb_queue_empty(&netbk->rx_queue) &&
- !timer_pending(&netbk->net_timer))
- xen_netbk_kick_thread(netbk);
-}
-
-void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
-{
- struct xen_netbk *netbk = vif->netbk;
-
- skb_queue_tail(&netbk->rx_queue, skb);
-
- xen_netbk_kick_thread(netbk);
-}
-
-static void xen_netbk_alarm(unsigned long data)
-{
- struct xen_netbk *netbk = (struct xen_netbk *)data;
- xen_netbk_kick_thread(netbk);
-}
-
-static int __on_net_schedule_list(struct xenvif *vif)
-{
- return !list_empty(&vif->schedule_list);
-}
-
-/* Must be called with net_schedule_list_lock held */
-static void remove_from_net_schedule_list(struct xenvif *vif)
-{
- if (likely(__on_net_schedule_list(vif))) {
- list_del_init(&vif->schedule_list);
- xenvif_put(vif);
- }
-}
-
-static struct xenvif *poll_net_schedule_list(struct xen_netbk *netbk)
-{
- struct xenvif *vif = NULL;
-
- spin_lock_irq(&netbk->net_schedule_list_lock);
- if (list_empty(&netbk->net_schedule_list))
- goto out;
-
- vif = list_first_entry(&netbk->net_schedule_list,
- struct xenvif, schedule_list);
- if (!vif)
- goto out;
-
- xenvif_get(vif);
-
- remove_from_net_schedule_list(vif);
-out:
- spin_unlock_irq(&netbk->net_schedule_list_lock);
- return vif;
+ if (!skb_queue_empty(&vif->rx_queue))
+ xenvif_kick_thread(vif);
}
-void xen_netbk_schedule_xenvif(struct xenvif *vif)
+void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
{
- unsigned long flags;
- struct xen_netbk *netbk = vif->netbk;
-
- if (__on_net_schedule_list(vif))
- goto kick;
-
- spin_lock_irqsave(&netbk->net_schedule_list_lock, flags);
- if (!__on_net_schedule_list(vif) &&
- likely(xenvif_schedulable(vif))) {
- list_add_tail(&vif->schedule_list, &netbk->net_schedule_list);
- xenvif_get(vif);
- }
- spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags);
-
-kick:
- smp_mb();
- if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
- !list_empty(&netbk->net_schedule_list))
- xen_netbk_kick_thread(netbk);
-}
+ skb_queue_tail(&vif->rx_queue, skb);
-void xen_netbk_deschedule_xenvif(struct xenvif *vif)
-{
- struct xen_netbk *netbk = vif->netbk;
- spin_lock_irq(&netbk->net_schedule_list_lock);
- remove_from_net_schedule_list(vif);
- spin_unlock_irq(&netbk->net_schedule_list_lock);
+ xenvif_kick_thread(vif);
}
-void xen_netbk_check_rx_xenvif(struct xenvif *vif)
+void xenvif_check_rx_xenvif(struct xenvif *vif)
{
int more_to_do;
RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
if (more_to_do)
- xen_netbk_schedule_xenvif(vif);
+ napi_schedule(&vif->napi);
}
static void tx_add_credit(struct xenvif *vif)
@@ -916,11 +676,11 @@ static void tx_credit_callback(unsigned long data)
{
struct xenvif *vif = (struct xenvif *)data;
tx_add_credit(vif);
- xen_netbk_check_rx_xenvif(vif);
+ xenvif_check_rx_xenvif(vif);
}
-static void netbk_tx_err(struct xenvif *vif,
- struct xen_netif_tx_request *txp, RING_IDX end)
+static void xenvif_tx_err(struct xenvif *vif,
+ struct xen_netif_tx_request *txp, RING_IDX end)
{
RING_IDX cons = vif->tx.req_cons;
@@ -931,21 +691,18 @@ static void netbk_tx_err(struct xenvif *vif,
txp = RING_GET_REQUEST(&vif->tx, cons++);
} while (1);
vif->tx.req_cons = cons;
- xen_netbk_check_rx_xenvif(vif);
- xenvif_put(vif);
}
-static void netbk_fatal_tx_err(struct xenvif *vif)
+static void xenvif_fatal_tx_err(struct xenvif *vif)
{
netdev_err(vif->dev, "fatal error; disabling device\n");
xenvif_carrier_off(vif);
- xenvif_put(vif);
}
-static int netbk_count_requests(struct xenvif *vif,
- struct xen_netif_tx_request *first,
- struct xen_netif_tx_request *txp,
- int work_to_do)
+static int xenvif_count_requests(struct xenvif *vif,
+ struct xen_netif_tx_request *first,
+ struct xen_netif_tx_request *txp,
+ int work_to_do)
{
RING_IDX cons = vif->tx.req_cons;
int slots = 0;
@@ -962,7 +719,7 @@ static int netbk_count_requests(struct xenvif *vif,
netdev_err(vif->dev,
"Asked for %d slots but exceeds this limit\n",
work_to_do);
- netbk_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(vif);
return -ENODATA;
}
@@ -973,7 +730,7 @@ static int netbk_count_requests(struct xenvif *vif,
netdev_err(vif->dev,
"Malicious frontend using %d slots, threshold %u\n",
slots, fatal_skb_slots);
- netbk_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(vif);
return -E2BIG;
}
@@ -1021,7 +778,7 @@ static int netbk_count_requests(struct xenvif *vif,
if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
txp->offset, txp->size);
- netbk_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(vif);
return -EINVAL;
}
@@ -1033,30 +790,30 @@ static int netbk_count_requests(struct xenvif *vif,
} while (more_data);
if (drop_err) {
- netbk_tx_err(vif, first, cons + slots);
+ xenvif_tx_err(vif, first, cons + slots);
return drop_err;
}
return slots;
}
-static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
- u16 pending_idx)
+static struct page *xenvif_alloc_page(struct xenvif *vif,
+ u16 pending_idx)
{
struct page *page;
- page = alloc_page(GFP_KERNEL|__GFP_COLD);
+
+ page = alloc_page(GFP_ATOMIC|__GFP_COLD);
if (!page)
return NULL;
- set_page_ext(page, netbk, pending_idx);
- netbk->mmap_pages[pending_idx] = page;
+ vif->mmap_pages[pending_idx] = page;
+
return page;
}
-static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
- struct xenvif *vif,
- struct sk_buff *skb,
- struct xen_netif_tx_request *txp,
- struct gnttab_copy *gop)
+static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
+ struct sk_buff *skb,
+ struct xen_netif_tx_request *txp,
+ struct gnttab_copy *gop)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
skb_frag_t *frags = shinfo->frags;
@@ -1079,14 +836,14 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
/* Coalesce tx requests, at this point the packet passed in
* should be <= 64K. Any packets larger than 64K have been
- * handled in netbk_count_requests().
+ * handled in xenvif_count_requests().
*/
for (shinfo->nr_frags = slot = start; slot < nr_slots;
shinfo->nr_frags++) {
struct pending_tx_info *pending_tx_info =
- netbk->pending_tx_info;
+ vif->pending_tx_info;
- page = alloc_page(GFP_KERNEL|__GFP_COLD);
+ page = alloc_page(GFP_ATOMIC|__GFP_COLD);
if (!page)
goto err;
@@ -1121,21 +878,18 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
gop->len = txp->size;
dst_offset += gop->len;
- index = pending_index(netbk->pending_cons++);
+ index = pending_index(vif->pending_cons++);
- pending_idx = netbk->pending_ring[index];
+ pending_idx = vif->pending_ring[index];
memcpy(&pending_tx_info[pending_idx].req, txp,
sizeof(*txp));
- xenvif_get(vif);
-
- pending_tx_info[pending_idx].vif = vif;
/* Poison these fields, corresponding
* fields for head tx req will be set
* to correct values after the loop.
*/
- netbk->mmap_pages[pending_idx] = (void *)(~0UL);
+ vif->mmap_pages[pending_idx] = (void *)(~0UL);
pending_tx_info[pending_idx].head =
INVALID_PENDING_RING_IDX;
@@ -1155,8 +909,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
first->req.offset = 0;
first->req.size = dst_offset;
first->head = start_idx;
- set_page_ext(page, netbk, head_idx);
- netbk->mmap_pages[head_idx] = page;
+ vif->mmap_pages[head_idx] = page;
frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
}
@@ -1166,20 +919,20 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
err:
/* Unwind, freeing all pages and sending error responses. */
while (shinfo->nr_frags-- > start) {
- xen_netbk_idx_release(netbk,
+ xenvif_idx_release(vif,
frag_get_pending_idx(&frags[shinfo->nr_frags]),
XEN_NETIF_RSP_ERROR);
}
/* The head too, if necessary. */
if (start)
- xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+ xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
return NULL;
}
-static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
- struct sk_buff *skb,
- struct gnttab_copy **gopp)
+static int xenvif_tx_check_gop(struct xenvif *vif,
+ struct sk_buff *skb,
+ struct gnttab_copy **gopp)
{
struct gnttab_copy *gop = *gopp;
u16 pending_idx = *((u16 *)skb->data);
@@ -1192,7 +945,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
/* Check status of header. */
err = gop->status;
if (unlikely(err))
- xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+ xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
/* Skip first skb fragment if it is on same page as header fragment. */
start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
@@ -1202,7 +955,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
pending_ring_idx_t head;
pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
- tx_info = &netbk->pending_tx_info[pending_idx];
+ tx_info = &vif->pending_tx_info[pending_idx];
head = tx_info->head;
/* Check error status: if okay then remember grant handle. */
@@ -1210,18 +963,19 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
newerr = (++gop)->status;
if (newerr)
break;
- peek = netbk->pending_ring[pending_index(++head)];
- } while (!pending_tx_is_head(netbk, peek));
+ peek = vif->pending_ring[pending_index(++head)];
+ } while (!pending_tx_is_head(vif, peek));
if (likely(!newerr)) {
/* Had a previous error? Invalidate this fragment. */
if (unlikely(err))
- xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ xenvif_idx_release(vif, pending_idx,
+ XEN_NETIF_RSP_OKAY);
continue;
}
/* Error on this fragment: respond to client with an error. */
- xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+ xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
/* Not the first error? Preceding frags already invalidated. */
if (err)
@@ -1229,10 +983,11 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
/* First error: invalidate header and preceding fragments. */
pending_idx = *((u16 *)skb->data);
- xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
for (j = start; j < i; j++) {
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
- xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ xenvif_idx_release(vif, pending_idx,
+ XEN_NETIF_RSP_OKAY);
}
/* Remember the error: invalidate all subsequent fragments. */
@@ -1243,7 +998,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
return err;
}
-static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
+static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
int nr_frags = shinfo->nr_frags;
@@ -1257,20 +1012,20 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
pending_idx = frag_get_pending_idx(frag);
- txp = &netbk->pending_tx_info[pending_idx].req;
- page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
+ txp = &vif->pending_tx_info[pending_idx].req;
+ page = virt_to_page(idx_to_kaddr(vif, pending_idx));
__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
skb->len += txp->size;
skb->data_len += txp->size;
skb->truesize += txp->size;
- /* Take an extra reference to offset xen_netbk_idx_release */
- get_page(netbk->mmap_pages[pending_idx]);
- xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ /* Take an extra reference to offset xenvif_idx_release */
+ get_page(vif->mmap_pages[pending_idx]);
+ xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
}
}
-static int xen_netbk_get_extras(struct xenvif *vif,
+static int xenvif_get_extras(struct xenvif *vif,
struct xen_netif_extra_info *extras,
int work_to_do)
{
@@ -1280,7 +1035,7 @@ static int xen_netbk_get_extras(struct xenvif *vif,
do {
if (unlikely(work_to_do-- <= 0)) {
netdev_err(vif->dev, "Missing extra info\n");
- netbk_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(vif);
return -EBADR;
}
@@ -1291,7 +1046,7 @@ static int xen_netbk_get_extras(struct xenvif *vif,
vif->tx.req_cons = ++cons;
netdev_err(vif->dev,
"Invalid extra type: %d\n", extra.type);
- netbk_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(vif);
return -EINVAL;
}
@@ -1302,20 +1057,20 @@ static int xen_netbk_get_extras(struct xenvif *vif,
return work_to_do;
}
-static int netbk_set_skb_gso(struct xenvif *vif,
- struct sk_buff *skb,
- struct xen_netif_extra_info *gso)
+static int xenvif_set_skb_gso(struct xenvif *vif,
+ struct sk_buff *skb,
+ struct xen_netif_extra_info *gso)
{
if (!gso->u.gso.size) {
netdev_err(vif->dev, "GSO size must not be zero.\n");
- netbk_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(vif);
return -EINVAL;
}
/* Currently only TCPv4 S.O. is supported. */
if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
- netbk_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(vif);
return -EINVAL;
}
@@ -1426,16 +1181,14 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
return false;
}
-static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
+static unsigned xenvif_tx_build_gops(struct xenvif *vif)
{
- struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop;
+ struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
struct sk_buff *skb;
int ret;
- while ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX
- < MAX_PENDING_REQS) &&
- !list_empty(&netbk->net_schedule_list)) {
- struct xenvif *vif;
+ while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
+ < MAX_PENDING_REQS)) {
struct xen_netif_tx_request txreq;
struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
struct page *page;
@@ -1446,16 +1199,6 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
unsigned int data_len;
pending_ring_idx_t index;
- /* Get a netif from the list with work to do. */
- vif = poll_net_schedule_list(netbk);
- /* This can sometimes happen because the test of
- * list_empty(net_schedule_list) at the top of the
- * loop is unlocked. Just go back and have another
- * look.
- */
- if (!vif)
- continue;
-
if (vif->tx.sring->req_prod - vif->tx.req_cons >
XEN_NETIF_TX_RING_SIZE) {
netdev_err(vif->dev,
@@ -1463,15 +1206,13 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
"req_prod %d, req_cons %d, size %ld\n",
vif->tx.sring->req_prod, vif->tx.req_cons,
XEN_NETIF_TX_RING_SIZE);
- netbk_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(vif);
continue;
}
RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
- if (!work_to_do) {
- xenvif_put(vif);
- continue;
- }
+ if (!work_to_do)
+ break;
idx = vif->tx.req_cons;
rmb(); /* Ensure that we see the request before we copy it. */
@@ -1479,10 +1220,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
/* Credit-based scheduling. */
if (txreq.size > vif->remaining_credit &&
- tx_credit_exceeded(vif, txreq.size)) {
- xenvif_put(vif);
- continue;
- }
+ tx_credit_exceeded(vif, txreq.size))
+ break;
vif->remaining_credit -= txreq.size;
@@ -1491,24 +1230,24 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
memset(extras, 0, sizeof(extras));
if (txreq.flags & XEN_NETTXF_extra_info) {
- work_to_do = xen_netbk_get_extras(vif, extras,
- work_to_do);
+ work_to_do = xenvif_get_extras(vif, extras,
+ work_to_do);
idx = vif->tx.req_cons;
if (unlikely(work_to_do < 0))
- continue;
+ break;
}
- ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
+ ret = xenvif_count_requests(vif, &txreq, txfrags, work_to_do);
if (unlikely(ret < 0))
- continue;
+ break;
idx += ret;
if (unlikely(txreq.size < ETH_HLEN)) {
netdev_dbg(vif->dev,
"Bad packet size: %d\n", txreq.size);
- netbk_tx_err(vif, &txreq, idx);
- continue;
+ xenvif_tx_err(vif, &txreq, idx);
+ break;
}
/* No crossing a page as the payload mustn't fragment. */
@@ -1517,12 +1256,12 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
"txreq.offset: %x, size: %u, end: %lu\n",
txreq.offset, txreq.size,
(txreq.offset&~PAGE_MASK) + txreq.size);
- netbk_fatal_tx_err(vif);
- continue;
+ xenvif_fatal_tx_err(vif);
+ break;
}
- index = pending_index(netbk->pending_cons);
- pending_idx = netbk->pending_ring[index];
+ index = pending_index(vif->pending_cons);
+ pending_idx = vif->pending_ring[index];
data_len = (txreq.size > PKT_PROT_LEN &&
ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
@@ -1533,7 +1272,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
if (unlikely(skb == NULL)) {
netdev_dbg(vif->dev,
"Can't allocate a skb in start_xmit.\n");
- netbk_tx_err(vif, &txreq, idx);
+ xenvif_tx_err(vif, &txreq, idx);
break;
}
@@ -1544,19 +1283,19 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
struct xen_netif_extra_info *gso;
gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
- if (netbk_set_skb_gso(vif, skb, gso)) {
- /* Failure in netbk_set_skb_gso is fatal. */
+ if (xenvif_set_skb_gso(vif, skb, gso)) {
+ /* Failure in xenvif_set_skb_gso is fatal. */
kfree_skb(skb);
- continue;
+ break;
}
}
/* XXX could copy straight to head */
- page = xen_netbk_alloc_page(netbk, pending_idx);
+ page = xenvif_alloc_page(vif, pending_idx);
if (!page) {
kfree_skb(skb);
- netbk_tx_err(vif, &txreq, idx);
- continue;
+ xenvif_tx_err(vif, &txreq, idx);
+ break;
}
gop->source.u.ref = txreq.gref;
@@ -1572,10 +1311,9 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
gop++;
- memcpy(&netbk->pending_tx_info[pending_idx].req,
+ memcpy(&vif->pending_tx_info[pending_idx].req,
&txreq, sizeof(txreq));
- netbk->pending_tx_info[pending_idx].vif = vif;
- netbk->pending_tx_info[pending_idx].head = index;
+ vif->pending_tx_info[pending_idx].head = index;
*((u16 *)skb->data) = pending_idx;
__skb_put(skb, data_len);
@@ -1590,46 +1328,45 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
INVALID_PENDING_IDX);
}
- netbk->pending_cons++;
+ vif->pending_cons++;
- request_gop = xen_netbk_get_requests(netbk, vif,
- skb, txfrags, gop);
+ request_gop = xenvif_get_requests(vif, skb, txfrags, gop);
if (request_gop == NULL) {
kfree_skb(skb);
- netbk_tx_err(vif, &txreq, idx);
- continue;
+ xenvif_tx_err(vif, &txreq, idx);
+ break;
}
gop = request_gop;
- __skb_queue_tail(&netbk->tx_queue, skb);
+ __skb_queue_tail(&vif->tx_queue, skb);
vif->tx.req_cons = idx;
- xen_netbk_check_rx_xenvif(vif);
- if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops))
+ if ((gop-vif->tx_copy_ops) >= ARRAY_SIZE(vif->tx_copy_ops))
break;
}
- return gop - netbk->tx_copy_ops;
+ return gop - vif->tx_copy_ops;
}
-static void xen_netbk_tx_submit(struct xen_netbk *netbk)
+
+static int xenvif_tx_submit(struct xenvif *vif, int budget)
{
- struct gnttab_copy *gop = netbk->tx_copy_ops;
+ struct gnttab_copy *gop = vif->tx_copy_ops;
struct sk_buff *skb;
+ int work_done = 0;
- while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) {
+ while (work_done < budget &&
+ (skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
struct xen_netif_tx_request *txp;
- struct xenvif *vif;
u16 pending_idx;
unsigned data_len;
pending_idx = *((u16 *)skb->data);
- vif = netbk->pending_tx_info[pending_idx].vif;
- txp = &netbk->pending_tx_info[pending_idx].req;
+ txp = &vif->pending_tx_info[pending_idx].req;
/* Check the remap error code. */
- if (unlikely(xen_netbk_tx_check_gop(netbk, skb, &gop))) {
+ if (unlikely(xenvif_tx_check_gop(vif, skb, &gop))) {
netdev_dbg(vif->dev, "netback grant failed.\n");
skb_shinfo(skb)->nr_frags = 0;
kfree_skb(skb);
@@ -1638,7 +1375,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
data_len = skb->len;
memcpy(skb->data,
- (void *)(idx_to_kaddr(netbk, pending_idx)|txp->offset),
+ (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset),
data_len);
if (data_len < txp->size) {
/* Append the packet payload as a fragment. */
@@ -1646,7 +1383,8 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
txp->size -= data_len;
} else {
/* Schedule a response immediately. */
- xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ xenvif_idx_release(vif, pending_idx,
+ XEN_NETIF_RSP_OKAY);
}
if (txp->flags & XEN_NETTXF_csum_blank)
@@ -1654,7 +1392,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
else if (txp->flags & XEN_NETTXF_data_validated)
skb->ip_summed = CHECKSUM_UNNECESSARY;
- xen_netbk_fill_frags(netbk, skb);
+ xenvif_fill_frags(vif, skb);
/*
* If the initial fragment was < PKT_PROT_LEN then
@@ -1682,53 +1420,61 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
vif->dev->stats.rx_bytes += skb->len;
vif->dev->stats.rx_packets++;
- xenvif_receive_skb(vif, skb);
+ work_done++;
+
+ netif_receive_skb(skb);
}
+
+ return work_done;
}
/* Called after netfront has transmitted */
-static void xen_netbk_tx_action(struct xen_netbk *netbk)
+int xenvif_tx_action(struct xenvif *vif, int budget)
{
unsigned nr_gops;
+ int work_done;
+
+ if (unlikely(!tx_work_todo(vif)))
+ return 0;
- nr_gops = xen_netbk_tx_build_gops(netbk);
+ nr_gops = xenvif_tx_build_gops(vif);
if (nr_gops == 0)
- return;
+ return 0;
- gnttab_batch_copy(netbk->tx_copy_ops, nr_gops);
+ gnttab_batch_copy(vif->tx_copy_ops, nr_gops);
- xen_netbk_tx_submit(netbk);
+ work_done = xenvif_tx_submit(vif, nr_gops);
+
+ return work_done;
}
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
- u8 status)
+static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
+ u8 status)
{
- struct xenvif *vif;
struct pending_tx_info *pending_tx_info;
pending_ring_idx_t head;
u16 peek; /* peek into next tx request */
- BUG_ON(netbk->mmap_pages[pending_idx] == (void *)(~0UL));
+ BUG_ON(vif->mmap_pages[pending_idx] == (void *)(~0UL));
/* Already complete? */
- if (netbk->mmap_pages[pending_idx] == NULL)
+ if (vif->mmap_pages[pending_idx] == NULL)
return;
- pending_tx_info = &netbk->pending_tx_info[pending_idx];
+ pending_tx_info = &vif->pending_tx_info[pending_idx];
- vif = pending_tx_info->vif;
head = pending_tx_info->head;
- BUG_ON(!pending_tx_is_head(netbk, head));
- BUG_ON(netbk->pending_ring[pending_index(head)] != pending_idx);
+ BUG_ON(!pending_tx_is_head(vif, head));
+ BUG_ON(vif->pending_ring[pending_index(head)] != pending_idx);
do {
pending_ring_idx_t index;
pending_ring_idx_t idx = pending_index(head);
- u16 info_idx = netbk->pending_ring[idx];
+ u16 info_idx = vif->pending_ring[idx];
- pending_tx_info = &netbk->pending_tx_info[info_idx];
+ pending_tx_info = &vif->pending_tx_info[info_idx];
make_tx_response(vif, &pending_tx_info->req, status);
/* Setting any number other than
@@ -1737,18 +1483,15 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
*/
pending_tx_info->head = 0;
- index = pending_index(netbk->pending_prod++);
- netbk->pending_ring[index] = netbk->pending_ring[info_idx];
-
- xenvif_put(vif);
+ index = pending_index(vif->pending_prod++);
+ vif->pending_ring[index] = vif->pending_ring[info_idx];
- peek = netbk->pending_ring[pending_index(++head)];
+ peek = vif->pending_ring[pending_index(++head)];
- } while (!pending_tx_is_head(netbk, peek));
+ } while (!pending_tx_is_head(vif, peek));
- netbk->mmap_pages[pending_idx]->mapping = 0;
- put_page(netbk->mmap_pages[pending_idx]);
- netbk->mmap_pages[pending_idx] = NULL;
+ put_page(vif->mmap_pages[pending_idx]);
+ vif->mmap_pages[pending_idx] = NULL;
}
@@ -1796,46 +1539,23 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
return resp;
}
-static inline int rx_work_todo(struct xen_netbk *netbk)
+static inline int rx_work_todo(struct xenvif *vif)
{
- return !skb_queue_empty(&netbk->rx_queue);
+ return !skb_queue_empty(&vif->rx_queue);
}
-static inline int tx_work_todo(struct xen_netbk *netbk)
+static inline int tx_work_todo(struct xenvif *vif)
{
- if ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX
- < MAX_PENDING_REQS) &&
- !list_empty(&netbk->net_schedule_list))
+ if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) &&
+ (nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
+ < MAX_PENDING_REQS))
return 1;
return 0;
}
-static int xen_netbk_kthread(void *data)
-{
- struct xen_netbk *netbk = data;
- while (!kthread_should_stop()) {
- wait_event_interruptible(netbk->wq,
- rx_work_todo(netbk) ||
- tx_work_todo(netbk) ||
- kthread_should_stop());
- cond_resched();
-
- if (kthread_should_stop())
- break;
-
- if (rx_work_todo(netbk))
- xen_netbk_rx_action(netbk);
-
- if (tx_work_todo(netbk))
- xen_netbk_tx_action(netbk);
- }
-
- return 0;
-}
-
-void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
+void xenvif_unmap_frontend_rings(struct xenvif *vif)
{
if (vif->tx.sring)
xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
@@ -1845,9 +1565,9 @@ void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
vif->rx.sring);
}
-int xen_netbk_map_frontend_rings(struct xenvif *vif,
- grant_ref_t tx_ring_ref,
- grant_ref_t rx_ring_ref)
+int xenvif_map_frontend_rings(struct xenvif *vif,
+ grant_ref_t tx_ring_ref,
+ grant_ref_t rx_ring_ref)
{
void *addr;
struct xen_netif_tx_sring *txs;
@@ -1876,15 +1596,33 @@ int xen_netbk_map_frontend_rings(struct xenvif *vif,
return 0;
err:
- xen_netbk_unmap_frontend_rings(vif);
+ xenvif_unmap_frontend_rings(vif);
return err;
}
+int xenvif_kthread(void *data)
+{
+ struct xenvif *vif = data;
+
+ while (!kthread_should_stop()) {
+ wait_event_interruptible(vif->wq,
+ rx_work_todo(vif) ||
+ kthread_should_stop());
+ if (kthread_should_stop())
+ break;
+
+ if (rx_work_todo(vif))
+ xenvif_rx_action(vif);
+
+ cond_resched();
+ }
+
+ return 0;
+}
+
static int __init netback_init(void)
{
- int i;
int rc = 0;
- int group;
if (!xen_domain())
return -ENODEV;
@@ -1895,48 +1633,6 @@ static int __init netback_init(void)
fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
}
- xen_netbk_group_nr = num_online_cpus();
- xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
- if (!xen_netbk)
- return -ENOMEM;
-
- for (group = 0; group < xen_netbk_group_nr; group++) {
- struct xen_netbk *netbk = &xen_netbk[group];
- skb_queue_head_init(&netbk->rx_queue);
- skb_queue_head_init(&netbk->tx_queue);
-
- init_timer(&netbk->net_timer);
- netbk->net_timer.data = (unsigned long)netbk;
- netbk->net_timer.function = xen_netbk_alarm;
-
- netbk->pending_cons = 0;
- netbk->pending_prod = MAX_PENDING_REQS;
- for (i = 0; i < MAX_PENDING_REQS; i++)
- netbk->pending_ring[i] = i;
-
- init_waitqueue_head(&netbk->wq);
- netbk->task = kthread_create(xen_netbk_kthread,
- (void *)netbk,
- "netback/%u", group);
-
- if (IS_ERR(netbk->task)) {
- pr_alert("kthread_create() fails at netback\n");
- del_timer(&netbk->net_timer);
- rc = PTR_ERR(netbk->task);
- goto failed_init;
- }
-
- kthread_bind(netbk->task, group);
-
- INIT_LIST_HEAD(&netbk->net_schedule_list);
-
- spin_lock_init(&netbk->net_schedule_list_lock);
-
- atomic_set(&netbk->netfront_count, 0);
-
- wake_up_process(netbk->task);
- }
-
rc = xenvif_xenbus_init();
if (rc)
goto failed_init;
@@ -1944,35 +1640,14 @@ static int __init netback_init(void)
return 0;
failed_init:
- while (--group >= 0) {
- struct xen_netbk *netbk = &xen_netbk[group];
- del_timer(&netbk->net_timer);
- kthread_stop(netbk->task);
- }
- vfree(xen_netbk);
return rc;
-
}
module_init(netback_init);
static void __exit netback_fini(void)
{
- int i, j;
-
xenvif_xenbus_fini();
-
- for (i = 0; i < xen_netbk_group_nr; i++) {
- struct xen_netbk *netbk = &xen_netbk[i];
- del_timer_sync(&netbk->net_timer);
- kthread_stop(netbk->task);
- for (j = 0; j < MAX_PENDING_REQS; j++) {
- if (netbk->mmap_pages[j])
- __free_page(netbk->mmap_pages[j]);
- }
- }
-
- vfree(xen_netbk);
}
module_exit(netback_fini);
diff --git a/drivers/nfc/nfcsim.c b/drivers/nfc/nfcsim.c
index c5c30fb1d7bf..9a53f13c88df 100644
--- a/drivers/nfc/nfcsim.c
+++ b/drivers/nfc/nfcsim.c
@@ -60,7 +60,7 @@ struct nfcsim {
static struct nfcsim *dev0;
static struct nfcsim *dev1;
-struct workqueue_struct *wq;
+static struct workqueue_struct *wq;
static void nfcsim_cleanup_dev(struct nfcsim *dev, u8 shutdown)
{
@@ -481,7 +481,7 @@ static void nfcsim_free_device(struct nfcsim *dev)
kfree(dev);
}
-int __init nfcsim_init(void)
+static int __init nfcsim_init(void)
{
int rc;
@@ -522,7 +522,7 @@ exit:
return rc;
}
-void __exit nfcsim_exit(void)
+static void __exit nfcsim_exit(void)
{
nfcsim_cleanup_dev(dev0, 1);
nfcsim_cleanup_dev(dev1, 1);
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index daf92ac209f8..5df730be88a3 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -83,12 +83,20 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
/* How much time we spend listening for initiators */
#define PN533_LISTEN_TIME 2
+/* Delay between each poll frame (ms) */
+#define PN533_POLL_INTERVAL 10
-/* Standard pn533 frame definitions */
+/* pn533 frame definitions (standard and extended) */
#define PN533_STD_FRAME_HEADER_LEN (sizeof(struct pn533_std_frame) \
+ 2) /* data[0] TFI, data[1] CC */
#define PN533_STD_FRAME_TAIL_LEN 2 /* data[len] DCS, data[len + 1] postamble*/
+#define PN533_EXT_FRAME_HEADER_LEN (sizeof(struct pn533_ext_frame) \
+ + 2) /* data[0] TFI, data[1] CC */
+
+#define PN533_CMD_DATAEXCH_DATA_MAXLEN 262
+#define PN533_CMD_DATAFRAME_MAXLEN 240 /* max data length (send) */
+
/*
* Max extended frame payload len, excluding TFI and CC
* which are already in PN533_FRAME_HEADER_LEN.
@@ -99,6 +107,10 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
Postamble (1) */
#define PN533_STD_FRAME_CHECKSUM(f) (f->data[f->datalen])
#define PN533_STD_FRAME_POSTAMBLE(f) (f->data[f->datalen + 1])
+/* LEN (byte 3) and LCS (byte 4) both read 0xff for an extended frame */
+#define PN533_STD_IS_EXTENDED(hdr) ((hdr)->datalen == 0xFF \
+ && (hdr)->datalen_checksum == 0xFF)
+#define PN533_EXT_FRAME_CHECKSUM(f) (f->data[be16_to_cpu(f->datalen)])
/* start of frame */
#define PN533_STD_FRAME_SOF 0x00FF
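
A standard header carries LEN in byte 3 and its checksum LCS in byte 4; an extended header escapes both to 0xff and carries the real 16-bit length afterwards, which is what PN533_STD_IS_EXTENDED keys on. An illustrative check over made-up header bytes (not captured traffic, not part of the commit):

    #include <stdio.h>
    #include <stdint.h>

    /* Byte 3 is LEN and byte 4 is LCS in a standard header; both read
     * 0xff in an extended header, where a 16-bit length follows. */
    static int is_extended(const uint8_t *hdr)
    {
            return hdr[3] == 0xFF && hdr[4] == 0xFF;
    }

    int main(void)
    {
            uint8_t std_hdr[] = { 0x00, 0x00, 0xFF, 0x04, 0xFC };  /* LEN+LCS == 0 */
            uint8_t ext_hdr[] = { 0x00, 0x00, 0xFF, 0xFF, 0xFF };  /* escape marker */

            printf("std: %d ext: %d\n", is_extended(std_hdr), is_extended(ext_hdr));
            return 0;
    }
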
@@ -124,7 +136,7 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
#define PN533_ACR122_RDR_TO_PC_ESCAPE 0x83
/* PN533 Commands */
-#define PN533_STD_FRAME_CMD(f) (f->data[1])
+#define PN533_FRAME_CMD(f) (f->data[1])
#define PN533_CMD_GET_FIRMWARE_VERSION 0x02
#define PN533_CMD_RF_CONFIGURATION 0x32
@@ -168,8 +180,9 @@ struct pn533_fw_version {
#define PN533_CFGITEM_MAX_RETRIES 0x05
#define PN533_CFGITEM_PASORI 0x82
-#define PN533_CFGITEM_RF_FIELD_ON 0x1
-#define PN533_CFGITEM_RF_FIELD_OFF 0x0
+#define PN533_CFGITEM_RF_FIELD_AUTO_RFCA 0x2
+#define PN533_CFGITEM_RF_FIELD_ON 0x1
+#define PN533_CFGITEM_RF_FIELD_OFF 0x0
#define PN533_CONFIG_TIMING_102 0xb
#define PN533_CONFIG_TIMING_204 0xc
@@ -257,7 +270,7 @@ static const struct pn533_poll_modulations poll_mod[] = {
.initiator_data.felica = {
.opcode = PN533_FELICA_OPC_SENSF_REQ,
.sc = PN533_FELICA_SENSF_SC_ALL,
- .rc = PN533_FELICA_SENSF_RC_NO_SYSTEM_CODE,
+ .rc = PN533_FELICA_SENSF_RC_SYSTEM_CODE,
.tsn = 0x03,
},
},
@@ -270,7 +283,7 @@ static const struct pn533_poll_modulations poll_mod[] = {
.initiator_data.felica = {
.opcode = PN533_FELICA_OPC_SENSF_REQ,
.sc = PN533_FELICA_SENSF_SC_ALL,
- .rc = PN533_FELICA_SENSF_RC_NO_SYSTEM_CODE,
+ .rc = PN533_FELICA_SENSF_RC_SYSTEM_CODE,
.tsn = 0x03,
},
},
@@ -352,13 +365,16 @@ struct pn533 {
struct urb *in_urb;
struct sk_buff_head resp_q;
+ struct sk_buff_head fragment_skb;
struct workqueue_struct *wq;
struct work_struct cmd_work;
struct work_struct cmd_complete_work;
- struct work_struct poll_work;
- struct work_struct mi_work;
+ struct delayed_work poll_work;
+ struct work_struct mi_rx_work;
+ struct work_struct mi_tx_work;
struct work_struct tg_work;
+ struct work_struct rf_work;
struct list_head cmd_queue;
struct pn533_cmd *cmd;
@@ -366,6 +382,7 @@ struct pn533 {
struct mutex cmd_lock; /* protects cmd queue */
void *cmd_complete_mi_arg;
+ void *cmd_complete_dep_arg;
struct pn533_poll_modulations *poll_mod_active[PN533_POLL_MOD_MAX + 1];
u8 poll_mod_count;
@@ -404,6 +421,15 @@ struct pn533_std_frame {
u8 data[];
} __packed;
+struct pn533_ext_frame { /* Extended Information frame */
+ u8 preamble;
+ __be16 start_frame;
+ __be16 eif_flag; /* fixed to 0xFFFF */
+ __be16 datalen;
+ u8 datalen_checksum;
+ u8 data[];
+} __packed;
+
struct pn533_frame_ops {
void (*tx_frame_init)(void *frame, u8 cmd_code);
void (*tx_frame_finish)(void *frame);
@@ -411,7 +437,7 @@ struct pn533_frame_ops {
int tx_header_len;
int tx_tail_len;
- bool (*rx_is_frame_valid)(void *frame);
+ bool (*rx_is_frame_valid)(void *frame, struct pn533 *dev);
int (*rx_frame_size)(void *frame);
int rx_header_len;
int rx_tail_len;
@@ -486,7 +512,7 @@ static void pn533_acr122_tx_update_payload_len(void *_frame, int len)
frame->datalen += len;
}
-static bool pn533_acr122_is_rx_frame_valid(void *_frame)
+static bool pn533_acr122_is_rx_frame_valid(void *_frame, struct pn533 *dev)
{
struct pn533_acr122_rx_frame *frame = _frame;
@@ -511,7 +537,7 @@ static u8 pn533_acr122_get_cmd_code(void *frame)
{
struct pn533_acr122_rx_frame *f = frame;
- return PN533_STD_FRAME_CMD(f);
+ return PN533_FRAME_CMD(f);
}
static struct pn533_frame_ops pn533_acr122_frame_ops = {
@@ -530,6 +556,12 @@ static struct pn533_frame_ops pn533_acr122_frame_ops = {
.get_cmd_code = pn533_acr122_get_cmd_code,
};
+/* The rule: value (high byte) + value (low byte) + checksum = 0 */
+static inline u8 pn533_ext_checksum(u16 value)
+{
+ return ~(u8)(((value & 0xFF00) >> 8) + (u8)(value & 0xFF)) + 1;
+}
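
A worked instance of that rule (illustrative only, not part of the commit): for a 16-bit length of 0x0104 the helper yields 0xfb, and 0x01 + 0x04 + 0xfb wraps to zero modulo 256:

    #include <stdio.h>
    #include <stdint.h>

    static uint8_t ext_checksum(uint16_t value)
    {
            return ~(uint8_t)(((value & 0xFF00) >> 8) + (uint8_t)(value & 0xFF)) + 1;
    }

    int main(void)
    {
            uint16_t datalen = 0x0104;              /* a 260-byte payload */
            uint8_t  lcs = ext_checksum(datalen);   /* 0xFB */

            /* 0x01 + 0x04 + 0xFB == 0x100, i.e. 0 modulo 256. */
            printf("lcs=0x%02X sum=0x%02X\n", lcs,
                   (uint8_t)((datalen >> 8) + (datalen & 0xFF) + lcs));
            return 0;
    }
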
+
/* The rule: value + checksum = 0 */
static inline u8 pn533_std_checksum(u8 value)
{
@@ -555,7 +587,7 @@ static void pn533_std_tx_frame_init(void *_frame, u8 cmd_code)
frame->preamble = 0;
frame->start_frame = cpu_to_be16(PN533_STD_FRAME_SOF);
PN533_STD_FRAME_IDENTIFIER(frame) = PN533_STD_FRAME_DIR_OUT;
- PN533_STD_FRAME_CMD(frame) = cmd_code;
+ PN533_FRAME_CMD(frame) = cmd_code;
frame->datalen = 2;
}
@@ -578,21 +610,41 @@ static void pn533_std_tx_update_payload_len(void *_frame, int len)
frame->datalen += len;
}
-static bool pn533_std_rx_frame_is_valid(void *_frame)
+static bool pn533_std_rx_frame_is_valid(void *_frame, struct pn533 *dev)
{
u8 checksum;
- struct pn533_std_frame *frame = _frame;
+ struct pn533_std_frame *stdf = _frame;
- if (frame->start_frame != cpu_to_be16(PN533_STD_FRAME_SOF))
+ if (stdf->start_frame != cpu_to_be16(PN533_STD_FRAME_SOF))
return false;
- checksum = pn533_std_checksum(frame->datalen);
- if (checksum != frame->datalen_checksum)
- return false;
+ if (likely(!PN533_STD_IS_EXTENDED(stdf))) {
+ /* Standard frame code */
+ dev->ops->rx_header_len = PN533_STD_FRAME_HEADER_LEN;
- checksum = pn533_std_data_checksum(frame->data, frame->datalen);
- if (checksum != PN533_STD_FRAME_CHECKSUM(frame))
- return false;
+ checksum = pn533_std_checksum(stdf->datalen);
+ if (checksum != stdf->datalen_checksum)
+ return false;
+
+ checksum = pn533_std_data_checksum(stdf->data, stdf->datalen);
+ if (checksum != PN533_STD_FRAME_CHECKSUM(stdf))
+ return false;
+ } else {
+ /* Extended */
+ struct pn533_ext_frame *eif = _frame;
+
+ dev->ops->rx_header_len = PN533_EXT_FRAME_HEADER_LEN;
+
+ checksum = pn533_ext_checksum(be16_to_cpu(eif->datalen));
+ if (checksum != eif->datalen_checksum)
+ return false;
+
+ /* check data checksum */
+ checksum = pn533_std_data_checksum(eif->data,
+ be16_to_cpu(eif->datalen));
+ if (checksum != PN533_EXT_FRAME_CHECKSUM(eif))
+ return false;
+ }
return true;
}
@@ -612,6 +664,14 @@ static inline int pn533_std_rx_frame_size(void *frame)
{
struct pn533_std_frame *f = frame;
+ /* check for Extended Information frame */
+ if (PN533_STD_IS_EXTENDED(f)) {
+ struct pn533_ext_frame *eif = frame;
+
+ return sizeof(struct pn533_ext_frame)
+ + be16_to_cpu(eif->datalen) + PN533_STD_FRAME_TAIL_LEN;
+ }
+
return sizeof(struct pn533_std_frame) + f->datalen +
PN533_STD_FRAME_TAIL_LEN;
}
@@ -619,8 +679,12 @@ static inline int pn533_std_rx_frame_size(void *frame)
static u8 pn533_std_get_cmd_code(void *frame)
{
struct pn533_std_frame *f = frame;
+ struct pn533_ext_frame *eif = frame;
- return PN533_STD_FRAME_CMD(f);
+ if (PN533_STD_IS_EXTENDED(f))
+ return PN533_FRAME_CMD(eif);
+ else
+ return PN533_FRAME_CMD(f);
}
static struct pn533_frame_ops pn533_std_frame_ops = {
@@ -675,7 +739,7 @@ static void pn533_recv_response(struct urb *urb)
print_hex_dump_debug("PN533 RX: ", DUMP_PREFIX_NONE, 16, 1, in_frame,
dev->ops->rx_frame_size(in_frame), false);
- if (!dev->ops->rx_is_frame_valid(in_frame)) {
+ if (!dev->ops->rx_is_frame_valid(in_frame, dev)) {
nfc_dev_err(&dev->interface->dev, "Received an invalid frame");
cmd->status = -EIO;
goto sched_wq;
@@ -1657,7 +1721,56 @@ static void pn533_listen_mode_timer(unsigned long data)
pn533_poll_next_mod(dev);
- queue_work(dev->wq, &dev->poll_work);
+ queue_delayed_work(dev->wq, &dev->poll_work,
+ msecs_to_jiffies(PN533_POLL_INTERVAL));
+}
+
+static int pn533_rf_complete(struct pn533 *dev, void *arg,
+ struct sk_buff *resp)
+{
+ int rc = 0;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ if (IS_ERR(resp)) {
+ rc = PTR_ERR(resp);
+
+ nfc_dev_err(&dev->interface->dev, "%s RF setting error %d",
+ __func__, rc);
+
+ return rc;
+ }
+
+ queue_delayed_work(dev->wq, &dev->poll_work,
+ msecs_to_jiffies(PN533_POLL_INTERVAL));
+
+ dev_kfree_skb(resp);
+ return rc;
+}
+
+static void pn533_wq_rf(struct work_struct *work)
+{
+ struct pn533 *dev = container_of(work, struct pn533, rf_work);
+ struct sk_buff *skb;
+ int rc;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ skb = pn533_alloc_skb(dev, 2);
+ if (!skb)
+ return;
+
+ *skb_put(skb, 1) = PN533_CFGITEM_RF_FIELD;
+ *skb_put(skb, 1) = PN533_CFGITEM_RF_FIELD_AUTO_RFCA;
+
+ rc = pn533_send_cmd_async(dev, PN533_CMD_RF_CONFIGURATION, skb,
+ pn533_rf_complete, NULL);
+ if (rc < 0) {
+ dev_kfree_skb(skb);
+ nfc_dev_err(&dev->interface->dev, "RF setting error %d", rc);
+ }
+
+ return;
}
static int pn533_poll_complete(struct pn533 *dev, void *arg,
@@ -1705,7 +1818,8 @@ static int pn533_poll_complete(struct pn533 *dev, void *arg,
}
pn533_poll_next_mod(dev);
- queue_work(dev->wq, &dev->poll_work);
+ /* No target found, turn the radio off */
+ queue_work(dev->wq, &dev->rf_work);
done:
dev_kfree_skb(resp);
@@ -1770,7 +1884,7 @@ static int pn533_send_poll_frame(struct pn533 *dev)
static void pn533_wq_poll(struct work_struct *work)
{
- struct pn533 *dev = container_of(work, struct pn533, poll_work);
+ struct pn533 *dev = container_of(work, struct pn533, poll_work.work);
struct pn533_poll_modulations *cur_mod;
int rc;
@@ -1799,6 +1913,7 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev,
u32 im_protocols, u32 tm_protocols)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+ u8 rand_mod;
nfc_dev_dbg(&dev->interface->dev,
"%s: im protocols 0x%x tm protocols 0x%x",
@@ -1822,11 +1937,15 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev,
tm_protocols = 0;
}
- dev->poll_mod_curr = 0;
pn533_poll_create_mod_list(dev, im_protocols, tm_protocols);
dev->poll_protocols = im_protocols;
dev->listen_protocols = tm_protocols;
+ /* Do not always start polling from the same modulation */
+ get_random_bytes(&rand_mod, sizeof(rand_mod));
+ rand_mod %= dev->poll_mod_count;
+ dev->poll_mod_curr = rand_mod;
+
return pn533_send_poll_frame(dev);
}
@@ -1845,6 +1964,7 @@ static void pn533_stop_poll(struct nfc_dev *nfc_dev)
}
pn533_abort_cmd(dev, GFP_KERNEL);
+ flush_delayed_work(&dev->poll_work);
pn533_poll_reset_mod_list(dev);
}
@@ -2037,28 +2157,15 @@ error:
return rc;
}
-static int pn533_mod_to_baud(struct pn533 *dev)
-{
- switch (dev->poll_mod_curr) {
- case PN533_POLL_MOD_106KBPS_A:
- return 0;
- case PN533_POLL_MOD_212KBPS_FELICA:
- return 1;
- case PN533_POLL_MOD_424KBPS_FELICA:
- return 2;
- default:
- return -EINVAL;
- }
-}
-
+static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf);
#define PASSIVE_DATA_LEN 5
static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
u8 comm_mode, u8 *gb, size_t gb_len)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
struct sk_buff *skb;
- int rc, baud, skb_len;
- u8 *next, *arg;
+ int rc, skb_len;
+ u8 *next, *arg, nfcid3[NFC_NFCID3_MAXSIZE];
u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3};
@@ -2076,41 +2183,39 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
return -EBUSY;
}
- baud = pn533_mod_to_baud(dev);
- if (baud < 0) {
- nfc_dev_err(&dev->interface->dev,
- "Invalid curr modulation %d", dev->poll_mod_curr);
- return baud;
- }
-
skb_len = 3 + gb_len; /* ActPass + BR + Next */
- if (comm_mode == NFC_COMM_PASSIVE)
- skb_len += PASSIVE_DATA_LEN;
+ skb_len += PASSIVE_DATA_LEN;
- if (target && target->nfcid2_len)
- skb_len += NFC_NFCID3_MAXSIZE;
+ /* NFCID3 */
+ skb_len += NFC_NFCID3_MAXSIZE;
+ if (target && !target->nfcid2_len) {
+ nfcid3[0] = 0x1;
+ nfcid3[1] = 0xfe;
+ get_random_bytes(nfcid3 + 2, 6);
+ }
skb = pn533_alloc_skb(dev, skb_len);
if (!skb)
return -ENOMEM;
*skb_put(skb, 1) = !comm_mode; /* ActPass */
- *skb_put(skb, 1) = baud; /* Baud rate */
+ *skb_put(skb, 1) = 0x02; /* 424 kbps */
next = skb_put(skb, 1); /* Next */
*next = 0;
- if (comm_mode == NFC_COMM_PASSIVE && baud > 0) {
- memcpy(skb_put(skb, PASSIVE_DATA_LEN), passive_data,
- PASSIVE_DATA_LEN);
- *next |= 1;
- }
+ /* Copy passive data */
+ memcpy(skb_put(skb, PASSIVE_DATA_LEN), passive_data, PASSIVE_DATA_LEN);
+ *next |= 1;
- if (target && target->nfcid2_len) {
+ /* Copy NFCID3 (which is NFCID2 from SENSF_RES) */
+ if (target && target->nfcid2_len)
memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), target->nfcid2,
target->nfcid2_len);
- *next |= 2;
- }
+ else
+ memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), nfcid3,
+ NFC_NFCID3_MAXSIZE);
+ *next |= 2;
if (gb != NULL && gb_len > 0) {
memcpy(skb_put(skb, gb_len), gb, gb_len);
@@ -2127,6 +2232,8 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
*arg = !comm_mode;
+ pn533_rf_field(dev->nfc_dev, 0);
+
rc = pn533_send_cmd_async(dev, PN533_CMD_IN_JUMP_FOR_DEP, skb,
pn533_in_dep_link_up_complete, arg);
@@ -2232,7 +2339,15 @@ static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg,
if (mi) {
dev->cmd_complete_mi_arg = arg;
- queue_work(dev->wq, &dev->mi_work);
+ queue_work(dev->wq, &dev->mi_rx_work);
+ return -EINPROGRESS;
+ }
+
+ /* Prepare for the next round */
+ if (skb_queue_len(&dev->fragment_skb) > 0) {
+ dev->cmd_complete_dep_arg = arg;
+ queue_work(dev->wq, &dev->mi_tx_work);
+
return -EINPROGRESS;
}
@@ -2253,6 +2368,50 @@ _error:
return rc;
}
+/* Split the Tx skb into small chunks */
+static int pn533_fill_fragment_skbs(struct pn533 *dev, struct sk_buff *skb)
+{
+ struct sk_buff *frag;
+ int frag_size;
+
+ do {
+ /* Remaining size */
+ if (skb->len > PN533_CMD_DATAFRAME_MAXLEN)
+ frag_size = PN533_CMD_DATAFRAME_MAXLEN;
+ else
+ frag_size = skb->len;
+
+ /* Allocate and reserve */
+ frag = pn533_alloc_skb(dev, frag_size);
+ if (!frag) {
+ skb_queue_purge(&dev->fragment_skb);
+ break;
+ }
+
+ /* Reserve the TG/MI byte */
+ skb_reserve(frag, 1);
+
+ /* MI + TG */
+ if (frag_size == PN533_CMD_DATAFRAME_MAXLEN)
+ *skb_push(frag, sizeof(u8)) = (PN533_CMD_MI_MASK | 1);
+ else
+ *skb_push(frag, sizeof(u8)) = 1; /* TG */
+
+ memcpy(skb_put(frag, frag_size), skb->data, frag_size);
+
+ /* Reduce the size of incoming buffer */
+ skb_pull(skb, frag_size);
+
+ /* Add this to skb_queue */
+ skb_queue_tail(&dev->fragment_skb, frag);
+
+ } while (skb->len > 0);
+
+ dev_kfree_skb(skb);
+
+ return skb_queue_len(&dev->fragment_skb);
+}
+
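The helper above copies at most PN533_CMD_DATAFRAME_MAXLEN bytes per fragment and prefixes each chunk with a TG byte, OR-ing in the MI ("more information") bit for full-size chunks. A minimal host-side sketch of the same chunking rule follows; the two constants are illustrative stand-ins for the driver's values, not taken from this patch.

	#include <stdio.h>
	#include <stddef.h>

	#define DATAFRAME_MAXLEN 240   /* illustrative stand-in for PN533_CMD_DATAFRAME_MAXLEN */
	#define MI_MASK          0x40  /* illustrative stand-in for PN533_CMD_MI_MASK */

	static void split(size_t len)
	{
		while (len > 0) {
			size_t frag = len > DATAFRAME_MAXLEN ? DATAFRAME_MAXLEN : len;
			unsigned char tg = 1;			/* TG byte */

			if (frag == DATAFRAME_MAXLEN)
				tg |= MI_MASK;			/* more data follows */

			printf("fragment: TG/MI=0x%02x, payload=%zu bytes\n", tg, frag);
			len -= frag;
		}
	}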
static int pn533_transceive(struct nfc_dev *nfc_dev,
struct nfc_target *target, struct sk_buff *skb,
data_exchange_cb_t cb, void *cb_context)
@@ -2263,15 +2422,6 @@ static int pn533_transceive(struct nfc_dev *nfc_dev,
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
- if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
- /* TODO: Implement support to multi-part data exchange */
- nfc_dev_err(&dev->interface->dev,
- "Data length greater than the max allowed: %d",
- PN533_CMD_DATAEXCH_DATA_MAXLEN);
- rc = -ENOSYS;
- goto error;
- }
-
if (!dev->tgt_active_prot) {
nfc_dev_err(&dev->interface->dev,
"Can't exchange data if there is no active target");
@@ -2299,7 +2449,20 @@ static int pn533_transceive(struct nfc_dev *nfc_dev,
break;
}
default:
- *skb_push(skb, sizeof(u8)) = 1; /*TG*/
+ /* Jumbo frame? */
+ if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
+ rc = pn533_fill_fragment_skbs(dev, skb);
+ if (rc <= 0)
+ goto error;
+
+ skb = skb_dequeue(&dev->fragment_skb);
+ if (!skb) {
+ rc = -EIO;
+ goto error;
+ }
+ } else {
+ *skb_push(skb, sizeof(u8)) = 1; /* TG */
+ }
rc = pn533_send_data_async(dev, PN533_CMD_IN_DATA_EXCHANGE,
skb, pn533_data_exchange_complete,
@@ -2370,7 +2533,7 @@ static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
static void pn533_wq_mi_recv(struct work_struct *work)
{
- struct pn533 *dev = container_of(work, struct pn533, mi_work);
+ struct pn533 *dev = container_of(work, struct pn533, mi_rx_work);
struct sk_buff *skb;
int rc;
@@ -2418,6 +2581,61 @@ error:
queue_work(dev->wq, &dev->cmd_work);
}
+static void pn533_wq_mi_send(struct work_struct *work)
+{
+ struct pn533 *dev = container_of(work, struct pn533, mi_tx_work);
+ struct sk_buff *skb;
+ int rc;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ /* Grab the first skb in the queue */
+ skb = skb_dequeue(&dev->fragment_skb);
+
+ if (skb == NULL) { /* No more data */
+ /* Reset the queue for future use */
+ skb_queue_head_init(&dev->fragment_skb);
+ goto error;
+ }
+
+ switch (dev->device_type) {
+ case PN533_DEVICE_PASORI:
+ if (dev->tgt_active_prot != NFC_PROTO_FELICA) {
+ rc = -EIO;
+ break;
+ }
+
+ rc = pn533_send_cmd_direct_async(dev, PN533_CMD_IN_COMM_THRU,
+ skb,
+ pn533_data_exchange_complete,
+ dev->cmd_complete_dep_arg);
+
+ break;
+
+ default:
+ /* Still some fragments? */
+ rc = pn533_send_cmd_direct_async(dev, PN533_CMD_IN_DATA_EXCHANGE,
+ skb,
+ pn533_data_exchange_complete,
+ dev->cmd_complete_dep_arg);
+
+ break;
+ }
+
+ if (rc == 0) /* success */
+ return;
+
+ nfc_dev_err(&dev->interface->dev,
+ "Error %d when trying to perform data_exchange", rc);
+
+ dev_kfree_skb(skb);
+ kfree(dev->cmd_complete_dep_arg);
+
+error:
+ pn533_send_ack(dev, GFP_KERNEL);
+ queue_work(dev->wq, &dev->cmd_work);
+}
+
static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
u8 cfgdata_len)
{
@@ -2562,6 +2780,8 @@ static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf)
u8 rf_field = !!rf;
int rc;
+ rf_field |= PN533_CFGITEM_RF_FIELD_AUTO_RFCA;
+
rc = pn533_set_configuration(dev, PN533_CFGITEM_RF_FIELD,
(u8 *)&rf_field, 1);
if (rc) {
@@ -2605,17 +2825,6 @@ static int pn533_setup(struct pn533 *dev)
switch (dev->device_type) {
case PN533_DEVICE_STD:
- max_retries.mx_rty_atr = PN533_CONFIG_MAX_RETRIES_ENDLESS;
- max_retries.mx_rty_psl = 2;
- max_retries.mx_rty_passive_act =
- PN533_CONFIG_MAX_RETRIES_NO_RETRY;
-
- timing.rfu = PN533_CONFIG_TIMING_102;
- timing.atr_res_timeout = PN533_CONFIG_TIMING_204;
- timing.dep_timeout = PN533_CONFIG_TIMING_409;
-
- break;
-
case PN533_DEVICE_PASORI:
case PN533_DEVICE_ACR122U:
max_retries.mx_rty_atr = 0x2;
@@ -2729,9 +2938,11 @@ static int pn533_probe(struct usb_interface *interface,
INIT_WORK(&dev->cmd_work, pn533_wq_cmd);
INIT_WORK(&dev->cmd_complete_work, pn533_wq_cmd_complete);
- INIT_WORK(&dev->mi_work, pn533_wq_mi_recv);
+ INIT_WORK(&dev->mi_rx_work, pn533_wq_mi_recv);
+ INIT_WORK(&dev->mi_tx_work, pn533_wq_mi_send);
INIT_WORK(&dev->tg_work, pn533_wq_tg_get_data);
- INIT_WORK(&dev->poll_work, pn533_wq_poll);
+ INIT_DELAYED_WORK(&dev->poll_work, pn533_wq_poll);
+ INIT_WORK(&dev->rf_work, pn533_wq_rf);
dev->wq = alloc_ordered_workqueue("pn533", 0);
if (dev->wq == NULL)
goto error;
@@ -2741,6 +2952,7 @@ static int pn533_probe(struct usb_interface *interface,
dev->listen_timer.function = pn533_listen_mode_timer;
skb_queue_head_init(&dev->resp_q);
+ skb_queue_head_init(&dev->fragment_skb);
INIT_LIST_HEAD(&dev->cmd_queue);
@@ -2842,6 +3054,7 @@ static void pn533_disconnect(struct usb_interface *interface)
usb_kill_urb(dev->in_urb);
usb_kill_urb(dev->out_urb);
+ flush_delayed_work(&dev->poll_work);
destroy_workqueue(dev->wq);
skb_queue_purge(&dev->resp_q);
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index 8cf64c19f022..01e27d4bdd0d 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -25,11 +25,14 @@
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-
+#include <linux/nfc.h>
+#include <linux/firmware.h>
+#include <linux/unaligned/access_ok.h>
#include <linux/platform_data/pn544.h>
#include <net/nfc/hci.h>
#include <net/nfc/llc.h>
+#include <net/nfc/nfc.h>
#include "pn544.h"
@@ -55,6 +58,58 @@ MODULE_DEVICE_TABLE(i2c, pn544_hci_i2c_id_table);
#define PN544_HCI_I2C_DRIVER_NAME "pn544_hci_i2c"
+#define PN544_FW_CMD_WRITE 0x08
+#define PN544_FW_CMD_CHECK 0x06
+
+struct pn544_i2c_fw_frame_write {
+ u8 cmd;
+ u16 be_length;
+ u8 be_dest_addr[3];
+ u16 be_datalen;
+ u8 data[];
+} __packed;
+
+struct pn544_i2c_fw_frame_check {
+ u8 cmd;
+ u16 be_length;
+ u8 be_start_addr[3];
+ u16 be_datalen;
+ u16 be_crc;
+} __packed;
+
+struct pn544_i2c_fw_frame_response {
+ u8 status;
+ u16 be_length;
+} __packed;
+
+struct pn544_i2c_fw_blob {
+ u32 be_size;
+ u32 be_destaddr;
+ u8 data[];
+};
+
+#define PN544_FW_CMD_RESULT_TIMEOUT 0x01
+#define PN544_FW_CMD_RESULT_BAD_CRC 0x02
+#define PN544_FW_CMD_RESULT_ACCESS_DENIED 0x08
+#define PN544_FW_CMD_RESULT_PROTOCOL_ERROR 0x0B
+#define PN544_FW_CMD_RESULT_INVALID_PARAMETER 0x11
+#define PN544_FW_CMD_RESULT_INVALID_LENGTH 0x18
+#define PN544_FW_CMD_RESULT_WRITE_FAILED 0x74
+
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+
+#define PN544_FW_WRITE_BUFFER_MAX_LEN 0x9f7
+#define PN544_FW_I2C_MAX_PAYLOAD PN544_HCI_I2C_LLC_MAX_SIZE
+#define PN544_FW_I2C_WRITE_FRAME_HEADER_LEN 8
+#define PN544_FW_I2C_WRITE_DATA_MAX_LEN MIN((PN544_FW_I2C_MAX_PAYLOAD -\
+ PN544_FW_I2C_WRITE_FRAME_HEADER_LEN),\
+ PN544_FW_WRITE_BUFFER_MAX_LEN)
+
+#define FW_WORK_STATE_IDLE 1
+#define FW_WORK_STATE_START 2
+#define FW_WORK_STATE_WAIT_WRITE_ANSWER 3
+#define FW_WORK_STATE_WAIT_CHECK_ANSWER 4
+
struct pn544_i2c_phy {
struct i2c_client *i2c_dev;
struct nfc_hci_dev *hdev;
@@ -64,7 +119,18 @@ struct pn544_i2c_phy {
unsigned int gpio_fw;
unsigned int en_polarity;
+ struct work_struct fw_work;
+ int fw_work_state;
+ char firmware_name[NFC_FIRMWARE_NAME_MAXSIZE + 1];
+ const struct firmware *fw;
+ u32 fw_blob_dest_addr;
+ size_t fw_blob_size;
+ const u8 *fw_blob_data;
+ size_t fw_written;
+ int fw_cmd_result;
+
int powered;
+ int run_mode;
int hard_fault; /*
* < 0 if hardware error occurred (e.g. i2c err)
@@ -122,15 +188,22 @@ out:
gpio_set_value(phy->gpio_en, !phy->en_polarity);
}
+static void pn544_hci_i2c_enable_mode(struct pn544_i2c_phy *phy, int run_mode)
+{
+ gpio_set_value(phy->gpio_fw, run_mode == PN544_FW_MODE ? 1 : 0);
+ gpio_set_value(phy->gpio_en, phy->en_polarity);
+ usleep_range(10000, 15000);
+
+ phy->run_mode = run_mode;
+}
+
static int pn544_hci_i2c_enable(void *phy_id)
{
struct pn544_i2c_phy *phy = phy_id;
pr_info(DRIVER_DESC ": %s\n", __func__);
- gpio_set_value(phy->gpio_fw, 0);
- gpio_set_value(phy->gpio_en, phy->en_polarity);
- usleep_range(10000, 15000);
+ pn544_hci_i2c_enable_mode(phy, PN544_HCI_MODE);
phy->powered = 1;
@@ -305,6 +378,42 @@ flush:
return r;
}
+static int pn544_hci_i2c_fw_read_status(struct pn544_i2c_phy *phy)
+{
+ int r;
+ struct pn544_i2c_fw_frame_response response;
+ struct i2c_client *client = phy->i2c_dev;
+
+ r = i2c_master_recv(client, (char *) &response, sizeof(response));
+ if (r != sizeof(response)) {
+ dev_err(&client->dev, "cannot read fw status\n");
+ return -EIO;
+ }
+
+ usleep_range(3000, 6000);
+
+ switch (response.status) {
+ case 0:
+ return 0;
+ case PN544_FW_CMD_RESULT_TIMEOUT:
+ return -ETIMEDOUT;
+ case PN544_FW_CMD_RESULT_BAD_CRC:
+ return -ENODATA;
+ case PN544_FW_CMD_RESULT_ACCESS_DENIED:
+ return -EACCES;
+ case PN544_FW_CMD_RESULT_PROTOCOL_ERROR:
+ return -EPROTO;
+ case PN544_FW_CMD_RESULT_INVALID_PARAMETER:
+ return -EINVAL;
+ case PN544_FW_CMD_RESULT_INVALID_LENGTH:
+ return -EBADMSG;
+ case PN544_FW_CMD_RESULT_WRITE_FAILED:
+ return -EIO;
+ default:
+ return -EIO;
+ }
+}
+
/*
* Reads a shdlc frame from the chip. This is not as straightforward as it
* seems. There are cases where we could lose the frame start synchronization.
@@ -339,19 +448,23 @@ static irqreturn_t pn544_hci_i2c_irq_thread_fn(int irq, void *phy_id)
if (phy->hard_fault != 0)
return IRQ_HANDLED;
- r = pn544_hci_i2c_read(phy, &skb);
- if (r == -EREMOTEIO) {
- phy->hard_fault = r;
+ if (phy->run_mode == PN544_FW_MODE) {
+ phy->fw_cmd_result = pn544_hci_i2c_fw_read_status(phy);
+ schedule_work(&phy->fw_work);
+ } else {
+ r = pn544_hci_i2c_read(phy, &skb);
+ if (r == -EREMOTEIO) {
+ phy->hard_fault = r;
- nfc_hci_recv_frame(phy->hdev, NULL);
+ nfc_hci_recv_frame(phy->hdev, NULL);
- return IRQ_HANDLED;
- } else if ((r == -ENOMEM) || (r == -EBADMSG)) {
- return IRQ_HANDLED;
- }
-
- nfc_hci_recv_frame(phy->hdev, skb);
+ return IRQ_HANDLED;
+ } else if ((r == -ENOMEM) || (r == -EBADMSG)) {
+ return IRQ_HANDLED;
+ }
+ nfc_hci_recv_frame(phy->hdev, skb);
+ }
return IRQ_HANDLED;
}
@@ -361,6 +474,215 @@ static struct nfc_phy_ops i2c_phy_ops = {
.disable = pn544_hci_i2c_disable,
};
+static int pn544_hci_i2c_fw_download(void *phy_id, const char *firmware_name)
+{
+ struct pn544_i2c_phy *phy = phy_id;
+
+ pr_info(DRIVER_DESC ": Starting Firmware Download (%s)\n",
+ firmware_name);
+
+ strcpy(phy->firmware_name, firmware_name);
+
+ phy->fw_work_state = FW_WORK_STATE_START;
+
+ schedule_work(&phy->fw_work);
+
+ return 0;
+}
+
+static void pn544_hci_i2c_fw_work_complete(struct pn544_i2c_phy *phy,
+ int result)
+{
+ pr_info(DRIVER_DESC ": Firmware Download Complete, result=%d\n", result);
+
+ pn544_hci_i2c_disable(phy);
+
+ phy->fw_work_state = FW_WORK_STATE_IDLE;
+
+ if (phy->fw) {
+ release_firmware(phy->fw);
+ phy->fw = NULL;
+ }
+
+ nfc_fw_download_done(phy->hdev->ndev, phy->firmware_name, (u32) -result);
+}
+
+static int pn544_hci_i2c_fw_write_cmd(struct i2c_client *client, u32 dest_addr,
+ const u8 *data, u16 datalen)
+{
+ u8 frame[PN544_FW_I2C_MAX_PAYLOAD];
+ struct pn544_i2c_fw_frame_write *framep;
+ u16 params_len;
+ int framelen;
+ int r;
+
+ if (datalen > PN544_FW_I2C_WRITE_DATA_MAX_LEN)
+ datalen = PN544_FW_I2C_WRITE_DATA_MAX_LEN;
+
+ framep = (struct pn544_i2c_fw_frame_write *) frame;
+
+ params_len = sizeof(framep->be_dest_addr) +
+ sizeof(framep->be_datalen) + datalen;
+ framelen = params_len + sizeof(framep->cmd) +
+ sizeof(framep->be_length);
+
+ framep->cmd = PN544_FW_CMD_WRITE;
+
+ put_unaligned_be16(params_len, &framep->be_length);
+
+ framep->be_dest_addr[0] = (dest_addr & 0xff0000) >> 16;
+ framep->be_dest_addr[1] = (dest_addr & 0xff00) >> 8;
+ framep->be_dest_addr[2] = dest_addr & 0xff;
+
+ put_unaligned_be16(datalen, &framep->be_datalen);
+
+ memcpy(framep->data, data, datalen);
+
+ r = i2c_master_send(client, frame, framelen);
+
+ if (r == framelen)
+ return datalen;
+ else if (r < 0)
+ return r;
+ else
+ return -EIO;
+}
+
+static int pn544_hci_i2c_fw_check_cmd(struct i2c_client *client, u32 start_addr,
+ const u8 *data, u16 datalen)
+{
+ struct pn544_i2c_fw_frame_check frame;
+ int r;
+ u16 crc;
+
+ /* calculate local crc for the data we want to check */
+ crc = crc_ccitt(0xffff, data, datalen);
+
+ frame.cmd = PN544_FW_CMD_CHECK;
+
+ put_unaligned_be16(sizeof(frame.be_start_addr) +
+ sizeof(frame.be_datalen) + sizeof(frame.be_crc),
+ &frame.be_length);
+
+ /* tell the chip the memory region to which our crc applies */
+ frame.be_start_addr[0] = (start_addr & 0xff0000) >> 16;
+ frame.be_start_addr[1] = (start_addr & 0xff00) >> 8;
+ frame.be_start_addr[2] = start_addr & 0xff;
+
+ put_unaligned_be16(datalen, &frame.be_datalen);
+
+ /*
+ * and give our local crc. Chip will calculate its own crc for the
+ * region and compare with ours.
+ */
+ put_unaligned_be16(crc, &frame.be_crc);
+
+ r = i2c_master_send(client, (const char *) &frame, sizeof(frame));
+
+ if (r == sizeof(frame))
+ return 0;
+ else if (r < 0)
+ return r;
+ else
+ return -EIO;
+}
+
+static int pn544_hci_i2c_fw_write_chunk(struct pn544_i2c_phy *phy)
+{
+ int r;
+
+ r = pn544_hci_i2c_fw_write_cmd(phy->i2c_dev,
+ phy->fw_blob_dest_addr + phy->fw_written,
+ phy->fw_blob_data + phy->fw_written,
+ phy->fw_blob_size - phy->fw_written);
+ if (r < 0)
+ return r;
+
+ phy->fw_written += r;
+ phy->fw_work_state = FW_WORK_STATE_WAIT_WRITE_ANSWER;
+
+ return 0;
+}
+
+static void pn544_hci_i2c_fw_work(struct work_struct *work)
+{
+ struct pn544_i2c_phy *phy = container_of(work, struct pn544_i2c_phy,
+ fw_work);
+ int r;
+ struct pn544_i2c_fw_blob *blob;
+
+ switch (phy->fw_work_state) {
+ case FW_WORK_STATE_START:
+ pn544_hci_i2c_enable_mode(phy, PN544_FW_MODE);
+
+ r = request_firmware(&phy->fw, phy->firmware_name,
+ &phy->i2c_dev->dev);
+ if (r < 0)
+ goto exit_state_start;
+
+ blob = (struct pn544_i2c_fw_blob *) phy->fw->data;
+ phy->fw_blob_size = get_unaligned_be32(&blob->be_size);
+ phy->fw_blob_dest_addr = get_unaligned_be32(&blob->be_destaddr);
+ phy->fw_blob_data = blob->data;
+
+ phy->fw_written = 0;
+ r = pn544_hci_i2c_fw_write_chunk(phy);
+
+exit_state_start:
+ if (r < 0)
+ pn544_hci_i2c_fw_work_complete(phy, r);
+ break;
+
+ case FW_WORK_STATE_WAIT_WRITE_ANSWER:
+ r = phy->fw_cmd_result;
+ if (r < 0)
+ goto exit_state_wait_write_answer;
+
+ if (phy->fw_written == phy->fw_blob_size) {
+ r = pn544_hci_i2c_fw_check_cmd(phy->i2c_dev,
+ phy->fw_blob_dest_addr,
+ phy->fw_blob_data,
+ phy->fw_blob_size);
+ if (r < 0)
+ goto exit_state_wait_write_answer;
+ phy->fw_work_state = FW_WORK_STATE_WAIT_CHECK_ANSWER;
+ break;
+ }
+
+ r = pn544_hci_i2c_fw_write_chunk(phy);
+
+exit_state_wait_write_answer:
+ if (r < 0)
+ pn544_hci_i2c_fw_work_complete(phy, r);
+ break;
+
+ case FW_WORK_STATE_WAIT_CHECK_ANSWER:
+ r = phy->fw_cmd_result;
+ if (r < 0)
+ goto exit_state_wait_check_answer;
+
+ blob = (struct pn544_i2c_fw_blob *) (phy->fw_blob_data +
+ phy->fw_blob_size);
+ phy->fw_blob_size = get_unaligned_be32(&blob->be_size);
+ if (phy->fw_blob_size != 0) {
+ phy->fw_blob_dest_addr =
+ get_unaligned_be32(&blob->be_destaddr);
+ phy->fw_blob_data = blob->data;
+
+ phy->fw_written = 0;
+ r = pn544_hci_i2c_fw_write_chunk(phy);
+ }
+
+exit_state_wait_check_answer:
+ if (r < 0 || phy->fw_blob_size == 0)
+ pn544_hci_i2c_fw_work_complete(phy, r);
+ break;
+
+ default:
+ break;
+ }
+}
+
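The work function above walks the firmware image blob by blob: each blob is a big-endian 32-bit size and destination address followed by the payload, and a zero size terminates the chain (the WAIT_CHECK_ANSWER state relies on this). A self-contained sketch of that layout with illustrative names:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t be32(const uint8_t *p)
	{
		return ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
	}

	static void walk_blobs(const uint8_t *fw, size_t fw_len)
	{
		size_t off = 0;

		while (off + 8 <= fw_len) {
			uint32_t size = be32(fw + off);
			uint32_t dest = be32(fw + off + 4);

			if (size == 0)	/* terminator, as in the check-answer state */
				break;

			printf("blob: %u bytes -> 0x%06x\n", size, dest);
			off += 8 + size;
		}
	}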
static int pn544_hci_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -384,6 +706,9 @@ static int pn544_hci_i2c_probe(struct i2c_client *client,
return -ENOMEM;
}
+ INIT_WORK(&phy->fw_work, pn544_hci_i2c_fw_work);
+ phy->fw_work_state = FW_WORK_STATE_IDLE;
+
phy->i2c_dev = client;
i2c_set_clientdata(client, phy);
@@ -420,7 +745,8 @@ static int pn544_hci_i2c_probe(struct i2c_client *client,
r = pn544_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
PN544_I2C_FRAME_HEADROOM, PN544_I2C_FRAME_TAILROOM,
- PN544_HCI_I2C_LLC_MAX_PAYLOAD, &phy->hdev);
+ PN544_HCI_I2C_LLC_MAX_PAYLOAD,
+ pn544_hci_i2c_fw_download, &phy->hdev);
if (r < 0)
goto err_hci;
@@ -443,6 +769,10 @@ static int pn544_hci_i2c_remove(struct i2c_client *client)
dev_dbg(&client->dev, "%s\n", __func__);
+ cancel_work_sync(&phy->fw_work);
+ if (phy->fw_work_state != FW_WORK_STATE_IDLE)
+ pn544_hci_i2c_fw_work_complete(phy, -ENODEV);
+
pn544_hci_remove(phy->hdev);
if (phy->powered)
diff --git a/drivers/nfc/pn544/mei.c b/drivers/nfc/pn544/mei.c
index b5d3d18179eb..ee67de50c36f 100644
--- a/drivers/nfc/pn544/mei.c
+++ b/drivers/nfc/pn544/mei.c
@@ -45,7 +45,7 @@ static int pn544_mei_probe(struct mei_cl_device *device,
r = pn544_hci_probe(phy, &mei_phy_ops, LLC_NOP_NAME,
MEI_NFC_HEADER_SIZE, 0, MEI_NFC_MAX_HCI_PAYLOAD,
- &phy->hdev);
+ NULL, &phy->hdev);
if (r < 0) {
nfc_mei_phy_free(phy);
diff --git a/drivers/nfc/pn544/pn544.c b/drivers/nfc/pn544/pn544.c
index 0d17da7675b7..078e62feba17 100644
--- a/drivers/nfc/pn544/pn544.c
+++ b/drivers/nfc/pn544/pn544.c
@@ -31,9 +31,6 @@
/* Timing restrictions (ms) */
#define PN544_HCI_RESETVEN_TIME 30
-#define HCI_MODE 0
-#define FW_MODE 1
-
enum pn544_state {
PN544_ST_COLD,
PN544_ST_FW_READY,
@@ -130,6 +127,8 @@ struct pn544_hci_info {
int async_cb_type;
data_exchange_cb_t async_cb;
void *async_cb_context;
+
+ fw_download_t fw_download;
};
static int pn544_hci_open(struct nfc_hci_dev *hdev)
@@ -782,6 +781,17 @@ exit:
return r;
}
+static int pn544_hci_fw_download(struct nfc_hci_dev *hdev,
+ const char *firmware_name)
+{
+ struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+ if (info->fw_download == NULL)
+ return -ENOTSUPP;
+
+ return info->fw_download(info->phy_id, firmware_name);
+}
+
static struct nfc_hci_ops pn544_hci_ops = {
.open = pn544_hci_open,
.close = pn544_hci_close,
@@ -796,11 +806,12 @@ static struct nfc_hci_ops pn544_hci_ops = {
.tm_send = pn544_hci_tm_send,
.check_presence = pn544_hci_check_presence,
.event_received = pn544_hci_event_received,
+ .fw_download = pn544_hci_fw_download,
};
int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
int phy_headroom, int phy_tailroom, int phy_payload,
- struct nfc_hci_dev **hdev)
+ fw_download_t fw_download, struct nfc_hci_dev **hdev)
{
struct pn544_hci_info *info;
u32 protocols;
@@ -816,6 +827,7 @@ int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
info->phy_ops = phy_ops;
info->phy_id = phy_id;
+ info->fw_download = fw_download;
info->state = PN544_ST_COLD;
mutex_init(&info->info_lock);
diff --git a/drivers/nfc/pn544/pn544.h b/drivers/nfc/pn544/pn544.h
index f47c6454914b..01020e585443 100644
--- a/drivers/nfc/pn544/pn544.h
+++ b/drivers/nfc/pn544/pn544.h
@@ -24,9 +24,14 @@
#define DRIVER_DESC "HCI NFC driver for PN544"
+#define PN544_HCI_MODE 0
+#define PN544_FW_MODE 1
+
+typedef int (*fw_download_t)(void *context, const char *firmware_name);
+
int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
int phy_headroom, int phy_tailroom, int phy_payload,
- struct nfc_hci_dev **hdev);
+ fw_download_t fw_download, struct nfc_hci_dev **hdev);
void pn544_hci_remove(struct nfc_hci_dev *hdev);
#endif /* __LOCAL_PN544_H_ */
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index b821a62958fd..e8ccf6c0f08a 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3998,6 +3998,49 @@ int pcie_set_mps(struct pci_dev *dev, int mps)
}
/**
+ * pcie_get_minimum_link - determine minimum link settings of a PCI device
+ * @dev: PCI device to query
+ * @speed: storage for minimum speed
+ * @width: storage for minimum width
+ *
+ * This function will walk up the PCI device chain and determine the minimum
+ * link width and speed of the device.
+ */
+int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
+ enum pcie_link_width *width)
+{
+ int ret;
+
+ *speed = PCI_SPEED_UNKNOWN;
+ *width = PCIE_LNK_WIDTH_UNKNOWN;
+
+ while (dev) {
+ u16 lnksta;
+ enum pci_bus_speed next_speed;
+ enum pcie_link_width next_width;
+
+ ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
+ if (ret)
+ return ret;
+
+ next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
+ next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
+ PCI_EXP_LNKSTA_NLW_SHIFT;
+
+ if (next_speed < *speed)
+ *speed = next_speed;
+
+ if (next_width < *width)
+ *width = next_width;
+
+ dev = dev->bus->self;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(pcie_get_minimum_link);
+
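A hedged usage sketch for the new export: a driver warning when its device trained below an expected floor. The speed and width thresholds below are illustrative, not taken from this patch.

	#include <linux/pci.h>

	static void foo_check_link(struct pci_dev *pdev)
	{
		enum pci_bus_speed speed;
		enum pcie_link_width width;

		if (pcie_get_minimum_link(pdev, &speed, &width))
			return;		/* LNKSTA read failed; nothing to report */

		/* illustrative floor: x8 at 5 GT/s */
		if (width < PCIE_LNK_X8 || speed < PCIE_SPEED_5_0GT)
			dev_warn(&pdev->dev,
				 "PCIe link (width x%d) is below the device's capability\n",
				 width);
	}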
+/**
* pci_select_bars - Make BAR mask from the type of resource
* @dev: the PCI device for which BAR mask is made
* @flags: resource type mask to be selected
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 816c297f170c..8a00c063d7bc 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -6,6 +6,9 @@
#define PCI_CFG_SPACE_SIZE 256
#define PCI_CFG_SPACE_EXP_SIZE 4096
+extern const unsigned char pcix_bus_speed[];
+extern const unsigned char pcie_link_speed[];
+
/* Functions internal to the PCI core code */
int pci_create_sysfs_dev_files(struct pci_dev *pdev);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index eeb50bd62402..4f9cc93c3b59 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -518,7 +518,7 @@ static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
return bridge;
}
-static unsigned char pcix_bus_speed[] = {
+const unsigned char pcix_bus_speed[] = {
PCI_SPEED_UNKNOWN, /* 0 */
PCI_SPEED_66MHz_PCIX, /* 1 */
PCI_SPEED_100MHz_PCIX, /* 2 */
@@ -537,7 +537,7 @@ static unsigned char pcix_bus_speed[] = {
PCI_SPEED_133MHz_PCIX_533 /* F */
};
-static unsigned char pcie_link_speed[] = {
+const unsigned char pcie_link_speed[] = {
PCI_SPEED_UNKNOWN, /* 0 */
PCIE_SPEED_2_5GT, /* 1 */
PCIE_SPEED_5_0GT, /* 2 */
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
index 37049e433c9e..7052a839b0ea 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -581,8 +581,10 @@ struct iscsi_kwqe_init1 {
#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
-#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
-#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
+#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE (0x1<<6)
+#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE_SHIFT 6
+#define ISCSI_KWQE_INIT1_RESERVED1 (0x1<<7)
+#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 7
u16 cq_num_wqes;
#elif defined(__LITTLE_ENDIAN)
u16 cq_num_wqes;
@@ -593,8 +595,10 @@ struct iscsi_kwqe_init1 {
#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
-#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
-#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
+#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE (0x1<<6)
+#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE_SHIFT 6
+#define ISCSI_KWQE_INIT1_RESERVED1 (0x1<<7)
+#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 7
u8 cq_log_wqes_per_page;
#endif
#if defined(__BIG_ENDIAN)
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index b6f6f436777b..34c294b42c84 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -172,16 +172,14 @@ void bnx2i_start(void *handle)
struct bnx2i_hba *hba = handle;
int i = HZ;
- /*
- * We should never register devices that don't support iSCSI
- * (see bnx2i_init_one), so something is wrong if we try to
- * start a iSCSI adapter on hardware with 0 supported iSCSI
- * connections
+ /* On some bnx2x devices, it is possible that iSCSI is no
+ * longer supported after firmware is downloaded. In that
+ * case, the iscsi_init_msg will return failure.
*/
- BUG_ON(!hba->cnic->max_iscsi_conn);
bnx2i_send_fw_iscsi_init_msg(hba);
- while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
+ while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) &&
+ !test_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state) && i--)
msleep(BNX2I_INIT_POLL_TIME);
}
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index 36171fd2826b..2cd9b0e44a41 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -138,7 +138,7 @@ config SSB_DRIVER_MIPS
config SSB_SFLASH
bool "SSB serial flash support"
- depends on SSB_DRIVER_MIPS && BROKEN
+ depends on SSB_DRIVER_MIPS
default y
# Assumption: We are on embedded, if we compile the MIPS core.
diff --git a/drivers/ssb/driver_chipcommon_sflash.c b/drivers/ssb/driver_chipcommon_sflash.c
index e84cf04f4416..50328de712fa 100644
--- a/drivers/ssb/driver_chipcommon_sflash.c
+++ b/drivers/ssb/driver_chipcommon_sflash.c
@@ -151,8 +151,8 @@ int ssb_sflash_init(struct ssb_chipcommon *cc)
sflash->size = sflash->blocksize * sflash->numblocks;
sflash->present = true;
- pr_info("Found %s serial flash (blocksize: 0x%X, blocks: %d)\n",
- e->name, e->blocksize, e->numblocks);
+ pr_info("Found %s serial flash (size: %dKiB, blocksize: 0x%X, blocks: %d)\n",
+ e->name, sflash->size / 1024, e->blocksize, e->numblocks);
/* Prepare platform device, but don't register it yet. It's too early,
* malloc (required by device_private_init) is not available yet. */
@@ -160,7 +160,5 @@ int ssb_sflash_init(struct ssb_chipcommon *cc)
sflash->size;
ssb_sflash_dev.dev.platform_data = sflash;
- pr_err("Serial flash support is not implemented yet!\n");
-
- return -ENOTSUPP;
+ return 0;
}
diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
index 57a08c5771f2..8acff44a9e75 100644
--- a/drivers/staging/vt6655/hostap.c
+++ b/drivers/staging/vt6655/hostap.c
@@ -86,7 +86,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
apdev_priv = netdev_priv(pDevice->apdev);
*apdev_priv = *pDevice;
- memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
+ eth_hw_addr_inherit(pDevice->apdev, dev);
pDevice->apdev->netdev_ops = &apdev_netdev_ops;
diff --git a/drivers/staging/vt6655/ioctl.c b/drivers/staging/vt6655/ioctl.c
index 46e0e41e7e60..b5cd2e44e53d 100644
--- a/drivers/staging/vt6655/ioctl.c
+++ b/drivers/staging/vt6655/ioctl.c
@@ -460,7 +460,7 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
}
if (sValue.dwValue == 1) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "up wpadev\n");
- memcpy(pDevice->wpadev->dev_addr, pDevice->dev->dev_addr, ETH_ALEN);
+ eth_hw_addr_inherit(pDevice->wpadev, pDevice->dev);
pDevice->bWPADEVUp = true;
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "close wpadev\n");
diff --git a/drivers/staging/vt6655/wpactl.c b/drivers/staging/vt6655/wpactl.c
index 869f62c678e6..e8d9ecd2913a 100644
--- a/drivers/staging/vt6655/wpactl.c
+++ b/drivers/staging/vt6655/wpactl.c
@@ -96,7 +96,7 @@ static int wpa_init_wpadev(PSDevice pDevice)
wpadev_priv = netdev_priv(pDevice->wpadev);
*wpadev_priv = *pDevice;
- memcpy(pDevice->wpadev->dev_addr, dev->dev_addr, ETH_ALEN);
+ eth_hw_addr_inherit(pDevice->wpadev, dev);
pDevice->wpadev->base_addr = dev->base_addr;
pDevice->wpadev->irq = dev->irq;
pDevice->wpadev->mem_start = dev->mem_start;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 969a85960e9f..831eb4fd197d 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -276,12 +276,12 @@ static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
* of used idx. Once lower device DMA done contiguously, we will signal KVM
* guest used idx.
*/
-static int vhost_zerocopy_signal_used(struct vhost_net *net,
- struct vhost_virtqueue *vq)
+static void vhost_zerocopy_signal_used(struct vhost_net *net,
+ struct vhost_virtqueue *vq)
{
struct vhost_net_virtqueue *nvq =
container_of(vq, struct vhost_net_virtqueue, vq);
- int i;
+ int i, add;
int j = 0;
for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
@@ -289,15 +289,17 @@ static int vhost_zerocopy_signal_used(struct vhost_net *net,
vhost_net_tx_err(net);
if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
- vhost_add_used_and_signal(vq->dev, vq,
- vq->heads[i].id, 0);
++j;
} else
break;
}
- if (j)
- nvq->done_idx = i;
- return j;
+ while (j) {
+ add = min(UIO_MAXIOV - nvq->done_idx, j);
+ vhost_add_used_and_signal_n(vq->dev, vq,
+ &vq->heads[nvq->done_idx], add);
+ nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV;
+ j -= add;
+ }
}
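The rewritten flush signals completed heads in batches rather than one at a time; because done_idx wraps modulo UIO_MAXIOV, the while loop splits each batch into at most two contiguous slices. A standalone sketch of that arithmetic, with RING_SIZE and flush_slice() as hypothetical stand-ins:

	#include <stdio.h>

	#define RING_SIZE 1024		/* hypothetical stand-in for UIO_MAXIOV */

	static void flush_slice(unsigned start, unsigned count)
	{
		printf("signal heads [%u, %u)\n", start, start + count);
	}

	static void flush_batch(unsigned done_idx, unsigned j)
	{
		while (j) {
			unsigned add = RING_SIZE - done_idx < j ?
				       RING_SIZE - done_idx : j;

			flush_slice(done_idx, add);	/* contiguous run, never wraps */
			done_idx = (done_idx + add) % RING_SIZE;
			j -= add;
		}
	}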
static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
@@ -306,6 +308,11 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
struct vhost_virtqueue *vq = ubufs->vq;
int cnt = atomic_read(&ubufs->kref.refcount);
+ /* set len to mark this desc buffers done DMA */
+ vq->heads[ubuf->desc].len = success ?
+ VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
+ vhost_net_ubuf_put(ubufs);
+
/*
* Trigger polling thread if guest stopped submitting new buffers:
* in this case, the refcount after decrement will eventually reach 1
@@ -316,10 +323,6 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
*/
if (cnt <= 2 || !(cnt % 16))
vhost_poll_queue(&vq->poll);
- /* set len to mark this desc buffers done DMA */
- vq->heads[ubuf->desc].len = success ?
- VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
- vhost_net_ubuf_put(ubufs);
}
/* Expects to be always run from workqueue - which acts as
@@ -360,6 +363,13 @@ static void handle_tx(struct vhost_net *net)
if (zcopy)
vhost_zerocopy_signal_used(net, vq);
+ /* If more outstanding DMAs, queue the work.
+ * Handle upend_idx wrap around
+ */
+ if (unlikely((nvq->upend_idx + vq->num - VHOST_MAX_PEND)
+ % UIO_MAXIOV == nvq->done_idx))
+ break;
+
head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
ARRAY_SIZE(vq->iov),
&out, &in,
@@ -369,17 +379,6 @@ static void handle_tx(struct vhost_net *net)
break;
/* Nothing new? Wait for eventfd to tell us they refilled. */
if (head == vq->num) {
- int num_pends;
-
- /* If more outstanding DMAs, queue the work.
- * Handle upend_idx wrap around
- */
- num_pends = likely(nvq->upend_idx >= nvq->done_idx) ?
- (nvq->upend_idx - nvq->done_idx) :
- (nvq->upend_idx + UIO_MAXIOV -
- nvq->done_idx);
- if (unlikely(num_pends > VHOST_MAX_PEND))
- break;
if (unlikely(vhost_enable_notify(&net->dev, vq))) {
vhost_disable_notify(&net->dev, vq);
continue;
@@ -402,43 +401,36 @@ static void handle_tx(struct vhost_net *net)
iov_length(nvq->hdr, s), hdr_size);
break;
}
- zcopy_used = zcopy && (len >= VHOST_GOODCOPY_LEN ||
- nvq->upend_idx != nvq->done_idx);
+
+ zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN
+ && (nvq->upend_idx + 1) % UIO_MAXIOV !=
+ nvq->done_idx
+ && vhost_net_tx_select_zcopy(net);
/* use msg_control to pass vhost zerocopy ubuf info to skb */
if (zcopy_used) {
+ struct ubuf_info *ubuf;
+ ubuf = nvq->ubuf_info + nvq->upend_idx;
+
vq->heads[nvq->upend_idx].id = head;
- if (!vhost_net_tx_select_zcopy(net) ||
- len < VHOST_GOODCOPY_LEN) {
- /* copy don't need to wait for DMA done */
- vq->heads[nvq->upend_idx].len =
- VHOST_DMA_DONE_LEN;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- ubufs = NULL;
- } else {
- struct ubuf_info *ubuf;
- ubuf = nvq->ubuf_info + nvq->upend_idx;
-
- vq->heads[nvq->upend_idx].len =
- VHOST_DMA_IN_PROGRESS;
- ubuf->callback = vhost_zerocopy_callback;
- ubuf->ctx = nvq->ubufs;
- ubuf->desc = nvq->upend_idx;
- msg.msg_control = ubuf;
- msg.msg_controllen = sizeof(ubuf);
- ubufs = nvq->ubufs;
- kref_get(&ubufs->kref);
- }
+ vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
+ ubuf->callback = vhost_zerocopy_callback;
+ ubuf->ctx = nvq->ubufs;
+ ubuf->desc = nvq->upend_idx;
+ msg.msg_control = ubuf;
+ msg.msg_controllen = sizeof(ubuf);
+ ubufs = nvq->ubufs;
+ kref_get(&ubufs->kref);
nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
- } else
+ } else {
msg.msg_control = NULL;
+ ubufs = NULL;
+ }
/* TODO: Check specific error and bomb out unless ENOBUFS? */
err = sock->ops->sendmsg(NULL, sock, &msg, len);
if (unlikely(err < 0)) {
if (zcopy_used) {
- if (ubufs)
- vhost_net_ubuf_put(ubufs);
+ vhost_net_ubuf_put(ubufs);
nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
% UIO_MAXIOV;
}
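Reading the new zcopy_used condition as a predicate may help: zero-copy is attempted only for payloads of at least VHOST_GOODCOPY_LEN, only while the pending ring still has a free slot (upend_idx must not catch up to done_idx after the increment), and only while the error-rate heuristic still selects it. An illustrative restatement only, assuming the vhost headers for the two constants:

	static bool may_use_zcopy(bool zcopy_enabled, bool heuristic_ok, size_t len,
				  unsigned upend_idx, unsigned done_idx)
	{
		return zcopy_enabled && len >= VHOST_GOODCOPY_LEN &&
		       (upend_idx + 1) % UIO_MAXIOV != done_idx &&
		       heuristic_ok;
	}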
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index e58cf0001cee..9a9502a4aa50 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -13,7 +13,7 @@
#include <linux/eventfd.h>
#include <linux/vhost.h>
-#include <linux/socket.h> /* memcpy_fromiovec */
+#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
@@ -1332,48 +1332,9 @@ EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
* want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
- struct vring_used_elem __user *used;
+ struct vring_used_elem heads = { head, len };
- /* The virtqueue contains a ring of used buffers. Get a pointer to the
- * next entry in that used ring. */
- used = &vq->used->ring[vq->last_used_idx % vq->num];
- if (__put_user(head, &used->id)) {
- vq_err(vq, "Failed to write used id");
- return -EFAULT;
- }
- if (__put_user(len, &used->len)) {
- vq_err(vq, "Failed to write used len");
- return -EFAULT;
- }
- /* Make sure buffer is written before we update index. */
- smp_wmb();
- if (__put_user(vq->last_used_idx + 1, &vq->used->idx)) {
- vq_err(vq, "Failed to increment used idx");
- return -EFAULT;
- }
- if (unlikely(vq->log_used)) {
- /* Make sure data is seen before log. */
- smp_wmb();
- /* Log used ring entry write. */
- log_write(vq->log_base,
- vq->log_addr +
- ((void __user *)used - (void __user *)vq->used),
- sizeof *used);
- /* Log used index update. */
- log_write(vq->log_base,
- vq->log_addr + offsetof(struct vring_used, idx),
- sizeof vq->used->idx);
- if (vq->log_ctx)
- eventfd_signal(vq->log_ctx, 1);
- }
- vq->last_used_idx++;
- /* If the driver never bothers to signal in a very long while,
- * used index might wrap around. If that happens, invalidate
- * signalled_used index we stored. TODO: make sure driver
- * signals at least once in 2^16 and remove this. */
- if (unlikely(vq->last_used_idx == vq->signalled_used))
- vq->signalled_used_valid = false;
- return 0;
+ return vhost_add_used_n(vq, &heads, 1);
}
EXPORT_SYMBOL_GPL(vhost_add_used);
@@ -1387,7 +1348,16 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
start = vq->last_used_idx % vq->num;
used = vq->used->ring + start;
- if (__copy_to_user(used, heads, count * sizeof *used)) {
+ if (count == 1) {
+ if (__put_user(heads[0].id, &used->id)) {
+ vq_err(vq, "Failed to write used id");
+ return -EFAULT;
+ }
+ if (__put_user(heads[0].len, &used->len)) {
+ vq_err(vq, "Failed to write used len");
+ return -EFAULT;
+ }
+ } else if (__copy_to_user(used, heads, count * sizeof *used)) {
vq_err(vq, "Failed to write used");
return -EFAULT;
}
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 622fc505d3e1..4d043c30216f 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -72,7 +72,19 @@ struct bcma_host_ops {
/* Core-ID values. */
#define BCMA_CORE_OOB_ROUTER 0x367 /* Out of band */
#define BCMA_CORE_4706_CHIPCOMMON 0x500
+#define BCMA_CORE_PCIEG2 0x501
+#define BCMA_CORE_DMA 0x502
+#define BCMA_CORE_SDIO3 0x503
+#define BCMA_CORE_USB20 0x504
+#define BCMA_CORE_USB30 0x505
+#define BCMA_CORE_A9JTAG 0x506
+#define BCMA_CORE_DDR23 0x507
+#define BCMA_CORE_ROM 0x508
+#define BCMA_CORE_NAND 0x509
+#define BCMA_CORE_QSPI 0x50A
+#define BCMA_CORE_CHIPCOMMON_B 0x50B
#define BCMA_CORE_4706_SOC_RAM 0x50E
+#define BCMA_CORE_ARMCA9 0x510
#define BCMA_CORE_4706_MAC_GBIT 0x52D
#define BCMA_CORE_AMEMC 0x52E /* DDR1/2 memory controller core */
#define BCMA_CORE_ALTA 0x534 /* I2S core */
@@ -177,6 +189,11 @@ struct bcma_host_ops {
#define BCMA_PKG_ID_BCM5357 11
#define BCMA_CHIP_ID_BCM53572 53572
#define BCMA_PKG_ID_BCM47188 9
+#define BCMA_CHIP_ID_BCM4707 53010
+#define BCMA_PKG_ID_BCM4707 1
+#define BCMA_PKG_ID_BCM4708 2
+#define BCMA_PKG_ID_BCM4709 0
+#define BCMA_CHIP_ID_BCM53018 53018
/* Board types (on PCI usually equals to the subsystem dev id) */
/* BCM4313 */
diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h
index 424760f01b9d..d66033f418c9 100644
--- a/include/linux/bcma/bcma_driver_pci.h
+++ b/include/linux/bcma/bcma_driver_pci.h
@@ -181,10 +181,31 @@ struct pci_dev;
#define BCMA_CORE_PCI_CFG_DEVCTRL 0xd8
+#define BCMA_CORE_PCI_
+
+/* MDIO devices (SERDES modules) */
+#define BCMA_CORE_PCI_MDIO_IEEE0 0x000
+#define BCMA_CORE_PCI_MDIO_IEEE1 0x001
+#define BCMA_CORE_PCI_MDIO_BLK0 0x800
+#define BCMA_CORE_PCI_MDIO_BLK1 0x801
+#define BCMA_CORE_PCI_MDIO_BLK1_MGMT0 0x16
+#define BCMA_CORE_PCI_MDIO_BLK1_MGMT1 0x17
+#define BCMA_CORE_PCI_MDIO_BLK1_MGMT2 0x18
+#define BCMA_CORE_PCI_MDIO_BLK1_MGMT3 0x19
+#define BCMA_CORE_PCI_MDIO_BLK1_MGMT4 0x1A
+#define BCMA_CORE_PCI_MDIO_BLK2 0x802
+#define BCMA_CORE_PCI_MDIO_BLK3 0x803
+#define BCMA_CORE_PCI_MDIO_BLK4 0x804
+#define BCMA_CORE_PCI_MDIO_TXPLL 0x808 /* TXPLL register block idx */
+#define BCMA_CORE_PCI_MDIO_TXCTRL0 0x820
+#define BCMA_CORE_PCI_MDIO_SERDESID 0x831
+#define BCMA_CORE_PCI_MDIO_RXCTRL0 0x840
+
/* PCIE Root Capability Register bits (Host mode only) */
#define BCMA_CORE_PCI_RC_CRS_VISIBILITY 0x0001
struct bcma_drv_pci;
+struct bcma_bus;
#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
struct bcma_drv_pci_host {
@@ -219,7 +240,8 @@ struct bcma_drv_pci {
extern void bcma_core_pci_init(struct bcma_drv_pci *pc);
extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc,
struct bcma_device *core, bool enable);
-extern void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend);
+extern void bcma_core_pci_up(struct bcma_bus *bus);
+extern void bcma_core_pci_down(struct bcma_bus *bus);
extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev);
extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev);
diff --git a/include/linux/can/platform/mcp251x.h b/include/linux/can/platform/mcp251x.h
index 089fe43211a4..dc029dba7a03 100644
--- a/include/linux/can/platform/mcp251x.h
+++ b/include/linux/can/platform/mcp251x.h
@@ -9,26 +9,13 @@
#include <linux/spi/spi.h>
-/**
+/*
* struct mcp251x_platform_data - MCP251X SPI CAN controller platform data
* @oscillator_frequency: - oscillator frequency in Hz
- * @irq_flags: - IRQF configuration flags
- * @board_specific_setup: - called before probing the chip (power,reset)
- * @transceiver_enable: - called to power on/off the transceiver
- * @power_enable: - called to power on/off the mcp *and* the
- * transceiver
- *
- * Please note that you should define power_enable or transceiver_enable or
- * none of them. Defining both of them is no use.
- *
*/
struct mcp251x_platform_data {
unsigned long oscillator_frequency;
- unsigned long irq_flags;
- int (*board_specific_setup)(struct spi_device *spi);
- int (*transceiver_enable)(int enable);
- int (*power_enable) (int enable);
};
#endif /* __CAN_PLATFORM_MCP251X_H__ */
diff --git a/include/linux/dm9000.h b/include/linux/dm9000.h
index 96e87693d933..841925fbfe8a 100644
--- a/include/linux/dm9000.h
+++ b/include/linux/dm9000.h
@@ -14,6 +14,8 @@
#ifndef __DM9000_PLATFORM_DATA
#define __DM9000_PLATFORM_DATA __FILE__
+#include <linux/if_ether.h>
+
/* IO control flags */
#define DM9000_PLATF_8BITONLY (0x0001)
@@ -27,7 +29,7 @@
struct dm9000_plat_data {
unsigned int flags;
- unsigned char dev_addr[6];
+ unsigned char dev_addr[ETH_ALEN];
/* allow replacement IO routines */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 94af41858513..3a8d0a2af607 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -132,9 +132,8 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
- void *ret = dma_alloc_coherent(dev, size, dma_handle, flag);
- if (ret)
- memset(ret, 0, size);
+ void *ret = dma_alloc_coherent(dev, size, dma_handle,
+ flag | __GFP_ZERO);
return ret;
}
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index c623861964e4..d8b512496e50 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -199,6 +199,21 @@ static inline void eth_hw_addr_random(struct net_device *dev)
}
/**
+ * eth_hw_addr_inherit - Copy dev_addr from another net_device
+ * @dst: pointer to net_device to copy dev_addr to
+ * @src: pointer to net_device to copy dev_addr from
+ *
+ * Copy the Ethernet address from one net_device to another along with
+ * the address attributes (addr_assign_type).
+ */
+static inline void eth_hw_addr_inherit(struct net_device *dst,
+ struct net_device *src)
+{
+ dst->addr_assign_type = src->addr_assign_type;
+ memcpy(dst->dev_addr, src->dev_addr, ETH_ALEN);
+}
+
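A minimal illustrative call pattern for the new helper (the function name below is hypothetical): a virtual interface adopts its parent's MAC address and addr_assign_type in one step, replacing the open-coded memcpy() pattern converted in the vt6655 hunks above.

	static void foo_setup_virtual(struct net_device *virt,
				      struct net_device *parent)
	{
		eth_hw_addr_inherit(virt, parent);	/* copies address + assign type */
	}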
+/**
* compare_ether_addr - Compare two Ethernet addresses
* @addr1: Pointer to a six-byte array containing the Ethernet address
* @addr2: Pointer to the other six-byte array containing the Ethernet address
diff --git a/include/linux/fs_enet_pd.h b/include/linux/fs_enet_pd.h
index 51b793466ff3..efb05961bdd8 100644
--- a/include/linux/fs_enet_pd.h
+++ b/include/linux/fs_enet_pd.h
@@ -16,8 +16,10 @@
#ifndef FS_ENET_PD_H
#define FS_ENET_PD_H
+#include <linux/clk.h>
#include <linux/string.h>
#include <linux/of_mdio.h>
+#include <linux/if_ether.h>
#include <asm/types.h>
#define FS_ENET_NAME "fs_enet"
@@ -135,13 +137,15 @@ struct fs_platform_info {
const struct fs_mii_bus_info *bus_info;
int rx_ring, tx_ring; /* number of buffers on rx */
- __u8 macaddr[6]; /* mac address */
+ __u8 macaddr[ETH_ALEN]; /* mac address */
int rx_copybreak; /* limit we copy small frames */
int use_napi; /* use NAPI */
int napi_weight; /* NAPI weight */
int use_rmii; /* use RMII mode */
int has_phy; /* if the network is phy container as well...*/
+
+ struct clk *clk_per; /* 'per' clock for register access */
};
struct fs_mii_fec_platform_info {
u32 irq[32];
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index b0dc87a2a376..a5b598a79bec 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -16,6 +16,7 @@
#define LINUX_IEEE80211_H
#include <linux/types.h>
+#include <linux/if_ether.h>
#include <asm/byteorder.h>
/*
@@ -209,28 +210,28 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
struct ieee80211_hdr {
__le16 frame_control;
__le16 duration_id;
- u8 addr1[6];
- u8 addr2[6];
- u8 addr3[6];
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
__le16 seq_ctrl;
- u8 addr4[6];
+ u8 addr4[ETH_ALEN];
} __packed __aligned(2);
struct ieee80211_hdr_3addr {
__le16 frame_control;
__le16 duration_id;
- u8 addr1[6];
- u8 addr2[6];
- u8 addr3[6];
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
__le16 seq_ctrl;
} __packed __aligned(2);
struct ieee80211_qos_hdr {
__le16 frame_control;
__le16 duration_id;
- u8 addr1[6];
- u8 addr2[6];
- u8 addr3[6];
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
__le16 seq_ctrl;
__le16 qos_ctrl;
} __packed __aligned(2);
@@ -608,8 +609,8 @@ struct ieee80211s_hdr {
u8 flags;
u8 ttl;
__le32 seqnum;
- u8 eaddr1[6];
- u8 eaddr2[6];
+ u8 eaddr1[ETH_ALEN];
+ u8 eaddr2[ETH_ALEN];
} __packed __aligned(2);
/* Mesh flags */
@@ -758,7 +759,7 @@ struct ieee80211_rann_ie {
u8 rann_flags;
u8 rann_hopcount;
u8 rann_ttl;
- u8 rann_addr[6];
+ u8 rann_addr[ETH_ALEN];
__le32 rann_seq;
__le32 rann_interval;
__le32 rann_metric;
@@ -802,9 +803,9 @@ enum ieee80211_vht_opmode_bits {
struct ieee80211_mgmt {
__le16 frame_control;
__le16 duration;
- u8 da[6];
- u8 sa[6];
- u8 bssid[6];
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
__le16 seq_ctrl;
union {
struct {
@@ -833,7 +834,7 @@ struct ieee80211_mgmt {
struct {
__le16 capab_info;
__le16 listen_interval;
- u8 current_ap[6];
+ u8 current_ap[ETH_ALEN];
/* followed by SSID and Supported rates */
u8 variable[0];
} __packed reassoc_req;
@@ -966,21 +967,21 @@ struct ieee80211_vendor_ie {
struct ieee80211_rts {
__le16 frame_control;
__le16 duration;
- u8 ra[6];
- u8 ta[6];
+ u8 ra[ETH_ALEN];
+ u8 ta[ETH_ALEN];
} __packed __aligned(2);
struct ieee80211_cts {
__le16 frame_control;
__le16 duration;
- u8 ra[6];
+ u8 ra[ETH_ALEN];
} __packed __aligned(2);
struct ieee80211_pspoll {
__le16 frame_control;
__le16 aid;
- u8 bssid[6];
- u8 ta[6];
+ u8 bssid[ETH_ALEN];
+ u8 ta[ETH_ALEN];
} __packed __aligned(2);
/* TDLS */
@@ -989,14 +990,14 @@ struct ieee80211_pspoll {
struct ieee80211_tdls_lnkie {
u8 ie_type; /* Link Identifier IE */
u8 ie_len;
- u8 bssid[6];
- u8 init_sta[6];
- u8 resp_sta[6];
+ u8 bssid[ETH_ALEN];
+ u8 init_sta[ETH_ALEN];
+ u8 resp_sta[ETH_ALEN];
} __packed;
struct ieee80211_tdls_data {
- u8 da[6];
- u8 sa[6];
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
__be16 ether_type;
u8 payload_type;
u8 category;
@@ -1090,8 +1091,8 @@ struct ieee80211_p2p_noa_attr {
struct ieee80211_bar {
__le16 frame_control;
__le16 duration;
- __u8 ra[6];
- __u8 ta[6];
+ __u8 ra[ETH_ALEN];
+ __u8 ta[ETH_ALEN];
__le16 control;
__le16 start_seq_num;
} __packed;
@@ -1709,6 +1710,10 @@ enum ieee80211_eid {
WLAN_EID_OPMODE_NOTIF = 199,
WLAN_EID_WIDE_BW_CHANNEL_SWITCH = 194,
WLAN_EID_CHANNEL_SWITCH_WRAPPER = 196,
+ WLAN_EID_EXTENDED_BSS_LOAD = 193,
+ WLAN_EID_VHT_TX_POWER_ENVELOPE = 195,
+ WLAN_EID_AID = 197,
+ WLAN_EID_QUIET_CHANNEL = 198,
/* 802.11ad */
WLAN_EID_NON_TX_BSSID_CAP = 83,
@@ -1860,6 +1865,11 @@ enum ieee80211_tdls_actioncode {
WLAN_TDLS_DISCOVERY_REQUEST = 10,
};
+/* Interworking capabilities are set in the 7th bit of the 4th byte of the
+ * @WLAN_EID_EXT_CAPABILITY information element
+ */
+#define WLAN_EXT_CAPA4_INTERWORKING_ENABLED BIT(7)
+
/*
* TDLS capabilities to be enabled in the 5th byte of the
* @WLAN_EID_EXT_CAPABILITY information element
@@ -2279,4 +2289,8 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
return !!(tim->virtual_map[index] & mask);
}
+/* convert time units */
+#define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024))
+#define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x))
+
#endif /* LINUX_IEEE80211_H */
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index f6156f91eb1c..a899dc24be15 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -10,9 +10,9 @@
#ifndef _LINUX_IF_TEAM_H_
#define _LINUX_IF_TEAM_H_
-
#include <linux/netpoll.h>
#include <net/sch_generic.h>
+#include <linux/types.h>
#include <uapi/linux/if_team.h>
struct team_pcpu_stats {
@@ -194,6 +194,18 @@ struct team {
bool user_carrier_enabled;
bool queue_override_enabled;
struct list_head *qom_lists; /* array of queue override mapping lists */
+ struct {
+ unsigned int count;
+ unsigned int interval; /* in ms */
+ atomic_t count_pending;
+ struct delayed_work dw;
+ } notify_peers;
+ struct {
+ unsigned int count;
+ unsigned int interval; /* in ms */
+ atomic_t count_pending;
+ struct delayed_work dw;
+ } mcast_rejoin;
long mode_priv[TEAM_MODE_PRIV_LONGS];
};
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index e3362b5f13e8..f47550d75f85 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -129,6 +129,5 @@ extern void ip_mc_unmap(struct in_device *);
extern void ip_mc_remap(struct in_device *);
extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr);
extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr);
-extern void ip_mc_rejoin_groups(struct in_device *in_dev);
#endif
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index b8b7dc755752..28ea38439313 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -19,6 +19,8 @@ struct ipv6_devconf {
__s32 rtr_solicit_interval;
__s32 rtr_solicit_delay;
__s32 force_mld_version;
+ __s32 mldv1_unsolicited_report_interval;
+ __s32 mldv2_unsolicited_report_interval;
#ifdef CONFIG_IPV6_PRIVACY
__s32 use_tempaddr;
__s32 temp_valid_lft;
@@ -48,6 +50,7 @@ struct ipv6_devconf {
__s32 accept_dad;
__s32 force_tllao;
__s32 ndisc_notify;
+ __s32 suppress_frag_ndisc;
void *sysctl;
};
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index bb1c8096a7eb..cd1fdf75103b 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -69,6 +69,7 @@ enum {
MLX4_CMD_SET_ICM_SIZE = 0xffd,
/*master notify fw on finish for slave's flr*/
MLX4_CMD_INFORM_FLR_DONE = 0x5b,
+ MLX4_CMD_GET_OP_REQ = 0x59,
/* TPT commands */
MLX4_CMD_SW2HW_MPT = 0xd,
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index d73423c37c25..24ce6bdd540e 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -33,6 +33,7 @@
#ifndef MLX4_DEVICE_H
#define MLX4_DEVICE_H
+#include <linux/if_ether.h>
#include <linux/pci.h>
#include <linux/completion.h>
#include <linux/radix-tree.h>
@@ -207,6 +208,7 @@ enum mlx4_event {
MLX4_EVENT_TYPE_CMD = 0x0a,
MLX4_EVENT_TYPE_VEP_UPDATE = 0x19,
MLX4_EVENT_TYPE_COMM_CHANNEL = 0x18,
+ MLX4_EVENT_TYPE_OP_REQUIRED = 0x1a,
MLX4_EVENT_TYPE_FATAL_WARNING = 0x1b,
MLX4_EVENT_TYPE_FLR_EVENT = 0x1c,
MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d,
@@ -619,7 +621,7 @@ struct mlx4_eth_av {
u8 dgid[16];
u32 reserved4[2];
__be16 vlan;
- u8 mac[6];
+ u8 mac[ETH_ALEN];
};
union mlx4_ext_av {
@@ -913,10 +915,10 @@ enum mlx4_net_trans_promisc_mode {
};
struct mlx4_spec_eth {
- u8 dst_mac[6];
- u8 dst_mac_msk[6];
- u8 src_mac[6];
- u8 src_mac_msk[6];
+ u8 dst_mac[ETH_ALEN];
+ u8 dst_mac_msk[ETH_ALEN];
+ u8 src_mac[ETH_ALEN];
+ u8 src_mac_msk[ETH_ALEN];
u8 ether_type_enable;
__be16 ether_type;
__be16 vlan_id_msk;
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 262deac02c9e..6d351473c292 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -34,6 +34,7 @@
#define MLX4_QP_H
#include <linux/types.h>
+#include <linux/if_ether.h>
#include <linux/mlx4/device.h>
@@ -143,7 +144,7 @@ struct mlx4_qp_path {
u8 feup;
u8 fvl_rx;
u8 reserved4[2];
- u8 dmac[6];
+ u8 dmac[ETH_ALEN];
};
enum { /* fl */
@@ -318,7 +319,7 @@ struct mlx4_wqe_datagram_seg {
__be32 dqpn;
__be32 qkey;
__be16 vlan;
- u8 mac[6];
+ u8 mac[ETH_ALEN];
};
struct mlx4_wqe_lso_seg {
diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h
index 6e8215b15998..61a0da38d0cb 100644
--- a/include/linux/mv643xx_eth.h
+++ b/include/linux/mv643xx_eth.h
@@ -6,6 +6,7 @@
#define __LINUX_MV643XX_ETH_H
#include <linux/mbus.h>
+#include <linux/if_ether.h>
#define MV643XX_ETH_SHARED_NAME "mv643xx_eth"
#define MV643XX_ETH_NAME "mv643xx_eth_port"
@@ -48,7 +49,7 @@ struct mv643xx_eth_platform_data {
* Use this MAC address if it is valid, overriding the
* address that is already in the hardware.
*/
- u8 mac_addr[6];
+ u8 mac_addr[ETH_ALEN];
/*
* If speed is 0, autonegotiation is enabled.
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 9a4156845e93..8ed4ae943053 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -728,6 +728,16 @@ struct netdev_fcoe_hbainfo {
};
#endif
+#define MAX_PHYS_PORT_ID_LEN 32
+
+/* This structure holds a unique identifier for the physical port
+ * used by a netdevice.
+ */
+struct netdev_phys_port_id {
+ unsigned char id[MAX_PHYS_PORT_ID_LEN];
+ unsigned char id_len;
+};
+
/*
* This structure defines the management hooks for network devices.
* The following hooks can be defined; unless noted otherwise, they are
@@ -932,6 +942,25 @@ struct netdev_fcoe_hbainfo {
* that determine carrier state from physical hardware properties (eg
* network cables) or protocol-dependent mechanisms (eg
* USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
+ *
+ * int (*ndo_get_phys_port_id)(struct net_device *dev,
+ * struct netdev_phys_port_id *ppid);
+ * Called to get ID of physical port of this device. If driver does
+ * not implement this, it is assumed that the hw is not able to have
+ * multiple net devices on single physical port.
+ *
+ * void (*ndo_add_vxlan_port)(struct net_device *dev,
+ * sa_family_t sa_family, __u16 port);
+ * Called by vxlan to notify a driver about the UDP port and socket
+ * address family that vxlan is listening to. It is called only when
+ * a new port starts listening. The operation is protected by the
+ * vxlan_net->sock_lock.
+ *
+ * void (*ndo_del_vxlan_port)(struct net_device *dev,
+ * sa_family_t sa_family, __u16 port);
+ * Called by vxlan to notify the driver about a UDP port and socket
+ * address family that vxlan is not listening to anymore. The operation
+ * is protected by the vxlan_net->sock_lock.
*/
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
@@ -1060,6 +1089,14 @@ struct net_device_ops {
struct nlmsghdr *nlh);
int (*ndo_change_carrier)(struct net_device *dev,
bool new_carrier);
+ int (*ndo_get_phys_port_id)(struct net_device *dev,
+ struct netdev_phys_port_id *ppid);
+ void (*ndo_add_vxlan_port)(struct net_device *dev,
+ sa_family_t sa_family,
+ __u16 port);
+ void (*ndo_del_vxlan_port)(struct net_device *dev,
+ sa_family_t sa_family,
+ __u16 port);
};
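A hedged sketch of a driver-side implementation of the new ndo_get_phys_port_id callback; struct foo_priv and its port_serial field are hypothetical stand-ins for a stable hardware identifier.

	struct foo_priv {
		u8 port_serial[8];	/* hypothetical stable hardware identifier */
	};

	static int foo_get_phys_port_id(struct net_device *dev,
					struct netdev_phys_port_id *ppid)
	{
		struct foo_priv *priv = netdev_priv(dev);

		BUILD_BUG_ON(sizeof(priv->port_serial) > MAX_PHYS_PORT_ID_LEN);
		ppid->id_len = sizeof(priv->port_serial);
		memcpy(ppid->id, priv->port_serial, ppid->id_len);

		return 0;
	}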
/*
@@ -1107,6 +1144,7 @@ struct net_device {
struct list_head napi_list;
struct list_head unreg_list;
struct list_head upper_dev_list; /* List of upper devices */
+ struct list_head lower_dev_list;
/* currently active device features */
@@ -1633,6 +1671,7 @@ struct packet_offload {
#define NETDEV_NOTIFY_PEERS 0x0013
#define NETDEV_JOIN 0x0014
#define NETDEV_CHANGEUPPER 0x0015
+#define NETDEV_RESEND_IGMP 0x0016
extern int register_netdevice_notifier(struct notifier_block *nb);
extern int unregister_netdevice_notifier(struct notifier_block *nb);
@@ -1665,9 +1704,6 @@ extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern rwlock_t dev_base_lock; /* Device list lock */
-extern seqcount_t devnet_rename_seq; /* Device rename seq */
-
-
#define for_each_netdev(net, d) \
list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d) \
@@ -2317,6 +2353,8 @@ extern int dev_set_mac_address(struct net_device *,
struct sockaddr *);
extern int dev_change_carrier(struct net_device *,
bool new_carrier);
+extern int dev_get_phys_port_id(struct net_device *dev,
+ struct netdev_phys_port_id *ppid);
extern int dev_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev,
struct netdev_queue *txq);
@@ -2749,6 +2787,16 @@ extern int bpf_jit_enable;
extern bool netdev_has_upper_dev(struct net_device *dev,
struct net_device *upper_dev);
extern bool netdev_has_any_upper_dev(struct net_device *dev);
+extern struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
+ struct list_head **iter);
+
+/* iterate through upper list, must be called under RCU read lock */
+#define netdev_for_each_upper_dev_rcu(dev, upper, iter) \
+ for (iter = &(dev)->upper_dev_list, \
+ upper = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
+ upper; \
+ upper = netdev_upper_get_next_dev_rcu(dev, &(iter)))
+
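A hedged usage sketch of the new iterator: walk every upper device (e.g. a bond, bridge, or vlan stacked on top) under the RCU read lock, as the comment above requires. The pr_info() output is illustrative.

	static void foo_print_uppers(struct net_device *dev)
	{
		struct net_device *upper;
		struct list_head *iter;

		rcu_read_lock();
		netdev_for_each_upper_dev_rcu(dev, upper, iter)
			pr_info("%s is stacked above %s\n", upper->name, dev->name);
		rcu_read_unlock();
	}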
extern struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
extern struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
extern int netdev_upper_dev_link(struct net_device *dev,
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index de70f7b45b68..708fe72ab913 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -314,25 +314,24 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
#endif /*CONFIG_NETFILTER*/
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-extern void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *) __rcu;
-extern void nf_ct_attach(struct sk_buff *, struct sk_buff *);
+extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
+extern void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
struct nf_conn;
+enum ip_conntrack_info;
struct nlattr;
struct nfq_ct_hook {
size_t (*build_size)(const struct nf_conn *ct);
int (*build)(struct sk_buff *skb, struct nf_conn *ct);
int (*parse)(const struct nlattr *attr, struct nf_conn *ct);
-};
-extern struct nfq_ct_hook __rcu *nfq_ct_hook;
-
-struct nfq_ct_nat_hook {
+ int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct,
+ u32 portid, u32 report);
void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
- u32 ctinfo, int off);
+ enum ip_conntrack_info ctinfo, s32 off);
};
-extern struct nfq_ct_nat_hook __rcu *nfq_ct_nat_hook;
+extern struct nfq_ct_hook __rcu *nfq_ct_hook;
#else
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
#endif
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 186540d74f36..20888589c09e 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -183,6 +183,19 @@ enum pci_bus_flags {
PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2,
};
+/* These values come from the PCI Express Spec */
+enum pcie_link_width {
+ PCIE_LNK_WIDTH_RESRV = 0x00,
+ PCIE_LNK_X1 = 0x01,
+ PCIE_LNK_X2 = 0x02,
+ PCIE_LNK_X4 = 0x04,
+ PCIE_LNK_X8 = 0x08,
+ PCIE_LNK_X12 = 0x0C,
+ PCIE_LNK_X16 = 0x10,
+ PCIE_LNK_X32 = 0x20,
+ PCIE_LNK_WIDTH_UNKNOWN = 0xFF,
+};
+
/* Based on the PCI Hotplug Spec, but some values are made up by us */
enum pci_bus_speed {
PCI_SPEED_33MHz = 0x00,
@@ -922,6 +935,8 @@ int pcie_get_readrq(struct pci_dev *dev);
int pcie_set_readrq(struct pci_dev *dev, int rq);
int pcie_get_mps(struct pci_dev *dev);
int pcie_set_mps(struct pci_dev *dev, int mps);
+int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
+ enum pcie_link_width *width);
int __pci_reset_function(struct pci_dev *dev);
int __pci_reset_function_locked(struct pci_dev *dev);
int pci_reset_function(struct pci_dev *dev);
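A minimal sketch (not part of the patch) of a driver using the new helper to warn when the slowest link on the path to the device is narrower than expected; mydrv_check_link() and the x8 threshold are illustrative:
static void mydrv_check_link(struct pci_dev *pdev)
{
	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;

	if (pcie_get_minimum_link(pdev, &speed, &width))
		return;

	if (width != PCIE_LNK_WIDTH_UNKNOWN && width < PCIE_LNK_X8)
		dev_warn(&pdev->dev,
			 "PCIe link is x%d, expected at least x8\n", width);
}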
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index bd32109e607e..430dd963707b 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -28,19 +28,6 @@
#ifndef _PCI_HOTPLUG_H
#define _PCI_HOTPLUG_H
-/* These values come from the PCI Express Spec */
-enum pcie_link_width {
- PCIE_LNK_WIDTH_RESRV = 0x00,
- PCIE_LNK_X1 = 0x01,
- PCIE_LNK_X2 = 0x02,
- PCIE_LNK_X4 = 0x04,
- PCIE_LNK_X8 = 0x08,
- PCIE_LNK_X12 = 0x0C,
- PCIE_LNK_X16 = 0x10,
- PCIE_LNK_X32 = 0x20,
- PCIE_LNK_WIDTH_UNKNOWN = 0xFF,
-};
-
/**
* struct hotplug_slot_ops - the callbacks that the hotplug pci core can use
* @owner: The module owner of this structure
diff --git a/include/linux/platform_data/brcmfmac-sdio.h b/include/linux/platform_data/brcmfmac-sdio.h
index b7174998c24a..e75dcbf2b230 100644
--- a/include/linux/platform_data/brcmfmac-sdio.h
+++ b/include/linux/platform_data/brcmfmac-sdio.h
@@ -94,6 +94,10 @@ void __init brcmfmac_init_pdata(void)
* Set this to true if the SDIO host controller has higher align requirement
* than 32 bytes for each scatterlist item.
*
+ * sd_head_align: alignment requirement for start of data buffer
+ *
+ * sd_sgentry_align: length alignment requirement for each sg entry
+ *
* power_on: This function is called by the brcmfmac when the module gets
* loaded. This can be particularly useful for low power devices. The platform
* specific routine may, for example, decide to power up the complete device.
@@ -121,6 +125,8 @@ struct brcmfmac_sdio_platform_data {
unsigned int oob_irq_nr;
unsigned long oob_irq_flags;
bool broken_sg_support;
+ unsigned short sd_head_align;
+ unsigned short sd_sgentry_align;
void (*power_on)(void);
void (*power_off)(void);
void (*reset)(void);
diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h
index fc305713fc6d..90b5e30c2f22 100644
--- a/include/linux/sh_eth.h
+++ b/include/linux/sh_eth.h
@@ -2,23 +2,17 @@
#define __ASM_SH_ETH_H__
#include <linux/phy.h>
+#include <linux/if_ether.h>
enum {EDMAC_LITTLE_ENDIAN, EDMAC_BIG_ENDIAN};
-enum {
- SH_ETH_REG_GIGABIT,
- SH_ETH_REG_FAST_RCAR,
- SH_ETH_REG_FAST_SH4,
- SH_ETH_REG_FAST_SH3_SH2
-};
struct sh_eth_plat_data {
int phy;
int edmac_endian;
- int register_type;
phy_interface_t phy_interface;
void (*set_mdio_gate)(void *addr);
- unsigned char mac_addr[6];
+ unsigned char mac_addr[ETH_ALEN];
unsigned no_ether_link:1;
unsigned ether_link_active_low:1;
unsigned needs_init:1;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 3b71a4e83642..2ddb48d9312c 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1805,10 +1805,13 @@ static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
*/
static inline void skb_orphan(struct sk_buff *skb)
{
- if (skb->destructor)
+ if (skb->destructor) {
skb->destructor(skb);
- skb->destructor = NULL;
- skb->sk = NULL;
+ skb->destructor = NULL;
+ skb->sk = NULL;
+ } else {
+ BUG_ON(skb->sk);
+ }
}
/**
@@ -1902,8 +1905,8 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}
-/*
- * __skb_alloc_page - allocate pages for ps-rx on a skb and preserve pfmemalloc data
+/**
+ * __skb_alloc_pages - allocate pages for ps-rx on a skb and preserve pfmemalloc data
* @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX
* @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used
* @order: size of the allocation
@@ -2356,6 +2359,10 @@ extern int skb_copy_datagram_from_iovec(struct sk_buff *skb,
const struct iovec *from,
int from_offset,
int len);
+extern int zerocopy_sg_from_iovec(struct sk_buff *skb,
+ const struct iovec *frm,
+ int offset,
+ size_t count);
extern int skb_copy_datagram_const_iovec(const struct sk_buff *from,
int offset,
const struct iovec *to,
@@ -2385,7 +2392,7 @@ extern void skb_split(struct sk_buff *skb,
struct sk_buff *skb1, const u32 len);
extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
int shiftlen);
-extern void skb_scrub_packet(struct sk_buff *skb);
+extern void skb_scrub_packet(struct sk_buff *skb, bool xnet);
extern struct sk_buff *skb_segment(struct sk_buff *skb,
netdev_features_t features);
diff --git a/include/linux/smsc911x.h b/include/linux/smsc911x.h
index 4dde70e74822..eec3efd19beb 100644
--- a/include/linux/smsc911x.h
+++ b/include/linux/smsc911x.h
@@ -22,6 +22,7 @@
#define __LINUX_SMSC911X_H__
#include <linux/phy.h>
+#include <linux/if_ether.h>
/* platform_device configuration data, should be assigned to
* the platform_device's dev.platform_data */
@@ -31,7 +32,7 @@ struct smsc911x_platform_config {
unsigned int flags;
unsigned int shift;
phy_interface_t phy_interface;
- unsigned char mac[6];
+ unsigned char mac[ETH_ALEN];
};
/* Constants for platform_device irq polarity configuration */
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 230c04bda3e2..445ef7519dc2 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -313,6 +313,8 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
struct iovec *iov,
int offset,
unsigned int len, __wsum *csump);
+extern unsigned long iov_pages(const struct iovec *iov, int offset,
+ unsigned long nr_segs);
extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode);
extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 9e495d31516e..bb5deb0feb6b 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -108,6 +108,7 @@ struct plat_stmmacenet_data {
int bugged_jumbo;
int pmt;
int force_sf_dma_mode;
+ int force_thresh_dma_mode;
int riwt_off;
void (*fix_mac_speed)(void *priv, unsigned int speed);
void (*bus_setup)(void __iomem *ioaddr);
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 472120b4fac5..d68633452d9b 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -107,7 +107,6 @@ static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
* only four options will fit in a standard TCP header */
#define TCP_NUM_SACKS 4
-struct tcp_cookie_values;
struct tcp_request_sock_ops;
struct tcp_request_sock {
@@ -238,6 +237,7 @@ struct tcp_sock {
u32 rcv_wnd; /* Current receiver window */
u32 write_seq; /* Tail(+1) of data held in tcp send buffer */
+ u32 notsent_lowat; /* TCP_NOTSENT_LOWAT */
u32 pushed_seq; /* Last pushed seq, required to talk to windows */
u32 lost_out; /* Lost packets */
u32 sacked_out; /* SACK'd packets */
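A minimal userspace sketch (not part of the patch) of the TCP_NOTSENT_LOWAT socket option this field backs; the constant's value (25) comes from the uapi tcp.h added in this series, and the 128 KiB limit is illustrative:
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_NOTSENT_LOWAT
#define TCP_NOTSENT_LOWAT 25	/* from include/uapi/linux/tcp.h in this merge */
#endif

static int set_notsent_lowat(int fd)
{
	int lowat = 128 * 1024;	/* cap unsent data queued in the kernel */

	if (setsockopt(fd, IPPROTO_TCP, TCP_NOTSENT_LOWAT,
		       &lowat, sizeof(lowat)) < 0) {
		perror("setsockopt(TCP_NOTSENT_LOWAT)");
		return -1;
	}
	return 0;
}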
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 2b47e6364e36..9cb2fe8ca944 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -34,6 +34,7 @@ struct usbnet {
struct mutex phy_mutex;
unsigned char suspend_count;
unsigned char pkt_cnt, pkt_err;
+ unsigned short rx_qlen, tx_qlen;
unsigned can_dma_sg:1;
/* i/o info: pipes etc */
@@ -254,4 +255,6 @@ extern void usbnet_link_change(struct usbnet *, bool, bool);
extern int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags);
extern void usbnet_status_stop(struct usbnet *dev);
+extern void usbnet_update_max_qlen(struct usbnet *dev);
+
#endif /* __LINUX_USB_USBNET_H */
diff --git a/include/linux/uwb/spec.h b/include/linux/uwb/spec.h
index b52e44f1bd33..0df24bfcdb38 100644
--- a/include/linux/uwb/spec.h
+++ b/include/linux/uwb/spec.h
@@ -32,6 +32,7 @@
#include <linux/types.h>
#include <linux/bitmap.h>
+#include <linux/if_ether.h>
#define i1480_FW 0x00000303
/* #define i1480_FW 0x00000302 */
@@ -130,7 +131,7 @@ enum { UWB_DRP_BACKOFF_WIN_MAX = 16 };
* it is also used to define headers sent down and up the wire/radio).
*/
struct uwb_mac_addr {
- u8 data[6];
+ u8 data[ETH_ALEN];
} __attribute__((packed));
@@ -568,7 +569,7 @@ struct uwb_rc_evt_confirm {
/* Device Address Management event. [WHCI] section 3.1.3.2. */
struct uwb_rc_evt_dev_addr_mgmt {
struct uwb_rceb rceb;
- u8 baAddr[6];
+ u8 baAddr[ETH_ALEN];
u8 bResultCode;
} __attribute__((packed));
diff --git a/include/media/tveeprom.h b/include/media/tveeprom.h
index 4a1191abd936..f7119ee3977b 100644
--- a/include/media/tveeprom.h
+++ b/include/media/tveeprom.h
@@ -12,6 +12,8 @@ enum tveeprom_audio_processor {
TVEEPROM_AUDPROC_OTHER,
};
+#include <linux/if_ether.h>
+
struct tveeprom {
u32 has_radio;
/* If has_ir == 0, then it is unknown what the IR capabilities are,
@@ -40,7 +42,7 @@ struct tveeprom {
u32 revision;
u32 serial_number;
char rev_str[5];
- u8 MAC_address[6];
+ u8 MAC_address[ETH_ALEN];
};
void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee,
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index d9fa68f26c41..9a36d9297114 100644
--- a/include/net/9p/transport.h
+++ b/include/net/9p/transport.h
@@ -40,8 +40,6 @@
* @close: member function to discard a connection on this transport
* @request: member function to issue a request to the transport
* @cancel: member function to cancel a request (if it hasn't been sent)
- * @cancelled: member function to notify that a cancelled request will not
- * not receive a reply
*
* This is the basic API for a transport module which is registered by the
* transport module with the 9P core network module and used by the client
@@ -60,7 +58,6 @@ struct p9_trans_module {
void (*close) (struct p9_client *);
int (*request) (struct p9_client *, struct p9_req_t *req);
int (*cancel) (struct p9_client *, struct p9_req_t *req);
- int (*cancelled)(struct p9_client *, struct p9_req_t *req);
int (*zc_request)(struct p9_client *, struct p9_req_t *,
char *, char *, int , int, int, int);
};
diff --git a/include/net/act_api.h b/include/net/act_api.h
index b8ffac7b6bab..9e90fdff470d 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -82,36 +82,36 @@ struct tc_action_ops {
int (*walk)(struct sk_buff *, struct netlink_callback *, int, struct tc_action *);
};
-extern struct tcf_common *tcf_hash_lookup(u32 index,
- struct tcf_hashinfo *hinfo);
-extern void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo);
-extern int tcf_hash_release(struct tcf_common *p, int bind,
- struct tcf_hashinfo *hinfo);
-extern int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
- int type, struct tc_action *a);
-extern u32 tcf_hash_new_index(u32 *idx_gen, struct tcf_hashinfo *hinfo);
-extern int tcf_hash_search(struct tc_action *a, u32 index);
-extern struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a,
- int bind, struct tcf_hashinfo *hinfo);
-extern struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est,
- struct tc_action *a, int size,
- int bind, u32 *idx_gen,
- struct tcf_hashinfo *hinfo);
-extern void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo);
+struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo);
+void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo);
+int tcf_hash_release(struct tcf_common *p, int bind,
+ struct tcf_hashinfo *hinfo);
+int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
+ int type, struct tc_action *a);
+u32 tcf_hash_new_index(u32 *idx_gen, struct tcf_hashinfo *hinfo);
+int tcf_hash_search(struct tc_action *a, u32 index);
+struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a,
+ int bind, struct tcf_hashinfo *hinfo);
+struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est,
+ struct tc_action *a, int size,
+ int bind, u32 *idx_gen,
+ struct tcf_hashinfo *hinfo);
+void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo);
-extern int tcf_register_action(struct tc_action_ops *a);
-extern int tcf_unregister_action(struct tc_action_ops *a);
-extern void tcf_action_destroy(struct tc_action *a, int bind);
-extern int tcf_action_exec(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res);
-extern struct tc_action *tcf_action_init(struct net *net, struct nlattr *nla,
- struct nlattr *est, char *n, int ovr,
- int bind);
-extern struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
- struct nlattr *est, char *n, int ovr,
- int bind);
-extern int tcf_action_dump(struct sk_buff *skb, struct tc_action *a, int, int);
-extern int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
-extern int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
-extern int tcf_action_copy_stats (struct sk_buff *,struct tc_action *, int);
+int tcf_register_action(struct tc_action_ops *a);
+int tcf_unregister_action(struct tc_action_ops *a);
+void tcf_action_destroy(struct tc_action *a, int bind);
+int tcf_action_exec(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res);
+struct tc_action *tcf_action_init(struct net *net, struct nlattr *nla,
+ struct nlattr *est, char *n, int ovr,
+ int bind);
+struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
+ struct nlattr *est, char *n, int ovr,
+ int bind);
+int tcf_action_dump(struct sk_buff *skb, struct tc_action *a, int, int);
+int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
+int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
+int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
#endif /* CONFIG_NET_CLS_ACT */
#endif
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index c7b181cb47a6..fb314de2b61b 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -53,51 +53,36 @@ struct prefix_info {
#define IN6_ADDR_HSIZE_SHIFT 4
#define IN6_ADDR_HSIZE (1 << IN6_ADDR_HSIZE_SHIFT)
-extern int addrconf_init(void);
-extern void addrconf_cleanup(void);
+int addrconf_init(void);
+void addrconf_cleanup(void);
-extern int addrconf_add_ifaddr(struct net *net,
- void __user *arg);
-extern int addrconf_del_ifaddr(struct net *net,
- void __user *arg);
-extern int addrconf_set_dstaddr(struct net *net,
- void __user *arg);
+int addrconf_add_ifaddr(struct net *net, void __user *arg);
+int addrconf_del_ifaddr(struct net *net, void __user *arg);
+int addrconf_set_dstaddr(struct net *net, void __user *arg);
-extern int ipv6_chk_addr(struct net *net,
- const struct in6_addr *addr,
- const struct net_device *dev,
- int strict);
+int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
+ const struct net_device *dev, int strict);
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
-extern int ipv6_chk_home_addr(struct net *net,
- const struct in6_addr *addr);
+int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr);
#endif
-extern int ipv6_chk_prefix(const struct in6_addr *addr,
- struct net_device *dev);
-
-extern struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net,
- const struct in6_addr *addr,
- struct net_device *dev,
- int strict);
-
-extern int ipv6_dev_get_saddr(struct net *net,
- const struct net_device *dev,
- const struct in6_addr *daddr,
- unsigned int srcprefs,
- struct in6_addr *saddr);
-extern int __ipv6_get_lladdr(struct inet6_dev *idev,
- struct in6_addr *addr,
- unsigned char banned_flags);
-extern int ipv6_get_lladdr(struct net_device *dev,
- struct in6_addr *addr,
- unsigned char banned_flags);
-extern int ipv6_rcv_saddr_equal(const struct sock *sk,
- const struct sock *sk2);
-extern void addrconf_join_solict(struct net_device *dev,
- const struct in6_addr *addr);
-extern void addrconf_leave_solict(struct inet6_dev *idev,
- const struct in6_addr *addr);
+int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev);
+
+struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net,
+ const struct in6_addr *addr,
+ struct net_device *dev, int strict);
+
+int ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
+ const struct in6_addr *daddr, unsigned int srcprefs,
+ struct in6_addr *saddr);
+int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
+ unsigned char banned_flags);
+int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
+ unsigned char banned_flags);
+int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2);
+void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr);
+void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr);
static inline unsigned long addrconf_timeout_fixup(u32 timeout,
unsigned int unit)
@@ -124,41 +109,58 @@ static inline int addrconf_finite_timeout(unsigned long timeout)
/*
* IPv6 Address Label subsystem (addrlabel.c)
*/
-extern int ipv6_addr_label_init(void);
-extern void ipv6_addr_label_cleanup(void);
-extern void ipv6_addr_label_rtnl_register(void);
-extern u32 ipv6_addr_label(struct net *net,
- const struct in6_addr *addr,
- int type, int ifindex);
+int ipv6_addr_label_init(void);
+void ipv6_addr_label_cleanup(void);
+void ipv6_addr_label_rtnl_register(void);
+u32 ipv6_addr_label(struct net *net, const struct in6_addr *addr,
+ int type, int ifindex);
/*
* multicast prototypes (mcast.c)
*/
-extern int ipv6_sock_mc_join(struct sock *sk, int ifindex,
- const struct in6_addr *addr);
-extern int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
- const struct in6_addr *addr);
-extern void ipv6_sock_mc_close(struct sock *sk);
-extern bool inet6_mc_check(struct sock *sk,
- const struct in6_addr *mc_addr,
- const struct in6_addr *src_addr);
-
-extern int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr);
-extern int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr);
-extern int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr);
-extern void ipv6_mc_up(struct inet6_dev *idev);
-extern void ipv6_mc_down(struct inet6_dev *idev);
-extern void ipv6_mc_unmap(struct inet6_dev *idev);
-extern void ipv6_mc_remap(struct inet6_dev *idev);
-extern void ipv6_mc_init_dev(struct inet6_dev *idev);
-extern void ipv6_mc_destroy_dev(struct inet6_dev *idev);
-extern void addrconf_dad_failure(struct inet6_ifaddr *ifp);
-
-extern bool ipv6_chk_mcast_addr(struct net_device *dev,
- const struct in6_addr *group,
- const struct in6_addr *src_addr);
-
-extern void ipv6_mc_dad_complete(struct inet6_dev *idev);
+int ipv6_sock_mc_join(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+void ipv6_sock_mc_close(struct sock *sk);
+bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
+ const struct in6_addr *src_addr);
+
+int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr);
+int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr);
+int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr);
+void ipv6_mc_up(struct inet6_dev *idev);
+void ipv6_mc_down(struct inet6_dev *idev);
+void ipv6_mc_unmap(struct inet6_dev *idev);
+void ipv6_mc_remap(struct inet6_dev *idev);
+void ipv6_mc_init_dev(struct inet6_dev *idev);
+void ipv6_mc_destroy_dev(struct inet6_dev *idev);
+void addrconf_dad_failure(struct inet6_ifaddr *ifp);
+
+bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
+ const struct in6_addr *src_addr);
+
+void ipv6_mc_dad_complete(struct inet6_dev *idev);
+
+/* A stub used by the vxlan module. This is ugly; ideally these
+ * symbols should be built into the core kernel.
+ */
+struct ipv6_stub {
+ int (*ipv6_sock_mc_join)(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+ int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+ int (*ipv6_dst_lookup)(struct sock *sk, struct dst_entry **dst,
+ struct flowi6 *fl6);
+ void (*udpv6_encap_enable)(void);
+ void (*ndisc_send_na)(struct net_device *dev, struct neighbour *neigh,
+ const struct in6_addr *daddr,
+ const struct in6_addr *solicited_addr,
+ bool router, bool solicited, bool override, bool inc_opt);
+ struct neigh_table *nd_tbl;
+};
+extern const struct ipv6_stub *ipv6_stub __read_mostly;
+
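A minimal sketch (not part of the patch) of a module going through the stub rather than calling the ipv6 symbols directly; my_join_group() is hypothetical, and whether ipv6_stub may be NULL depends on how the stub ends up being registered:
static int my_join_group(struct sock *sk, int ifindex,
			 const struct in6_addr *grp)
{
	if (!ipv6_stub)		/* IPv6 support not available */
		return -EAFNOSUPPORT;

	return ipv6_stub->ipv6_sock_mc_join(sk, ifindex, grp);
}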
/*
* identify MLD packets for MLD filter exceptions
*/
@@ -184,29 +186,31 @@ static inline bool ipv6_is_mld(struct sk_buff *skb, int nexthdr, int offset)
return false;
}
-extern void addrconf_prefix_rcv(struct net_device *dev,
- u8 *opt, int len, bool sllao);
+void addrconf_prefix_rcv(struct net_device *dev,
+ u8 *opt, int len, bool sllao);
/*
* anycast prototypes (anycast.c)
*/
-extern int ipv6_sock_ac_join(struct sock *sk,int ifindex, const struct in6_addr *addr);
-extern int ipv6_sock_ac_drop(struct sock *sk,int ifindex, const struct in6_addr *addr);
-extern void ipv6_sock_ac_close(struct sock *sk);
-
-extern int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr);
-extern int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr);
-extern bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
+int ipv6_sock_ac_join(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+int ipv6_sock_ac_drop(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+void ipv6_sock_ac_close(struct sock *sk);
+
+int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr);
+int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr);
+bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
const struct in6_addr *addr);
/* Device notifier */
-extern int register_inet6addr_notifier(struct notifier_block *nb);
-extern int unregister_inet6addr_notifier(struct notifier_block *nb);
-extern int inet6addr_notifier_call_chain(unsigned long val, void *v);
+int register_inet6addr_notifier(struct notifier_block *nb);
+int unregister_inet6addr_notifier(struct notifier_block *nb);
+int inet6addr_notifier_call_chain(unsigned long val, void *v);
-extern void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex,
- struct ipv6_devconf *devconf);
+void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex,
+ struct ipv6_devconf *devconf);
/**
* __in6_dev_get - get inet6_dev pointer from netdevice
@@ -240,7 +244,7 @@ static inline struct inet6_dev *in6_dev_get(const struct net_device *dev)
return idev;
}
-extern void in6_dev_finish_destroy(struct inet6_dev *idev);
+void in6_dev_finish_destroy(struct inet6_dev *idev);
static inline void in6_dev_put(struct inet6_dev *idev)
{
@@ -258,7 +262,7 @@ static inline void in6_dev_hold(struct inet6_dev *idev)
atomic_inc(&idev->refcnt);
}
-extern void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp);
+void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp);
static inline void in6_ifa_put(struct inet6_ifaddr *ifp)
{
@@ -340,8 +344,8 @@ static inline bool ipv6_addr_is_solict_mult(const struct in6_addr *addr)
}
#ifdef CONFIG_PROC_FS
-extern int if6_proc_init(void);
-extern void if6_proc_exit(void);
+int if6_proc_init(void);
+void if6_proc_exit(void);
#endif
#endif
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 03e6e9453623..e797d45a5ae6 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -31,24 +31,21 @@ enum {
typedef void (*rxrpc_interceptor_t)(struct sock *, unsigned long,
struct sk_buff *);
-extern void rxrpc_kernel_intercept_rx_messages(struct socket *,
- rxrpc_interceptor_t);
-extern struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
- struct sockaddr_rxrpc *,
- struct key *,
- unsigned long,
- gfp_t);
-extern int rxrpc_kernel_send_data(struct rxrpc_call *, struct msghdr *,
- size_t);
-extern void rxrpc_kernel_abort_call(struct rxrpc_call *, u32);
-extern void rxrpc_kernel_end_call(struct rxrpc_call *);
-extern bool rxrpc_kernel_is_data_last(struct sk_buff *);
-extern u32 rxrpc_kernel_get_abort_code(struct sk_buff *);
-extern int rxrpc_kernel_get_error_number(struct sk_buff *);
-extern void rxrpc_kernel_data_delivered(struct sk_buff *);
-extern void rxrpc_kernel_free_skb(struct sk_buff *);
-extern struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *,
- unsigned long);
-extern int rxrpc_kernel_reject_call(struct socket *);
+void rxrpc_kernel_intercept_rx_messages(struct socket *, rxrpc_interceptor_t);
+struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
+ struct sockaddr_rxrpc *,
+ struct key *,
+ unsigned long,
+ gfp_t);
+int rxrpc_kernel_send_data(struct rxrpc_call *, struct msghdr *, size_t);
+void rxrpc_kernel_abort_call(struct rxrpc_call *, u32);
+void rxrpc_kernel_end_call(struct rxrpc_call *);
+bool rxrpc_kernel_is_data_last(struct sk_buff *);
+u32 rxrpc_kernel_get_abort_code(struct sk_buff *);
+int rxrpc_kernel_get_error_number(struct sk_buff *);
+void rxrpc_kernel_data_delivered(struct sk_buff *);
+void rxrpc_kernel_free_skb(struct sk_buff *);
+struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *, unsigned long);
+int rxrpc_kernel_reject_call(struct socket *);
#endif /* _NET_RXRPC_H */
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index dbdfd2b0f3b3..a175ba4a7adb 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -6,12 +6,12 @@
#include <linux/mutex.h>
#include <net/sock.h>
-extern void unix_inflight(struct file *fp);
-extern void unix_notinflight(struct file *fp);
-extern void unix_gc(void);
-extern void wait_for_unix_gc(void);
-extern struct sock *unix_get_socket(struct file *filp);
-extern struct sock *unix_peer_get(struct sock *);
+void unix_inflight(struct file *fp);
+void unix_notinflight(struct file *fp);
+void unix_gc(void);
+void wait_for_unix_gc(void);
+struct sock *unix_get_socket(struct file *filp);
+struct sock *unix_peer_get(struct sock *);
#define UNIX_HASH_SIZE 256
#define UNIX_HASH_BITS 8
@@ -35,6 +35,7 @@ struct unix_skb_parms {
#ifdef CONFIG_SECURITY_NETWORK
u32 secid; /* Security ID */
#endif
+ u32 consumed;
};
#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
@@ -71,8 +72,8 @@ long unix_inq_len(struct sock *sk);
long unix_outq_len(struct sock *sk);
#ifdef CONFIG_SYSCTL
-extern int unix_sysctl_register(struct net *net);
-extern void unix_sysctl_unregister(struct net *net);
+int unix_sysctl_register(struct net *net);
+void unix_sysctl_unregister(struct net *net);
#else
static inline int unix_sysctl_register(struct net *net) { return 0; }
static inline void unix_sysctl_unregister(struct net *net) {}
diff --git a/net/vmw_vsock/af_vsock.h b/include/net/af_vsock.h
index 7d64d3609ec9..7d64d3609ec9 100644
--- a/net/vmw_vsock/af_vsock.h
+++ b/include/net/af_vsock.h
diff --git a/include/net/arp.h b/include/net/arp.h
index b630dae03411..7509d9da4e36 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -46,22 +46,22 @@ static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32
return n;
}
-extern void arp_init(void);
-extern int arp_find(unsigned char *haddr, struct sk_buff *skb);
-extern int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg);
-extern void arp_send(int type, int ptype, __be32 dest_ip,
- struct net_device *dev, __be32 src_ip,
- const unsigned char *dest_hw,
- const unsigned char *src_hw, const unsigned char *th);
-extern int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir);
-extern void arp_ifdown(struct net_device *dev);
+void arp_init(void);
+int arp_find(unsigned char *haddr, struct sk_buff *skb);
+int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg);
+void arp_send(int type, int ptype, __be32 dest_ip,
+ struct net_device *dev, __be32 src_ip,
+ const unsigned char *dest_hw,
+ const unsigned char *src_hw, const unsigned char *th);
+int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir);
+void arp_ifdown(struct net_device *dev);
-extern struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
- struct net_device *dev, __be32 src_ip,
- const unsigned char *dest_hw,
- const unsigned char *src_hw,
- const unsigned char *target_hw);
-extern void arp_xmit(struct sk_buff *skb);
+struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
+ struct net_device *dev, __be32 src_ip,
+ const unsigned char *dest_hw,
+ const unsigned char *src_hw,
+ const unsigned char *target_hw);
+void arp_xmit(struct sk_buff *skb);
int arp_invalidate(struct net_device *dev, __be32 ip);
#endif /* _ARP_H */
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 89ed9ac5701f..bf0396e9a5d3 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -195,7 +195,7 @@ static inline void ax25_hold_route(ax25_route *ax25_rt)
atomic_inc(&ax25_rt->refcount);
}
-extern void __ax25_put_route(ax25_route *ax25_rt);
+void __ax25_put_route(ax25_route *ax25_rt);
static inline void ax25_put_route(ax25_route *ax25_rt)
{
@@ -272,30 +272,31 @@ static inline __be16 ax25_type_trans(struct sk_buff *skb, struct net_device *dev
/* af_ax25.c */
extern struct hlist_head ax25_list;
extern spinlock_t ax25_list_lock;
-extern void ax25_cb_add(ax25_cb *);
+void ax25_cb_add(ax25_cb *);
struct sock *ax25_find_listener(ax25_address *, int, struct net_device *, int);
struct sock *ax25_get_socket(ax25_address *, ax25_address *, int);
-extern ax25_cb *ax25_find_cb(ax25_address *, ax25_address *, ax25_digi *, struct net_device *);
-extern void ax25_send_to_raw(ax25_address *, struct sk_buff *, int);
-extern void ax25_destroy_socket(ax25_cb *);
-extern ax25_cb * __must_check ax25_create_cb(void);
-extern void ax25_fillin_cb(ax25_cb *, ax25_dev *);
-extern struct sock *ax25_make_new(struct sock *, struct ax25_dev *);
+ax25_cb *ax25_find_cb(ax25_address *, ax25_address *, ax25_digi *,
+ struct net_device *);
+void ax25_send_to_raw(ax25_address *, struct sk_buff *, int);
+void ax25_destroy_socket(ax25_cb *);
+ax25_cb * __must_check ax25_create_cb(void);
+void ax25_fillin_cb(ax25_cb *, ax25_dev *);
+struct sock *ax25_make_new(struct sock *, struct ax25_dev *);
/* ax25_addr.c */
extern const ax25_address ax25_bcast;
extern const ax25_address ax25_defaddr;
extern const ax25_address null_ax25_address;
-extern char *ax2asc(char *buf, const ax25_address *);
-extern void asc2ax(ax25_address *addr, const char *callsign);
-extern int ax25cmp(const ax25_address *, const ax25_address *);
-extern int ax25digicmp(const ax25_digi *, const ax25_digi *);
-extern const unsigned char *ax25_addr_parse(const unsigned char *, int,
+char *ax2asc(char *buf, const ax25_address *);
+void asc2ax(ax25_address *addr, const char *callsign);
+int ax25cmp(const ax25_address *, const ax25_address *);
+int ax25digicmp(const ax25_digi *, const ax25_digi *);
+const unsigned char *ax25_addr_parse(const unsigned char *, int,
ax25_address *, ax25_address *, ax25_digi *, int *, int *);
-extern int ax25_addr_build(unsigned char *, const ax25_address *,
- const ax25_address *, const ax25_digi *, int, int);
-extern int ax25_addr_size(const ax25_digi *);
-extern void ax25_digi_invert(const ax25_digi *, ax25_digi *);
+int ax25_addr_build(unsigned char *, const ax25_address *,
+ const ax25_address *, const ax25_digi *, int, int);
+int ax25_addr_size(const ax25_digi *);
+void ax25_digi_invert(const ax25_digi *, ax25_digi *);
/* ax25_dev.c */
extern ax25_dev *ax25_dev_list;
@@ -306,33 +307,33 @@ static inline ax25_dev *ax25_dev_ax25dev(struct net_device *dev)
return dev->ax25_ptr;
}
-extern ax25_dev *ax25_addr_ax25dev(ax25_address *);
-extern void ax25_dev_device_up(struct net_device *);
-extern void ax25_dev_device_down(struct net_device *);
-extern int ax25_fwd_ioctl(unsigned int, struct ax25_fwd_struct *);
-extern struct net_device *ax25_fwd_dev(struct net_device *);
-extern void ax25_dev_free(void);
+ax25_dev *ax25_addr_ax25dev(ax25_address *);
+void ax25_dev_device_up(struct net_device *);
+void ax25_dev_device_down(struct net_device *);
+int ax25_fwd_ioctl(unsigned int, struct ax25_fwd_struct *);
+struct net_device *ax25_fwd_dev(struct net_device *);
+void ax25_dev_free(void);
/* ax25_ds_in.c */
-extern int ax25_ds_frame_in(ax25_cb *, struct sk_buff *, int);
+int ax25_ds_frame_in(ax25_cb *, struct sk_buff *, int);
/* ax25_ds_subr.c */
-extern void ax25_ds_nr_error_recovery(ax25_cb *);
-extern void ax25_ds_enquiry_response(ax25_cb *);
-extern void ax25_ds_establish_data_link(ax25_cb *);
-extern void ax25_dev_dama_off(ax25_dev *);
-extern void ax25_dama_on(ax25_cb *);
-extern void ax25_dama_off(ax25_cb *);
+void ax25_ds_nr_error_recovery(ax25_cb *);
+void ax25_ds_enquiry_response(ax25_cb *);
+void ax25_ds_establish_data_link(ax25_cb *);
+void ax25_dev_dama_off(ax25_dev *);
+void ax25_dama_on(ax25_cb *);
+void ax25_dama_off(ax25_cb *);
/* ax25_ds_timer.c */
-extern void ax25_ds_setup_timer(ax25_dev *);
-extern void ax25_ds_set_timer(ax25_dev *);
-extern void ax25_ds_del_timer(ax25_dev *);
-extern void ax25_ds_timer(ax25_cb *);
-extern void ax25_ds_t1_timeout(ax25_cb *);
-extern void ax25_ds_heartbeat_expiry(ax25_cb *);
-extern void ax25_ds_t3timer_expiry(ax25_cb *);
-extern void ax25_ds_idletimer_expiry(ax25_cb *);
+void ax25_ds_setup_timer(ax25_dev *);
+void ax25_ds_set_timer(ax25_dev *);
+void ax25_ds_del_timer(ax25_dev *);
+void ax25_ds_timer(ax25_cb *);
+void ax25_ds_t1_timeout(ax25_cb *);
+void ax25_ds_heartbeat_expiry(ax25_cb *);
+void ax25_ds_t3timer_expiry(ax25_cb *);
+void ax25_ds_idletimer_expiry(ax25_cb *);
/* ax25_iface.c */
@@ -342,107 +343,109 @@ struct ax25_protocol {
int (*func)(struct sk_buff *, ax25_cb *);
};
-extern void ax25_register_pid(struct ax25_protocol *ap);
-extern void ax25_protocol_release(unsigned int);
+void ax25_register_pid(struct ax25_protocol *ap);
+void ax25_protocol_release(unsigned int);
struct ax25_linkfail {
struct hlist_node lf_node;
void (*func)(ax25_cb *, int);
};
-extern void ax25_linkfail_register(struct ax25_linkfail *lf);
-extern void ax25_linkfail_release(struct ax25_linkfail *lf);
-extern int __must_check ax25_listen_register(ax25_address *,
- struct net_device *);
-extern void ax25_listen_release(ax25_address *, struct net_device *);
-extern int (*ax25_protocol_function(unsigned int))(struct sk_buff *, ax25_cb *);
-extern int ax25_listen_mine(ax25_address *, struct net_device *);
-extern void ax25_link_failed(ax25_cb *, int);
-extern int ax25_protocol_is_registered(unsigned int);
+void ax25_linkfail_register(struct ax25_linkfail *lf);
+void ax25_linkfail_release(struct ax25_linkfail *lf);
+int __must_check ax25_listen_register(ax25_address *, struct net_device *);
+void ax25_listen_release(ax25_address *, struct net_device *);
+int (*ax25_protocol_function(unsigned int))(struct sk_buff *, ax25_cb *);
+int ax25_listen_mine(ax25_address *, struct net_device *);
+void ax25_link_failed(ax25_cb *, int);
+int ax25_protocol_is_registered(unsigned int);
/* ax25_in.c */
-extern int ax25_rx_iframe(ax25_cb *, struct sk_buff *);
-extern int ax25_kiss_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
+int ax25_rx_iframe(ax25_cb *, struct sk_buff *);
+int ax25_kiss_rcv(struct sk_buff *, struct net_device *, struct packet_type *,
+ struct net_device *);
/* ax25_ip.c */
-extern int ax25_hard_header(struct sk_buff *, struct net_device *,
- unsigned short, const void *,
- const void *, unsigned int);
-extern int ax25_rebuild_header(struct sk_buff *);
+int ax25_hard_header(struct sk_buff *, struct net_device *, unsigned short,
+ const void *, const void *, unsigned int);
+int ax25_rebuild_header(struct sk_buff *);
extern const struct header_ops ax25_header_ops;
/* ax25_out.c */
-extern ax25_cb *ax25_send_frame(struct sk_buff *, int, ax25_address *, ax25_address *, ax25_digi *, struct net_device *);
-extern void ax25_output(ax25_cb *, int, struct sk_buff *);
-extern void ax25_kick(ax25_cb *);
-extern void ax25_transmit_buffer(ax25_cb *, struct sk_buff *, int);
-extern void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev);
-extern int ax25_check_iframes_acked(ax25_cb *, unsigned short);
+ax25_cb *ax25_send_frame(struct sk_buff *, int, ax25_address *, ax25_address *,
+ ax25_digi *, struct net_device *);
+void ax25_output(ax25_cb *, int, struct sk_buff *);
+void ax25_kick(ax25_cb *);
+void ax25_transmit_buffer(ax25_cb *, struct sk_buff *, int);
+void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev);
+int ax25_check_iframes_acked(ax25_cb *, unsigned short);
/* ax25_route.c */
-extern void ax25_rt_device_down(struct net_device *);
-extern int ax25_rt_ioctl(unsigned int, void __user *);
+void ax25_rt_device_down(struct net_device *);
+int ax25_rt_ioctl(unsigned int, void __user *);
extern const struct file_operations ax25_route_fops;
-extern ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev);
-extern int ax25_rt_autobind(ax25_cb *, ax25_address *);
-extern struct sk_buff *ax25_rt_build_path(struct sk_buff *, ax25_address *, ax25_address *, ax25_digi *);
-extern void ax25_rt_free(void);
+ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev);
+int ax25_rt_autobind(ax25_cb *, ax25_address *);
+struct sk_buff *ax25_rt_build_path(struct sk_buff *, ax25_address *,
+ ax25_address *, ax25_digi *);
+void ax25_rt_free(void);
/* ax25_std_in.c */
-extern int ax25_std_frame_in(ax25_cb *, struct sk_buff *, int);
+int ax25_std_frame_in(ax25_cb *, struct sk_buff *, int);
/* ax25_std_subr.c */
-extern void ax25_std_nr_error_recovery(ax25_cb *);
-extern void ax25_std_establish_data_link(ax25_cb *);
-extern void ax25_std_transmit_enquiry(ax25_cb *);
-extern void ax25_std_enquiry_response(ax25_cb *);
-extern void ax25_std_timeout_response(ax25_cb *);
+void ax25_std_nr_error_recovery(ax25_cb *);
+void ax25_std_establish_data_link(ax25_cb *);
+void ax25_std_transmit_enquiry(ax25_cb *);
+void ax25_std_enquiry_response(ax25_cb *);
+void ax25_std_timeout_response(ax25_cb *);
/* ax25_std_timer.c */
-extern void ax25_std_heartbeat_expiry(ax25_cb *);
-extern void ax25_std_t1timer_expiry(ax25_cb *);
-extern void ax25_std_t2timer_expiry(ax25_cb *);
-extern void ax25_std_t3timer_expiry(ax25_cb *);
-extern void ax25_std_idletimer_expiry(ax25_cb *);
+void ax25_std_heartbeat_expiry(ax25_cb *);
+void ax25_std_t1timer_expiry(ax25_cb *);
+void ax25_std_t2timer_expiry(ax25_cb *);
+void ax25_std_t3timer_expiry(ax25_cb *);
+void ax25_std_idletimer_expiry(ax25_cb *);
/* ax25_subr.c */
-extern void ax25_clear_queues(ax25_cb *);
-extern void ax25_frames_acked(ax25_cb *, unsigned short);
-extern void ax25_requeue_frames(ax25_cb *);
-extern int ax25_validate_nr(ax25_cb *, unsigned short);
-extern int ax25_decode(ax25_cb *, struct sk_buff *, int *, int *, int *);
-extern void ax25_send_control(ax25_cb *, int, int, int);
-extern void ax25_return_dm(struct net_device *, ax25_address *, ax25_address *, ax25_digi *);
-extern void ax25_calculate_t1(ax25_cb *);
-extern void ax25_calculate_rtt(ax25_cb *);
-extern void ax25_disconnect(ax25_cb *, int);
+void ax25_clear_queues(ax25_cb *);
+void ax25_frames_acked(ax25_cb *, unsigned short);
+void ax25_requeue_frames(ax25_cb *);
+int ax25_validate_nr(ax25_cb *, unsigned short);
+int ax25_decode(ax25_cb *, struct sk_buff *, int *, int *, int *);
+void ax25_send_control(ax25_cb *, int, int, int);
+void ax25_return_dm(struct net_device *, ax25_address *, ax25_address *,
+ ax25_digi *);
+void ax25_calculate_t1(ax25_cb *);
+void ax25_calculate_rtt(ax25_cb *);
+void ax25_disconnect(ax25_cb *, int);
/* ax25_timer.c */
-extern void ax25_setup_timers(ax25_cb *);
-extern void ax25_start_heartbeat(ax25_cb *);
-extern void ax25_start_t1timer(ax25_cb *);
-extern void ax25_start_t2timer(ax25_cb *);
-extern void ax25_start_t3timer(ax25_cb *);
-extern void ax25_start_idletimer(ax25_cb *);
-extern void ax25_stop_heartbeat(ax25_cb *);
-extern void ax25_stop_t1timer(ax25_cb *);
-extern void ax25_stop_t2timer(ax25_cb *);
-extern void ax25_stop_t3timer(ax25_cb *);
-extern void ax25_stop_idletimer(ax25_cb *);
-extern int ax25_t1timer_running(ax25_cb *);
-extern unsigned long ax25_display_timer(struct timer_list *);
+void ax25_setup_timers(ax25_cb *);
+void ax25_start_heartbeat(ax25_cb *);
+void ax25_start_t1timer(ax25_cb *);
+void ax25_start_t2timer(ax25_cb *);
+void ax25_start_t3timer(ax25_cb *);
+void ax25_start_idletimer(ax25_cb *);
+void ax25_stop_heartbeat(ax25_cb *);
+void ax25_stop_t1timer(ax25_cb *);
+void ax25_stop_t2timer(ax25_cb *);
+void ax25_stop_t3timer(ax25_cb *);
+void ax25_stop_idletimer(ax25_cb *);
+int ax25_t1timer_running(ax25_cb *);
+unsigned long ax25_display_timer(struct timer_list *);
/* ax25_uid.c */
extern int ax25_uid_policy;
-extern ax25_uid_assoc *ax25_findbyuid(kuid_t);
-extern int __must_check ax25_uid_ioctl(int, struct sockaddr_ax25 *);
+ax25_uid_assoc *ax25_findbyuid(kuid_t);
+int __must_check ax25_uid_ioctl(int, struct sockaddr_ax25 *);
extern const struct file_operations ax25_uid_fops;
-extern void ax25_uid_free(void);
+void ax25_uid_free(void);
/* sysctl_net_ax25.c */
#ifdef CONFIG_SYSCTL
-extern int ax25_register_dev_sysctl(ax25_dev *ax25_dev);
-extern void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev);
+int ax25_register_dev_sysctl(ax25_dev *ax25_dev);
+void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev);
#else
static inline int ax25_register_dev_sysctl(ax25_dev *ax25_dev) { return 0; }
static inline void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev) {}
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 10eb9b389014..10d43d8c7037 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -107,6 +107,14 @@ struct bt_power {
*/
#define BT_CHANNEL_POLICY_AMP_PREFERRED 2
+#define BT_VOICE 11
+struct bt_voice {
+ __u16 setting;
+};
+
+#define BT_VOICE_TRANSPARENT 0x0003
+#define BT_VOICE_CVSD_16BIT 0x0060
+
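A minimal userspace sketch (not part of the patch) of the new socket option, e.g. to request a transparent air mode for wideband speech; request_transparent_voice() is a hypothetical helper, sk is an already-created SCO socket, and the fallback defines mirror the header above for libbluetooth versions that predate it:
#include <stdio.h>
#include <stdint.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>	/* SOL_BLUETOOTH */

#ifndef BT_VOICE			/* until libbluetooth catches up */
#define BT_VOICE 11
struct bt_voice {
	uint16_t setting;
};
#define BT_VOICE_TRANSPARENT 0x0003
#endif

static int request_transparent_voice(int sk)
{
	struct bt_voice voice = { .setting = BT_VOICE_TRANSPARENT };

	if (setsockopt(sk, SOL_BLUETOOTH, BT_VOICE, &voice,
		       sizeof(voice)) < 0) {
		perror("setsockopt(BT_VOICE)");
		return -1;
	}
	return 0;
}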
__printf(1, 2)
int bt_info(const char *fmt, ...);
__printf(1, 2)
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 3c592cf473da..aaeaf0938ec0 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -238,6 +238,7 @@ enum {
#define LMP_CVSD 0x01
#define LMP_PSCHEME 0x02
#define LMP_PCONTROL 0x04
+#define LMP_TRANSPARENT 0x08
#define LMP_RSSI_INQ 0x40
#define LMP_ESCO 0x80
@@ -296,6 +297,12 @@ enum {
#define HCI_AT_GENERAL_BONDING 0x04
#define HCI_AT_GENERAL_BONDING_MITM 0x05
+/* I/O capabilities */
+#define HCI_IO_DISPLAY_ONLY 0x00
+#define HCI_IO_DISPLAY_YESNO 0x01
+#define HCI_IO_KEYBOARD_ONLY 0x02
+#define HCI_IO_NO_INPUT_OUTPUT 0x03
+
/* Link Key types */
#define HCI_LK_COMBINATION 0x00
#define HCI_LK_LOCAL_UNIT 0x01
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index f77885ea78c2..3ede820d328f 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -320,6 +320,7 @@ struct hci_conn {
__u32 passkey_notify;
__u8 passkey_entered;
__u16 disc_timeout;
+ __u16 setting;
unsigned long flags;
__u8 remote_cap;
@@ -569,7 +570,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
}
void hci_disconnect(struct hci_conn *conn, __u8 reason);
-void hci_setup_sync(struct hci_conn *conn, __u16 handle);
+bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
void hci_sco_setup(struct hci_conn *conn, __u8 status);
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
@@ -584,6 +585,8 @@ struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
__u8 dst_type, __u8 sec_level, __u8 auth_type);
+struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ __u16 setting);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type);
@@ -797,6 +800,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define lmp_lsto_capable(dev) ((dev)->features[0][7] & LMP_LSTO)
#define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR)
#define lmp_ext_feat_capable(dev) ((dev)->features[0][7] & LMP_EXTFEATURES)
+#define lmp_transp_capable(dev) ((dev)->features[0][2] & LMP_TRANSPARENT)
/* ----- Extended LMP capabilities ----- */
#define lmp_host_ssp_capable(dev) ((dev)->features[1][0] & LMP_HOST_SSP)
@@ -1213,4 +1217,8 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
u8 bdaddr_to_le(u8 bdaddr_type);
+#define SCO_AIRMODE_MASK 0x0003
+#define SCO_AIRMODE_CVSD 0x0000
+#define SCO_AIRMODE_TRANSP 0x0003
+
#endif /* __HCI_CORE_H */
diff --git a/include/net/bluetooth/sco.h b/include/net/bluetooth/sco.h
index 1e35c43657c8..e252a31ee6b6 100644
--- a/include/net/bluetooth/sco.h
+++ b/include/net/bluetooth/sco.h
@@ -73,6 +73,7 @@ struct sco_conn {
struct sco_pinfo {
struct bt_sock bt;
__u32 flags;
+ __u16 setting;
struct sco_conn *conn;
};
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 7b0730aeb892..cb710913d5c8 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -461,6 +461,33 @@ ieee80211_chandef_rate_flags(struct cfg80211_chan_def *chandef)
}
/**
+ * ieee80211_chandef_max_power - maximum transmission power for the chandef
+ *
+ * In some regulations, the transmit power may depend on the configured channel
+ * bandwidth, which may be specified in dBm/MHz. This function returns the
+ * actual max_power for non-standard channel widths (narrower than 20 MHz).
+ *
+ * @chandef: channel definition for the channel
+ *
+ * Returns: maximum allowed transmission power in dBm for the chandef
+ */
+static inline int
+ieee80211_chandef_max_power(struct cfg80211_chan_def *chandef)
+{
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_5:
+ return min(chandef->chan->max_reg_power - 6,
+ chandef->chan->max_power);
+ case NL80211_CHAN_WIDTH_10:
+ return min(chandef->chan->max_reg_power - 3,
+ chandef->chan->max_power);
+ default:
+ break;
+ }
+ return chandef->chan->max_power;
+}
+
+/**
* enum survey_info_flags - survey information flags
*
* @SURVEY_INFO_NOISE_DBM: noise (in dBm) was filled in
@@ -490,7 +517,7 @@ enum survey_info_flags {
* @channel: the channel this survey record reports, mandatory
* @filled: bitflag of flags from &enum survey_info_flags
* @noise: channel noise in dBm. This and all following fields are
- * optional
+ * optional
* @channel_time: amount of time in ms the radio spent on the channel
* @channel_time_busy: amount of time the primary channel was sensed busy
* @channel_time_ext_busy: amount of time the extension channel was sensed busy
@@ -546,9 +573,9 @@ struct cfg80211_crypto_settings {
/**
* struct cfg80211_beacon_data - beacon data
* @head: head portion of beacon (before TIM IE)
- * or %NULL if not changed
+ * or %NULL if not changed
* @tail: tail portion of beacon (after TIM IE)
- * or %NULL if not changed
+ * or %NULL if not changed
* @head_len: length of @head
* @tail_len: length of @tail
* @beacon_ies: extra information element(s) to add into Beacon frames or %NULL
@@ -639,6 +666,30 @@ struct cfg80211_ap_settings {
};
/**
+ * struct cfg80211_csa_settings - channel switch settings
+ *
+ * Used for channel switch
+ *
+ * @chandef: defines the channel to use after the switch
+ * @beacon_csa: beacon data while performing the switch
+ * @counter_offset_beacon: offset for the counter within the beacon (tail)
+ * @counter_offset_presp: offset for the counter within the probe response
+ * @beacon_after: beacon data to be used on the new channel
+ * @radar_required: whether radar detection is required on the new channel
+ * @block_tx: whether transmissions should be blocked while changing
+ * @count: number of beacons until switch
+ */
+struct cfg80211_csa_settings {
+ struct cfg80211_chan_def chandef;
+ struct cfg80211_beacon_data beacon_csa;
+ u16 counter_offset_beacon, counter_offset_presp;
+ struct cfg80211_beacon_data beacon_after;
+ bool radar_required;
+ bool block_tx;
+ u8 count;
+};
+
+/**
* enum station_parameters_apply_mask - station parameter values to apply
* @STATION_PARAM_APPLY_UAPSD: apply new uAPSD parameters (uapsd_queues, max_sp)
* @STATION_PARAM_APPLY_CAPABILITY: apply new capability
@@ -764,7 +815,7 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
* @STATION_INFO_PLINK_STATE: @plink_state filled
* @STATION_INFO_SIGNAL: @signal filled
* @STATION_INFO_TX_BITRATE: @txrate fields are filled
- * (tx_bitrate, tx_bitrate_flags and tx_bitrate_mcs)
+ * (tx_bitrate, tx_bitrate_flags and tx_bitrate_mcs)
* @STATION_INFO_RX_PACKETS: @rx_packets filled with 32-bit value
* @STATION_INFO_TX_PACKETS: @tx_packets filled with 32-bit value
* @STATION_INFO_TX_RETRIES: @tx_retries filled
@@ -1285,6 +1336,7 @@ struct cfg80211_ssid {
* @n_ssids: number of SSIDs
* @channels: channels to scan on.
* @n_channels: total number of channels to scan
+ * @scan_width: channel width for scanning
* @ie: optional information element(s) to add into Probe Request or %NULL
* @ie_len: length of ie in octets
* @flags: bit field of flags controlling operation
@@ -1300,6 +1352,7 @@ struct cfg80211_scan_request {
struct cfg80211_ssid *ssids;
int n_ssids;
u32 n_channels;
+ enum nl80211_bss_scan_width scan_width;
const u8 *ie;
size_t ie_len;
u32 flags;
@@ -1333,6 +1386,7 @@ struct cfg80211_match_set {
* @ssids: SSIDs to scan for (passed in the probe_reqs in active scans)
* @n_ssids: number of SSIDs
* @n_channels: total number of channels to scan
+ * @scan_width: channel width for scanning
* @interval: interval between each scheduled scan cycle
* @ie: optional information element(s) to add into Probe Request or %NULL
* @ie_len: length of ie in octets
@@ -1352,6 +1406,7 @@ struct cfg80211_sched_scan_request {
struct cfg80211_ssid *ssids;
int n_ssids;
u32 n_channels;
+ enum nl80211_bss_scan_width scan_width;
u32 interval;
const u8 *ie;
size_t ie_len;
@@ -1403,6 +1458,7 @@ struct cfg80211_bss_ies {
* for use in scan results and similar.
*
* @channel: channel this BSS is on
+ * @scan_width: width of the control channel
* @bssid: BSSID of the BSS
* @beacon_interval: the beacon interval as from the frame
* @capability: the capability field in host byte order
@@ -1424,6 +1480,7 @@ struct cfg80211_bss_ies {
*/
struct cfg80211_bss {
struct ieee80211_channel *channel;
+ enum nl80211_bss_scan_width scan_width;
const struct cfg80211_bss_ies __rcu *ies;
const struct cfg80211_bss_ies __rcu *beacon_ies;
@@ -1509,7 +1566,7 @@ enum cfg80211_assoc_req_flags {
* @prev_bssid: previous BSSID, if not %NULL use reassociate frame
* @flags: See &enum cfg80211_assoc_req_flags
* @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask
- * will be used in ht_capa. Un-supported values will be ignored.
+ * will be used in ht_capa. Un-supported values will be ignored.
* @ht_capa_mask: The bits of ht_capa which are to be used.
* @vht_capa: VHT capability override
* @vht_capa_mask: VHT capability mask indicating which fields to use
@@ -1592,6 +1649,9 @@ struct cfg80211_disassoc_request {
* user space. Otherwise, port is marked authorized by default.
* @basic_rates: bitmap of basic rates to use when creating the IBSS
* @mcast_rate: per-band multicast rate index + 1 (0: disabled)
+ * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask
+ * will be used in ht_capa. Un-supported values will be ignored.
+ * @ht_capa_mask: The bits of ht_capa which are to be used.
*/
struct cfg80211_ibss_params {
u8 *ssid;
@@ -1605,6 +1665,8 @@ struct cfg80211_ibss_params {
bool privacy;
bool control_port;
int mcast_rate[IEEE80211_NUM_BANDS];
+ struct ieee80211_ht_cap ht_capa;
+ struct ieee80211_ht_cap ht_capa_mask;
};
/**
@@ -1630,9 +1692,9 @@ struct cfg80211_ibss_params {
* @key: WEP key for shared key authentication
* @flags: See &enum cfg80211_assoc_req_flags
* @bg_scan_period: Background scan period in seconds
- * or -1 to indicate that default value is to be used.
+ * or -1 to indicate that default value is to be used.
* @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask
- * will be used in ht_capa. Un-supported values will be ignored.
+ * will be used in ht_capa. Un-supported values will be ignored.
* @ht_capa_mask: The bits of ht_capa which are to be used.
* @vht_capa: VHT Capability overrides
* @vht_capa_mask: The bits of vht_capa which are to be used.
@@ -1698,7 +1760,7 @@ struct cfg80211_pmksa {
};
/**
- * struct cfg80211_wowlan_trig_pkt_pattern - packet pattern
+ * struct cfg80211_pkt_pattern - packet pattern
* @mask: bitmask where to match pattern and where to ignore bytes,
* one bit per byte, in same format as nl80211
* @pattern: bytes to match where bitmask is 1
@@ -1708,7 +1770,7 @@ struct cfg80211_pmksa {
* Internal note: @mask and @pattern are allocated in one chunk of
* memory, free @mask only!
*/
-struct cfg80211_wowlan_trig_pkt_pattern {
+struct cfg80211_pkt_pattern {
u8 *mask, *pattern;
int pattern_len;
int pkt_offset;
@@ -1770,12 +1832,41 @@ struct cfg80211_wowlan {
bool any, disconnect, magic_pkt, gtk_rekey_failure,
eap_identity_req, four_way_handshake,
rfkill_release;
- struct cfg80211_wowlan_trig_pkt_pattern *patterns;
+ struct cfg80211_pkt_pattern *patterns;
struct cfg80211_wowlan_tcp *tcp;
int n_patterns;
};
/**
+ * struct cfg80211_coalesce_rules - Coalesce rule parameters
+ *
+ * This structure defines a coalesce rule for the device.
+ * @delay: maximum coalescing delay in msecs.
+ * @condition: condition for packet coalescence;
+ * see &enum nl80211_coalesce_condition.
+ * @patterns: array of packet patterns
+ * @n_patterns: number of patterns
+ */
+struct cfg80211_coalesce_rules {
+ int delay;
+ enum nl80211_coalesce_condition condition;
+ struct cfg80211_pkt_pattern *patterns;
+ int n_patterns;
+};
+
+/**
+ * struct cfg80211_coalesce - Packet coalescing settings
+ *
+ * This structure defines coalescing settings.
+ * @rules: array of coalesce rules
+ * @n_rules: number of rules
+ */
+struct cfg80211_coalesce {
+ struct cfg80211_coalesce_rules *rules;
+ int n_rules;
+};
+
+/**
* struct cfg80211_wowlan_wakeup - wakeup report
* @disconnect: woke up by getting disconnected
* @magic_pkt: woke up by receiving magic packet
@@ -1990,7 +2081,7 @@ struct cfg80211_update_ft_ies_params {
* @mgmt_tx_cancel_wait: Cancel the wait time from transmitting a management
* frame on another channel
*
- * @testmode_cmd: run a test mode command
+ * @testmode_cmd: run a test mode command; @wdev may be %NULL
* @testmode_dump: Implement a test mode dump. The cb->args[2] and up may be
* used by the function, but 0 and 1 must not be touched. Additionally,
* return error codes other than -ENOBUFS and -ENOENT will terminate the
@@ -2071,6 +2162,9 @@ struct cfg80211_update_ft_ies_params {
* driver can take the most appropriate actions.
* @crit_proto_stop: Indicates critical protocol no longer needs increased link
* reliability. This operation can not fail.
+ * @set_coalesce: Set coalesce parameters.
+ *
+ * @channel_switch: initiate channel-switch procedure (with CSA)
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -2196,7 +2290,8 @@ struct cfg80211_ops {
void (*rfkill_poll)(struct wiphy *wiphy);
#ifdef CONFIG_NL80211_TESTMODE
- int (*testmode_cmd)(struct wiphy *wiphy, void *data, int len);
+ int (*testmode_cmd)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ void *data, int len);
int (*testmode_dump)(struct wiphy *wiphy, struct sk_buff *skb,
struct netlink_callback *cb,
void *data, int len);
@@ -2306,6 +2401,12 @@ struct cfg80211_ops {
u16 duration);
void (*crit_proto_stop)(struct wiphy *wiphy,
struct wireless_dev *wdev);
+ int (*set_coalesce)(struct wiphy *wiphy,
+ struct cfg80211_coalesce *coalesce);
+
+ int (*channel_switch)(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_csa_settings *params);
};
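A minimal sketch (not part of the patch) of a driver wiring up the two new callbacks; the my_*() stubs are hypothetical and simply decline the operations, and the assumption that a NULL @coalesce asks the driver to clear installed rules follows the nl80211 plumbing in this series:
static int my_set_coalesce(struct wiphy *wiphy,
			   struct cfg80211_coalesce *coalesce)
{
	/* NULL @coalesce is assumed to mean "clear any installed rules" */
	return -EOPNOTSUPP;
}

static int my_channel_switch(struct wiphy *wiphy, struct net_device *dev,
			     struct cfg80211_csa_settings *params)
{
	return -EOPNOTSUPP;
}

static const struct cfg80211_ops my_cfg80211_ops = {
	.set_coalesce	= my_set_coalesce,
	.channel_switch	= my_channel_switch,
};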
/*
@@ -2371,6 +2472,8 @@ struct cfg80211_ops {
* @WIPHY_FLAG_OFFCHAN_TX: Device supports direct off-channel TX.
* @WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL: Device supports remain-on-channel call.
* @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels.
+ * @WIPHY_FLAG_HAS_CHANNEL_SWITCH: Device supports channel switch in
+ * beaconing mode (AP, IBSS, Mesh, ...).
*/
enum wiphy_flags {
WIPHY_FLAG_CUSTOM_REGULATORY = BIT(0),
@@ -2395,6 +2498,7 @@ enum wiphy_flags {
WIPHY_FLAG_OFFCHAN_TX = BIT(20),
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL = BIT(21),
WIPHY_FLAG_SUPPORTS_5_10_MHZ = BIT(22),
+ WIPHY_FLAG_HAS_CHANNEL_SWITCH = BIT(23),
};
/**
@@ -2532,6 +2636,25 @@ struct wiphy_wowlan_support {
};
/**
+ * struct wiphy_coalesce_support - coalesce support data
+ * @n_rules: maximum number of coalesce rules
+ * @max_delay: maximum supported coalescing delay in msecs
+ * @n_patterns: number of supported patterns in a rule
+ * (see nl80211.h for the pattern definition)
+ * @pattern_max_len: maximum length of each pattern
+ * @pattern_min_len: minimum length of each pattern
+ * @max_pkt_offset: maximum Rx packet offset
+ */
+struct wiphy_coalesce_support {
+ int n_rules;
+ int max_delay;
+ int n_patterns;
+ int pattern_max_len;
+ int pattern_min_len;
+ int max_pkt_offset;
+};
+
+/**
* struct wiphy - wireless hardware description
* @reg_notifier: the driver's regulatory notification callback,
* note that if your driver uses wiphy_apply_custom_regulatory()
@@ -2641,6 +2764,7 @@ struct wiphy_wowlan_support {
* 802.11-2012 8.4.2.29 for the defined fields.
* @extended_capabilities_mask: mask of the valid values
* @extended_capabilities_len: length of the extended capabilities
+ * @coalesce: packet coalescing support information
*/
struct wiphy {
/* assign these fields before you register the wiphy */
@@ -2750,6 +2874,8 @@ struct wiphy {
const struct iw_handler_def *wext;
#endif
+ const struct wiphy_coalesce_support *coalesce;
+
char priv[0] __aligned(NETDEV_ALIGN);
};
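Putting the new pieces together: a driver advertises its limits through &struct wiphy_coalesce_support and accepts rules via the new @set_coalesce op. A minimal sketch with made-up limits (hypothetical driver; the NULL-clears convention is an assumption of this sketch):

static const struct wiphy_coalesce_support mydrv_coalesce = {
	.n_rules	 = 2,
	.max_delay	 = 500,		/* msecs */
	.n_patterns	 = 4,
	.pattern_min_len = 1,
	.pattern_max_len = 148,
	.max_pkt_offset	 = 100,
};

static int mydrv_set_coalesce(struct wiphy *wiphy,
			      struct cfg80211_coalesce *coalesce)
{
	if (!coalesce)		/* assumed: NULL clears the configuration */
		return 0;
	/* program coalesce->rules[0..coalesce->n_rules - 1] into firmware */
	return 0;
}

/* at setup time: wiphy->coalesce = &mydrv_coalesce; */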
@@ -2841,7 +2967,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv);
*
* Return: A non-negative wiphy index or a negative error code.
*/
-extern int wiphy_register(struct wiphy *wiphy);
+int wiphy_register(struct wiphy *wiphy);
/**
* wiphy_unregister - deregister a wiphy from cfg80211
@@ -2852,14 +2978,14 @@ extern int wiphy_register(struct wiphy *wiphy);
* pointer, but the call may sleep to wait for an outstanding
* request that is being handled.
*/
-extern void wiphy_unregister(struct wiphy *wiphy);
+void wiphy_unregister(struct wiphy *wiphy);
/**
* wiphy_free - free wiphy
*
* @wiphy: The wiphy to free
*/
-extern void wiphy_free(struct wiphy *wiphy);
+void wiphy_free(struct wiphy *wiphy);
/* internal structs */
struct cfg80211_conn;
@@ -3014,14 +3140,14 @@ static inline void *wdev_priv(struct wireless_dev *wdev)
* @band: band, necessary due to channel number overlap
* Return: The corresponding frequency (in MHz), or 0 if the conversion failed.
*/
-extern int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band);
+int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band);
/**
* ieee80211_frequency_to_channel - convert frequency to channel number
* @freq: center frequency
* Return: The corresponding channel, or 0 if the conversion failed.
*/
-extern int ieee80211_frequency_to_channel(int freq);
+int ieee80211_frequency_to_channel(int freq);
/*
* Name indirection necessary because the ieee80211 code also has
@@ -3030,8 +3156,8 @@ extern int ieee80211_frequency_to_channel(int freq);
* to include both header files you'll (rightfully!) get a symbol
* clash.
*/
-extern struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy,
- int freq);
+struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy,
+ int freq);
/**
* ieee80211_get_channel - get channel struct from wiphy for specified frequency
* @wiphy: the struct wiphy to get the channel for
@@ -3063,11 +3189,13 @@ ieee80211_get_response_rate(struct ieee80211_supported_band *sband,
/**
* ieee80211_mandatory_rates - get mandatory rates for a given band
* @sband: the band to look for rates in
+ * @scan_width: width of the control channel
*
* This function returns a bitmap of the mandatory rates for the given
* band, bits are set according to the rate position in the bitrates array.
*/
-u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband);
+u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband,
+ enum nl80211_bss_scan_width scan_width);
/*
* Radiotap parsing functions -- for controlled injection support
@@ -3141,13 +3269,14 @@ struct ieee80211_radiotap_iterator {
int _reset_on_ext;
};
-extern int ieee80211_radiotap_iterator_init(
- struct ieee80211_radiotap_iterator *iterator,
- struct ieee80211_radiotap_header *radiotap_header,
- int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns);
+int
+ieee80211_radiotap_iterator_init(struct ieee80211_radiotap_iterator *iterator,
+ struct ieee80211_radiotap_header *radiotap_header,
+ int max_length,
+ const struct ieee80211_radiotap_vendor_namespaces *vns);
-extern int ieee80211_radiotap_iterator_next(
- struct ieee80211_radiotap_iterator *iterator);
+int
+ieee80211_radiotap_iterator_next(struct ieee80211_radiotap_iterator *iterator);
extern const unsigned char rfc1042_header[6];
@@ -3307,7 +3436,7 @@ const u8 *cfg80211_find_vendor_ie(unsigned int oui, u8 oui_type,
*
* Return: 0 on success. -ENOMEM.
*/
-extern int regulatory_hint(struct wiphy *wiphy, const char *alpha2);
+int regulatory_hint(struct wiphy *wiphy, const char *alpha2);
/**
* wiphy_apply_custom_regulatory - apply a custom driver regulatory domain
@@ -3321,9 +3450,8 @@ extern int regulatory_hint(struct wiphy *wiphy, const char *alpha2);
* default channel settings will be disregarded. If no rule is found for a
* channel on the regulatory domain the channel will be disabled.
*/
-extern void wiphy_apply_custom_regulatory(
- struct wiphy *wiphy,
- const struct ieee80211_regdomain *regd);
+void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
+ const struct ieee80211_regdomain *regd);
/**
* freq_reg_info - get regulatory information for the given frequency
@@ -3379,10 +3507,11 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy);
void cfg80211_sched_scan_stopped(struct wiphy *wiphy);
/**
- * cfg80211_inform_bss_frame - inform cfg80211 of a received BSS frame
+ * cfg80211_inform_bss_width_frame - inform cfg80211 of a received BSS frame
*
* @wiphy: the wiphy reporting the BSS
* @channel: The channel the frame was received on
+ * @scan_width: width of the control channel
* @mgmt: the management frame (probe response or beacon)
* @len: length of the management frame
* @signal: the signal strength, type depends on the wiphy's signal_type
@@ -3395,16 +3524,29 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy);
* Or %NULL on error.
*/
struct cfg80211_bss * __must_check
+cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
+ struct ieee80211_channel *channel,
+ enum nl80211_bss_scan_width scan_width,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ s32 signal, gfp_t gfp);
+
+static inline struct cfg80211_bss * __must_check
cfg80211_inform_bss_frame(struct wiphy *wiphy,
struct ieee80211_channel *channel,
struct ieee80211_mgmt *mgmt, size_t len,
- s32 signal, gfp_t gfp);
+ s32 signal, gfp_t gfp)
+{
+ return cfg80211_inform_bss_width_frame(wiphy, channel,
+ NL80211_BSS_CHAN_WIDTH_20,
+ mgmt, len, signal, gfp);
+}
/**
* cfg80211_inform_bss - inform cfg80211 of a new BSS
*
* @wiphy: the wiphy reporting the BSS
* @channel: The channel the frame was received on
+ * @scan_width: width of the control channel
* @bssid: the BSSID of the BSS
* @tsf: the TSF sent by the peer in the beacon/probe response (or 0)
* @capability: the capability field sent by the peer
@@ -3421,11 +3563,26 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
* Or %NULL on error.
*/
struct cfg80211_bss * __must_check
+cfg80211_inform_bss_width(struct wiphy *wiphy,
+ struct ieee80211_channel *channel,
+ enum nl80211_bss_scan_width scan_width,
+ const u8 *bssid, u64 tsf, u16 capability,
+ u16 beacon_interval, const u8 *ie, size_t ielen,
+ s32 signal, gfp_t gfp);
+
+static inline struct cfg80211_bss * __must_check
cfg80211_inform_bss(struct wiphy *wiphy,
struct ieee80211_channel *channel,
const u8 *bssid, u64 tsf, u16 capability,
u16 beacon_interval, const u8 *ie, size_t ielen,
- s32 signal, gfp_t gfp);
+ s32 signal, gfp_t gfp)
+{
+ return cfg80211_inform_bss_width(wiphy, channel,
+ NL80211_BSS_CHAN_WIDTH_20,
+ bssid, tsf, capability,
+ beacon_interval, ie, ielen, signal,
+ gfp);
+}
struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
struct ieee80211_channel *channel,
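Existing callers keep working through the inline wrappers above, which pin the width to NL80211_BSS_CHAN_WIDTH_20; a driver that actually scanned a narrow channel reports it directly, roughly like this (sketch; rx_channel, mgmt, len and signal_mbm are assumed locals):

struct cfg80211_bss *bss;

bss = cfg80211_inform_bss_width_frame(wiphy, rx_channel,
				      NL80211_BSS_CHAN_WIDTH_10,
				      mgmt, len, signal_mbm, GFP_KERNEL);
if (bss)
	cfg80211_put_bss(wiphy, bss);	/* release the returned reference */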
@@ -3471,6 +3628,19 @@ void cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *bss);
*/
void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *bss);
+static inline enum nl80211_bss_scan_width
+cfg80211_chandef_to_scan_width(const struct cfg80211_chan_def *chandef)
+{
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_5:
+ return NL80211_BSS_CHAN_WIDTH_5;
+ case NL80211_CHAN_WIDTH_10:
+ return NL80211_BSS_CHAN_WIDTH_10;
+ default:
+ return NL80211_BSS_CHAN_WIDTH_20;
+ }
+}
+
/**
* cfg80211_rx_mlme_mgmt - notification of processed MLME management frame
* @dev: network device
@@ -3886,6 +4056,7 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
* @sig_dbm: signal strength in mBm, or 0 if unknown
* @buf: Management frame (header + body)
* @len: length of the frame data
+ * @flags: flags, as defined in enum nl80211_rxmgmt_flags
* @gfp: context flags
*
* This function is called whenever an Action frame is received for a station
@@ -3897,7 +4068,7 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
* driver is responsible for rejecting the frame.
*/
bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_dbm,
- const u8 *buf, size_t len, gfp_t gfp);
+ const u8 *buf, size_t len, u32 flags, gfp_t gfp);
/**
* cfg80211_mgmt_tx_status - notification of TX status for management frame
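Callers of cfg80211_rx_mgmt() gain a flags argument; a driver with nothing special to report passes 0 (sketch):

bool handled;

handled = cfg80211_rx_mgmt(wdev, freq, sig_dbm, buf, len, 0, GFP_ATOMIC);
/* if !handled, no listener matched and the driver is responsible
 * for rejecting the frame, as documented above */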
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 600d1d705bb8..8f59ca50477c 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -107,11 +107,11 @@ static inline void csum_replace2(__sum16 *sum, __be16 from, __be16 to)
}
struct sk_buff;
-extern void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
- __be32 from, __be32 to, int pseudohdr);
-extern void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
- const __be32 *from, const __be32 *to,
- int pseudohdr);
+void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
+ __be32 from, __be32 to, int pseudohdr);
+void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
+ const __be32 *from, const __be32 *to,
+ int pseudohdr);
static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
__be16 from, __be16 to,
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
index 52adaa75dac9..33d03b648646 100644
--- a/include/net/cls_cgroup.h
+++ b/include/net/cls_cgroup.h
@@ -24,7 +24,7 @@ struct cgroup_cls_state
u32 classid;
};
-extern void sock_update_classid(struct sock *sk);
+void sock_update_classid(struct sock *sk);
#if IS_BUILTIN(CONFIG_NET_CLS_CGROUP)
static inline u32 task_cls_classid(struct task_struct *p)
diff --git a/include/net/dst.h b/include/net/dst.h
index 1f8fd109e225..3bc4865f8267 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -311,11 +311,13 @@ static inline void skb_dst_force(struct sk_buff *skb)
* __skb_tunnel_rx - prepare skb for rx reinsert
* @skb: buffer
* @dev: tunnel device
+ * @net: netns for packet i/o
*
* After decapsulation, packet is going to re-enter (netif_rx()) our stack,
* so make some cleanups. (no accounting done)
*/
-static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
+static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
+ struct net *net)
{
skb->dev = dev;
@@ -327,8 +329,7 @@ static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
if (!skb->l4_rxhash)
skb->rxhash = 0;
skb_set_queue_mapping(skb, 0);
- skb_dst_drop(skb);
- nf_reset(skb);
+ skb_scrub_packet(skb, !net_eq(net, dev_net(dev)));
}
/**
@@ -340,12 +341,13 @@ static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
* so make some cleanups, and perform accounting.
* Note: this accounting is not SMP safe.
*/
-static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
+static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
+ struct net *net)
{
/* TODO : stats should be SMP safe */
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
- __skb_tunnel_rx(skb, dev);
+ __skb_tunnel_rx(skb, dev, net);
}
/* Children define the path of the packet through the
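The new @net argument feeds skb_scrub_packet(), which scrubs more aggressively when the packet crosses a netns boundary. A tunnel receive path now reads roughly (sketch; tunnel->net is assumed to have been saved at device creation, as the ip6_tnl change below does):

skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
netif_rx(skb);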
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index e361f4882426..4b2b557fb0e8 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -10,21 +10,25 @@
struct fib_rule {
struct list_head list;
- atomic_t refcnt;
int iifindex;
int oifindex;
u32 mark;
u32 mark_mask;
- u32 pref;
u32 flags;
u32 table;
u8 action;
+ /* 3 bytes hole, try to use */
u32 target;
struct fib_rule __rcu *ctarget;
+ struct net *fr_net;
+
+ atomic_t refcnt;
+ u32 pref;
+ int suppress_ifgroup;
+ int suppress_prefixlen;
char iifname[IFNAMSIZ];
char oifname[IFNAMSIZ];
struct rcu_head rcu;
- struct net * fr_net;
};
struct fib_lookup_arg {
@@ -46,6 +50,8 @@ struct fib_rules_ops {
int (*action)(struct fib_rule *,
struct flowi *, int,
struct fib_lookup_arg *);
+ bool (*suppress)(struct fib_rule *,
+ struct fib_lookup_arg *);
int (*match)(struct fib_rule *,
struct flowi *, int);
int (*configure)(struct fib_rule *,
@@ -80,6 +86,8 @@ struct fib_rules_ops {
[FRA_FWMARK] = { .type = NLA_U32 }, \
[FRA_FWMASK] = { .type = NLA_U32 }, \
[FRA_TABLE] = { .type = NLA_U32 }, \
+ [FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, \
+ [FRA_SUPPRESS_IFGROUP] = { .type = NLA_U32 }, \
[FRA_GOTO] = { .type = NLA_U32 }
static inline void fib_rule_get(struct fib_rule *rule)
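A ->suppress handler returns true to discard an otherwise-matching result so the lookup moves on to later rules. A sketch for a hypothetical family (myfam_result_prefixlen() is an assumed helper), mirroring the prefix-length semantics the new attributes suggest:

static bool myfam_rule_suppress(struct fib_rule *rule,
				struct fib_lookup_arg *arg)
{
	/* suppress_prefixlen is assumed to be -1 when not configured */
	if (rule->suppress_prefixlen < 0)
		return false;
	return myfam_result_prefixlen(arg) <= rule->suppress_prefixlen;
}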
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index c6d07cb074bc..8b5b71433297 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -230,6 +230,10 @@ enum ieee80211_radiotap_type {
#define IEEE80211_CHAN_PASSIVE 0x0200 /* Only passive scan allowed */
#define IEEE80211_CHAN_DYN 0x0400 /* Dynamic CCK-OFDM channel */
#define IEEE80211_CHAN_GFSK 0x0800 /* GFSK channel (FHSS PHY) */
+#define IEEE80211_CHAN_GSM 0x1000 /* GSM (900 MHz) */
+#define IEEE80211_CHAN_STURBO 0x2000 /* Static Turbo */
+#define IEEE80211_CHAN_HALF 0x4000 /* Half channel (10 MHz wide) */
+#define IEEE80211_CHAN_QUARTER 0x8000 /* Quarter channel (5 MHz wide) */
/* For IEEE80211_RADIOTAP_FLAGS */
#define IEEE80211_RADIOTAP_F_CFP 0x01 /* sent/received
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 736b5fb95474..02ef7727bb55 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -171,12 +171,17 @@ struct inet6_dev {
struct ifmcaddr6 *mc_list;
struct ifmcaddr6 *mc_tomb;
spinlock_t mc_lock;
- unsigned char mc_qrv;
+
+ unsigned char mc_qrv; /* Query Robustness Variable */
unsigned char mc_gq_running;
unsigned char mc_ifc_count;
unsigned char mc_dad_count;
- unsigned long mc_v1_seen;
+
+ unsigned long mc_v1_seen; /* Max time we stay in MLDv1 mode */
+ unsigned long mc_qi; /* Query Interval */
+ unsigned long mc_qri; /* Query Response Interval */
unsigned long mc_maxdelay;
+
struct timer_list mc_gq_timer; /* general query timer */
struct timer_list mc_ifc_timer; /* interface change timer */
struct timer_list mc_dad_timer; /* dad complete mc timer */
diff --git a/include/net/ip.h b/include/net/ip.h
index a68f838a132c..48f55979d842 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -194,7 +194,17 @@ static inline u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp
}
#endif
extern int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align);
-extern void snmp_mib_free(void __percpu *ptr[2]);
+
+static inline void snmp_mib_free(void __percpu *ptr[SNMP_ARRAY_SZ])
+{
+ int i;
+
+ BUG_ON(ptr == NULL);
+ for (i = 0; i < SNMP_ARRAY_SZ; i++) {
+ free_percpu(ptr[i]);
+ ptr[i] = NULL;
+ }
+}
extern struct local_ports {
seqlock_t lock;
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index f667248202b6..f525e7038cca 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -112,8 +112,6 @@ extern struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
const struct in6_addr *addr,
bool anycast);
-extern int ip6_dst_hoplimit(struct dst_entry *dst);
-
/*
* support functions for ND
*
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 4da5de10d1d4..6d1549c4893c 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -36,6 +36,7 @@ struct __ip6_tnl_parm {
struct ip6_tnl {
struct ip6_tnl __rcu *next; /* next tunnel in list */
struct net_device *dev; /* virtual device associated with tunnel */
+ struct net *net; /* netns for packet i/o */
struct __ip6_tnl_parm parms; /* tunnel configuration parameters */
struct flowi fl; /* flowi template for xmit */
struct dst_entry *dst_cache; /* cached dst */
@@ -74,7 +75,6 @@ static inline void ip6tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
struct net_device_stats *stats = &dev->stats;
int pkt_len, err;
- nf_reset(skb);
pkt_len = skb->len;
err = ip6_local_out(skb);
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index a354db5b7662..a0a4a100f5c9 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -86,12 +86,12 @@ struct tnl_ptk_info {
#define PACKET_RCVD 0
#define PACKET_REJECT 1
-#define IP_TNL_HASH_BITS 10
+#define IP_TNL_HASH_BITS 7
#define IP_TNL_HASH_SIZE (1 << IP_TNL_HASH_BITS)
struct ip_tunnel_net {
- struct hlist_head *tunnels;
struct net_device *fb_tunnel_dev;
+ struct hlist_head tunnels[IP_TNL_HASH_SIZE];
};
#ifdef CONFIG_INET
@@ -102,7 +102,7 @@ void ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
struct rtnl_link_ops *ops, char *devname);
-void ip_tunnel_delete_net(struct ip_tunnel_net *itn);
+void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops);
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, const u8 protocol);
@@ -146,10 +146,9 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
}
int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
-int iptunnel_xmit(struct net *net, struct rtable *rt,
- struct sk_buff *skb,
+int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 proto,
- __u8 tos, __u8 ttl, __be16 df);
+ __u8 tos, __u8 ttl, __be16 df, bool xnet);
static inline void iptunnel_xmit_stats(int err,
struct net_device_stats *err_stats,
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 5fe564985171..bbf1c8fb8511 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -41,6 +41,7 @@
#define NEXTHDR_ICMP 58 /* ICMP for IPv6. */
#define NEXTHDR_NONE 59 /* No next header */
#define NEXTHDR_DEST 60 /* Destination options header. */
+#define NEXTHDR_SCTP 132 /* SCTP message. */
#define NEXTHDR_MOBILITY 135 /* Mobility header. */
#define NEXTHDR_MAX 255
@@ -657,6 +658,8 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
extern void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
+extern int ip6_dst_hoplimit(struct dst_entry *dst);
+
/*
* Header manipulation
*/
diff --git a/include/net/irda/irlan_common.h b/include/net/irda/irlan_common.h
index 0af8b8dfbc22..550c2d6ec7ff 100644
--- a/include/net/irda/irlan_common.h
+++ b/include/net/irda/irlan_common.h
@@ -32,6 +32,7 @@
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
+#include <linux/if_ether.h>
#include <net/irda/irttp.h>
@@ -161,7 +162,7 @@ struct irlan_provider_cb {
int access_type; /* Access type */
__u16 send_arb_val;
- __u8 mac_address[6]; /* Generated MAC address for peer device */
+ __u8 mac_address[ETH_ALEN]; /* Generated MAC address for peer device */
};
/*
diff --git a/include/net/llc_if.h b/include/net/llc_if.h
index b595a004d31b..f0cb909b60eb 100644
--- a/include/net/llc_if.h
+++ b/include/net/llc_if.h
@@ -62,36 +62,6 @@
#define LLC_STATUS_CONFLICT 7 /* disconnect conn */
#define LLC_STATUS_RESET_DONE 8 /* */
-/**
- * llc_mac_null - determines if a address is a null mac address
- * @mac: Mac address to test if null.
- *
- * Determines if a given address is a null mac address. Returns 0 if the
- * address is not a null mac, 1 if the address is a null mac.
- */
-static inline int llc_mac_null(const u8 *mac)
-{
- return is_zero_ether_addr(mac);
-}
-
-static inline int llc_mac_multicast(const u8 *mac)
-{
- return is_multicast_ether_addr(mac);
-}
-/**
- * llc_mac_match - determines if two mac addresses are the same
- * @mac1: First mac address to compare.
- * @mac2: Second mac address to compare.
- *
- * Determines if two given mac address are the same. Returns 0 if there
- * is not a complete match up to len, 1 if a complete match up to len is
- * found.
- */
-static inline int llc_mac_match(const u8 *mac1, const u8 *mac2)
-{
- return !compare_ether_addr(mac1, mac2);
-}
-
extern int llc_establish_connection(struct sock *sk, u8 *lmac,
u8 *dmac, u8 dsap);
extern int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 551ba6a6a073..cc6035f1a2f1 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -152,11 +152,14 @@ struct ieee80211_low_level_stats {
* @IEEE80211_CHANCTX_CHANGE_WIDTH: The channel width changed
* @IEEE80211_CHANCTX_CHANGE_RX_CHAINS: The number of RX chains changed
* @IEEE80211_CHANCTX_CHANGE_RADAR: radar detection flag changed
+ * @IEEE80211_CHANCTX_CHANGE_CHANNEL: switched to another operating channel,
+ * this is used only for channel switching with CSA
*/
enum ieee80211_chanctx_change {
IEEE80211_CHANCTX_CHANGE_WIDTH = BIT(0),
IEEE80211_CHANCTX_CHANGE_RX_CHAINS = BIT(1),
IEEE80211_CHANCTX_CHANGE_RADAR = BIT(2),
+ IEEE80211_CHANCTX_CHANGE_CHANNEL = BIT(3),
};
/**
@@ -372,7 +375,7 @@ struct ieee80211_bss_conf {
};
/**
- * enum mac80211_tx_control_flags - flags to describe transmission information/status
+ * enum mac80211_tx_info_flags - flags to describe transmission information/status
*
* These flags are used with the @flags member of &ieee80211_tx_info.
*
@@ -468,7 +471,7 @@ struct ieee80211_bss_conf {
* Note: If you have to add new flags to the enumeration, then don't
* forget to update %IEEE80211_TX_TEMPORARY_FLAGS when necessary.
*/
-enum mac80211_tx_control_flags {
+enum mac80211_tx_info_flags {
IEEE80211_TX_CTL_REQ_TX_STATUS = BIT(0),
IEEE80211_TX_CTL_ASSIGN_SEQ = BIT(1),
IEEE80211_TX_CTL_NO_ACK = BIT(2),
@@ -504,6 +507,18 @@ enum mac80211_tx_control_flags {
#define IEEE80211_TX_CTL_STBC_SHIFT 23
+/**
+ * enum mac80211_tx_control_flags - flags to describe transmit control
+ *
+ * @IEEE80211_TX_CTRL_PORT_CTRL_PROTO: this frame is a port control
+ * protocol frame (e.g. EAP)
+ *
+ * These flags are used in tx_info->control.flags.
+ */
+enum mac80211_tx_control_flags {
+ IEEE80211_TX_CTRL_PORT_CTRL_PROTO = BIT(0),
+};
+
/*
* This definition is used as a mask to clear all temporary flags, which are
* set by the tx handlers for each transmission attempt by the mac80211 stack.
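The new member lives in the control part of the TX info, so marking e.g. an EAPOL frame looks like (sketch):

struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;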
@@ -677,7 +692,8 @@ struct ieee80211_tx_info {
/* NB: vif can be NULL for injected frames */
struct ieee80211_vif *vif;
struct ieee80211_key_conf *hw_key;
- /* 8 bytes free */
+ u32 flags;
+ /* 4 bytes free */
} control;
struct {
struct ieee80211_tx_rate rates[IEEE80211_TX_MAX_RATES];
@@ -811,6 +827,8 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC
* is stored in the @ampdu_delimiter_crc field)
* @RX_FLAG_STBC_MASK: STBC 2 bit bitmask. 1 - Nss=1, 2 - Nss=2, 3 - Nss=3
+ * @RX_FLAG_10MHZ: 10 MHz (half channel) was used
+ * @RX_FLAG_5MHZ: 5 MHz (quarter channel) was used
*/
enum mac80211_rx_flags {
RX_FLAG_MMIC_ERROR = BIT(0),
@@ -839,6 +857,8 @@ enum mac80211_rx_flags {
RX_FLAG_80P80MHZ = BIT(24),
RX_FLAG_160MHZ = BIT(25),
RX_FLAG_STBC_MASK = BIT(26) | BIT(27),
+ RX_FLAG_10MHZ = BIT(28),
+ RX_FLAG_5MHZ = BIT(29),
};
#define RX_FLAG_STBC_SHIFT 26
@@ -1004,11 +1024,11 @@ enum ieee80211_smps_mode {
* @radar_enabled: whether radar detection is enabled
*
* @long_frame_max_tx_count: Maximum number of transmissions for a "long" frame
- * (a frame not RTS protected), called "dot11LongRetryLimit" in 802.11,
- * but actually means the number of transmissions not the number of retries
+ * (a frame not RTS protected), called "dot11LongRetryLimit" in 802.11,
+ * but actually means the number of transmissions not the number of retries
* @short_frame_max_tx_count: Maximum number of transmissions for a "short"
- * frame, called "dot11ShortRetryLimit" in 802.11, but actually means the
- * number of transmissions not the number of retries
+ * frame, called "dot11ShortRetryLimit" in 802.11, but actually means the
+ * number of transmissions not the number of retries
*
* @smps_mode: spatial multiplexing powersave mode; note that
* %IEEE80211_SMPS_STATIC is used when the device is not
@@ -1080,6 +1100,7 @@ enum ieee80211_vif_flags {
* @addr: address of this interface
* @p2p: indicates whether this AP or STA interface is a p2p
* interface, i.e. a GO or p2p-sta respectively
+ * @csa_active: marks whether a channel switch is going on
* @driver_flags: flags/capabilities the driver has for this interface,
* these need to be set (or cleared) when the interface is added
* or, if supported by the driver, the interface type is changed
@@ -1092,7 +1113,7 @@ enum ieee80211_vif_flags {
* be off when it is %NULL there can still be races and packets could be
* processed after it switches back to %NULL.
* @debugfs_dir: debugfs dentry, can be used by drivers to create own per
- * interface debug files. Note that it will be NULL for the virtual
+ * interface debug files. Note that it will be NULL for the virtual
* monitor interface (if that is requested.)
* @drv_priv: data area for driver use, will always be aligned to
* sizeof(void *).
@@ -1102,6 +1123,7 @@ struct ieee80211_vif {
struct ieee80211_bss_conf bss_conf;
u8 addr[ETH_ALEN];
bool p2p;
+ bool csa_active;
u8 cab_queue;
u8 hw_queue[IEEE80211_NUM_ACS];
@@ -1425,10 +1447,10 @@ struct ieee80211_tx_control {
* the stack.
*
* @IEEE80211_HW_CONNECTION_MONITOR:
- * The hardware performs its own connection monitoring, including
- * periodic keep-alives to the AP and probing the AP on beacon loss.
- * When this flag is set, signaling beacon-loss will cause an immediate
- * change to disassociated state.
+ * The hardware performs its own connection monitoring, including
+ * periodic keep-alives to the AP and probing the AP on beacon loss.
+ * When this flag is set, signaling beacon-loss will cause an immediate
+ * change to disassociated state.
*
* @IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC:
* This device needs to get data from beacon before association (i.e.
@@ -1527,10 +1549,10 @@ enum ieee80211_hw_flags {
* @channel_change_time: time (in microseconds) it takes to change channels.
*
* @max_signal: Maximum value for signal (rssi) in RX information, used
- * only when @IEEE80211_HW_SIGNAL_UNSPEC or @IEEE80211_HW_SIGNAL_DB
+ * only when @IEEE80211_HW_SIGNAL_UNSPEC or @IEEE80211_HW_SIGNAL_DB
*
* @max_listen_interval: max listen interval in units of beacon interval
- * that HW supports
+ * that HW supports
*
* @queues: number of available hardware transmit queues for
* data packets. WMM/QoS requires at least four, these
@@ -2444,7 +2466,7 @@ enum ieee80211_roc_type {
* The callback can sleep.
*
* @set_tsf: Set the TSF timer to the specified value in the firmware/hardware.
- * Currently, this is only used for IBSS mode debugging. Is not a
+ * Currently, this is only used for IBSS mode debugging. Is not a
* required function.
* The callback can sleep.
*
@@ -2495,8 +2517,8 @@ enum ieee80211_roc_type {
* in IEEE 802.11-2007 section 17.3.8.6 and modify ACK timeout
* accordingly. This callback is not required and may sleep.
*
- * @testmode_cmd: Implement a cfg80211 test mode command.
- * The callback can sleep.
+ * @testmode_cmd: Implement a cfg80211 test mode command. The passed @vif may
+ * be %NULL. The callback can sleep.
* @testmode_dump: Implement a cfg80211 test mode dump. The callback can sleep.
*
* @flush: Flush all pending frames from the hardware queue, making sure
@@ -2634,6 +2656,16 @@ enum ieee80211_roc_type {
* @ipv6_addr_change: IPv6 address assignment on the given interface changed.
* Currently, this is only called for managed or P2P client interfaces.
* This callback is optional; it must not sleep.
+ *
+ * @channel_switch_beacon: Starts a channel switch to a new channel.
+ * Beacons are modified to include CSA or ECSA IEs before calling this
+ * function. The corresponding count fields in these IEs must be
+ * decremented, and when they reach zero the driver must call
+ * ieee80211_csa_finish(). Drivers which use ieee80211_beacon_get()
+ * get the csa counter decremented by mac80211, but must check if it is
+ * zero using ieee80211_csa_is_complete() after the beacon has been
+ * transmitted and then call ieee80211_csa_finish().
+ *
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
@@ -2747,7 +2779,8 @@ struct ieee80211_ops {
void (*rfkill_poll)(struct ieee80211_hw *hw);
void (*set_coverage_class)(struct ieee80211_hw *hw, u8 coverage_class);
#ifdef CONFIG_NL80211_TESTMODE
- int (*testmode_cmd)(struct ieee80211_hw *hw, void *data, int len);
+ int (*testmode_cmd)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len);
int (*testmode_dump)(struct ieee80211_hw *hw, struct sk_buff *skb,
struct netlink_callback *cb,
void *data, int len);
@@ -2821,6 +2854,9 @@ struct ieee80211_ops {
struct ieee80211_vif *vif,
struct inet6_dev *idev);
#endif
+ void (*channel_switch_beacon)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *chandef);
};
/**
@@ -2878,14 +2914,14 @@ enum ieee80211_tpt_led_trigger_flags {
};
#ifdef CONFIG_MAC80211_LEDS
-extern char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw);
-extern char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw);
-extern char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw);
-extern char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw);
-extern char *__ieee80211_create_tpt_led_trigger(
- struct ieee80211_hw *hw, unsigned int flags,
- const struct ieee80211_tpt_blink *blink_table,
- unsigned int blink_table_len);
+char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw);
+char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw);
+char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw);
+char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw);
+char *__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
+ unsigned int flags,
+ const struct ieee80211_tpt_blink *blink_table,
+ unsigned int blink_table_len);
#endif
/**
* ieee80211_get_tx_led_name - get name of TX LED
@@ -3316,6 +3352,25 @@ static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
}
/**
+ * ieee80211_csa_finish - notify mac80211 about channel switch
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * After a channel switch announcement has been scheduled and the counter in
+ * the announcement has hit zero, the driver must call this function to
+ * notify mac80211 that the channel can be changed.
+ */
+void ieee80211_csa_finish(struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_csa_is_complete - find out if counters reached zero
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * This function returns whether the channel switch counters reached zero.
+ */
+bool ieee80211_csa_is_complete(struct ieee80211_vif *vif);
+
+
+/**
* ieee80211_proberesp_get - retrieve a Probe Response template
* @hw: pointer obtained from ieee80211_alloc_hw().
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
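For drivers using ieee80211_beacon_get(), the contract from the channel_switch_beacon documentation reduces to a check after each beacon has gone out; a sketch (hypothetical driver callback):

static void mydrv_beacon_sent(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif)
{
	if (vif->csa_active && ieee80211_csa_is_complete(vif))
		ieee80211_csa_finish(vif);	/* counter reached zero */
}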
@@ -3634,6 +3689,89 @@ void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf,
int tid, struct ieee80211_key_seq *seq);
/**
+ * ieee80211_set_key_tx_seq - set key TX sequence counter
+ *
+ * @keyconf: the parameter passed with the set key
+ * @seq: new sequence data
+ *
+ * This function allows a driver to set the current TX IV/PNs for the
+ * given key. This is useful when resuming from WoWLAN sleep and the
+ * device may have transmitted frames using the PTK, e.g. replies to
+ * ARP requests.
+ *
+ * Note that this function may only be called when no TX processing
+ * can be done concurrently.
+ */
+void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf,
+ struct ieee80211_key_seq *seq);
+
+/**
+ * ieee80211_set_key_rx_seq - set key RX sequence counter
+ *
+ * @keyconf: the parameter passed with the set key
+ * @tid: The TID, or -1 for the management frame value (CCMP only);
+ * the value on TID 0 is also used for non-QoS frames. For
+ * CMAC, only TID 0 is valid.
+ * @seq: new sequence data
+ *
+ * This function allows a driver to set the current RX IV/PNs for the
+ * given key. This is useful when resuming from WoWLAN sleep and GTK
+ * rekey may have been done while suspended. It should not be called
+ * if IV checking is done by the device and not by mac80211.
+ *
+ * Note that this function may only be called when no RX processing
+ * can be done concurrently.
+ */
+void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf,
+ int tid, struct ieee80211_key_seq *seq);
+
+/**
+ * ieee80211_remove_key - remove the given key
+ * @keyconf: the parameter passed with the set key
+ *
+ * Remove the given key. If the key was uploaded to the hardware at the
+ * time this function is called, it is not deleted in the hardware but
+ * instead assumed to have been removed already.
+ *
+ * Note that due to locking considerations this function can (currently)
+ * only be called during key iteration (ieee80211_iter_keys().)
+ */
+void ieee80211_remove_key(struct ieee80211_key_conf *keyconf);
+
+/**
+ * ieee80211_gtk_rekey_add - add a GTK key from rekeying during WoWLAN
+ * @vif: the virtual interface to add the key on
+ * @keyconf: new key data
+ *
+ * When GTK rekeying was done while the system was suspended, one or more
+ * new keys will be available. These will be needed by mac80211 for proper
+ * RX processing, so this function allows setting them.
+ *
+ * The function returns the newly allocated key structure, which will
+ * have similar contents to the passed key configuration but point to
+ * mac80211-owned memory. In case of errors, the function returns an
+ * ERR_PTR(), use IS_ERR() etc.
+ *
+ * Note that this function assumes the key isn't added to hardware
+ * acceleration, so no TX will be done with the key. Since it's a GTK
+ * on managed (station) networks, this is true anyway. If the driver
+ * calls this function from the resume callback and subsequently uses
+ * the return code 1 to reconfigure the device, this key will be part
+ * of the reconfiguration.
+ *
+ * Note that the driver should also call ieee80211_set_key_rx_seq()
+ * for the new key for each TID to set up sequence counters properly.
+ *
+ * IMPORTANT: If this replaces a key that is present in the hardware,
+ * then it will attempt to remove it during this call. In many cases
+ * this isn't what you want, so call ieee80211_remove_key() first for
+ * the key that's being replaced.
+ */
+struct ieee80211_key_conf *
+ieee80211_gtk_rekey_add(struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf);
+
+/**
* ieee80211_gtk_rekey_notify - notify userspace supplicant of rekeying
* @vif: virtual interface the rekeying was done on
* @bssid: The BSSID of the AP, for checking association
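Taken together with ieee80211_set_key_rx_seq() above, a resume path would look roughly like this (sketch; fw_keyconf and fw_seq stand in for whatever the firmware reported):

struct ieee80211_key_conf *key;
int tid;

key = ieee80211_gtk_rekey_add(vif, &fw_keyconf);
if (!IS_ERR(key))
	for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++)
		ieee80211_set_key_rx_seq(key, tid, &fw_seq[tid]);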
@@ -4205,8 +4343,10 @@ struct rate_control_ops {
void *(*alloc_sta)(void *priv, struct ieee80211_sta *sta, gfp_t gfp);
void (*rate_init)(void *priv, struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *priv_sta);
void (*rate_update)(void *priv, struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *priv_sta,
u32 changed);
void (*free_sta)(void *priv, struct ieee80211_sta *sta,
diff --git a/include/net/mld.h b/include/net/mld.h
index 467143cd4e2f..faa1d161bf24 100644
--- a/include/net/mld.h
+++ b/include/net/mld.h
@@ -63,13 +63,48 @@ struct mld2_query {
#define mld2q_mrc mld2q_hdr.icmp6_maxdelay
#define mld2q_resv1 mld2q_hdr.icmp6_dataun.un_data16[1]
-/* Max Response Code */
-#define MLDV2_MASK(value, nb) ((nb)>=32 ? (value) : ((1<<(nb))-1) & (value))
-#define MLDV2_EXP(thresh, nbmant, nbexp, value) \
- ((value) < (thresh) ? (value) : \
- ((MLDV2_MASK(value, nbmant) | (1<<(nbmant))) << \
- (MLDV2_MASK((value) >> (nbmant), nbexp) + (nbexp))))
-
-#define MLDV2_MRC(value) MLDV2_EXP(0x8000, 12, 3, value)
+/* RFC3810, 5.1.3. Maximum Response Code:
+ *
+ * If Maximum Response Code >= 32768, Maximum Response Code represents a
+ * floating-point value as follows:
+ *
+ * 0 1 2 3 4 5 6 7 8 9 A B C D E F
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |1| exp | mant |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+#define MLDV2_MRC_EXP(value) (((value) >> 12) & 0x0007)
+#define MLDV2_MRC_MAN(value) ((value) & 0x0fff)
+
+/* RFC3810, 5.1.9. QQIC (Querier's Query Interval Code):
+ *
+ * If QQIC >= 128, QQIC represents a floating-point value as follows:
+ *
+ * 0 1 2 3 4 5 6 7
+ * +-+-+-+-+-+-+-+-+
+ * |1| exp | mant |
+ * +-+-+-+-+-+-+-+-+
+ */
+#define MLDV2_QQIC_EXP(value) (((value) >> 4) & 0x07)
+#define MLDV2_QQIC_MAN(value) ((value) & 0x0f)
+
+static inline unsigned long mldv2_mrc(const struct mld2_query *mlh2)
+{
+ /* RFC3810, 5.1.3. Maximum Response Code */
+ unsigned long ret, mc_mrc = ntohs(mlh2->mld2q_mrc);
+
+ if (mc_mrc < 32768) {
+ ret = mc_mrc;
+ } else {
+ unsigned long mc_man, mc_exp;
+
+ mc_exp = MLDV2_MRC_EXP(mc_mrc);
+ mc_man = MLDV2_MRC_MAN(mc_mrc);
+
+ ret = (mc_man | 0x1000) << (mc_exp + 3);
+ }
+
+ return ret;
+}
#endif
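The QQIC macros follow the same encoding; a decoder analogous to mldv2_mrc() would be (sketch, not part of this header):

static inline unsigned long mldv2_qqi(u8 qqic)
{
	/* RFC3810, 5.1.9: QQI = (mant | 0x10) << (exp + 3) for QQIC >= 128 */
	if (qqic < 128)
		return qqic;
	return (MLDV2_QQIC_MAN(qqic) | 0x10) << (MLDV2_QQIC_EXP(qqic) + 3);
}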
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 6fea32340ae8..3c4211f0bed6 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -204,6 +204,11 @@ extern void ndisc_send_ns(struct net_device *dev,
extern void ndisc_send_rs(struct net_device *dev,
const struct in6_addr *saddr,
const struct in6_addr *daddr);
+extern void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
+ const struct in6_addr *daddr,
+ const struct in6_addr *solicited_addr,
+ bool router, bool solicited, bool override,
+ bool inc_opt);
extern void ndisc_send_redirect(struct sk_buff *skb,
const struct in6_addr *target);
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 7e748ad8b50c..536501a3e58d 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -195,68 +195,67 @@ static inline void *neighbour_priv(const struct neighbour *n)
#define NEIGH_UPDATE_F_ISROUTER 0x40000000
#define NEIGH_UPDATE_F_ADMIN 0x80000000
-extern void neigh_table_init(struct neigh_table *tbl);
-extern int neigh_table_clear(struct neigh_table *tbl);
-extern struct neighbour * neigh_lookup(struct neigh_table *tbl,
- const void *pkey,
- struct net_device *dev);
-extern struct neighbour * neigh_lookup_nodev(struct neigh_table *tbl,
- struct net *net,
- const void *pkey);
-extern struct neighbour * __neigh_create(struct neigh_table *tbl,
- const void *pkey,
- struct net_device *dev,
- bool want_ref);
+void neigh_table_init(struct neigh_table *tbl);
+int neigh_table_clear(struct neigh_table *tbl);
+struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
+ struct net_device *dev);
+struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
+ const void *pkey);
+struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
+ struct net_device *dev, bool want_ref);
static inline struct neighbour *neigh_create(struct neigh_table *tbl,
const void *pkey,
struct net_device *dev)
{
return __neigh_create(tbl, pkey, dev, true);
}
-extern void neigh_destroy(struct neighbour *neigh);
-extern int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb);
-extern int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
- u32 flags);
-extern void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
-extern int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
-extern int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb);
-extern int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb);
-extern int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb);
-extern int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb);
-extern struct neighbour *neigh_event_ns(struct neigh_table *tbl,
+void neigh_destroy(struct neighbour *neigh);
+int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb);
+int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags);
+void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
+int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
+int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb);
+int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb);
+int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb);
+int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb);
+struct neighbour *neigh_event_ns(struct neigh_table *tbl,
u8 *lladdr, void *saddr,
struct net_device *dev);
-extern struct neigh_parms *neigh_parms_alloc(struct net_device *dev, struct neigh_table *tbl);
-extern void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms);
+struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
+ struct neigh_table *tbl);
+void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms);
static inline
-struct net *neigh_parms_net(const struct neigh_parms *parms)
+struct net *neigh_parms_net(const struct neigh_parms *parms)
{
return read_pnet(&parms->net);
}
-extern unsigned long neigh_rand_reach_time(unsigned long base);
+unsigned long neigh_rand_reach_time(unsigned long base);
-extern void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
- struct sk_buff *skb);
-extern struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net, const void *key, struct net_device *dev, int creat);
-extern struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
- struct net *net,
- const void *key,
- struct net_device *dev);
-extern int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key, struct net_device *dev);
+void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
+ struct sk_buff *skb);
+struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net,
+ const void *key, struct net_device *dev,
+ int creat);
+struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, struct net *net,
+ const void *key, struct net_device *dev);
+int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key,
+ struct net_device *dev);
-static inline
-struct net *pneigh_net(const struct pneigh_entry *pneigh)
+static inline struct net *pneigh_net(const struct pneigh_entry *pneigh)
{
return read_pnet(&pneigh->net);
}
-extern void neigh_app_ns(struct neighbour *n);
-extern void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie);
-extern void __neigh_for_each_release(struct neigh_table *tbl, int (*cb)(struct neighbour *));
-extern void pneigh_for_each(struct neigh_table *tbl, void (*cb)(struct pneigh_entry *));
+void neigh_app_ns(struct neighbour *n);
+void neigh_for_each(struct neigh_table *tbl,
+ void (*cb)(struct neighbour *, void *), void *cookie);
+void __neigh_for_each_release(struct neigh_table *tbl,
+ int (*cb)(struct neighbour *));
+void pneigh_for_each(struct neigh_table *tbl,
+ void (*cb)(struct pneigh_entry *));
struct neigh_seq_state {
struct seq_net_private p;
@@ -270,15 +269,14 @@ struct neigh_seq_state {
#define NEIGH_SEQ_IS_PNEIGH 0x00000002
#define NEIGH_SEQ_SKIP_NOARP 0x00000004
};
-extern void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *, unsigned int);
-extern void *neigh_seq_next(struct seq_file *, void *, loff_t *);
-extern void neigh_seq_stop(struct seq_file *, void *);
-
-extern int neigh_sysctl_register(struct net_device *dev,
- struct neigh_parms *p,
- char *p_name,
- proc_handler *proc_handler);
-extern void neigh_sysctl_unregister(struct neigh_parms *p);
+void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *,
+ unsigned int);
+void *neigh_seq_next(struct seq_file *, void *, loff_t *);
+void neigh_seq_stop(struct seq_file *, void *);
+
+int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
+ char *p_name, proc_handler *proc_handler);
+void neigh_sysctl_unregister(struct neigh_parms *p);
static inline void __neigh_parms_put(struct neigh_parms *parms)
{
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 84e37b1ca9e1..1313456a0994 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -119,7 +119,6 @@ struct net {
struct netns_ipvs *ipvs;
#endif
struct sock *diag_nlsk;
- atomic_t rt_genid;
atomic_t fnhe_genid;
};
@@ -333,14 +332,42 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
}
#endif
-static inline int rt_genid(struct net *net)
+static inline int rt_genid_ipv4(struct net *net)
{
- return atomic_read(&net->rt_genid);
+ return atomic_read(&net->ipv4.rt_genid);
}
-static inline void rt_genid_bump(struct net *net)
+static inline void rt_genid_bump_ipv4(struct net *net)
{
- atomic_inc(&net->rt_genid);
+ atomic_inc(&net->ipv4.rt_genid);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline int rt_genid_ipv6(struct net *net)
+{
+ return atomic_read(&net->ipv6.rt_genid);
+}
+
+static inline void rt_genid_bump_ipv6(struct net *net)
+{
+ atomic_inc(&net->ipv6.rt_genid);
+}
+#else
+static inline int rt_genid_ipv6(struct net *net)
+{
+ return 0;
+}
+
+static inline void rt_genid_bump_ipv6(struct net *net)
+{
+}
+#endif
+
+/* For callers who don't really care about whether it's IPv4 or IPv6 */
+static inline void rt_genid_bump_all(struct net *net)
+{
+ rt_genid_bump_ipv4(net);
+ rt_genid_bump_ipv6(net);
}
static inline int fnhe_genid(struct net *net)
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 644d9c223d24..0c1288a50e8b 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -181,8 +181,7 @@ __nf_conntrack_find(struct net *net, u16 zone,
const struct nf_conntrack_tuple *tuple);
extern int nf_conntrack_hash_check_insert(struct nf_conn *ct);
-extern void nf_ct_delete_from_lists(struct nf_conn *ct);
-extern void nf_ct_dying_timeout(struct nf_conn *ct);
+bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report);
extern void nf_conntrack_flush_report(struct net *net, u32 portid, int report);
@@ -235,7 +234,7 @@ static inline bool nf_ct_kill(struct nf_conn *ct)
}
/* These are for NAT. Icky. */
-extern s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
+extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct,
enum ip_conntrack_dir dir,
u32 seq);
@@ -249,7 +248,9 @@ extern void nf_ct_untracked_status_or(unsigned long bits);
/* Iterate over all conntracks: if iter returns true, it's deleted. */
extern void
-nf_ct_iterate_cleanup(struct net *net, int (*iter)(struct nf_conn *i, void *data), void *data);
+nf_ct_iterate_cleanup(struct net *net,
+ int (*iter)(struct nf_conn *i, void *data),
+ void *data, u32 portid, int report);
extern void nf_conntrack_free(struct nf_conn *ct);
extern struct nf_conn *
nf_conntrack_alloc(struct net *net, u16 zone,
diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
index 977bc8a46444..ff95434e50ca 100644
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -10,6 +10,7 @@ enum nf_ct_ext_id {
#if defined(CONFIG_NF_NAT) || defined(CONFIG_NF_NAT_MODULE)
NF_CT_EXT_NAT,
#endif
+ NF_CT_EXT_SEQADJ,
NF_CT_EXT_ACCT,
#ifdef CONFIG_NF_CONNTRACK_EVENTS
NF_CT_EXT_ECACHE,
@@ -26,17 +27,22 @@ enum nf_ct_ext_id {
#ifdef CONFIG_NF_CONNTRACK_LABELS
NF_CT_EXT_LABELS,
#endif
+#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
+ NF_CT_EXT_SYNPROXY,
+#endif
NF_CT_EXT_NUM,
};
#define NF_CT_EXT_HELPER_TYPE struct nf_conn_help
#define NF_CT_EXT_NAT_TYPE struct nf_conn_nat
+#define NF_CT_EXT_SEQADJ_TYPE struct nf_conn_seqadj
#define NF_CT_EXT_ACCT_TYPE struct nf_conn_counter
#define NF_CT_EXT_ECACHE_TYPE struct nf_conntrack_ecache
#define NF_CT_EXT_ZONE_TYPE struct nf_conntrack_zone
#define NF_CT_EXT_TSTAMP_TYPE struct nf_conn_tstamp
#define NF_CT_EXT_TIMEOUT_TYPE struct nf_conn_timeout
#define NF_CT_EXT_LABELS_TYPE struct nf_conn_labels
+#define NF_CT_EXT_SYNPROXY_TYPE struct nf_conn_synproxy
/* Extensions: optional stuff which isn't permanently in struct. */
struct nf_ct_ext {
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 914d8d900798..b411d7b17dec 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -148,17 +148,10 @@ extern int nf_ct_port_nlattr_tuple_size(void);
extern const struct nla_policy nf_ct_port_nla_policy[];
#ifdef CONFIG_SYSCTL
-#ifdef DEBUG_INVALID_PACKETS
#define LOG_INVALID(net, proto) \
((net)->ct.sysctl_log_invalid == (proto) || \
(net)->ct.sysctl_log_invalid == IPPROTO_RAW)
#else
-#define LOG_INVALID(net, proto) \
- (((net)->ct.sysctl_log_invalid == (proto) || \
- (net)->ct.sysctl_log_invalid == IPPROTO_RAW) \
- && net_ratelimit())
-#endif
-#else
static inline int LOG_INVALID(struct net *net, int proto) { return 0; }
#endif /* CONFIG_SYSCTL */
diff --git a/include/net/netfilter/nf_conntrack_seqadj.h b/include/net/netfilter/nf_conntrack_seqadj.h
new file mode 100644
index 000000000000..f6177a5fe0ca
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_seqadj.h
@@ -0,0 +1,51 @@
+#ifndef _NF_CONNTRACK_SEQADJ_H
+#define _NF_CONNTRACK_SEQADJ_H
+
+#include <net/netfilter/nf_conntrack_extend.h>
+
+/**
+ * struct nf_ct_seqadj - sequence number adjustment information
+ *
+ * @correction_pos: position of the last TCP sequence number modification
+ * @offset_before: sequence number offset before last modification
+ * @offset_after: sequence number offset after last modification
+ */
+struct nf_ct_seqadj {
+ u32 correction_pos;
+ s32 offset_before;
+ s32 offset_after;
+};
+
+struct nf_conn_seqadj {
+ struct nf_ct_seqadj seq[IP_CT_DIR_MAX];
+};
+
+static inline struct nf_conn_seqadj *nfct_seqadj(const struct nf_conn *ct)
+{
+ return nf_ct_ext_find(ct, NF_CT_EXT_SEQADJ);
+}
+
+static inline struct nf_conn_seqadj *nfct_seqadj_ext_add(struct nf_conn *ct)
+{
+ return nf_ct_ext_add(ct, NF_CT_EXT_SEQADJ, GFP_ATOMIC);
+}
+
+extern int nf_ct_seqadj_init(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ s32 off);
+extern int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ __be32 seq, s32 off);
+extern void nf_ct_tcp_seqadj_set(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ s32 off);
+
+extern int nf_ct_seq_adjust(struct sk_buff *skb,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ unsigned int protoff);
+extern s32 nf_ct_seq_offset(const struct nf_conn *ct, enum ip_conntrack_dir,
+ u32 seq);
+
+extern int nf_conntrack_seqadj_init(void);
+extern void nf_conntrack_seqadj_fini(void);
+
+#endif /* _NF_CONNTRACK_SEQADJ_H */
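The intended calling pattern, sketched (the seqadj extension is assumed to have been added with nfct_seqadj_ext_add() when the conntrack was created; tcph, newlen and oldlen are illustrative):

/* after a helper grows or shrinks the TCP payload: */
nf_ct_seqadj_set(ct, ctinfo, tcph->seq, newlen - oldlen);

/* later, on each packet of the connection: */
nf_ct_seq_adjust(skb, ct, ctinfo, protoff);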
diff --git a/include/net/netfilter/nf_conntrack_synproxy.h b/include/net/netfilter/nf_conntrack_synproxy.h
new file mode 100644
index 000000000000..806f54a290d6
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_synproxy.h
@@ -0,0 +1,77 @@
+#ifndef _NF_CONNTRACK_SYNPROXY_H
+#define _NF_CONNTRACK_SYNPROXY_H
+
+#include <net/netns/generic.h>
+
+struct nf_conn_synproxy {
+ u32 isn;
+ u32 its;
+ u32 tsoff;
+};
+
+static inline struct nf_conn_synproxy *nfct_synproxy(const struct nf_conn *ct)
+{
+#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
+ return nf_ct_ext_find(ct, NF_CT_EXT_SYNPROXY);
+#else
+ return NULL;
+#endif
+}
+
+static inline struct nf_conn_synproxy *nfct_synproxy_ext_add(struct nf_conn *ct)
+{
+#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
+ return nf_ct_ext_add(ct, NF_CT_EXT_SYNPROXY, GFP_ATOMIC);
+#else
+ return NULL;
+#endif
+}
+
+struct synproxy_stats {
+ unsigned int syn_received;
+ unsigned int cookie_invalid;
+ unsigned int cookie_valid;
+ unsigned int cookie_retrans;
+ unsigned int conn_reopened;
+};
+
+struct synproxy_net {
+ struct nf_conn *tmpl;
+ struct synproxy_stats __percpu *stats;
+};
+
+extern int synproxy_net_id;
+static inline struct synproxy_net *synproxy_pernet(struct net *net)
+{
+ return net_generic(net, synproxy_net_id);
+}
+
+struct synproxy_options {
+ u8 options;
+ u8 wscale;
+ u16 mss;
+ u32 tsval;
+ u32 tsecr;
+};
+
+struct tcphdr;
+struct xt_synproxy_info;
+extern void synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
+ const struct tcphdr *th,
+ struct synproxy_options *opts);
+extern unsigned int synproxy_options_size(const struct synproxy_options *opts);
+extern void synproxy_build_options(struct tcphdr *th,
+ const struct synproxy_options *opts);
+
+extern void synproxy_init_timestamp_cookie(const struct xt_synproxy_info *info,
+ struct synproxy_options *opts);
+extern void synproxy_check_timestamp_cookie(struct synproxy_options *opts);
+
+extern unsigned int synproxy_tstamp_adjust(struct sk_buff *skb,
+ unsigned int protoff,
+ struct tcphdr *th,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_conn_synproxy *synproxy);
+
+#endif /* _NF_CONNTRACK_SYNPROXY_H */
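Typical use of the option helpers, sketched (thoff, th and reply_th are illustrative):

struct synproxy_options opts = {};

synproxy_parse_options(skb, thoff, th, &opts);
/* decide on the reply, then mirror the selected options into it: */
synproxy_build_options(reply_th, &opts);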
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index ad14a799fd2e..59a192420053 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -13,15 +13,6 @@ enum nf_nat_manip_type {
#define HOOK2MANIP(hooknum) ((hooknum) != NF_INET_POST_ROUTING && \
(hooknum) != NF_INET_LOCAL_IN)
-/* NAT sequence number modifications */
-struct nf_nat_seq {
- /* position of the last TCP sequence number modification (if any) */
- u_int32_t correction_pos;
-
- /* sequence number offset before and after last modification */
- int16_t offset_before, offset_after;
-};
-
#include <linux/list.h>
#include <linux/netfilter/nf_conntrack_pptp.h>
#include <net/netfilter/nf_conntrack_extend.h>
@@ -39,7 +30,6 @@ struct nf_conn;
/* The structure embedded in the conntrack structure. */
struct nf_conn_nat {
struct hlist_node bysource;
- struct nf_nat_seq seq[IP_CT_DIR_MAX];
struct nf_conn *ct;
union nf_conntrack_nat_help help;
#if defined(CONFIG_IP_NF_TARGET_MASQUERADE) || \
diff --git a/include/net/netfilter/nf_nat_helper.h b/include/net/netfilter/nf_nat_helper.h
index b4d6bfc2af03..404324d1d0c4 100644
--- a/include/net/netfilter/nf_nat_helper.h
+++ b/include/net/netfilter/nf_nat_helper.h
@@ -39,28 +39,9 @@ extern int nf_nat_mangle_udp_packet(struct sk_buff *skb,
const char *rep_buffer,
unsigned int rep_len);
-extern void nf_nat_set_seq_adjust(struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- __be32 seq, s16 off);
-extern int nf_nat_seq_adjust(struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff);
-extern int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff);
-
/* Setup NAT on this expected conntrack so it follows master, but goes
* to port ct->master->saved_proto. */
extern void nf_nat_follow_master(struct nf_conn *ct,
struct nf_conntrack_expect *this);
-extern s16 nf_nat_get_offset(const struct nf_conn *ct,
- enum ip_conntrack_dir dir,
- u32 seq);
-
-extern void nf_nat_tcp_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
- u32 dir, int off);
-
#endif
diff --git a/include/net/netfilter/nf_tproxy_core.h b/include/net/netfilter/nf_tproxy_core.h
deleted file mode 100644
index 36d9379d4c4b..000000000000
--- a/include/net/netfilter/nf_tproxy_core.h
+++ /dev/null
@@ -1,210 +0,0 @@
-#ifndef _NF_TPROXY_CORE_H
-#define _NF_TPROXY_CORE_H
-
-#include <linux/types.h>
-#include <linux/in.h>
-#include <linux/skbuff.h>
-#include <net/sock.h>
-#include <net/inet_hashtables.h>
-#include <net/inet6_hashtables.h>
-#include <net/tcp.h>
-
-#define NFT_LOOKUP_ANY 0
-#define NFT_LOOKUP_LISTENER 1
-#define NFT_LOOKUP_ESTABLISHED 2
-
-/* look up and get a reference to a matching socket */
-
-
-/* This function is used by the 'TPROXY' target and the 'socket'
- * match. The following lookups are supported:
- *
- * Explicit TProxy target rule
- * ===========================
- *
- * This is used when the user wants to intercept a connection matching
- * an explicit iptables rule. In this case the sockets are assumed
- * matching in preference order:
- *
- * - match: if there's a fully established connection matching the
- * _packet_ tuple, it is returned, assuming the redirection
- * already took place and we process a packet belonging to an
- * established connection
- *
- * - match: if there's a listening socket matching the redirection
- * (e.g. on-port & on-ip of the connection), it is returned,
- * regardless of whether it was bound to 0.0.0.0 or an explicit
- * address. The reasoning is that if there's an explicit rule, it
- * does not really matter whether the listener is bound to an
- * interface or to 0. The user already asked for redirection
- * (by adding the rule).
- *
- * "socket" match based redirection (no specific rule)
- * ===================================================
- *
- * There are connections with dynamic endpoints (e.g. FTP data
- * connection) that the user is unable to add explicit rules
- * for. These are taken care of by a generic "socket" rule. It is
- * assumed that the proxy application is trusted to open such
- * connections without explicit iptables rule (except of course the
- * generic 'socket' rule). In this case the following sockets are
- * matched in preference order:
- *
- * - match: if there's a fully established connection matching the
- * _packet_ tuple
- *
- * - match: if there's a non-zero bound listener (possibly with a
- * non-local address) We don't accept zero-bound listeners, since
- * then local services could intercept traffic going through the
- * box.
- *
- * Please note that there's an overlap between what a TPROXY target
- * and a socket match will match. Normally, if you have both rules, the
- * "socket" match will be the first one, so effectively all packets
- * belonging to established connections go through that one.
- */
-static inline struct sock *
-nf_tproxy_get_sock_v4(struct net *net, const u8 protocol,
- const __be32 saddr, const __be32 daddr,
- const __be16 sport, const __be16 dport,
- const struct net_device *in, int lookup_type)
-{
- struct sock *sk;
-
- /* look up socket */
- switch (protocol) {
- case IPPROTO_TCP:
- switch (lookup_type) {
- case NFT_LOOKUP_ANY:
- sk = __inet_lookup(net, &tcp_hashinfo,
- saddr, sport, daddr, dport,
- in->ifindex);
- break;
- case NFT_LOOKUP_LISTENER:
- sk = inet_lookup_listener(net, &tcp_hashinfo,
- saddr, sport,
- daddr, dport,
- in->ifindex);
-
- /* NOTE: we return listeners even if bound to
- * 0.0.0.0, those are filtered out in
- * xt_socket, since xt_TPROXY needs 0 bound
- * listeners too */
-
- break;
- case NFT_LOOKUP_ESTABLISHED:
- sk = inet_lookup_established(net, &tcp_hashinfo,
- saddr, sport, daddr, dport,
- in->ifindex);
- break;
- default:
- WARN_ON(1);
- sk = NULL;
- break;
- }
- break;
- case IPPROTO_UDP:
- sk = udp4_lib_lookup(net, saddr, sport, daddr, dport,
- in->ifindex);
- if (sk && lookup_type != NFT_LOOKUP_ANY) {
- int connected = (sk->sk_state == TCP_ESTABLISHED);
- int wildcard = (inet_sk(sk)->inet_rcv_saddr == 0);
-
- /* NOTE: we return listeners even if bound to
- * 0.0.0.0, those are filtered out in
- * xt_socket, since xt_TPROXY needs 0 bound
- * listeners too */
- if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) ||
- (lookup_type == NFT_LOOKUP_LISTENER && connected)) {
- sock_put(sk);
- sk = NULL;
- }
- }
- break;
- default:
- WARN_ON(1);
- sk = NULL;
- }
-
- pr_debug("tproxy socket lookup: proto %u %08x:%u -> %08x:%u, lookup type: %d, sock %p\n",
- protocol, ntohl(saddr), ntohs(sport), ntohl(daddr), ntohs(dport), lookup_type, sk);
-
- return sk;
-}
-
-#if IS_ENABLED(CONFIG_IPV6)
-static inline struct sock *
-nf_tproxy_get_sock_v6(struct net *net, const u8 protocol,
- const struct in6_addr *saddr, const struct in6_addr *daddr,
- const __be16 sport, const __be16 dport,
- const struct net_device *in, int lookup_type)
-{
- struct sock *sk;
-
- /* look up socket */
- switch (protocol) {
- case IPPROTO_TCP:
- switch (lookup_type) {
- case NFT_LOOKUP_ANY:
- sk = inet6_lookup(net, &tcp_hashinfo,
- saddr, sport, daddr, dport,
- in->ifindex);
- break;
- case NFT_LOOKUP_LISTENER:
- sk = inet6_lookup_listener(net, &tcp_hashinfo,
- saddr, sport,
- daddr, ntohs(dport),
- in->ifindex);
-
- /* NOTE: we return listeners even if bound to
- * 0.0.0.0, those are filtered out in
- * xt_socket, since xt_TPROXY needs 0 bound
- * listeners too */
-
- break;
- case NFT_LOOKUP_ESTABLISHED:
- sk = __inet6_lookup_established(net, &tcp_hashinfo,
- saddr, sport, daddr, ntohs(dport),
- in->ifindex);
- break;
- default:
- WARN_ON(1);
- sk = NULL;
- break;
- }
- break;
- case IPPROTO_UDP:
- sk = udp6_lib_lookup(net, saddr, sport, daddr, dport,
- in->ifindex);
- if (sk && lookup_type != NFT_LOOKUP_ANY) {
- int connected = (sk->sk_state == TCP_ESTABLISHED);
- int wildcard = ipv6_addr_any(&inet6_sk(sk)->rcv_saddr);
-
- /* NOTE: we return listeners even if bound to
- * 0.0.0.0, those are filtered out in
- * xt_socket, since xt_TPROXY needs 0 bound
- * listeners too */
- if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) ||
- (lookup_type == NFT_LOOKUP_LISTENER && connected)) {
- sock_put(sk);
- sk = NULL;
- }
- }
- break;
- default:
- WARN_ON(1);
- sk = NULL;
- }
-
- pr_debug("tproxy socket lookup: proto %u %pI6:%u -> %pI6:%u, lookup type: %d, sock %p\n",
- protocol, saddr, ntohs(sport), daddr, ntohs(dport), lookup_type, sk);
-
- return sk;
-}
-#endif
-
-/* assign a socket to the skb -- consumes sk */
-void
-nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk);
-
-#endif
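
For reference, a sketch of how a caller such as the TPROXY target drives
the two lookups the deleted comment above describes (illustrative only:
with this header gone, the helpers presumably survive as private copies
inside their users, and the wrapper below is hypothetical):

#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/netfilter/nf_tproxy_core.h>	/* the header being removed */

/* Find the socket a TPROXY rule should deliver to: prefer an established
 * socket matching the packet tuple, then fall back to a listener on the
 * redirect address/port. TCP shown; for UDP the ports would come from the
 * udphdr instead. */
static struct sock *
tproxy_lookup_v4(struct net *net, const struct iphdr *iph,
		 const struct tcphdr *th, __be32 laddr, __be16 lport,
		 const struct net_device *in)
{
	struct sock *sk;

	sk = nf_tproxy_get_sock_v4(net, iph->protocol,
				   iph->saddr, iph->daddr,
				   th->source, th->dest,
				   in, NFT_LOOKUP_ESTABLISHED);
	if (sk)
		return sk;

	return nf_tproxy_get_sock_v4(net, iph->protocol,
				     iph->saddr, laddr,
				     th->source, lport,
				     in, NFT_LOOKUP_LISTENER);
}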
diff --git a/include/net/netfilter/nfnetlink_queue.h b/include/net/netfilter/nfnetlink_queue.h
index 86267a529514..aff88ba91391 100644
--- a/include/net/netfilter/nfnetlink_queue.h
+++ b/include/net/netfilter/nfnetlink_queue.h
@@ -15,6 +15,8 @@ int nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo);
void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo, int diff);
+int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr,
+ u32 portid, u32 report);
#else
inline struct nf_conn *
nfqnl_ct_get(struct sk_buff *entskb, size_t *size, enum ip_conntrack_info *ctinfo)
@@ -39,5 +41,11 @@ inline void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo, int diff)
{
}
+
+inline int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr,
+ u32 portid, u32 report)
+{
+ return 0;
+}
#endif /* NF_CONNTRACK */
#endif
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 2ba9de89e8ec..bf2ec2202c56 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -77,5 +77,6 @@ struct netns_ipv4 {
struct fib_rules_ops *mr_rules_ops;
#endif
#endif
+ atomic_t rt_genid;
};
#endif
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 005e2c2e39a9..0fb2401197c5 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -72,6 +72,7 @@ struct netns_ipv6 {
#endif
#endif
atomic_t dev_addr_genid;
+ atomic_t rt_genid;
};
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
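
Both netns hunks add a per-namespace route generation counter: cached dst
entries snapshot it, and bumping it invalidates every cache in the
namespace at once. A hedged sketch of the idiom (the helper names are
illustrative, not from this diff):

#include <linux/atomic.h>
#include <net/net_namespace.h>

/* Snapshot the current generation for a dst about to be cached. */
static inline int rt_genid_ipv4(const struct net *net)
{
	return atomic_read(&net->ipv4.rt_genid);
}

/* Invalidate all cached IPv4 routes in this namespace. */
static inline void rt_genid_bump_ipv4(struct net *net)
{
	atomic_inc(&net->ipv4.rt_genid);
}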
diff --git a/include/net/netprio_cgroup.h b/include/net/netprio_cgroup.h
index a24f8bb3ca47..099d02782e22 100644
--- a/include/net/netprio_cgroup.h
+++ b/include/net/netprio_cgroup.h
@@ -25,7 +25,7 @@ struct netprio_map {
u32 priomap[];
};
-extern void sock_update_netprioidx(struct sock *sk);
+void sock_update_netprioidx(struct sock *sk);
#if IS_BUILTIN(CONFIG_NETPRIO_CGROUP)
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 5f286b726bb6..f68ee68e4e3e 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -224,6 +224,9 @@ int nfc_set_remote_general_bytes(struct nfc_dev *dev,
u8 *gt, u8 gt_len);
u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, size_t *gb_len);
+int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name,
+ u32 result);
+
int nfc_targets_found(struct nfc_dev *dev,
struct nfc_target *targets, int ntargets);
int nfc_target_lost(struct nfc_dev *dev, u32 target_idx);
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 13174509cdfd..2ebef77a2f9a 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -14,8 +14,8 @@ struct tcf_walker {
int (*fn)(struct tcf_proto *, unsigned long node, struct tcf_walker *);
};
-extern int register_tcf_proto_ops(struct tcf_proto_ops *ops);
-extern int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
+int register_tcf_proto_ops(struct tcf_proto_ops *ops);
+int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
@@ -126,17 +126,17 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
return 0;
}
-extern int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
- struct nlattr **tb, struct nlattr *rate_tlv,
- struct tcf_exts *exts,
- const struct tcf_ext_map *map);
-extern void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts);
-extern void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
- struct tcf_exts *src);
-extern int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts,
- const struct tcf_ext_map *map);
-extern int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts,
- const struct tcf_ext_map *map);
+int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
+ struct nlattr **tb, struct nlattr *rate_tlv,
+ struct tcf_exts *exts,
+ const struct tcf_ext_map *map);
+void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts);
+void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
+ struct tcf_exts *src);
+int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts,
+ const struct tcf_ext_map *map);
+int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts,
+ const struct tcf_ext_map *map);
/**
* struct tcf_pkt_info - packet information
@@ -239,14 +239,14 @@ struct tcf_ematch_ops {
struct list_head link;
};
-extern int tcf_em_register(struct tcf_ematch_ops *);
-extern void tcf_em_unregister(struct tcf_ematch_ops *);
-extern int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
- struct tcf_ematch_tree *);
-extern void tcf_em_tree_destroy(struct tcf_proto *, struct tcf_ematch_tree *);
-extern int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
-extern int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
- struct tcf_pkt_info *);
+int tcf_em_register(struct tcf_ematch_ops *);
+void tcf_em_unregister(struct tcf_ematch_ops *);
+int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
+ struct tcf_ematch_tree *);
+void tcf_em_tree_destroy(struct tcf_proto *, struct tcf_ematch_tree *);
+int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
+int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
+ struct tcf_pkt_info *);
/**
* tcf_em_tree_change - replace ematch tree of a running classifier
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 388bf8b6d060..59ec3cd15d68 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -64,8 +64,8 @@ struct qdisc_watchdog {
struct Qdisc *qdisc;
};
-extern void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
-extern void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires);
+void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
+void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires);
static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
psched_time_t expires)
@@ -73,31 +73,34 @@ static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires));
}
-extern void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);
+void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);
extern struct Qdisc_ops pfifo_qdisc_ops;
extern struct Qdisc_ops bfifo_qdisc_ops;
extern struct Qdisc_ops pfifo_head_drop_qdisc_ops;
-extern int fifo_set_limit(struct Qdisc *q, unsigned int limit);
-extern struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
- unsigned int limit);
-
-extern int register_qdisc(struct Qdisc_ops *qops);
-extern int unregister_qdisc(struct Qdisc_ops *qops);
-extern void qdisc_list_del(struct Qdisc *q);
-extern struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
-extern struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
-extern struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
- struct nlattr *tab);
-extern void qdisc_put_rtab(struct qdisc_rate_table *tab);
-extern void qdisc_put_stab(struct qdisc_size_table *tab);
-extern void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc);
-extern int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
- struct net_device *dev, struct netdev_queue *txq,
- spinlock_t *root_lock);
-
-extern void __qdisc_run(struct Qdisc *q);
+int fifo_set_limit(struct Qdisc *q, unsigned int limit);
+struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
+ unsigned int limit);
+
+int register_qdisc(struct Qdisc_ops *qops);
+int unregister_qdisc(struct Qdisc_ops *qops);
+void qdisc_get_default(char *id, size_t len);
+int qdisc_set_default(const char *id);
+
+void qdisc_list_del(struct Qdisc *q);
+struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
+struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
+struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
+ struct nlattr *tab);
+void qdisc_put_rtab(struct qdisc_rate_table *tab);
+void qdisc_put_stab(struct qdisc_size_table *tab);
+void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc);
+int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
+ struct net_device *dev, struct netdev_queue *txq,
+ spinlock_t *root_lock);
+
+void __qdisc_run(struct Qdisc *q);
static inline void qdisc_run(struct Qdisc *q)
{
@@ -105,10 +108,10 @@ static inline void qdisc_run(struct Qdisc *q)
__qdisc_run(q);
}
-extern int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
- struct tcf_result *res);
-extern int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res);
+int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+ struct tcf_result *res);
/* Calculate maximal size of packet seen by hard_start_xmit
routine of this device.
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index e5ae0c50fa9c..f4eb365f7dcd 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -316,6 +316,7 @@ extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
+extern const struct Qdisc_ops *default_qdisc_ops;
struct Qdisc_class_common {
u32 classid;
@@ -350,30 +351,32 @@ qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
return NULL;
}
-extern int qdisc_class_hash_init(struct Qdisc_class_hash *);
-extern void qdisc_class_hash_insert(struct Qdisc_class_hash *, struct Qdisc_class_common *);
-extern void qdisc_class_hash_remove(struct Qdisc_class_hash *, struct Qdisc_class_common *);
-extern void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
-extern void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
-
-extern void dev_init_scheduler(struct net_device *dev);
-extern void dev_shutdown(struct net_device *dev);
-extern void dev_activate(struct net_device *dev);
-extern void dev_deactivate(struct net_device *dev);
-extern void dev_deactivate_many(struct list_head *head);
-extern struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
- struct Qdisc *qdisc);
-extern void qdisc_reset(struct Qdisc *qdisc);
-extern void qdisc_destroy(struct Qdisc *qdisc);
-extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
-extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
- struct Qdisc_ops *ops);
-extern struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
- struct Qdisc_ops *ops, u32 parentid);
-extern void __qdisc_calculate_pkt_len(struct sk_buff *skb,
- const struct qdisc_size_table *stab);
-extern void tcf_destroy(struct tcf_proto *tp);
-extern void tcf_destroy_chain(struct tcf_proto **fl);
+int qdisc_class_hash_init(struct Qdisc_class_hash *);
+void qdisc_class_hash_insert(struct Qdisc_class_hash *,
+ struct Qdisc_class_common *);
+void qdisc_class_hash_remove(struct Qdisc_class_hash *,
+ struct Qdisc_class_common *);
+void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
+void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
+
+void dev_init_scheduler(struct net_device *dev);
+void dev_shutdown(struct net_device *dev);
+void dev_activate(struct net_device *dev);
+void dev_deactivate(struct net_device *dev);
+void dev_deactivate_many(struct list_head *head);
+struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
+ struct Qdisc *qdisc);
+void qdisc_reset(struct Qdisc *qdisc);
+void qdisc_destroy(struct Qdisc *qdisc);
+void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
+struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
+ const struct Qdisc_ops *ops);
+struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
+ const struct Qdisc_ops *ops, u32 parentid);
+void __qdisc_calculate_pkt_len(struct sk_buff *skb,
+ const struct qdisc_size_table *stab);
+void tcf_destroy(struct tcf_proto *tp);
+void tcf_destroy_chain(struct tcf_proto **fl);
/* Reset all TX qdiscs greater then index of a device. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
@@ -698,7 +701,8 @@ static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
return ((u64)len * r->mult) >> r->shift;
}
-extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf);
+void psched_ratecfg_precompute(struct psched_ratecfg *r,
+ const struct tc_ratespec *conf);
static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
const struct psched_ratecfg *r)
diff --git a/include/net/sctp/auth.h b/include/net/sctp/auth.h
index 49bc9577c61e..aa80bef3c9d5 100644
--- a/include/net/sctp/auth.h
+++ b/include/net/sctp/auth.h
@@ -22,16 +22,10 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Vlad Yasevich <vladislav.yasevich@hp.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#ifndef __sctp_auth_h__
diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
index 0cb08e6fb6df..259924d63ba6 100644
--- a/include/net/sctp/checksum.h
+++ b/include/net/sctp/checksum.h
@@ -25,10 +25,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Dinakaran Joseph
@@ -37,9 +34,6 @@
*
* Rewritten to use libcrc32c by:
* Vlad Yasevich <vladislav.yasevich@hp.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#ifndef __sctp_checksum_h__
@@ -85,4 +79,19 @@ static inline __le32 sctp_end_cksum(__u32 crc32)
return cpu_to_le32(~crc32);
}
+/* Calculate the CRC32C checksum of an SCTP packet. */
+static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
+ unsigned int offset)
+{
+ const struct sk_buff *iter;
+
+ __u32 crc32 = sctp_start_cksum(skb->data + offset,
+ skb_headlen(skb) - offset);
+ skb_walk_frags(skb, iter)
+ crc32 = sctp_update_cksum((__u8 *) iter->data,
+ skb_headlen(iter), crc32);
+
+ return sctp_end_cksum(crc32);
+}
+
#endif /* __sctp_checksum_h__ */
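
The new sctp_compute_cksum() walks the linear head and every fragment, so
callers no longer hand-roll the start/update/end sequence. A hedged sketch
of receive-side verification (RFC 4960 defines the CRC over a packet whose
checksum field is zeroed; the wrapper below is an assumption, not kernel
code from this merge):

#include <linux/sctp.h>
#include <net/sctp/checksum.h>

/* Return true if the packet's CRC32C is valid; 'offset' is where the SCTP
 * common header starts inside the skb. */
static bool sctp_cksum_ok(struct sk_buff *skb, unsigned int offset)
{
	struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
	__le32 wire = sh->checksum;
	__le32 calc;

	sh->checksum = 0;		/* CRC is defined over a zeroed field */
	calc = sctp_compute_cksum(skb, offset);
	sh->checksum = wire;		/* restore before further processing */

	return calc == wire;
}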
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index 35247271e557..832f2191489c 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -23,19 +23,17 @@
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
- * Please send any bug reports or fixes you make to one of the
- * following email addresses:
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
- * La Monte H.P. Yarroll <piggy@acm.org>
- * Karl Knutson <karl@athena.chicago.il.us>
- * Ardelle Fan <ardelle.fan@intel.com>
- * Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Ardelle Fan <ardelle.fan@intel.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
*/
-
#ifndef __net_sctp_command_h__
#define __net_sctp_command_h__
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index ca50e0751e47..2f0a565a0fd5 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -25,10 +25,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -39,9 +36,6 @@
* Xingang Guo <xingang.guo@intel.com>
* Sridhar Samudrala <samudrala@us.ibm.com>
* Daisy Chang <daisyc@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#ifndef __sctp_constants_h__
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index d8e37ecea691..3794c5ad20fe 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -27,10 +27,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -41,9 +38,6 @@
* Ardelle Fan <ardelle.fan@intel.com>
* Ryan Layer <rmlayer@us.ibm.com>
* Kevin Gao <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#ifndef __net_sctp_h__
@@ -613,7 +607,7 @@ static inline void sctp_v4_map_v6(union sctp_addr *addr)
*/
static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
{
- if (t->dst && !dst_check(t->dst, 0)) {
+ if (t->dst && !dst_check(t->dst, t->dst_cookie)) {
dst_release(t->dst);
t->dst = NULL;
}
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 2a82d1384706..4ef75af340b6 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -27,10 +27,7 @@
*
* Please send any bug reports or fixes you make to the
* email addresses:
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -42,9 +39,6 @@
* Daisy Chang <daisyc@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
* Kevin Gao <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/types.h>
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index e745c92a1532..2174d8da0770 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -25,10 +25,7 @@
*
* Please send any bug reports or fixes you make to the
* email addresses:
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Randall Stewart <randall@sctp.chicago.il.us>
@@ -46,9 +43,6 @@
* Ryan Layer <rmlayer@us.ibm.com>
* Anup Pemmaiah <pemmaiah@cc.usu.edu>
* Kevin Gao <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#ifndef __sctp_structs_h__
@@ -119,29 +113,27 @@ struct sctp_hashbucket {
/* The SCTP globals structure. */
extern struct sctp_globals {
- /* The following variables are implementation specific. */
-
- /* Default initialization values to be applied to new associations. */
- __u16 max_instreams;
- __u16 max_outstreams;
-
/* This is a list of groups of functions for each address
* family that we support.
*/
struct list_head address_families;
/* This is the hash of all endpoints. */
- int ep_hashsize;
struct sctp_hashbucket *ep_hashtable;
-
/* This is the hash of all associations. */
- int assoc_hashsize;
struct sctp_hashbucket *assoc_hashtable;
-
/* This is the sctp port control hash. */
- int port_hashsize;
struct sctp_bind_hashbucket *port_hashtable;
+ /* Sizes of above hashtables. */
+ int ep_hashsize;
+ int assoc_hashsize;
+ int port_hashsize;
+
+ /* Default initialization values to be applied to new associations. */
+ __u16 max_instreams;
+ __u16 max_outstreams;
+
/* Flag to indicate whether computing and verifying checksum
* is disabled. */
bool checksum_disable;
@@ -782,6 +774,7 @@ struct sctp_transport {
/* Has this transport moved the ctsn since we last sacked */
__u32 sack_generation;
+ u32 dst_cookie;
struct flowi fl;
diff --git a/include/net/sctp/tsnmap.h b/include/net/sctp/tsnmap.h
index 2c5d2b4d5d1e..54bbbe547303 100644
--- a/include/net/sctp/tsnmap.h
+++ b/include/net/sctp/tsnmap.h
@@ -28,19 +28,13 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <net/sctp/constants.h>
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
index ca4693b4e09e..27b9f5c90153 100644
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -31,19 +31,13 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#ifndef __sctp_ulpevent_h__
diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h
index 00e50ba3f24b..b0cf5d54d717 100644
--- a/include/net/sctp/ulpqueue.h
+++ b/include/net/sctp/ulpqueue.h
@@ -30,18 +30,12 @@
*
* Please send any bug reports or fixes you make to the
* email addresses:
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#ifndef __sctp_ulpqueue_h__
diff --git a/include/net/sock.h b/include/net/sock.h
index 31d5cfbb51ec..6ba2e7b0e2b1 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -232,6 +232,7 @@ struct cg_proto;
* @sk_napi_id: id of the last napi context to receive data for sk
* @sk_ll_usec: usecs to busypoll when there is no data
* @sk_allocation: allocation mode
+ * @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
* @sk_sndbuf: size of send buffer in bytes
* @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
* %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
@@ -361,6 +362,7 @@ struct sock {
kmemcheck_bitfield_end(flags);
int sk_wmem_queued;
gfp_t sk_allocation;
+ u32 sk_pacing_rate; /* bytes per second */
netdev_features_t sk_route_caps;
netdev_features_t sk_route_nocaps;
int sk_gso_type;
@@ -746,11 +748,6 @@ static inline int sk_stream_wspace(const struct sock *sk)
extern void sk_stream_write_space(struct sock *sk);
-static inline bool sk_stream_memory_free(const struct sock *sk)
-{
- return sk->sk_wmem_queued < sk->sk_sndbuf;
-}
-
/* OOB backlog add */
static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
@@ -950,6 +947,7 @@ struct proto {
unsigned int inuse_idx;
#endif
+ bool (*stream_memory_free)(const struct sock *sk);
/* Memory pressure */
void (*enter_memory_pressure)(struct sock *sk);
atomic_long_t *memory_allocated; /* Current allocated memory. */
@@ -1088,6 +1086,21 @@ static inline struct cg_proto *parent_cg_proto(struct proto *proto,
}
#endif
+static inline bool sk_stream_memory_free(const struct sock *sk)
+{
+ if (sk->sk_wmem_queued >= sk->sk_sndbuf)
+ return false;
+
+ return sk->sk_prot->stream_memory_free ?
+ sk->sk_prot->stream_memory_free(sk) : true;
+}
+
+static inline bool sk_stream_is_writeable(const struct sock *sk)
+{
+ return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) &&
+ sk_stream_memory_free(sk);
+}
+
static inline bool sk_has_memory_pressure(const struct sock *sk)
{
@@ -1509,6 +1522,7 @@ extern struct sk_buff *sock_rmalloc(struct sock *sk,
unsigned long size, int force,
gfp_t priority);
extern void sock_wfree(struct sk_buff *skb);
+extern void skb_orphan_partial(struct sk_buff *skb);
extern void sock_rfree(struct sk_buff *skb);
extern void sock_edemux(struct sk_buff *skb);
@@ -1527,7 +1541,8 @@ extern struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
unsigned long header_len,
unsigned long data_len,
int noblock,
- int *errcode);
+ int *errcode,
+ int max_page_order);
extern void *sock_kmalloc(struct sock *sk, int size,
gfp_t priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);
@@ -2249,6 +2264,8 @@ static inline struct sock *skb_steal_sock(struct sk_buff *skb)
extern void sock_enable_timestamp(struct sock *sk, int flag);
extern int sock_get_timestamp(struct sock *, struct timeval __user *);
extern int sock_get_timestampns(struct sock *, struct timespec __user *);
+extern int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
+ int level, int type);
/*
* Enable debug/info messages
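
The write-space test becomes two-stage: the generic queued-vs-sndbuf check
plus an optional per-protocol veto through the new stream_memory_free hook
in struct proto. TCP wires in its own predicate (see the tcp.h hunk
below); a hedged sketch of what such a registration looks like, with a
hypothetical protocol:

/* The hook lets a transport report "no room" before sk_sndbuf is
 * actually exhausted, e.g. by capping unsent bytes instead of raw
 * queue size. */
static bool myproto_stream_memory_free(const struct sock *sk)
{
	return true;	/* placeholder policy */
}

static struct proto myproto_prot = {
	.name			= "MYPROTO",
	.stream_memory_free	= myproto_stream_memory_free,
	/* ... remaining mandatory ops elided ... */
};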
diff --git a/include/net/tcp.h b/include/net/tcp.h
index d1980054ec75..b1aa324c5e65 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -192,10 +192,6 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCPOLEN_TIMESTAMP 10
#define TCPOLEN_MD5SIG 18
#define TCPOLEN_EXP_FASTOPEN_BASE 4
-#define TCPOLEN_COOKIE_BASE 2 /* Cookie-less header extension */
-#define TCPOLEN_COOKIE_PAIR 3 /* Cookie pair header extension */
-#define TCPOLEN_COOKIE_MIN (TCPOLEN_COOKIE_BASE+TCP_COOKIE_MIN)
-#define TCPOLEN_COOKIE_MAX (TCPOLEN_COOKIE_BASE+TCP_COOKIE_MAX)
/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED 12
@@ -284,6 +280,8 @@ extern int sysctl_tcp_thin_dupack;
extern int sysctl_tcp_early_retrans;
extern int sysctl_tcp_limit_output_bytes;
extern int sysctl_tcp_challenge_ack_limit;
+extern unsigned int sysctl_tcp_notsent_lowat;
+extern int sysctl_tcp_min_tso_segs;
extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
@@ -373,8 +371,8 @@ extern void tcp_delack_timer_handler(struct sock *sk);
extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
const struct tcphdr *th, unsigned int len);
-extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
- const struct tcphdr *th, unsigned int len);
+extern void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
+ const struct tcphdr *th, unsigned int len);
extern void tcp_rcv_space_adjust(struct sock *sk);
extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
@@ -479,9 +477,13 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
/* From syncookies.c */
extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
+extern int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
+ u32 cookie);
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
struct ip_options *opt);
#ifdef CONFIG_SYN_COOKIES
+extern u32 __cookie_v4_init_sequence(const struct iphdr *iph,
+ const struct tcphdr *th, u16 *mssp);
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
__u16 *mss);
#else
@@ -498,8 +500,12 @@ extern bool cookie_check_timestamp(struct tcp_options_received *opt,
struct net *net, bool *ecn_ok);
/* From net/ipv6/syncookies.c */
+extern int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
+ u32 cookie);
extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES
+extern u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
+ const struct tcphdr *th, u16 *mssp);
extern __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
__u16 *mss);
#else
@@ -591,7 +597,6 @@ extern void tcp_initialize_rcv_mss(struct sock *sk);
extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
extern int tcp_mss_to_mtu(struct sock *sk, int mss);
extern void tcp_mtup_init(struct sock *sk);
-extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt);
extern void tcp_init_buffer_space(struct sock *sk);
static inline void tcp_bound_rto(const struct sock *sk)
@@ -1094,15 +1099,6 @@ static inline void tcp_openreq_init(struct request_sock *req,
ireq->loc_port = tcp_hdr(skb)->dest;
}
-/* Compute time elapsed between SYNACK and the ACK completing 3WHS */
-static inline void tcp_synack_rtt_meas(struct sock *sk,
- struct request_sock *req)
-{
- if (tcp_rsk(req)->snt_synack)
- tcp_valid_rtt_meas(sk,
- tcp_time_stamp - tcp_rsk(req)->snt_synack);
-}
-
extern void tcp_enter_memory_pressure(struct sock *sk);
static inline int keepalive_intvl_when(const struct tcp_sock *tp)
@@ -1313,7 +1309,8 @@ void tcp_free_fastopen_req(struct tcp_sock *tp);
extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
int tcp_fastopen_reset_cipher(void *key, unsigned int len);
-void tcp_fastopen_cookie_gen(__be32 addr, struct tcp_fastopen_cookie *foc);
+extern void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
+ struct tcp_fastopen_cookie *foc);
#define TCP_FASTOPEN_KEY_LENGTH 16
@@ -1549,6 +1546,19 @@ extern int tcp_gro_complete(struct sk_buff *skb);
extern void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr,
__be32 daddr);
+static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
+{
+ return tp->notsent_lowat ?: sysctl_tcp_notsent_lowat;
+}
+
+static inline bool tcp_stream_memory_free(const struct sock *sk)
+{
+ const struct tcp_sock *tp = tcp_sk(sk);
+ u32 notsent_bytes = tp->write_seq - tp->snd_nxt;
+
+ return notsent_bytes < tcp_notsent_lowat(tp);
+}
+
#ifdef CONFIG_PROC_FS
extern int tcp4_proc_init(void);
extern void tcp4_proc_exit(void);
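
tcp_notsent_lowat() is the kernel side of the new TCP_NOTSENT_LOWAT knob:
once write_seq - snd_nxt reaches the limit the socket stops reporting as
writable, capping how much unsent data userspace can queue in the kernel.
A hedged userspace sketch (the fallback value 25 is assumed to match the
uapi header of this era; prefer your toolchain's definition):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_NOTSENT_LOWAT
#define TCP_NOTSENT_LOWAT 25	/* assumed uapi value */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	unsigned int lowat = 128 * 1024;	/* at most ~128KB unsent */

	if (fd < 0 || setsockopt(fd, IPPROTO_TCP, TCP_NOTSENT_LOWAT,
				 &lowat, sizeof(lowat)) < 0) {
		perror("TCP_NOTSENT_LOWAT");
		return 1;
	}
	printf("notsent lowat set to %u bytes\n", lowat);
	return 0;
}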
diff --git a/include/net/udp.h b/include/net/udp.h
index 74c10ec5e74f..ef2e0b7843a0 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -183,6 +183,7 @@ extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t len);
extern int udp_push_pending_frames(struct sock *sk);
extern void udp_flush_pending_frames(struct sock *sk);
+extern void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
extern int udp_rcv(struct sk_buff *skb);
extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
extern int udp_disconnect(struct sock *sk, int flags);
diff --git a/net/vmw_vsock/vsock_addr.h b/include/net/vsock_addr.h
index 9ccd5316eac0..9ccd5316eac0 100644
--- a/net/vmw_vsock/vsock_addr.h
+++ b/include/net/vsock_addr.h
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
new file mode 100644
index 000000000000..2d64d3cd4999
--- /dev/null
+++ b/include/net/vxlan.h
@@ -0,0 +1,40 @@
+#ifndef __NET_VXLAN_H
+#define __NET_VXLAN_H 1
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/udp.h>
+
+#define VNI_HASH_BITS 10
+#define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
+
+struct vxlan_sock;
+typedef void (vxlan_rcv_t)(struct vxlan_sock *vh, struct sk_buff *skb, __be32 key);
+
+/* per UDP socket information */
+struct vxlan_sock {
+ struct hlist_node hlist;
+ vxlan_rcv_t *rcv;
+ void *data;
+ struct work_struct del_work;
+ struct socket *sock;
+ struct rcu_head rcu;
+ struct hlist_head vni_list[VNI_HASH_SIZE];
+ atomic_t refcnt;
+};
+
+struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
+ vxlan_rcv_t *rcv, void *data,
+ bool no_share, bool ipv6);
+
+void vxlan_sock_release(struct vxlan_sock *vs);
+
+int vxlan_xmit_skb(struct vxlan_sock *vs,
+ struct rtable *rt, struct sk_buff *skb,
+ __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
+ __be16 src_port, __be16 dst_port, __be32 vni);
+
+__be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb);
+
+void vxlan_get_rx_port(struct net_device *netdev);
+#endif
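
This new header is the whole contract for sharing a VXLAN UDP socket
between the vxlan driver and other in-kernel users. A hedged consumer
sketch (the callback body and module wiring are illustrative, and the
error-pointer convention for vxlan_sock_add() is assumed):

#include <linux/err.h>
#include <net/vxlan.h>

static struct vxlan_sock *my_vs;

/* Called for every VXLAN frame arriving on the shared socket; 'key'
 * carries the VNI and vs->data the cookie passed at vxlan_sock_add(). */
static void my_vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
			 __be32 key)
{
	kfree_skb(skb);		/* placeholder: a real user delivers it */
}

static int my_open(struct net *net)
{
	my_vs = vxlan_sock_add(net, htons(4789), my_vxlan_rcv,
			       NULL /* data */, false /* no_share */,
			       false /* ipv6 */);
	return IS_ERR(my_vs) ? PTR_ERR(my_vs) : 0;
}

static void my_close(void)
{
	vxlan_sock_release(my_vs);
}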
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index e823786e7c66..e253bf0cc7ef 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1554,7 +1554,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir, u32
int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info);
u32 xfrm_get_acqseq(void);
extern int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
-struct xfrm_state *xfrm_find_acq(struct net *net, struct xfrm_mark *mark,
+struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
u8 mode, u32 reqid, u8 proto,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr, int create,
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 997f9f2f0963..e7c94eeb9475 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -227,6 +227,7 @@ header-y += kvm_para.h
endif
header-y += l2tp.h
+header-y += libc-compat.h
header-y += limits.h
header-y += llc.h
header-y += loop.h
diff --git a/include/uapi/linux/can/gw.h b/include/uapi/linux/can/gw.h
index ae07bec74f4b..4e27c82b564a 100644
--- a/include/uapi/linux/can/gw.h
+++ b/include/uapi/linux/can/gw.h
@@ -45,6 +45,7 @@ enum {
CGW_DST_IF, /* ifindex of destination network interface */
CGW_FILTER, /* specify struct can_filter on source CAN device */
CGW_DELETED, /* number of deleted CAN frames (see max_hops param) */
+ CGW_LIM_HOPS, /* limit the number of hops of this specific rule */
__CGW_MAX
};
@@ -116,13 +117,19 @@ enum {
* Sets a CAN receive filter for the gateway job specified by the
* struct can_filter described in include/linux/can.h
*
- * CGW_MOD_XXX (length 17 bytes):
+ * CGW_MOD_(AND|OR|XOR|SET) (length 17 bytes):
* Specifies a modification that's done to a received CAN frame before it is
* sent out to the destination interface.
*
* <struct can_frame> data used as operator
* <u8> affected CAN frame elements
*
+ * CGW_LIM_HOPS (length 1 byte):
+ * Limit the number of hops of this specific rule. Usually the received CAN
+ * frame can be processed as many as 'max_hops' times (which is given at module
+ * load time of the can-gw module). This value is used to reduce the number of
+ * possible hops for this gateway rule to a value smaller than max_hops.
+ *
* CGW_CS_XOR (length 4 bytes):
* Set a simple XOR checksum starting with an initial value into
* data[result-idx] using data[start-idx] .. data[end-idx]
diff --git a/include/uapi/linux/dn.h b/include/uapi/linux/dn.h
index 9c50445462d9..5fbdd3d49eba 100644
--- a/include/uapi/linux/dn.h
+++ b/include/uapi/linux/dn.h
@@ -2,6 +2,7 @@
#define _LINUX_DN_H
#include <linux/types.h>
+#include <linux/if_ether.h>
/*
@@ -120,7 +121,7 @@ struct linkinfo_dn {
* Ethernet address format (for DECnet)
*/
union etheraddress {
- __u8 dne_addr[6]; /* Full ethernet address */
+ __u8 dne_addr[ETH_ALEN]; /* Full ethernet address */
struct {
__u8 dne_hiord[4]; /* DECnet HIORD prefix */
__u8 dne_nodeaddr[2]; /* DECnet node address */
diff --git a/include/uapi/linux/fib_rules.h b/include/uapi/linux/fib_rules.h
index 51da65b68b85..2b82d7e30974 100644
--- a/include/uapi/linux/fib_rules.h
+++ b/include/uapi/linux/fib_rules.h
@@ -44,8 +44,8 @@ enum {
FRA_FWMARK, /* mark */
FRA_FLOW, /* flow/class id */
FRA_UNUSED6,
- FRA_UNUSED7,
- FRA_UNUSED8,
+ FRA_SUPPRESS_IFGROUP,
+ FRA_SUPPRESS_PREFIXLEN,
FRA_TABLE, /* Extended table id */
FRA_FWMASK, /* mask for netfilter mark */
FRA_OIFNAME,
diff --git a/include/uapi/linux/icmpv6.h b/include/uapi/linux/icmpv6.h
index e0133c73c304..590beda78ea0 100644
--- a/include/uapi/linux/icmpv6.h
+++ b/include/uapi/linux/icmpv6.h
@@ -115,6 +115,8 @@ struct icmp6hdr {
#define ICMPV6_NOT_NEIGHBOUR 2
#define ICMPV6_ADDR_UNREACH 3
#define ICMPV6_PORT_UNREACH 4
+#define ICMPV6_POLICY_FAIL 5
+#define ICMPV6_REJECT_ROUTE 6
/*
* Codes for Time Exceeded
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index 2d70d79ce2fd..39f621a9fe82 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -14,6 +14,7 @@
#define _UAPI_LINUX_IF_BRIDGE_H
#include <linux/types.h>
+#include <linux/if_ether.h>
#define SYSFS_BRIDGE_ATTR "bridge"
#define SYSFS_BRIDGE_FDB "brforward"
@@ -88,7 +89,7 @@ struct __port_info {
};
struct __fdb_entry {
- __u8 mac_addr[6];
+ __u8 mac_addr[ETH_ALEN];
__u8 port_no;
__u8 is_local;
__u32 ageing_timer_value;
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 03f6170ab337..80394e8dc3a3 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -143,6 +143,7 @@ enum {
IFLA_NUM_TX_QUEUES,
IFLA_NUM_RX_QUEUES,
IFLA_CARRIER,
+ IFLA_PHYS_PORT_ID,
__IFLA_MAX
};
@@ -313,6 +314,8 @@ enum {
IFLA_VXLAN_L2MISS,
IFLA_VXLAN_L3MISS,
IFLA_VXLAN_PORT, /* destination port */
+ IFLA_VXLAN_GROUP6,
+ IFLA_VXLAN_LOCAL6,
__IFLA_VXLAN_MAX
};
#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h
index b950c02030c0..dbf06667394b 100644
--- a/include/uapi/linux/if_packet.h
+++ b/include/uapi/linux/if_packet.h
@@ -56,6 +56,7 @@ struct sockaddr_ll {
#define PACKET_FANOUT_LB 1
#define PACKET_FANOUT_CPU 2
#define PACKET_FANOUT_ROLLOVER 3
+#define PACKET_FANOUT_RND 4
#define PACKET_FANOUT_FLAG_ROLLOVER 0x1000
#define PACKET_FANOUT_FLAG_DEFRAG 0x8000
diff --git a/include/uapi/linux/if_pppox.h b/include/uapi/linux/if_pppox.h
index e36a4aecd311..e128769331b5 100644
--- a/include/uapi/linux/if_pppox.h
+++ b/include/uapi/linux/if_pppox.h
@@ -46,7 +46,7 @@ struct pppoe_addr {
* PPTP addressing definition
*/
struct pptp_addr {
- __be16 call_id;
+ __u16 call_id;
struct in_addr sin_addr;
};
diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h
index 82334f88967e..e9502dd1ee2c 100644
--- a/include/uapi/linux/if_tun.h
+++ b/include/uapi/linux/if_tun.h
@@ -56,6 +56,8 @@
#define TUNGETVNETHDRSZ _IOR('T', 215, int)
#define TUNSETVNETHDRSZ _IOW('T', 216, int)
#define TUNSETQUEUE _IOW('T', 217, int)
+#define TUNSETIFINDEX _IOW('T', 218, unsigned int)
+#define TUNGETFILTER _IOR('T', 219, struct sock_fprog)
/* TUNSETIFF ifr flags */
#define IFF_TUN 0x0001
@@ -70,6 +72,10 @@
#define IFF_DETACH_QUEUE 0x0400
/* read-only flag */
#define IFF_PERSIST 0x0800
+#define IFF_NOFILTER 0x1000
+
+/* Socket options */
+#define TUN_TX_TIMESTAMP 1
/* Features for GSO (TUNSETOFFLOAD). */
#define TUN_F_CSUM 0x01 /* You can hand me unchecksummed packets. */
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index 9edb441df827..f9e8e496ae5d 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -24,30 +24,53 @@
/* Standard well-defined IP protocols. */
enum {
IPPROTO_IP = 0, /* Dummy protocol for TCP */
+#define IPPROTO_IP IPPROTO_IP
IPPROTO_ICMP = 1, /* Internet Control Message Protocol */
+#define IPPROTO_ICMP IPPROTO_ICMP
IPPROTO_IGMP = 2, /* Internet Group Management Protocol */
+#define IPPROTO_IGMP IPPROTO_IGMP
IPPROTO_IPIP = 4, /* IPIP tunnels (older KA9Q tunnels use 94) */
+#define IPPROTO_IPIP IPPROTO_IPIP
IPPROTO_TCP = 6, /* Transmission Control Protocol */
+#define IPPROTO_TCP IPPROTO_TCP
IPPROTO_EGP = 8, /* Exterior Gateway Protocol */
+#define IPPROTO_EGP IPPROTO_EGP
IPPROTO_PUP = 12, /* PUP protocol */
+#define IPPROTO_PUP IPPROTO_PUP
IPPROTO_UDP = 17, /* User Datagram Protocol */
+#define IPPROTO_UDP IPPROTO_UDP
IPPROTO_IDP = 22, /* XNS IDP protocol */
+#define IPPROTO_IDP IPPROTO_IDP
+ IPPROTO_TP = 29, /* SO Transport Protocol Class 4 */
+#define IPPROTO_TP IPPROTO_TP
IPPROTO_DCCP = 33, /* Datagram Congestion Control Protocol */
- IPPROTO_RSVP = 46, /* RSVP protocol */
+#define IPPROTO_DCCP IPPROTO_DCCP
+ IPPROTO_IPV6 = 41, /* IPv6-in-IPv4 tunnelling */
+#define IPPROTO_IPV6 IPPROTO_IPV6
+ IPPROTO_RSVP = 46, /* RSVP Protocol */
+#define IPPROTO_RSVP IPPROTO_RSVP
IPPROTO_GRE = 47, /* Cisco GRE tunnels (rfc 1701,1702) */
-
- IPPROTO_IPV6 = 41, /* IPv6-in-IPv4 tunnelling */
-
- IPPROTO_ESP = 50, /* Encapsulation Security Payload protocol */
- IPPROTO_AH = 51, /* Authentication Header protocol */
- IPPROTO_BEETPH = 94, /* IP option pseudo header for BEET */
- IPPROTO_PIM = 103, /* Protocol Independent Multicast */
-
- IPPROTO_COMP = 108, /* Compression Header protocol */
- IPPROTO_SCTP = 132, /* Stream Control Transport Protocol */
+#define IPPROTO_GRE IPPROTO_GRE
+ IPPROTO_ESP = 50, /* Encapsulation Security Payload protocol */
+#define IPPROTO_ESP IPPROTO_ESP
+ IPPROTO_AH = 51, /* Authentication Header protocol */
+#define IPPROTO_AH IPPROTO_AH
+ IPPROTO_MTP = 92, /* Multicast Transport Protocol */
+#define IPPROTO_MTP IPPROTO_MTP
+ IPPROTO_BEETPH = 94, /* IP option pseudo header for BEET */
+#define IPPROTO_BEETPH IPPROTO_BEETPH
+ IPPROTO_ENCAP = 98, /* Encapsulation Header */
+#define IPPROTO_ENCAP IPPROTO_ENCAP
+ IPPROTO_PIM = 103, /* Protocol Independent Multicast */
+#define IPPROTO_PIM IPPROTO_PIM
+ IPPROTO_COMP = 108, /* Compression Header Protocol */
+#define IPPROTO_COMP IPPROTO_COMP
+ IPPROTO_SCTP = 132, /* Stream Control Transport Protocol */
+#define IPPROTO_SCTP IPPROTO_SCTP
IPPROTO_UDPLITE = 136, /* UDP-Lite (RFC 3828) */
-
- IPPROTO_RAW = 255, /* Raw IP packets */
+#define IPPROTO_UDPLITE IPPROTO_UDPLITE
+ IPPROTO_RAW = 255, /* Raw IP packets */
+#define IPPROTO_RAW IPPROTO_RAW
IPPROTO_MAX
};
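
The enum-plus-#define pattern above looks redundant but is deliberate:
enumerators give the kernel proper symbols while the self-referential
defines let code feature-test with the preprocessor, which cannot see
enum members. A small illustration:

#include <linux/in.h>

int have_sctp(void)
{
#ifdef IPPROTO_SCTP	/* visible to cpp only because of the self-#define */
	return 1;
#else
	return 0;
#endif
}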
diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
index 53b1d56a6e7f..440d5c479145 100644
--- a/include/uapi/linux/in6.h
+++ b/include/uapi/linux/in6.h
@@ -22,22 +22,30 @@
#define _UAPI_LINUX_IN6_H
#include <linux/types.h>
+#include <linux/libc-compat.h>
/*
* IPv6 address structure
*/
+#if __UAPI_DEF_IN6_ADDR
struct in6_addr {
union {
__u8 u6_addr8[16];
+#if __UAPI_DEF_IN6_ADDR_ALT
__be16 u6_addr16[8];
__be32 u6_addr32[4];
+#endif
} in6_u;
#define s6_addr in6_u.u6_addr8
+#if __UAPI_DEF_IN6_ADDR_ALT
#define s6_addr16 in6_u.u6_addr16
#define s6_addr32 in6_u.u6_addr32
+#endif
};
+#endif /* __UAPI_DEF_IN6_ADDR */
+#if __UAPI_DEF_SOCKADDR_IN6
struct sockaddr_in6 {
unsigned short int sin6_family; /* AF_INET6 */
__be16 sin6_port; /* Transport layer port # */
@@ -45,7 +53,9 @@ struct sockaddr_in6 {
struct in6_addr sin6_addr; /* IPv6 address */
__u32 sin6_scope_id; /* scope id (new in RFC2553) */
};
+#endif /* __UAPI_DEF_SOCKADDR_IN6 */
+#if __UAPI_DEF_IPV6_MREQ
struct ipv6_mreq {
/* IPv6 multicast address of group */
struct in6_addr ipv6mr_multiaddr;
@@ -53,6 +63,7 @@ struct ipv6_mreq {
/* local IPv6 address of interface */
int ipv6mr_ifindex;
};
+#endif /* __UAPI_DEF_IPV6_MREQ */
#define ipv6mr_acaddr ipv6mr_multiaddr
@@ -114,13 +125,24 @@ struct in6_flowlabel_req {
/*
* IPV6 extension headers
*/
-#define IPPROTO_HOPOPTS 0 /* IPv6 hop-by-hop options */
-#define IPPROTO_ROUTING 43 /* IPv6 routing header */
-#define IPPROTO_FRAGMENT 44 /* IPv6 fragmentation header */
-#define IPPROTO_ICMPV6 58 /* ICMPv6 */
-#define IPPROTO_NONE 59 /* IPv6 no next header */
-#define IPPROTO_DSTOPTS 60 /* IPv6 destination options */
-#define IPPROTO_MH 135 /* IPv6 mobility header */
+#if __UAPI_DEF_IPPROTO_V6
+enum {
+ IPPROTO_HOPOPTS = 0, /* IPv6 hop-by-hop options */
+#define IPPROTO_HOPOPTS IPPROTO_HOPOPTS
+ IPPROTO_ROUTING = 43, /* IPv6 routing header */
+#define IPPROTO_ROUTING IPPROTO_ROUTING
+ IPPROTO_FRAGMENT = 44, /* IPv6 fragmentation header */
+#define IPPROTO_FRAGMENT IPPROTO_FRAGMENT
+ IPPROTO_ICMPV6 = 58, /* ICMPv6 */
+#define IPPROTO_ICMPV6 IPPROTO_ICMPV6
+ IPPROTO_NONE = 59, /* IPv6 no next header */
+#define IPPROTO_NONE IPPROTO_NONE
+ IPPROTO_DSTOPTS = 60, /* IPv6 destination options */
+#define IPPROTO_DSTOPTS IPPROTO_DSTOPTS
+ IPPROTO_MH = 135, /* IPv6 mobility header */
+#define IPPROTO_MH IPPROTO_MH
+};
+#endif /* __UAPI_DEF_IPPROTO_V6 */
/*
* IPv6 TLV options.
diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h
index 2fee45bdec0a..411959405ab6 100644
--- a/include/uapi/linux/ip.h
+++ b/include/uapi/linux/ip.h
@@ -162,6 +162,8 @@ enum
IPV4_DEVCONF_SRC_VMARK,
IPV4_DEVCONF_PROXY_ARP_PVLAN,
IPV4_DEVCONF_ROUTE_LOCALNET,
+ IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL,
+ IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL,
__IPV4_DEVCONF_MAX
};
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 4bda4cf5b0f5..593b0e32d956 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -160,6 +160,9 @@ enum {
DEVCONF_ACCEPT_DAD,
DEVCONF_FORCE_TLLAO,
DEVCONF_NDISC_NOTIFY,
+ DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL,
+ DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL,
+ DEVCONF_SUPPRESS_FRAG_NDISC,
DEVCONF_MAX
};
diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
new file mode 100644
index 000000000000..335e8a7cad39
--- /dev/null
+++ b/include/uapi/linux/libc-compat.h
@@ -0,0 +1,103 @@
+/*
+ * Compatibility interface for userspace libc header coordination:
+ *
+ * Define compatibility macros that are used to control the inclusion or
+ * exclusion of UAPI structures and definitions in coordination with another
+ * userspace C library.
+ *
+ * This header is intended to solve the problem of UAPI definitions that
+ * conflict with userspace definitions. If a UAPI header has such conflicting
+ * definitions then the solution is as follows:
+ *
+ * * Synchronize the UAPI header and the libc headers so either one can be
+ * used and such that the ABI is preserved. If this is not possible then
+ * no simple compatibility interface exists (you need to write translating
+ * wrappers and rename things) and you can't use this interface.
+ *
+ * Then follow this process:
+ *
+ * (a) Include libc-compat.h in the UAPI header.
+ * e.g. #include <linux/libc-compat.h>
+ * This include must be as early as possible.
+ *
+ * (b) In libc-compat.h add enough code to detect that the conflicting
+ * userspace libc header has been included first.
+ *
+ * (c) If the userspace libc header has been included first define a set of
+ * guard macros of the form __UAPI_DEF_FOO and set their values to 1, else
+ * set their values to 0.
+ *
+ * (d) Back in the UAPI header with the conflicting definitions, guard the
+ * definitions with:
+ * #if __UAPI_DEF_FOO
+ * ...
+ * #endif
+ *
+ * This fixes the situation where the linux headers are included *after* the
+ * libc headers. To fix the problem with the inclusion in the other order the
+ * userspace libc headers must be fixed like this:
+ *
+ * * For all definitions that conflict with kernel definitions wrap those
+ * defines in the following:
+ * #if !__UAPI_DEF_FOO
+ * ...
+ * #endif
+ *
+ * This prevents the redefinition of a construct already defined by the kernel.
+ */
+#ifndef _UAPI_LIBC_COMPAT_H
+#define _UAPI_LIBC_COMPAT_H
+
+/* We have included glibc headers... */
+#if defined(__GLIBC__)
+
+/* Coordinate with glibc netinet/in.h header. */
+#if defined(_NETINET_IN_H)
+
+/* GLIBC headers included first so don't define anything
+ * that would already be defined. */
+#define __UAPI_DEF_IN6_ADDR 0
+/* The exception is the in6_addr macros which must be defined
+ * if the glibc code didn't define them. This guard matches
+ * the guard in glibc/inet/netinet/in.h which defines the
+ * additional in6_addr macros, e.g. s6_addr16 and s6_addr32. */
+#if defined(__USE_MISC) || defined (__USE_GNU)
+#define __UAPI_DEF_IN6_ADDR_ALT 0
+#else
+#define __UAPI_DEF_IN6_ADDR_ALT 1
+#endif
+#define __UAPI_DEF_SOCKADDR_IN6 0
+#define __UAPI_DEF_IPV6_MREQ 0
+#define __UAPI_DEF_IPPROTO_V6 0
+
+#else
+
+/* Linux headers included first, and we must define everything
+ * we need. The expectation is that glibc will check the
+ * __UAPI_DEF_* defines and adjust appropriately. */
+#define __UAPI_DEF_IN6_ADDR 1
+/* We unconditionally define the in6_addr macros and glibc must
+ * coordinate. */
+#define __UAPI_DEF_IN6_ADDR_ALT 1
+#define __UAPI_DEF_SOCKADDR_IN6 1
+#define __UAPI_DEF_IPV6_MREQ 1
+#define __UAPI_DEF_IPPROTO_V6 1
+
+#endif /* _NETINET_IN_H */
+
+
+/* If we did not see any headers from any supported C libraries,
+ * or we are being included in the kernel, then define everything
+ * that we need. */
+#else /* !defined(__GLIBC__) */
+
+/* Definitions for in6.h */
+#define __UAPI_DEF_IN6_ADDR 1
+#define __UAPI_DEF_IN6_ADDR_ALT 1
+#define __UAPI_DEF_SOCKADDR_IN6 1
+#define __UAPI_DEF_IPV6_MREQ 1
+#define __UAPI_DEF_IPPROTO_V6 1
+
+#endif /* __GLIBC__ */
+
+#endif /* _UAPI_LIBC_COMPAT_H */
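
The practical payoff is that one translation unit can now include the
glibc and kernel definitions of the same structures without redefinition
errors. A compile-only demonstration, assuming a glibc toolchain with
these headers installed:

/* netinet/in.h defines struct in6_addr and sets _NETINET_IN_H, so
 * linux/in6.h suppresses its duplicates via the __UAPI_DEF_* guards. */
#include <netinet/in.h>
#include <linux/in6.h>

struct in6_addr addr;			/* glibc's definition */
struct in6_flowlabel_req flr;		/* kernel-only type still usable */

int main(void)
{
	return sizeof(addr) == 16 ? 0 : 1;
}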
diff --git a/include/uapi/linux/netfilter/Kbuild b/include/uapi/linux/netfilter/Kbuild
index 41115776d76f..174915420d3f 100644
--- a/include/uapi/linux/netfilter/Kbuild
+++ b/include/uapi/linux/netfilter/Kbuild
@@ -22,6 +22,7 @@ header-y += xt_CONNMARK.h
header-y += xt_CONNSECMARK.h
header-y += xt_CT.h
header-y += xt_DSCP.h
+header-y += xt_HMARK.h
header-y += xt_IDLETIMER.h
header-y += xt_LED.h
header-y += xt_LOG.h
@@ -68,6 +69,7 @@ header-y += xt_quota.h
header-y += xt_rateest.h
header-y += xt_realm.h
header-y += xt_recent.h
+header-y += xt_rpfilter.h
header-y += xt_sctp.h
header-y += xt_set.h
header-y += xt_socket.h
diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h
index d69483fb3825..8dd803818ebe 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_common.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_common.h
@@ -99,7 +99,8 @@ enum ip_conntrack_events {
IPCT_PROTOINFO, /* protocol information has changed */
IPCT_HELPER, /* new helper has been set */
IPCT_MARK, /* new mark has been set */
- IPCT_NATSEQADJ, /* NAT is doing sequence adjustment */
+ IPCT_SEQADJ, /* sequence adjustment has changed */
+ IPCT_NATSEQADJ = IPCT_SEQADJ,
IPCT_SECMARK, /* new security mark has been set */
IPCT_LABEL, /* new connlabel has been set */
};
diff --git a/include/uapi/linux/netfilter/nfnetlink_conntrack.h b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
index 08fabc6c93f3..acad6c52a652 100644
--- a/include/uapi/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
@@ -42,8 +42,10 @@ enum ctattr_type {
CTA_ID,
CTA_NAT_DST,
CTA_TUPLE_MASTER,
- CTA_NAT_SEQ_ADJ_ORIG,
- CTA_NAT_SEQ_ADJ_REPLY,
+ CTA_SEQ_ADJ_ORIG,
+ CTA_NAT_SEQ_ADJ_ORIG = CTA_SEQ_ADJ_ORIG,
+ CTA_SEQ_ADJ_REPLY,
+ CTA_NAT_SEQ_ADJ_REPLY = CTA_SEQ_ADJ_REPLY,
CTA_SECMARK, /* obsolete */
CTA_ZONE,
CTA_SECCTX,
@@ -165,6 +167,15 @@ enum ctattr_protonat {
};
#define CTA_PROTONAT_MAX (__CTA_PROTONAT_MAX - 1)
+enum ctattr_seqadj {
+ CTA_SEQADJ_UNSPEC,
+ CTA_SEQADJ_CORRECTION_POS,
+ CTA_SEQADJ_OFFSET_BEFORE,
+ CTA_SEQADJ_OFFSET_AFTER,
+ __CTA_SEQADJ_MAX
+};
+#define CTA_SEQADJ_MAX (__CTA_SEQADJ_MAX - 1)
+
enum ctattr_natseq {
CTA_NAT_SEQ_UNSPEC,
CTA_NAT_SEQ_CORRECTION_POS,
diff --git a/include/uapi/linux/netfilter/nfnetlink_queue.h b/include/uapi/linux/netfilter/nfnetlink_queue.h
index 3a9b92147339..0132bad79de7 100644
--- a/include/uapi/linux/netfilter/nfnetlink_queue.h
+++ b/include/uapi/linux/netfilter/nfnetlink_queue.h
@@ -46,6 +46,7 @@ enum nfqnl_attr_type {
NFQA_CT_INFO, /* enum ip_conntrack_info */
NFQA_CAP_LEN, /* __u32 length of captured packet */
NFQA_SKB_INFO, /* __u32 skb meta information */
+ NFQA_EXP, /* nf_conntrack_netlink.h */
__NFQA_MAX
};
diff --git a/include/linux/netfilter/xt_HMARK.h b/include/uapi/linux/netfilter/xt_HMARK.h
index 826fc5807577..826fc5807577 100644
--- a/include/linux/netfilter/xt_HMARK.h
+++ b/include/uapi/linux/netfilter/xt_HMARK.h
diff --git a/include/uapi/linux/netfilter/xt_SYNPROXY.h b/include/uapi/linux/netfilter/xt_SYNPROXY.h
new file mode 100644
index 000000000000..2d59fbaa93c6
--- /dev/null
+++ b/include/uapi/linux/netfilter/xt_SYNPROXY.h
@@ -0,0 +1,16 @@
+#ifndef _XT_SYNPROXY_H
+#define _XT_SYNPROXY_H
+
+#define XT_SYNPROXY_OPT_MSS 0x01
+#define XT_SYNPROXY_OPT_WSCALE 0x02
+#define XT_SYNPROXY_OPT_SACK_PERM 0x04
+#define XT_SYNPROXY_OPT_TIMESTAMP 0x08
+#define XT_SYNPROXY_OPT_ECN 0x10
+
+struct xt_synproxy_info {
+ __u8 options;
+ __u8 wscale;
+ __u16 mss;
+};
+
+#endif /* _XT_SYNPROXY_H */
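/* Illustrative sketch (not part of this patch): a hypothetical user of this
 * header would fill in the structure as below. wscale and mss are only
 * consulted when the corresponding option bit is set; the concrete values
 * here are examples, not mandated by the ABI.
 */
struct xt_synproxy_info info = {
        .options = XT_SYNPROXY_OPT_MSS | XT_SYNPROXY_OPT_WSCALE |
                   XT_SYNPROXY_OPT_SACK_PERM,
        .wscale  = 7,    /* used because XT_SYNPROXY_OPT_WSCALE is set */
        .mss     = 1460, /* used because XT_SYNPROXY_OPT_MSS is set */
};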
diff --git a/include/linux/netfilter/xt_rpfilter.h b/include/uapi/linux/netfilter/xt_rpfilter.h
index 8358d4f71952..8358d4f71952 100644
--- a/include/linux/netfilter/xt_rpfilter.h
+++ b/include/uapi/linux/netfilter/xt_rpfilter.h
diff --git a/include/uapi/linux/netfilter_bridge/ebt_802_3.h b/include/uapi/linux/netfilter_bridge/ebt_802_3.h
index 5bf84912a082..f37522aade24 100644
--- a/include/uapi/linux/netfilter_bridge/ebt_802_3.h
+++ b/include/uapi/linux/netfilter_bridge/ebt_802_3.h
@@ -2,6 +2,7 @@
#define _UAPI__LINUX_BRIDGE_EBT_802_3_H
#include <linux/types.h>
+#include <linux/if_ether.h>
#define EBT_802_3_SAP 0x01
#define EBT_802_3_TYPE 0x02
@@ -42,8 +43,8 @@ struct hdr_ni {
};
struct ebt_802_3_hdr {
- __u8 daddr[6];
- __u8 saddr[6];
+ __u8 daddr[ETH_ALEN];
+ __u8 saddr[ETH_ALEN];
__be16 len;
union {
struct hdr_ui ui;
diff --git a/include/uapi/linux/netfilter_ipv4/ipt_CLUSTERIP.h b/include/uapi/linux/netfilter_ipv4/ipt_CLUSTERIP.h
index c6a204c97047..eac0f6548f47 100644
--- a/include/uapi/linux/netfilter_ipv4/ipt_CLUSTERIP.h
+++ b/include/uapi/linux/netfilter_ipv4/ipt_CLUSTERIP.h
@@ -2,6 +2,7 @@
#define _IPT_CLUSTERIP_H_target
#include <linux/types.h>
+#include <linux/if_ether.h>
enum clusterip_hashmode {
CLUSTERIP_HASHMODE_SIP = 0,
@@ -22,7 +23,7 @@ struct ipt_clusterip_tgt_info {
__u32 flags;
/* only relevant for new ones */
- __u8 clustermac[6];
+ __u8 clustermac[ETH_ALEN];
__u16 num_total_nodes;
__u16 num_local_nodes;
__u16 local_nodes[CLUSTERIP_MAX_NODES];
diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h
index 8137dd8d2adf..29bed72a4ac4 100644
--- a/include/uapi/linux/nfc.h
+++ b/include/uapi/linux/nfc.h
@@ -71,6 +71,20 @@
* @NFC_CMD_DISABLE_SE: Disable the physical link to a specific secure element.
* @NFC_CMD_FW_DOWNLOAD: Request to Load/flash firmware, or event to inform
* that some firmware was loaded
+ * @NFC_EVENT_SE_ADDED: Event emitted when a new secure element is discovered.
+ * This will typically be sent whenever a new NFC controller with either
+ * an embedded SE or a UICC connected to it through SWP is discovered.
+ * @NFC_EVENT_SE_REMOVED: Event emitted when a secure element is removed from
+ * the system, as a consequence of e.g. an NFC controller being unplugged.
+ * @NFC_EVENT_SE_CONNECTIVITY: This event is emitted whenever a secure element
+ * is requesting connectivity access. For example a UICC SE may need to
+ * talk with a sleeping modem and will notify this need by sending this
+ * event. It is then up to userspace to decide if it will wake the modem
+ * up or not.
+ * @NFC_EVENT_SE_TRANSACTION: This event is sent when an application running on
+ * a specific SE notifies us about the end of a transaction. The parameter
+ * for this event is the application ID (AID).
+ * @NFC_CMD_GET_SE: Dump all discovered secure elements from an NFC controller.
*/
enum nfc_commands {
NFC_CMD_UNSPEC,
@@ -97,6 +111,9 @@ enum nfc_commands {
NFC_CMD_FW_DOWNLOAD,
NFC_EVENT_SE_ADDED,
NFC_EVENT_SE_REMOVED,
+ NFC_EVENT_SE_CONNECTIVITY,
+ NFC_EVENT_SE_TRANSACTION,
+ NFC_CMD_GET_SE,
/* private: internal use only */
__NFC_CMD_AFTER_LAST
};
@@ -129,6 +146,7 @@ enum nfc_commands {
* @NFC_ATTR_FIRMWARE_NAME: Free format firmware version
* @NFC_ATTR_SE_INDEX: Secure element index
* @NFC_ATTR_SE_TYPE: Secure element type (UICC or EMBEDDED)
+ * @NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS: Firmware download operation status
*/
enum nfc_attrs {
NFC_ATTR_UNSPEC,
@@ -154,6 +172,8 @@ enum nfc_attrs {
NFC_ATTR_FIRMWARE_NAME,
NFC_ATTR_SE_INDEX,
NFC_ATTR_SE_TYPE,
+ NFC_ATTR_SE_AID,
+ NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS,
/* private: internal use only */
__NFC_ATTR_AFTER_LAST
};
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 861e5eba3953..fde2c021b26d 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -126,6 +126,31 @@
*/
/**
+ * DOC: packet coalesce support
+ *
+ * In most cases, a host that receives IPv4 and IPv6 multicast/broadcast
+ * packets does nothing with them, so receiving these unwanted packets
+ * causes unnecessary processing and power consumption.
+ *
+ * The packet coalesce feature reduces the number of receive interrupts
+ * delivered to the host by buffering such packets in firmware/hardware
+ * for a predefined time. A receive interrupt is generated when one of
+ * the following events occurs:
+ * a) the hardware timer expires; its expiration time is set to the
+ *    maximum coalescing delay of the matching coalesce rule,
+ * b) the coalescing buffer in hardware reaches its limit, or
+ * c) a packet does not match any of the configured coalesce rules.
+ *
+ * To create a coalesce rule, userspace configures the following
+ * parameters:
+ * a) the maximum coalescing delay,
+ * b) the list of packet patterns to be matched, and
+ * c) the coalescence condition: pattern 'match' or 'no match'.
+ * Multiple such rules can be created.
+ */
+
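/* Illustrative userspace sketch (assumed, not part of this patch): with
 * libnl-3, one rule that coalesces for up to 100 msecs when a pattern
 * matches could be nested as follows; error handling is omitted, and the
 * pattern encoding is assumed to mirror the WoWLAN NL80211_PKTPAT_*
 * attributes defined later in this file.
 */
static struct nl_msg *build_coalesce_rule(void)
{
        struct nl_msg *msg = nlmsg_alloc();
        struct nlattr *rule;

        rule = nla_nest_start(msg, NL80211_ATTR_COALESCE_RULE);
        nla_put_u32(msg, NL80211_ATTR_COALESCE_RULE_DELAY, 100);
        nla_put_u32(msg, NL80211_ATTR_COALESCE_RULE_CONDITION,
                    NL80211_COALESCE_CONDITION_MATCH);
        /* NL80211_ATTR_COALESCE_RULE_PKT_PATTERN would carry the nested
         * NL80211_PKTPAT_* pattern attributes here. */
        nla_nest_end(msg, rule);
        return msg;
}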
+/**
* enum nl80211_commands - supported nl80211 commands
*
* @NL80211_CMD_UNSPEC: unspecified command to catch errors
@@ -648,6 +673,19 @@
* @NL80211_CMD_CRIT_PROTOCOL_STOP: Indicates the connection reliability can
* return back to normal.
*
+ * @NL80211_CMD_GET_COALESCE: Get currently supported coalesce rules.
+ * @NL80211_CMD_SET_COALESCE: Configure coalesce rules or clear existing rules.
+ *
+ * @NL80211_CMD_CHANNEL_SWITCH: Perform a channel switch by announcing the
+ * new channel information (Channel Switch Announcement - CSA)
+ * in the beacon for some time (as defined in the
+ * %NL80211_ATTR_CH_SWITCH_COUNT parameter) and then change to the
+ * new channel. Userspace provides the new channel information (using
+ * %NL80211_ATTR_WIPHY_FREQ and the attributes determining channel
+ * width). %NL80211_ATTR_CH_SWITCH_BLOCK_TX may be supplied to inform
+ * other stations that transmission must be blocked until the channel
+ * switch is complete.
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -810,6 +848,11 @@ enum nl80211_commands {
NL80211_CMD_CRIT_PROTOCOL_START,
NL80211_CMD_CRIT_PROTOCOL_STOP,
+ NL80211_CMD_GET_COALESCE,
+ NL80211_CMD_SET_COALESCE,
+
+ NL80211_CMD_CHANNEL_SWITCH,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -1436,6 +1479,23 @@ enum nl80211_commands {
* allowed to be used with the first @NL80211_CMD_SET_STATION command to
* update a TDLS peer STA entry.
*
+ * @NL80211_ATTR_COALESCE_RULE: Coalesce rule information.
+ *
+ * @NL80211_ATTR_CH_SWITCH_COUNT: u32 attribute specifying the number of TBTTs
+ * until the channel switch event.
+ * @NL80211_ATTR_CH_SWITCH_BLOCK_TX: flag attribute specifying that transmission
+ * must be blocked on the current channel (before the channel switch
+ * operation).
+ * @NL80211_ATTR_CSA_IES: Nested set of attributes containing the IE information
+ * for the time while performing a channel switch.
+ * @NL80211_ATTR_CSA_C_OFF_BEACON: Offset of the channel switch counter
+ * field in the beacon's tail (%NL80211_ATTR_BEACON_TAIL).
+ * @NL80211_ATTR_CSA_C_OFF_PRESP: Offset of the channel switch counter
+ * field in the probe response (%NL80211_ATTR_PROBE_RESP).
+ *
+ * @NL80211_ATTR_RXMGMT_FLAGS: flags for nl80211_send_mgmt(), u32.
+ * As specified in the &enum nl80211_rxmgmt_flags.
+ *
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
*/
@@ -1736,6 +1796,16 @@ enum nl80211_attrs {
NL80211_ATTR_PEER_AID,
+ NL80211_ATTR_COALESCE_RULE,
+
+ NL80211_ATTR_CH_SWITCH_COUNT,
+ NL80211_ATTR_CH_SWITCH_BLOCK_TX,
+ NL80211_ATTR_CSA_IES,
+ NL80211_ATTR_CSA_C_OFF_BEACON,
+ NL80211_ATTR_CSA_C_OFF_PRESP,
+
+ NL80211_ATTR_RXMGMT_FLAGS,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -2773,6 +2843,21 @@ enum nl80211_chan_width {
};
/**
+ * enum nl80211_bss_scan_width - control channel width for a BSS
+ *
+ * These values are used with the %NL80211_BSS_CHAN_WIDTH attribute.
+ *
+ * @NL80211_BSS_CHAN_WIDTH_20: control channel is 20 MHz wide or compatible
+ * @NL80211_BSS_CHAN_WIDTH_10: control channel is 10 MHz wide
+ * @NL80211_BSS_CHAN_WIDTH_5: control channel is 5 MHz wide
+ */
+enum nl80211_bss_scan_width {
+ NL80211_BSS_CHAN_WIDTH_20,
+ NL80211_BSS_CHAN_WIDTH_10,
+ NL80211_BSS_CHAN_WIDTH_5,
+};
+
+/**
* enum nl80211_bss - netlink attributes for a BSS
*
* @__NL80211_BSS_INVALID: invalid
@@ -2796,6 +2881,8 @@ enum nl80211_chan_width {
* @NL80211_BSS_BEACON_IES: binary attribute containing the raw information
* elements from a Beacon frame (bin); not present if no Beacon frame has
* yet been received
+ * @NL80211_BSS_CHAN_WIDTH: channel width of the control channel
+ * (u32, enum nl80211_bss_scan_width)
* @__NL80211_BSS_AFTER_LAST: internal
* @NL80211_BSS_MAX: highest BSS attribute
*/
@@ -2812,6 +2899,7 @@ enum nl80211_bss {
NL80211_BSS_STATUS,
NL80211_BSS_SEEN_MS_AGO,
NL80211_BSS_BEACON_IES,
+ NL80211_BSS_CHAN_WIDTH,
/* keep last */
__NL80211_BSS_AFTER_LAST,
@@ -3060,11 +3148,11 @@ enum nl80211_tx_power_setting {
};
/**
- * enum nl80211_wowlan_packet_pattern_attr - WoWLAN packet pattern attribute
- * @__NL80211_WOWLAN_PKTPAT_INVALID: invalid number for nested attribute
- * @NL80211_WOWLAN_PKTPAT_PATTERN: the pattern, values where the mask has
+ * enum nl80211_packet_pattern_attr - packet pattern attribute
+ * @__NL80211_PKTPAT_INVALID: invalid number for nested attribute
+ * @NL80211_PKTPAT_PATTERN: the pattern, values where the mask has
* a zero bit are ignored
- * @NL80211_WOWLAN_PKTPAT_MASK: pattern mask, must be long enough to have
+ * @NL80211_PKTPAT_MASK: pattern mask, must be long enough to have
* a bit for each byte in the pattern. The lowest-order bit corresponds
* to the first byte of the pattern, but the bytes of the pattern are
* in a little-endian-like format, i.e. the 9th byte of the pattern
@@ -3075,39 +3163,50 @@ enum nl80211_tx_power_setting {
* Note that the pattern matching is done as though frames were not
* 802.11 frames but 802.3 frames, i.e. the frame is fully unpacked
* first (including SNAP header unpacking) and then matched.
- * @NL80211_WOWLAN_PKTPAT_OFFSET: packet offset, pattern is matched after
+ * @NL80211_PKTPAT_OFFSET: packet offset, pattern is matched after
 * this fixed number of bytes of the received packet
- * @NUM_NL80211_WOWLAN_PKTPAT: number of attributes
- * @MAX_NL80211_WOWLAN_PKTPAT: max attribute number
+ * @NUM_NL80211_PKTPAT: number of attributes
+ * @MAX_NL80211_PKTPAT: max attribute number
*/
-enum nl80211_wowlan_packet_pattern_attr {
- __NL80211_WOWLAN_PKTPAT_INVALID,
- NL80211_WOWLAN_PKTPAT_MASK,
- NL80211_WOWLAN_PKTPAT_PATTERN,
- NL80211_WOWLAN_PKTPAT_OFFSET,
+enum nl80211_packet_pattern_attr {
+ __NL80211_PKTPAT_INVALID,
+ NL80211_PKTPAT_MASK,
+ NL80211_PKTPAT_PATTERN,
+ NL80211_PKTPAT_OFFSET,
- NUM_NL80211_WOWLAN_PKTPAT,
- MAX_NL80211_WOWLAN_PKTPAT = NUM_NL80211_WOWLAN_PKTPAT - 1,
+ NUM_NL80211_PKTPAT,
+ MAX_NL80211_PKTPAT = NUM_NL80211_PKTPAT - 1,
};
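/* The "little-endian-like" layout described above means pattern byte i is
 * governed by bit (i % 8) of mask byte (i / 8); a small helper (name ours,
 * for illustration only) makes this concrete. The 9th pattern byte (i == 8)
 * is thus controlled by the lowest-order bit of the second mask byte.
 */
static void pktpat_mask_set(unsigned char *mask, unsigned int i)
{
        mask[i / 8] |= 1U << (i % 8);   /* require exact match on byte i */
}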
/**
- * struct nl80211_wowlan_pattern_support - pattern support information
+ * struct nl80211_pattern_support - packet pattern support information
* @max_patterns: maximum number of patterns supported
* @min_pattern_len: minimum length of each pattern
* @max_pattern_len: maximum length of each pattern
* @max_pkt_offset: maximum Rx packet offset
*
* This struct is carried in %NL80211_WOWLAN_TRIG_PKT_PATTERN when
- * that is part of %NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED in the
- * capability information given by the kernel to userspace.
+ * that is part of %NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED or in
+ * %NL80211_ATTR_COALESCE_RULE_PKT_PATTERN when that is part of
+ * %NL80211_ATTR_COALESCE_RULE in the capability information given
+ * by the kernel to userspace.
*/
-struct nl80211_wowlan_pattern_support {
+struct nl80211_pattern_support {
__u32 max_patterns;
__u32 min_pattern_len;
__u32 max_pattern_len;
__u32 max_pkt_offset;
} __attribute__((packed));
+/* only for backward compatibility */
+#define __NL80211_WOWLAN_PKTPAT_INVALID __NL80211_PKTPAT_INVALID
+#define NL80211_WOWLAN_PKTPAT_MASK NL80211_PKTPAT_MASK
+#define NL80211_WOWLAN_PKTPAT_PATTERN NL80211_PKTPAT_PATTERN
+#define NL80211_WOWLAN_PKTPAT_OFFSET NL80211_PKTPAT_OFFSET
+#define NUM_NL80211_WOWLAN_PKTPAT NUM_NL80211_PKTPAT
+#define MAX_NL80211_WOWLAN_PKTPAT MAX_NL80211_PKTPAT
+#define nl80211_wowlan_pattern_support nl80211_pattern_support
+
/**
* enum nl80211_wowlan_triggers - WoWLAN trigger definitions
* @__NL80211_WOWLAN_TRIG_INVALID: invalid number for nested attributes
@@ -3127,7 +3226,7 @@ struct nl80211_wowlan_pattern_support {
* pattern matching is done after the packet is converted to the MSDU.
*
* In %NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED, it is a binary attribute
- * carrying a &struct nl80211_wowlan_pattern_support.
+ * carrying a &struct nl80211_pattern_support.
*
 * When reporting wakeup, it is a u32 attribute containing the 0-based
* index of the pattern that caused the wakeup, in the patterns passed
@@ -3284,7 +3383,7 @@ struct nl80211_wowlan_tcp_data_token_feature {
* @NL80211_WOWLAN_TCP_WAKE_PAYLOAD: wake packet payload, for advertising a
* u32 attribute holding the maximum length
* @NL80211_WOWLAN_TCP_WAKE_MASK: Wake packet payload mask, not used for
- * feature advertising. The mask works like @NL80211_WOWLAN_PKTPAT_MASK
+ * feature advertising. The mask works like @NL80211_PKTPAT_MASK
* but on the TCP payload only.
* @NUM_NL80211_WOWLAN_TCP: number of TCP attributes
* @MAX_NL80211_WOWLAN_TCP: highest attribute number
@@ -3309,6 +3408,55 @@ enum nl80211_wowlan_tcp_attrs {
};
/**
+ * struct nl80211_coalesce_rule_support - coalesce rule support information
+ * @max_rules: maximum number of rules supported
+ * @pat: packet pattern support information
+ * @max_delay: maximum supported coalescing delay in msecs
+ *
+ * This struct is carried in %NL80211_ATTR_COALESCE_RULE in the
+ * capability information given by the kernel to userspace.
+ */
+struct nl80211_coalesce_rule_support {
+ __u32 max_rules;
+ struct nl80211_pattern_support pat;
+ __u32 max_delay;
+} __attribute__((packed));
+
+/**
+ * enum nl80211_attr_coalesce_rule - coalesce rule attribute
+ * @__NL80211_COALESCE_RULE_INVALID: invalid number for nested attribute
+ * @NL80211_ATTR_COALESCE_RULE_DELAY: delay in msecs used for packet coalescing
+ * @NL80211_ATTR_COALESCE_RULE_CONDITION: condition for packet coalescence,
+ * see &enum nl80211_coalesce_condition.
+ * @NL80211_ATTR_COALESCE_RULE_PKT_PATTERN: packet offset, pattern is matched
+ * after this fixed number of bytes of the received packet
+ * @NUM_NL80211_ATTR_COALESCE_RULE: number of attributes
+ * @NL80211_ATTR_COALESCE_RULE_MAX: max attribute number
+ */
+enum nl80211_attr_coalesce_rule {
+ __NL80211_COALESCE_RULE_INVALID,
+ NL80211_ATTR_COALESCE_RULE_DELAY,
+ NL80211_ATTR_COALESCE_RULE_CONDITION,
+ NL80211_ATTR_COALESCE_RULE_PKT_PATTERN,
+
+ /* keep last */
+ NUM_NL80211_ATTR_COALESCE_RULE,
+ NL80211_ATTR_COALESCE_RULE_MAX = NUM_NL80211_ATTR_COALESCE_RULE - 1
+};
+
+/**
+ * enum nl80211_coalesce_condition - coalesce rule conditions
+ * @NL80211_COALESCE_CONDITION_MATCH: coalesce Rx packets when patterns
+ * in a rule are matched.
+ * @NL80211_COALESCE_CONDITION_NO_MATCH: coalesce Rx packets when patterns
+ * in a rule are not matched.
+ */
+enum nl80211_coalesce_condition {
+ NL80211_COALESCE_CONDITION_MATCH,
+ NL80211_COALESCE_CONDITION_NO_MATCH
+};
+
+/**
* enum nl80211_iface_limit_attrs - limit attributes
* @NL80211_IFACE_LIMIT_UNSPEC: (reserved)
* @NL80211_IFACE_LIMIT_MAX: maximum number of interfaces that
@@ -3758,4 +3906,15 @@ enum nl80211_crit_proto_id {
/* maximum duration for critical protocol measures */
#define NL80211_CRIT_PROTO_MAX_DURATION 5000 /* msec */
+/**
+ * enum nl80211_rxmgmt_flags - flags for received management frame.
+ *
+ * Used by cfg80211_rx_mgmt()
+ *
+ * @NL80211_RXMGMT_FLAG_ANSWERED: frame was answered by device/driver.
+ */
+enum nl80211_rxmgmt_flags {
+ NL80211_RXMGMT_FLAG_ANSWERED = 1 << 0,
+};
+
#endif /* __LINUX_NL80211_H */
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index c55efaaa9bb4..a74d375b439b 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2013 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -165,6 +165,7 @@ enum ovs_vport_type {
OVS_VPORT_TYPE_NETDEV, /* network device */
OVS_VPORT_TYPE_INTERNAL, /* network device implemented by datapath */
OVS_VPORT_TYPE_GRE, /* GRE tunnel. */
+ OVS_VPORT_TYPE_VXLAN, /* VXLAN tunnel. */
__OVS_VPORT_TYPE_MAX
};
@@ -211,6 +212,16 @@ enum ovs_vport_attr {
#define OVS_VPORT_ATTR_MAX (__OVS_VPORT_ATTR_MAX - 1)
+/* OVS_VPORT_ATTR_OPTIONS attributes for tunnels.
+ */
+enum {
+ OVS_TUNNEL_ATTR_UNSPEC,
+ OVS_TUNNEL_ATTR_DST_PORT, /* 16-bit UDP port, used by L4 tunnels. */
+ __OVS_TUNNEL_ATTR_MAX
+};
+
+#define OVS_TUNNEL_ATTR_MAX (__OVS_TUNNEL_ATTR_MAX - 1)
+
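/* Illustrative libnl sketch (assumed usage, not part of this patch):
 * creating a VXLAN vport sets the vport type and nests the destination
 * UDP port, e.g. the IANA-assigned 4789, in the options of an
 * OVS_VPORT_CMD_NEW request already under construction in msg.
 */
struct nlattr *opts;

nla_put_u32(msg, OVS_VPORT_ATTR_TYPE, OVS_VPORT_TYPE_VXLAN);
opts = nla_nest_start(msg, OVS_VPORT_ATTR_OPTIONS);
nla_put_u16(msg, OVS_TUNNEL_ATTR_DST_PORT, 4789);      /* IANA VXLAN port */
nla_nest_end(msg, opts);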
/* Flows. */
#define OVS_FLOW_FAMILY "ovs_flow"
@@ -248,6 +259,7 @@ enum ovs_key_attr {
OVS_KEY_ATTR_ND, /* struct ovs_key_nd */
OVS_KEY_ATTR_SKB_MARK, /* u32 skb mark */
OVS_KEY_ATTR_TUNNEL, /* Nested set of ovs_tunnel attributes */
+ OVS_KEY_ATTR_SCTP, /* struct ovs_key_sctp */
#ifdef __KERNEL__
OVS_KEY_ATTR_IPV4_TUNNEL, /* struct ovs_key_ipv4_tunnel */
@@ -322,6 +334,11 @@ struct ovs_key_udp {
__be16 udp_dst;
};
+struct ovs_key_sctp {
+ __be16 sctp_src;
+ __be16 sctp_dst;
+};
+
struct ovs_key_icmp {
__u8 icmp_type;
__u8 icmp_code;
@@ -368,6 +385,12 @@ struct ovs_key_nd {
* @OVS_FLOW_ATTR_CLEAR: If present in a %OVS_FLOW_CMD_SET request, clears the
* last-used time, accumulated TCP flags, and statistics for this flow.
* Otherwise ignored in requests. Never present in notifications.
+ * @OVS_FLOW_ATTR_MASK: Nested %OVS_KEY_ATTR_* attributes specifying the
+ * mask bits for wildcarded flow match. Mask bit value '1' specifies exact
+ * match with corresponding flow key bit, while mask bit value '0' specifies
+ * a wildcarded match. Omitting a key attribute from the mask is treated
+ * as wildcarding all of its fields. Optional for all requests. If the
+ * mask itself is not present, all flow key bits are exact-match bits.
*
* These attributes follow the &struct ovs_header within the Generic Netlink
* payload for %OVS_FLOW_* commands.
@@ -380,6 +403,7 @@ enum ovs_flow_attr {
OVS_FLOW_ATTR_TCP_FLAGS, /* 8-bit OR'd TCP flags. */
OVS_FLOW_ATTR_USED, /* u64 msecs last used in monotonic time. */
OVS_FLOW_ATTR_CLEAR, /* Flag to clear stats, tcp_flags, used. */
+ OVS_FLOW_ATTR_MASK, /* Sequence of OVS_KEY_ATTR_* attributes. */
__OVS_FLOW_ATTR_MAX
};
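/* A hedged example of the mask semantics documented above: to match any
 * IPv4 source in 10.0.1.0/24 exactly while wildcarding the host bits,
 * the key and mask carry parallel OVS_KEY_ATTR_IPV4 attributes roughly
 * like this (addresses are illustrative):
 */
struct ovs_key_ipv4 key  = { .ipv4_src = htonl(0x0a000105) };  /* 10.0.1.5 */
struct ovs_key_ipv4 mask = { .ipv4_src = htonl(0xffffff00) };  /* exact /24 */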
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 09d62b9228ff..9b829134d422 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -744,4 +744,45 @@ struct tc_fq_codel_xstats {
};
};
+/* FQ */
+
+enum {
+ TCA_FQ_UNSPEC,
+
+ TCA_FQ_PLIMIT, /* limit of total number of packets in queue */
+
+ TCA_FQ_FLOW_PLIMIT, /* limit of packets per flow */
+
+ TCA_FQ_QUANTUM, /* RR quantum */
+
+ TCA_FQ_INITIAL_QUANTUM, /* RR quantum for new flow */
+
+ TCA_FQ_RATE_ENABLE, /* enable/disable rate limiting */
+
+ TCA_FQ_FLOW_DEFAULT_RATE,/* for sockets with unspecified sk_rate,
+ * use the following rate
+ */
+
+ TCA_FQ_FLOW_MAX_RATE, /* per flow max rate */
+
+ TCA_FQ_BUCKETS_LOG, /* log2(number of buckets) */
+ __TCA_FQ_MAX
+};
+
+#define TCA_FQ_MAX (__TCA_FQ_MAX - 1)
+
+struct tc_fq_qd_stats {
+ __u64 gc_flows;
+ __u64 highprio_packets;
+ __u64 tcp_retrans;
+ __u64 throttled;
+ __u64 flows_plimit;
+ __u64 pkts_too_long;
+ __u64 allocation_errors;
+ __s64 time_next_delayed_flow;
+ __u32 flows;
+ __u32 inactive_flows;
+ __u32 throttled_flows;
+ __u32 pad;
+};
#endif
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index 66b466e4ca08..ca451e99b28b 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -28,7 +28,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Or submit a bug report through the following website:
* http://www.sf.net/projects/lksctp
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index a1356d3b54df..1bdb4a39d1e1 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -51,6 +51,10 @@ enum
IPSTATS_MIB_INBCASTOCTETS, /* InBcastOctets */
IPSTATS_MIB_OUTBCASTOCTETS, /* OutBcastOctets */
IPSTATS_MIB_CSUMERRORS, /* InCsumErrors */
+ IPSTATS_MIB_NOECTPKTS, /* InNoECTPkts */
+ IPSTATS_MIB_ECT1PKTS, /* InECT1Pkts */
+ IPSTATS_MIB_ECT0PKTS, /* InECT0Pkts */
+ IPSTATS_MIB_CEPKTS, /* InCEPkts */
__IPSTATS_MIB_MAX
};
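/* These four counters bin received packets by the ECN field, i.e. the low
 * two bits of the IPv4 TOS / IPv6 traffic-class octet (RFC 3168). A hedged
 * classification sketch, with a helper name of our own choosing:
 */
static int ipstats_ecn_bucket(unsigned char tos)
{
        switch (tos & 0x03) {
        case 1:  return IPSTATS_MIB_ECT1PKTS;   /* ECT(1) */
        case 2:  return IPSTATS_MIB_ECT0PKTS;   /* ECT(0) */
        case 3:  return IPSTATS_MIB_CEPKTS;     /* CE */
        default: return IPSTATS_MIB_NOECTPKTS;  /* Not-ECT */
        }
}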
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 8d776ebc4829..377f1e59411d 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -111,6 +111,7 @@ enum {
#define TCP_REPAIR_OPTIONS 22
#define TCP_FASTOPEN 23 /* Enable FastOpen on listeners */
#define TCP_TIMESTAMP 24
+#define TCP_NOTSENT_LOWAT 25 /* limit number of unsent bytes in write queue */
struct tcp_repair_opt {
__u32 opt_code;
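/* Minimal usage sketch (assuming a connected TCP socket fd; the 128 KiB
 * threshold is illustrative): once set, poll()/select() report the socket
 * writable only while the amount of unsent data is below the threshold,
 * bounding how much data a sender commits to a slow peer.
 */
int lowat = 128 * 1024;
if (setsockopt(fd, IPPROTO_TCP, TCP_NOTSENT_LOWAT,
               &lowat, sizeof(lowat)) < 0)
        perror("setsockopt(TCP_NOTSENT_LOWAT)");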
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index c520203fac2f..172a7f00780c 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -60,7 +60,7 @@
struct virtio_net_config {
/* The config defining mac address (if VIRTIO_NET_F_MAC) */
- __u8 mac[6];
+ __u8 mac[ETH_ALEN];
/* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */
__u16 status;
/* Maximum number of each of transmit and receive queues;
@@ -70,7 +70,9 @@ struct virtio_net_config {
__u16 max_virtqueue_pairs;
} __attribute__((packed));
-/* This is the first element of the scatter-gather list. If you don't
+/* This header comes first in the scatter-gather list.
+ * If VIRTIO_F_ANY_LAYOUT is not negotiated, it must
+ * be the first element of the scatter-gather list. If you don't
* specify GSO or CSUM features, you can simply ignore the header. */
struct virtio_net_hdr {
#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 // Use csum_start, csum_offset
diff --git a/include/uapi/linux/wimax/i2400m.h b/include/uapi/linux/wimax/i2400m.h
index 62d356153565..fd198bc24a3c 100644
--- a/include/uapi/linux/wimax/i2400m.h
+++ b/include/uapi/linux/wimax/i2400m.h
@@ -122,7 +122,7 @@
#define __LINUX__WIMAX__I2400M_H__
#include <linux/types.h>
-
+#include <linux/if_ether.h>
/*
* Host Device Interface (HDI) common to all busses
@@ -487,7 +487,7 @@ struct i2400m_tlv_l4_message_versions {
struct i2400m_tlv_detailed_device_info {
struct i2400m_tlv_hdr hdr;
__u8 reserved1[400];
- __u8 mac_address[6];
+ __u8 mac_address[ETH_ALEN];
__u8 reserved2[2];
} __attribute__((packed));
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 2fb2d88e8c2e..61fc573f1142 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -210,6 +210,7 @@ out_vid_del:
static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
{
struct net_device *new_dev;
+ struct vlan_dev_priv *vlan;
struct net *net = dev_net(real_dev);
struct vlan_net *vn = net_generic(net, vlan_net_id);
char name[IFNAMSIZ];
@@ -260,11 +261,12 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
new_dev->mtu = real_dev->mtu;
new_dev->priv_flags |= (real_dev->priv_flags & IFF_UNICAST_FLT);
- vlan_dev_priv(new_dev)->vlan_proto = htons(ETH_P_8021Q);
- vlan_dev_priv(new_dev)->vlan_id = vlan_id;
- vlan_dev_priv(new_dev)->real_dev = real_dev;
- vlan_dev_priv(new_dev)->dent = NULL;
- vlan_dev_priv(new_dev)->flags = VLAN_FLAG_REORDER_HDR;
+ vlan = vlan_dev_priv(new_dev);
+ vlan->vlan_proto = htons(ETH_P_8021Q);
+ vlan->vlan_id = vlan_id;
+ vlan->real_dev = real_dev;
+ vlan->dent = NULL;
+ vlan->flags = VLAN_FLAG_REORDER_HDR;
new_dev->rtnl_link_ops = &vlan_link_ops;
err = register_vlan_dev(new_dev);
@@ -459,6 +461,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
case NETDEV_NOTIFY_PEERS:
case NETDEV_BONDING_FAILOVER:
+ case NETDEV_RESEND_IGMP:
/* Propagate to vlan devices */
vlan_group_for_each_dev(grp, i, vlandev)
call_netdevice_notifiers(event, vlandev);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 1cd3d2a406f5..09bf1c38805b 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -107,10 +107,10 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
u16 vlan_tci = 0;
int rc;
- if (!(vlan_dev_priv(dev)->flags & VLAN_FLAG_REORDER_HDR)) {
+ if (!(vlan->flags & VLAN_FLAG_REORDER_HDR)) {
vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN);
- vlan_tci = vlan_dev_priv(dev)->vlan_id;
+ vlan_tci = vlan->vlan_id;
vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
vhdr->h_vlan_TCI = htons(vlan_tci);
@@ -133,7 +133,7 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
saddr = dev->dev_addr;
/* Now make the underlying real hard header */
- dev = vlan_dev_priv(dev)->real_dev;
+ dev = vlan->real_dev;
rc = dev_hard_header(skb, dev, type, daddr, saddr, len + vhdrlen);
if (rc > 0)
rc += vhdrlen;
@@ -582,7 +582,7 @@ static int vlan_dev_init(struct net_device *dev)
dev->dev_id = real_dev->dev_id;
if (is_zero_ether_addr(dev->dev_addr))
- memcpy(dev->dev_addr, real_dev->dev_addr, dev->addr_len);
+ eth_hw_addr_inherit(dev, real_dev);
if (is_zero_ether_addr(dev->broadcast))
memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
diff --git a/net/9p/client.c b/net/9p/client.c
index 8b93cae2d11d..ba93bdab2701 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -658,17 +658,12 @@ static int p9_client_flush(struct p9_client *c, struct p9_req_t *oldreq)
/*
* if we haven't received a response for oldreq,
- * remove it from the list, and notify the transport
- * layer that the reply will never arrive.
+ * remove it from the list
*/
- spin_lock(&c->lock);
if (oldreq->status == REQ_STATUS_FLSH) {
+ spin_lock(&c->lock);
list_del(&oldreq->req_list);
spin_unlock(&c->lock);
- if (c->trans_mod->cancelled)
- c->trans_mod->cancelled(c, req);
- } else {
- spin_unlock(&c->lock);
}
p9_free_req(c, req);
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 928f2bb9bf8d..8f68df5d2973 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -588,17 +588,6 @@ static int rdma_cancel(struct p9_client *client, struct p9_req_t *req)
return 1;
}
-/* A request has been fully flushed without a reply.
- * That means we have posted one buffer in excess.
- */
-static int rdma_cancelled(struct p9_client *client, struct p9_req_t *req)
-{
- struct p9_trans_rdma *rdma = client->trans;
-
- atomic_inc(&rdma->excess_rc);
- return 0;
-}
-
/**
 * trans_create_rdma - Transport method for creating a transport instance
* @client: client instance
diff --git a/net/Kconfig b/net/Kconfig
index 2b406608a1a4..ee0213667272 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -281,7 +281,7 @@ menu "Network testing"
config NET_PKTGEN
tristate "Packet Generator (USE WITH CAUTION)"
- depends on PROC_FS
+ depends on INET && PROC_FS
---help---
This module will inject preconfigured packets, at a configurable
rate, out of a given interface. It is used for network interface
diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
index c30f3a0717fb..af46bc49e1e9 100644
--- a/net/appletalk/atalk_proc.c
+++ b/net/appletalk/atalk_proc.c
@@ -178,7 +178,7 @@ static int atalk_seq_socket_show(struct seq_file *seq, void *v)
at = at_sk(s);
seq_printf(seq, "%02X %04X:%02X:%02X %04X:%02X:%02X %08X:%08X "
- "%02X %d\n",
+ "%02X %u\n",
s->sk_type, ntohs(at->src_net), at->src_node, at->src_port,
ntohs(at->dest_net), at->dest_node, at->dest_port,
sk_wmem_alloc_get(s),
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 62da5278014a..0a8a80cd4bf1 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -28,6 +28,22 @@
#include "bat_algo.h"
#include "network-coding.h"
+
+/**
+ * batadv_dup_status - duplicate status
+ * @BATADV_NO_DUP: the packet is not a duplicate
+ * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for the
+ * neighbor)
+ * @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor
+ * @BATADV_PROTECTED: originator is currently protected (after reboot)
+ */
+enum batadv_dup_status {
+ BATADV_NO_DUP = 0,
+ BATADV_ORIG_DUP,
+ BATADV_NEIGH_DUP,
+ BATADV_PROTECTED,
+};
+
/**
* batadv_ring_buffer_set - update the ring buffer with the given value
* @lq_recv: pointer to the ring buffer
@@ -71,21 +87,6 @@ static uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[])
return (uint8_t)(sum / count);
}
-/*
- * batadv_dup_status - duplicate status
- * @BATADV_NO_DUP: the packet is not a duplicate
- * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for the
- * neighbor)
- * @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor
- * @BATADV_PROTECTED: originator is currently protected (after reboot)
- */
-enum batadv_dup_status {
- BATADV_NO_DUP = 0,
- BATADV_ORIG_DUP,
- BATADV_NEIGH_DUP,
- BATADV_PROTECTED,
-};
-
static struct batadv_neigh_node *
batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
const uint8_t *neigh_addr,
@@ -478,6 +479,7 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
kfree(forw_packet_aggr);
goto out;
}
+ forw_packet_aggr->skb->priority = TC_PRIO_CONTROL;
skb_reserve(forw_packet_aggr->skb, ETH_HLEN);
skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 7614af31daff..1ce4b8763ef2 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -190,6 +190,33 @@ next:
return curr_gw;
}
+/**
+ * batadv_gw_check_client_stop - check if client mode has been switched off
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * This function assumes the caller has checked that the gw state *is actually
+ * changing*. This function is not supposed to be called when there is no state
+ * change.
+ */
+void batadv_gw_check_client_stop(struct batadv_priv *bat_priv)
+{
+ struct batadv_gw_node *curr_gw;
+
+ if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT)
+ return;
+
+ curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
+ if (!curr_gw)
+ return;
+
+ /* if batman-adv is switching the gw client mode off and a gateway was
+ * already selected, send a DEL uevent
+ */
+ batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_DEL, NULL);
+
+ batadv_gw_node_free_ref(curr_gw);
+}
+
void batadv_gw_election(struct batadv_priv *bat_priv)
{
struct batadv_gw_node *curr_gw = NULL, *next_gw = NULL;
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 1037d75da51f..ceef4ebe8bcd 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -20,6 +20,7 @@
#ifndef _NET_BATMAN_ADV_GATEWAY_CLIENT_H_
#define _NET_BATMAN_ADV_GATEWAY_CLIENT_H_
+void batadv_gw_check_client_stop(struct batadv_priv *bat_priv);
void batadv_gw_deselect(struct batadv_priv *bat_priv);
void batadv_gw_election(struct batadv_priv *bat_priv);
struct batadv_orig_node *
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index b27508b8085c..5a99bb4b6b82 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -183,6 +183,7 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
goto out;
}
+ skb->priority = TC_PRIO_CONTROL;
skb_reserve(skb, ETH_HLEN);
icmp_packet = (struct batadv_icmp_packet_rr *)skb_put(skb, packet_len);
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 08125f3f6064..c72d1bcdcf49 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -19,6 +19,10 @@
#include <linux/crc32c.h>
#include <linux/highmem.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/dsfield.h>
#include "main.h"
#include "sysfs.h"
#include "debugfs.h"
@@ -249,6 +253,60 @@ out:
return primary_if;
}
+/**
+ * batadv_skb_set_priority - sets skb priority according to packet content
+ * @skb: the packet to be sent
+ * @offset: offset to the packet content
+ *
+ * This function sets a value between 256 and 263 (802.1d priority), which
+ * can be interpreted by cfg80211 or other drivers.
+ */
+void batadv_skb_set_priority(struct sk_buff *skb, int offset)
+{
+ struct iphdr ip_hdr_tmp, *ip_hdr;
+ struct ipv6hdr ip6_hdr_tmp, *ip6_hdr;
+ struct ethhdr ethhdr_tmp, *ethhdr;
+ struct vlan_ethhdr *vhdr, vhdr_tmp;
+ u32 prio;
+
+ /* already set, do nothing */
+ if (skb->priority >= 256 && skb->priority <= 263)
+ return;
+
+ ethhdr = skb_header_pointer(skb, offset, sizeof(*ethhdr), &ethhdr_tmp);
+ if (!ethhdr)
+ return;
+
+ switch (ethhdr->h_proto) {
+ case htons(ETH_P_8021Q):
+ vhdr = skb_header_pointer(skb, offset + sizeof(*vhdr),
+ sizeof(*vhdr), &vhdr_tmp);
+ if (!vhdr)
+ return;
+ prio = ntohs(vhdr->h_vlan_TCI) & VLAN_PRIO_MASK;
+ prio = prio >> VLAN_PRIO_SHIFT;
+ break;
+ case htons(ETH_P_IP):
+ ip_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
+ sizeof(*ip_hdr), &ip_hdr_tmp);
+ if (!ip_hdr)
+ return;
+ prio = (ipv4_get_dsfield(ip_hdr) & 0xfc) >> 5;
+ break;
+ case htons(ETH_P_IPV6):
+ ip6_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
+ sizeof(*ip6_hdr), &ip6_hdr_tmp);
+ if (!ip6_hdr)
+ return;
+ prio = (ipv6_get_dsfield(ip6_hdr) & 0xfc) >> 5;
+ break;
+ default:
+ return;
+ }
+
+ skb->priority = prio + 256;
+}
+
static int batadv_recv_unhandled_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if)
{
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 5e9aebb7d56b..24675523930f 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -26,7 +26,7 @@
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2013.3.0"
+#define BATADV_SOURCE_VERSION "2013.4.0"
#endif
/* B.A.T.M.A.N. parameters */
@@ -184,6 +184,7 @@ void batadv_mesh_free(struct net_device *soft_iface);
int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr);
struct batadv_hard_iface *
batadv_seq_print_text_primary_if_get(struct seq_file *seq);
+void batadv_skb_set_priority(struct sk_buff *skb, int offset);
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *ptype,
struct net_device *orig_dev);
@@ -253,7 +254,7 @@ static inline void batadv_dbg(int type __always_unused,
/* returns 1 if they are the same ethernet addr
*
- * note: can't use compare_ether_addr() as it requires aligned memory
+ * note: can't use ether_addr_equal() as it requires aligned memory
*/
static inline int batadv_compare_eth(const void *data1, const void *data2)
{
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 2f0bd3ffe6e8..0439395d7ba5 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -775,7 +775,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
struct batadv_neigh_node *neigh_node = NULL;
struct batadv_unicast_packet *unicast_packet;
struct ethhdr *ethhdr = eth_hdr(skb);
- int res, ret = NET_RX_DROP;
+ int res, hdr_len, ret = NET_RX_DROP;
struct sk_buff *new_skb;
unicast_packet = (struct batadv_unicast_packet *)skb->data;
@@ -835,6 +835,22 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
/* decrement ttl */
unicast_packet->header.ttl--;
+ switch (unicast_packet->header.packet_type) {
+ case BATADV_UNICAST_4ADDR:
+ hdr_len = sizeof(struct batadv_unicast_4addr_packet);
+ break;
+ case BATADV_UNICAST:
+ hdr_len = sizeof(struct batadv_unicast_packet);
+ break;
+ default:
+ /* other packet types not supported - yet */
+ hdr_len = -1;
+ break;
+ }
+
+ if (hdr_len > 0)
+ batadv_skb_set_priority(skb, hdr_len);
+
res = batadv_send_skb_to_orig(skb, orig_node, recv_if);
/* translate transmit result into receive result */
@@ -1193,6 +1209,8 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
if (batadv_bla_check_bcast_duplist(bat_priv, skb))
goto out;
+ batadv_skb_set_priority(skb, sizeof(struct batadv_bcast_packet));
+
/* rebroadcast packet */
batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index e9ff8d801201..0266edd0fa7f 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -67,7 +67,6 @@ int batadv_send_skb_packet(struct sk_buff *skb,
ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
skb_set_network_header(skb, ETH_HLEN);
- skb->priority = TC_PRIO_CONTROL;
skb->protocol = __constant_htons(ETH_P_BATMAN);
skb->dev = hard_iface->net_dev;
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 0f04e1c302b4..4493913f0d5c 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -229,6 +229,8 @@ static int batadv_interface_tx(struct sk_buff *skb,
*/
}
+ batadv_skb_set_priority(skb, 0);
+
/* ethernet packet should be broadcasted */
if (do_bcast) {
primary_if = batadv_primary_if_get_selected(bat_priv);
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index 929e304dacb2..4114b961bc2c 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -385,6 +385,10 @@ static ssize_t batadv_store_gw_mode(struct kobject *kobj,
curr_gw_mode_str, buff);
batadv_gw_deselect(bat_priv);
+ /* always call batadv_gw_check_client_stop() before changing the gateway
+ * state
+ */
+ batadv_gw_check_client_stop(bat_priv);
atomic_set(&bat_priv->gw_mode, (unsigned int)gw_mode_tmp);
return count;
}
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 429aeef3d8b2..34510f38708f 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1626,6 +1626,7 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
if (!skb)
goto out;
+ skb->priority = TC_PRIO_CONTROL;
skb_reserve(skb, ETH_HLEN);
tt_response = (struct batadv_tt_query_packet *)skb_put(skb, len);
tt_response->ttvn = ttvn;
@@ -1691,6 +1692,7 @@ static int batadv_send_tt_request(struct batadv_priv *bat_priv,
if (!skb)
goto out;
+ skb->priority = TC_PRIO_CONTROL;
skb_reserve(skb, ETH_HLEN);
tt_req_len = sizeof(*tt_request);
@@ -1788,6 +1790,7 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
if (!skb)
goto unlock;
+ skb->priority = TC_PRIO_CONTROL;
skb_reserve(skb, ETH_HLEN);
packet_pos = skb_put(skb, len);
tt_response = (struct batadv_tt_query_packet *)packet_pos;
@@ -1906,6 +1909,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
if (!skb)
goto unlock;
+ skb->priority = TC_PRIO_CONTROL;
skb_reserve(skb, ETH_HLEN);
packet_pos = skb_put(skb, len);
tt_response = (struct batadv_tt_query_packet *)packet_pos;
@@ -2240,6 +2244,7 @@ static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
if (!skb)
goto out;
+ skb->priority = TC_PRIO_CONTROL;
skb_reserve(skb, ETH_HLEN);
roam_adv_packet = (struct batadv_roam_adv_packet *)skb_put(skb, len);
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 857e1b8349ee..48b31d33ce6b 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -242,6 +242,8 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len);
if (!frag_skb)
goto dropped;
+
+ skb->priority = TC_PRIO_CONTROL;
skb_reserve(frag_skb, ucf_hdr_len);
unicast_packet = (struct batadv_unicast_packet *)skb->data;
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index 4983340f1943..d8ea31a58457 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -397,6 +397,7 @@ batadv_add_packet(struct batadv_priv *bat_priv,
kfree(info);
return NULL;
}
+ info->skb_packet->priority = TC_PRIO_CONTROL;
skb_reserve(info->skb_packet, ETH_HLEN);
packet = (struct batadv_vis_packet *)skb_put(info->skb_packet, len);
@@ -861,6 +862,7 @@ int batadv_vis_init(struct batadv_priv *bat_priv)
if (!bat_priv->vis.my_info->skb_packet)
goto free_info;
+ bat_priv->vis.my_info->skb_packet->priority = TC_PRIO_CONTROL;
skb_reserve(bat_priv->vis.my_info->skb_packet, ETH_HLEN);
tmp_skb = bat_priv->vis.my_info->skb_packet;
packet = (struct batadv_vis_packet *)skb_put(tmp_skb, sizeof(*packet));
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 6c7f36379722..f0817121ec5e 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -31,6 +31,24 @@
#include <net/bluetooth/a2mp.h>
#include <net/bluetooth/smp.h>
+struct sco_param {
+ u16 pkt_type;
+ u16 max_latency;
+};
+
+static const struct sco_param sco_param_cvsd[] = {
+ { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a }, /* S3 */
+ { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007 }, /* S2 */
+ { EDR_ESCO_MASK | ESCO_EV3, 0x0007 }, /* S1 */
+ { EDR_ESCO_MASK | ESCO_HV3, 0xffff }, /* D1 */
+ { EDR_ESCO_MASK | ESCO_HV1, 0xffff }, /* D0 */
+};
+
+static const struct sco_param sco_param_wideband[] = {
+ { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d }, /* T2 */
+ { EDR_ESCO_MASK | ESCO_EV3, 0x0008 }, /* T1 */
+};
+
static void hci_le_create_connection(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
@@ -172,10 +190,11 @@ static void hci_add_sco(struct hci_conn *conn, __u16 handle)
hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}
-void hci_setup_sync(struct hci_conn *conn, __u16 handle)
+bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
struct hci_dev *hdev = conn->hdev;
struct hci_cp_setup_sync_conn cp;
+ const struct sco_param *param;
BT_DBG("hcon %p", conn);
@@ -185,15 +204,35 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
conn->attempt++;
cp.handle = cpu_to_le16(handle);
- cp.pkt_type = cpu_to_le16(conn->pkt_type);
cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
- cp.max_latency = __constant_cpu_to_le16(0xffff);
- cp.voice_setting = cpu_to_le16(hdev->voice_setting);
- cp.retrans_effort = 0xff;
+ cp.voice_setting = cpu_to_le16(conn->setting);
+
+ switch (conn->setting & SCO_AIRMODE_MASK) {
+ case SCO_AIRMODE_TRANSP:
+ if (conn->attempt > ARRAY_SIZE(sco_param_wideband))
+ return false;
+ cp.retrans_effort = 0x02;
+ param = &sco_param_wideband[conn->attempt - 1];
+ break;
+ case SCO_AIRMODE_CVSD:
+ if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
+ return false;
+ cp.retrans_effort = 0x01;
+ param = &sco_param_cvsd[conn->attempt - 1];
+ break;
+ default:
+ return false;
+ }
- hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
+ cp.pkt_type = __cpu_to_le16(param->pkt_type);
+ cp.max_latency = __cpu_to_le16(param->max_latency);
+
+ if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
+ return false;
+
+ return true;
}
void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
@@ -560,13 +599,13 @@ static struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
return acl;
}
-static struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type,
- bdaddr_t *dst, u8 sec_level, u8 auth_type)
+struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ __u16 setting)
{
struct hci_conn *acl;
struct hci_conn *sco;
- acl = hci_connect_acl(hdev, dst, sec_level, auth_type);
+ acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
if (IS_ERR(acl))
return acl;
@@ -584,6 +623,8 @@ static struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type,
hci_conn_hold(sco);
+ sco->setting = setting;
+
if (acl->state == BT_CONNECTED &&
(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
@@ -612,9 +653,6 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
return hci_connect_le(hdev, dst, dst_type, sec_level, auth_type);
case ACL_LINK:
return hci_connect_acl(hdev, dst, sec_level, auth_type);
- case SCO_LINK:
- case ESCO_LINK:
- return hci_connect_sco(hdev, type, dst, sec_level, auth_type);
}
return ERR_PTR(-EINVAL);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index cc27297da5a9..634debab4d54 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -454,6 +454,18 @@ static void hci_setup_event_mask(struct hci_request *req)
events[4] |= 0x04; /* Read Remote Extended Features Complete */
events[5] |= 0x08; /* Synchronous Connection Complete */
events[5] |= 0x10; /* Synchronous Connection Changed */
+ } else {
+ /* Use a different default for LE-only devices */
+ memset(events, 0, sizeof(events));
+ events[0] |= 0x10; /* Disconnection Complete */
+ events[0] |= 0x80; /* Encryption Change */
+ events[1] |= 0x08; /* Read Remote Version Information Complete */
+ events[1] |= 0x20; /* Command Complete */
+ events[1] |= 0x40; /* Command Status */
+ events[1] |= 0x80; /* Hardware Error */
+ events[2] |= 0x04; /* Number of Completed Packets */
+ events[3] |= 0x02; /* Data Buffer Overflow */
+ events[5] |= 0x80; /* Encryption Key Refresh Complete */
}
if (lmp_inq_rssi_capable(hdev))
@@ -608,7 +620,7 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
* as supported send it. If not supported assume that the controller
* does not have actual support for stored link keys which makes this
* command redundant anyway.
- */
+ */
if (hdev->commands[6] & 0x80) {
struct hci_cp_delete_stored_link_key cp;
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 0437200d92f4..94aab73f89d4 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -2904,15 +2904,16 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
hci_conn_add_sysfs(conn);
break;
+ case 0x0d: /* Connection Rejected due to Limited Resources */
case 0x11: /* Unsupported Feature or Parameter Value */
case 0x1c: /* SCO interval rejected */
case 0x1a: /* Unsupported Remote Feature */
case 0x1f: /* Unspecified error */
- if (conn->out && conn->attempt < 2) {
+ if (conn->out) {
conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
(hdev->esco_type & EDR_ESCO_MASK);
- hci_setup_sync(conn, conn->link->handle);
- goto unlock;
+ if (hci_setup_sync(conn, conn->link->handle))
+ goto unlock;
}
/* fall through */
@@ -3024,17 +3025,20 @@ unlock:
static u8 hci_get_auth_req(struct hci_conn *conn)
{
/* If remote requests dedicated bonding follow that lead */
- if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
+ if (conn->remote_auth == HCI_AT_DEDICATED_BONDING ||
+ conn->remote_auth == HCI_AT_DEDICATED_BONDING_MITM) {
/* If both remote and local IO capabilities allow MITM
* protection then require it, otherwise don't */
- if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
- return 0x02;
+ if (conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT ||
+ conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)
+ return HCI_AT_DEDICATED_BONDING;
else
- return 0x03;
+ return HCI_AT_DEDICATED_BONDING_MITM;
}
/* If remote requests no-bonding follow that lead */
- if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
+ if (conn->remote_auth == HCI_AT_NO_BONDING ||
+ conn->remote_auth == HCI_AT_NO_BONDING_MITM)
return conn->remote_auth | (conn->auth_type & 0x01);
return conn->auth_type;
@@ -3066,7 +3070,7 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
/* Change the IO capability from KeyboardDisplay
* to DisplayYesNo as it is not supported by BT spec. */
cp.capability = (conn->io_capability == 0x04) ?
- 0x01 : conn->io_capability;
+ HCI_IO_DISPLAY_YESNO : conn->io_capability;
conn->auth_type = hci_get_auth_req(conn);
cp.authentication = conn->auth_type;
@@ -3140,7 +3144,8 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
* request. The only exception is when we're dedicated bonding
* initiators (connect_cfm_cb set) since then we always have the MITM
* bit set. */
- if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
+ if (!conn->connect_cfm_cb && loc_mitm &&
+ conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
BT_DBG("Rejecting request: remote device can't provide MITM");
hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
sizeof(ev->bdaddr), &ev->bdaddr);
@@ -3148,8 +3153,8 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
}
/* If no side requires MITM protection; auto-accept */
- if ((!loc_mitm || conn->remote_cap == 0x03) &&
- (!rem_mitm || conn->io_capability == 0x03)) {
+ if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
+ (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
/* If we're not the initiators request authorization to
* proceed from user space (mgmt_user_confirm with
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 0c699cdc3696..13863de433a4 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -238,6 +238,31 @@ static int hidp_send_report(struct hidp_session *session, struct hid_report *rep
return hidp_send_intr_message(session, hdr, buf, rsize);
}
+static int hidp_hidinput_event(struct input_dev *dev, unsigned int type,
+ unsigned int code, int value)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+ struct hidp_session *session = hid->driver_data;
+ struct hid_field *field;
+ int offset;
+
+ BT_DBG("session %p type %d code %d value %d",
+ session, type, code, value);
+
+ if (type != EV_LED)
+ return -1;
+
+ offset = hidinput_find_field(hid, type, code, &field);
+ if (offset == -1) {
+ hid_warn(dev, "event field not found\n");
+ return -1;
+ }
+
+ hid_set_field(field, offset, value);
+
+ return hidp_send_report(session, field->report);
+}
+
static int hidp_get_raw_report(struct hid_device *hid,
unsigned char report_number,
unsigned char *data, size_t count,
@@ -678,20 +703,6 @@ static int hidp_parse(struct hid_device *hid)
static int hidp_start(struct hid_device *hid)
{
- struct hidp_session *session = hid->driver_data;
- struct hid_report *report;
-
- if (hid->quirks & HID_QUIRK_NO_INIT_REPORTS)
- return 0;
-
- list_for_each_entry(report, &hid->report_enum[HID_INPUT_REPORT].
- report_list, list)
- hidp_send_report(session, report);
-
- list_for_each_entry(report, &hid->report_enum[HID_FEATURE_REPORT].
- report_list, list)
- hidp_send_report(session, report);
-
return 0;
}
@@ -711,6 +722,7 @@ static struct hid_ll_driver hidp_hid_driver = {
.stop = hidp_stop,
.open = hidp_open,
.close = hidp_close,
+ .hidinput_input_event = hidp_hidinput_event,
};
/* This function sets up the hid device. It does not add it
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 8c3499bec893..b3bb7bca8e60 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1415,8 +1415,9 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
sk->sk_state_change(sk);
release_sock(sk);
- } else if (chan->state == BT_CONNECT)
+ } else if (chan->state == BT_CONNECT) {
l2cap_do_start(chan);
+ }
l2cap_chan_unlock(chan);
}
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index b6e44ad6cca6..6d126faf145f 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -58,7 +58,6 @@ struct rfcomm_dev {
uint modem_status;
struct rfcomm_dlc *dlc;
- wait_queue_head_t wait;
struct device *tty_dev;
@@ -76,13 +75,6 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig);
/* ---- Device functions ---- */
-/*
- * The reason this isn't actually a race, as you no doubt have a little voice
- * screaming at you in your head, is that the refcount should never actually
- * reach zero unless the device has already been taken off the list, in
- * rfcomm_dev_del(). And if that's not true, we'll hit the BUG() in
- * rfcomm_dev_destruct() anyway.
- */
static void rfcomm_dev_destruct(struct tty_port *port)
{
struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port);
@@ -90,10 +82,9 @@ static void rfcomm_dev_destruct(struct tty_port *port)
BT_DBG("dev %p dlc %p", dev, dlc);
- /* Refcount should only hit zero when called from rfcomm_dev_del()
- which will have taken us off the list. Everything else are
- refcounting bugs. */
- BUG_ON(!list_empty(&dev->list));
+ spin_lock(&rfcomm_dev_lock);
+ list_del(&dev->list);
+ spin_unlock(&rfcomm_dev_lock);
rfcomm_dlc_lock(dlc);
/* Detach DLC if it's owned by this dev */
@@ -112,8 +103,39 @@ static void rfcomm_dev_destruct(struct tty_port *port)
module_put(THIS_MODULE);
}
+/* device-specific initialization: open the dlc */
+static int rfcomm_dev_activate(struct tty_port *port, struct tty_struct *tty)
+{
+ struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port);
+
+ return rfcomm_dlc_open(dev->dlc, &dev->src, &dev->dst, dev->channel);
+}
+
+/* we block the open until the dlc->state becomes BT_CONNECTED */
+static int rfcomm_dev_carrier_raised(struct tty_port *port)
+{
+ struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port);
+
+ return (dev->dlc->state == BT_CONNECTED);
+}
+
+/* device-specific cleanup: close the dlc */
+static void rfcomm_dev_shutdown(struct tty_port *port)
+{
+ struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port);
+
+ if (dev->tty_dev->parent)
+ device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
+
+ /* close the dlc */
+ rfcomm_dlc_close(dev->dlc, 0);
+}
+
static const struct tty_port_operations rfcomm_port_ops = {
.destruct = rfcomm_dev_destruct,
+ .activate = rfcomm_dev_activate,
+ .shutdown = rfcomm_dev_shutdown,
+ .carrier_raised = rfcomm_dev_carrier_raised,
};
static struct rfcomm_dev *__rfcomm_dev_get(int id)
@@ -236,7 +258,6 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
tty_port_init(&dev->port);
dev->port.ops = &rfcomm_port_ops;
- init_waitqueue_head(&dev->wait);
skb_queue_head_init(&dev->pending);
@@ -282,7 +303,9 @@ out:
dev->id, NULL);
if (IS_ERR(dev->tty_dev)) {
err = PTR_ERR(dev->tty_dev);
+ spin_lock(&rfcomm_dev_lock);
list_del(&dev->list);
+ spin_unlock(&rfcomm_dev_lock);
goto free;
}
@@ -301,27 +324,6 @@ free:
return err;
}
-static void rfcomm_dev_del(struct rfcomm_dev *dev)
-{
- unsigned long flags;
- BT_DBG("dev %p", dev);
-
- BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
-
- spin_lock_irqsave(&dev->port.lock, flags);
- if (dev->port.count > 0) {
- spin_unlock_irqrestore(&dev->port.lock, flags);
- return;
- }
- spin_unlock_irqrestore(&dev->port.lock, flags);
-
- spin_lock(&rfcomm_dev_lock);
- list_del_init(&dev->list);
- spin_unlock(&rfcomm_dev_lock);
-
- tty_port_put(&dev->port);
-}
-
/* ---- Send buffer ---- */
static inline unsigned int rfcomm_room(struct rfcomm_dlc *dlc)
{
@@ -333,10 +335,9 @@ static inline unsigned int rfcomm_room(struct rfcomm_dlc *dlc)
static void rfcomm_wfree(struct sk_buff *skb)
{
struct rfcomm_dev *dev = (void *) skb->sk;
- struct tty_struct *tty = dev->port.tty;
atomic_sub(skb->truesize, &dev->wmem_alloc);
- if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags) && tty)
- tty_wakeup(tty);
+ if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags))
+ tty_port_tty_wakeup(&dev->port);
tty_port_put(&dev->port);
}
@@ -410,6 +411,7 @@ static int rfcomm_release_dev(void __user *arg)
{
struct rfcomm_dev_req req;
struct rfcomm_dev *dev;
+ struct tty_struct *tty;
if (copy_from_user(&req, arg, sizeof(req)))
return -EFAULT;
@@ -429,11 +431,15 @@ static int rfcomm_release_dev(void __user *arg)
rfcomm_dlc_close(dev->dlc, 0);
/* Shut down TTY synchronously before freeing rfcomm_dev */
- if (dev->port.tty)
- tty_vhangup(dev->port.tty);
+ tty = tty_port_tty_get(&dev->port);
+ if (tty) {
+ tty_vhangup(tty);
+ tty_kref_put(tty);
+ }
+
+ if (!test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags))
+ tty_port_put(&dev->port);
- if (!test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags))
- rfcomm_dev_del(dev);
tty_port_put(&dev->port);
return 0;
}
@@ -563,16 +569,21 @@ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb)
static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
{
struct rfcomm_dev *dev = dlc->owner;
+ struct tty_struct *tty;
if (!dev)
return;
BT_DBG("dlc %p dev %p err %d", dlc, dev, err);
dev->err = err;
- wake_up_interruptible(&dev->wait);
+ if (dlc->state == BT_CONNECTED) {
+ device_move(dev->tty_dev, rfcomm_get_device(dev),
+ DPM_ORDER_DEV_AFTER_PARENT);
- if (dlc->state == BT_CLOSED) {
- if (!dev->port.tty) {
+ wake_up_interruptible(&dev->port.open_wait);
+ } else if (dlc->state == BT_CLOSED) {
+ tty = tty_port_tty_get(&dev->port);
+ if (!tty) {
if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) {
/* Drop DLC lock here to avoid deadlock
* 1. rfcomm_dev_get will take rfcomm_dev_lock
@@ -580,6 +591,9 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
* rfcomm_dev_lock -> dlc lock
* 2. tty_port_put will deadlock if it's
* the last reference
+ *
+ * FIXME: when we release the lock anything
+ * could happen to dev, even its destruction
*/
rfcomm_dlc_unlock(dlc);
if (rfcomm_dev_get(dev->id) == NULL) {
@@ -587,12 +601,17 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
return;
}
- rfcomm_dev_del(dev);
+ if (!test_and_set_bit(RFCOMM_TTY_RELEASED,
+ &dev->flags))
+ tty_port_put(&dev->port);
+
tty_port_put(&dev->port);
rfcomm_dlc_lock(dlc);
}
- } else
- tty_hangup(dev->port.tty);
+ } else {
+ tty_hangup(tty);
+ tty_kref_put(tty);
+ }
}
}
@@ -604,10 +623,8 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig)
BT_DBG("dlc %p dev %p v24_sig 0x%02x", dlc, dev, v24_sig);
- if ((dev->modem_status & TIOCM_CD) && !(v24_sig & RFCOMM_V24_DV)) {
- if (dev->port.tty && !C_CLOCAL(dev->port.tty))
- tty_hangup(dev->port.tty);
- }
+ if ((dev->modem_status & TIOCM_CD) && !(v24_sig & RFCOMM_V24_DV))
+ tty_port_tty_hangup(&dev->port, true);
dev->modem_status =
((v24_sig & RFCOMM_V24_RTC) ? (TIOCM_DSR | TIOCM_DTR) : 0) |
@@ -638,124 +655,92 @@ static void rfcomm_tty_copy_pending(struct rfcomm_dev *dev)
tty_flip_buffer_push(&dev->port);
}
-static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
+/* do the reverse of install, clearing the tty fields and releasing the
+ * reference to tty_port
+ */
+static void rfcomm_tty_cleanup(struct tty_struct *tty)
{
- DECLARE_WAITQUEUE(wait, current);
- struct rfcomm_dev *dev;
- struct rfcomm_dlc *dlc;
- unsigned long flags;
- int err, id;
+ struct rfcomm_dev *dev = tty->driver_data;
- id = tty->index;
+ clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags);
- BT_DBG("tty %p id %d", tty, id);
+ rfcomm_dlc_lock(dev->dlc);
+ tty->driver_data = NULL;
+ rfcomm_dlc_unlock(dev->dlc);
- /* We don't leak this refcount. For reasons which are not entirely
- clear, the TTY layer will call our ->close() method even if the
- open fails. We decrease the refcount there, and decreasing it
- here too would cause breakage. */
- dev = rfcomm_dev_get(id);
- if (!dev)
- return -ENODEV;
+ /*
+ * purge the dlc->tx_queue to avoid circular dependencies
+ * between dev and dlc
+ */
+ skb_queue_purge(&dev->dlc->tx_queue);
- BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
- dev->channel, dev->port.count);
+ tty_port_put(&dev->port);
+}
- spin_lock_irqsave(&dev->port.lock, flags);
- if (++dev->port.count > 1) {
- spin_unlock_irqrestore(&dev->port.lock, flags);
- return 0;
- }
- spin_unlock_irqrestore(&dev->port.lock, flags);
+/* we acquire the tty_port reference here, since this is where the tty is
+ * first used, by setting the termios. We also populate the driver_data
+ * field and install the tty port
+ */
+static int rfcomm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+ struct rfcomm_dev *dev;
+ struct rfcomm_dlc *dlc;
+ int err;
+
+ dev = rfcomm_dev_get(tty->index);
+ if (!dev)
+ return -ENODEV;
dlc = dev->dlc;
/* Attach TTY and open DLC */
-
rfcomm_dlc_lock(dlc);
tty->driver_data = dev;
- dev->port.tty = tty;
rfcomm_dlc_unlock(dlc);
set_bit(RFCOMM_TTY_ATTACHED, &dev->flags);
- err = rfcomm_dlc_open(dlc, &dev->src, &dev->dst, dev->channel);
- if (err < 0)
- return err;
-
- /* Wait for DLC to connect */
- add_wait_queue(&dev->wait, &wait);
- while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
+ /* install the tty_port */
+ err = tty_port_install(&dev->port, driver, tty);
+ if (err)
+ rfcomm_tty_cleanup(tty);
- if (dlc->state == BT_CLOSED) {
- err = -dev->err;
- break;
- }
+ return err;
+}
- if (dlc->state == BT_CONNECTED)
- break;
+static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
+{
+ struct rfcomm_dev *dev = tty->driver_data;
+ int err;
- if (signal_pending(current)) {
- err = -EINTR;
- break;
- }
+ BT_DBG("tty %p id %d", tty, tty->index);
- tty_unlock(tty);
- schedule();
- tty_lock(tty);
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&dev->wait, &wait);
+ BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
+ dev->channel, dev->port.count);
- if (err == 0)
- device_move(dev->tty_dev, rfcomm_get_device(dev),
- DPM_ORDER_DEV_AFTER_PARENT);
+ err = tty_port_open(&dev->port, tty, filp);
+ if (err)
+ return err;
+ /*
+ * FIXME: rfcomm should use proper flow control for
+ * received data. This hack will be unnecessary and can
+ * be removed when that's implemented
+ */
rfcomm_tty_copy_pending(dev);
rfcomm_dlc_unthrottle(dev->dlc);
- return err;
+ return 0;
}
static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
{
struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
- unsigned long flags;
-
- if (!dev)
- return;
BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
dev->port.count);
- spin_lock_irqsave(&dev->port.lock, flags);
- if (!--dev->port.count) {
- spin_unlock_irqrestore(&dev->port.lock, flags);
- if (dev->tty_dev->parent)
- device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
-
- /* Close DLC and dettach TTY */
- rfcomm_dlc_close(dev->dlc, 0);
-
- clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags);
-
- rfcomm_dlc_lock(dev->dlc);
- tty->driver_data = NULL;
- dev->port.tty = NULL;
- rfcomm_dlc_unlock(dev->dlc);
-
- if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) {
- spin_lock(&rfcomm_dev_lock);
- list_del_init(&dev->list);
- spin_unlock(&rfcomm_dev_lock);
-
- tty_port_put(&dev->port);
- }
- } else
- spin_unlock_irqrestore(&dev->port.lock, flags);
-
- tty_port_put(&dev->port);
+ tty_port_close(&dev->port, tty, filp);
}
static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
@@ -1055,17 +1040,11 @@ static void rfcomm_tty_hangup(struct tty_struct *tty)
BT_DBG("tty %p dev %p", tty, dev);
- if (!dev)
- return;
-
- rfcomm_tty_flush_buffer(tty);
+ tty_port_hangup(&dev->port);
- if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) {
- if (rfcomm_dev_get(dev->id) == NULL)
- return;
- rfcomm_dev_del(dev);
+ if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags) &&
+ !test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags))
tty_port_put(&dev->port);
- }
}
static int rfcomm_tty_tiocmget(struct tty_struct *tty)
@@ -1128,6 +1107,8 @@ static const struct tty_operations rfcomm_ops = {
.wait_until_sent = rfcomm_tty_wait_until_sent,
.tiocmget = rfcomm_tty_tiocmget,
.tiocmset = rfcomm_tty_tiocmset,
+ .install = rfcomm_tty_install,
+ .cleanup = rfcomm_tty_cleanup,
};
int __init rfcomm_init_ttys(void)
@@ -1146,7 +1127,7 @@ int __init rfcomm_init_ttys(void)
rfcomm_tty_driver->subtype = SERIAL_TYPE_NORMAL;
rfcomm_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
rfcomm_tty_driver->init_termios = tty_std_termios;
- rfcomm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ rfcomm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL;
rfcomm_tty_driver->init_termios.c_lflag &= ~ICANON;
tty_set_operations(rfcomm_tty_driver, &rfcomm_ops);
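
The tty_port helpers adopted above impose a fixed call order: tty_port_open() checks .carrier_raised and runs .activate on the first open, tty_port_close() runs .shutdown on the last close, and .install/.cleanup bracket the tty's whole lifetime. A minimal sketch of that contract, using hypothetical my_serial_* names rather than the rfcomm code itself:

    /* Hedged sketch: the tty_port callback ordering the patch relies on */
    static int my_serial_activate(struct tty_port *port, struct tty_struct *tty)
    {
    	/* first open: bring the link up (rfcomm opens the DLC here) */
    	return 0;
    }

    static void my_serial_shutdown(struct tty_port *port)
    {
    	/* last close: tear the link down (rfcomm closes the DLC here) */
    }

    static const struct tty_port_operations my_serial_port_ops = {
    	.activate = my_serial_activate,
    	.shutdown = my_serial_shutdown,
    };
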
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index e7bd4eea575c..96bd388d93a4 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -176,8 +176,13 @@ static int sco_connect(struct sock *sk)
else
type = SCO_LINK;
- hcon = hci_connect(hdev, type, dst, BDADDR_BREDR, BT_SECURITY_LOW,
- HCI_AT_NO_BONDING);
+ if (sco_pi(sk)->setting == BT_VOICE_TRANSPARENT &&
+ (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev))) {
+ err = -EOPNOTSUPP;
+ goto done;
+ }
+
+ hcon = hci_connect_sco(hdev, type, dst, sco_pi(sk)->setting);
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
goto done;
@@ -417,6 +422,8 @@ static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, int pro
sk->sk_protocol = proto;
sk->sk_state = BT_OPEN;
+ sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT;
+
setup_timer(&sk->sk_timer, sco_sock_timeout, (unsigned long)sk);
bt_sock_link(&sco_sk_list, sk);
@@ -652,7 +659,7 @@ static int sco_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
return err;
}
-static void sco_conn_defer_accept(struct hci_conn *conn, int mask)
+static void sco_conn_defer_accept(struct hci_conn *conn, u16 setting)
{
struct hci_dev *hdev = conn->hdev;
@@ -664,11 +671,7 @@ static void sco_conn_defer_accept(struct hci_conn *conn, int mask)
struct hci_cp_accept_conn_req cp;
bacpy(&cp.bdaddr, &conn->dst);
-
- if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
- cp.role = 0x00; /* Become master */
- else
- cp.role = 0x01; /* Remain slave */
+ cp.role = 0x00; /* Ignored */
hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
} else {
@@ -679,9 +682,21 @@ static void sco_conn_defer_accept(struct hci_conn *conn, int mask)
cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
- cp.max_latency = __constant_cpu_to_le16(0xffff);
- cp.content_format = cpu_to_le16(hdev->voice_setting);
- cp.retrans_effort = 0xff;
+ cp.content_format = cpu_to_le16(setting);
+
+ switch (setting & SCO_AIRMODE_MASK) {
+ case SCO_AIRMODE_TRANSP:
+ if (conn->pkt_type & ESCO_2EV3)
+ cp.max_latency = __constant_cpu_to_le16(0x0008);
+ else
+ cp.max_latency = __constant_cpu_to_le16(0x000D);
+ cp.retrans_effort = 0x02;
+ break;
+ case SCO_AIRMODE_CVSD:
+ cp.max_latency = __constant_cpu_to_le16(0xffff);
+ cp.retrans_effort = 0xff;
+ break;
+ }
hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
sizeof(cp), &cp);
@@ -698,7 +713,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
if (sk->sk_state == BT_CONNECT2 &&
test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
- sco_conn_defer_accept(pi->conn->hcon, 0);
+ sco_conn_defer_accept(pi->conn->hcon, pi->setting);
sk->sk_state = BT_CONFIG;
msg->msg_namelen = 0;
@@ -714,7 +729,8 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
- int err = 0;
+ int len, err = 0;
+ struct bt_voice voice;
u32 opt;
BT_DBG("sk %p", sk);
@@ -740,6 +756,31 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char
clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
break;
+ case BT_VOICE:
+ if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND &&
+ sk->sk_state != BT_CONNECT2) {
+ err = -EINVAL;
+ break;
+ }
+
+ voice.setting = sco_pi(sk)->setting;
+
+ len = min_t(unsigned int, sizeof(voice), optlen);
+ if (copy_from_user((char *) &voice, optval, len)) {
+ err = -EFAULT;
+ break;
+ }
+
+ /* Explicitly check for these values */
+ if (voice.setting != BT_VOICE_TRANSPARENT &&
+ voice.setting != BT_VOICE_CVSD_16BIT) {
+ err = -EINVAL;
+ break;
+ }
+
+ sco_pi(sk)->setting = voice.setting;
+ break;
+
default:
err = -ENOPROTOOPT;
break;
@@ -765,7 +806,9 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user
switch (optname) {
case SCO_OPTIONS:
- if (sk->sk_state != BT_CONNECTED) {
+ if (sk->sk_state != BT_CONNECTED &&
+ !(sk->sk_state == BT_CONNECT2 &&
+ test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) {
err = -ENOTCONN;
break;
}
@@ -781,7 +824,9 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user
break;
case SCO_CONNINFO:
- if (sk->sk_state != BT_CONNECTED) {
+ if (sk->sk_state != BT_CONNECTED &&
+ !(sk->sk_state == BT_CONNECT2 &&
+ test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) {
err = -ENOTCONN;
break;
}
@@ -809,6 +854,7 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char
{
struct sock *sk = sock->sk;
int len, err = 0;
+ struct bt_voice voice;
BT_DBG("sk %p", sk);
@@ -834,6 +880,15 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char
break;
+ case BT_VOICE:
+ voice.setting = sco_pi(sk)->setting;
+
+ len = min_t(unsigned int, len, sizeof(voice));
+ if (copy_to_user(optval, (char *)&voice, len))
+ err = -EFAULT;
+
+ break;
+
default:
err = -ENOPROTOOPT;
break;
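
The new BT_VOICE socket option is driven from user space before the SCO connection is made. A minimal sketch, assuming the BT_VOICE constant and struct bt_voice come from the matching patched UAPI headers:

    #include <string.h>
    #include <sys/socket.h>
    #include <bluetooth/bluetooth.h>
    #include <bluetooth/sco.h>

    /* request transparent air mode on a not-yet-connected SCO socket */
    static int request_transparent(int sk)
    {
    	struct bt_voice voice;

    	memset(&voice, 0, sizeof(voice));
    	voice.setting = BT_VOICE_TRANSPARENT;

    	/* only valid in BT_OPEN, BT_BOUND or BT_CONNECT2 state, as
    	 * enforced by sco_sock_setsockopt() above */
    	return setsockopt(sk, SOL_BLUETOOTH, BT_VOICE,
    			  &voice, sizeof(voice));
    }
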
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 89659d4ed1f9..ca04163635da 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -245,22 +245,22 @@ fail:
int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
{
struct netpoll *np;
- int err = 0;
+ int err;
+
+ if (!p->br->dev->npinfo)
+ return 0;
np = kzalloc(sizeof(*p->np), gfp);
- err = -ENOMEM;
if (!np)
- goto out;
+ return -ENOMEM;
err = __netpoll_setup(np, p->dev, gfp);
if (err) {
kfree(np);
- goto out;
+ return err;
}
p->np = np;
-
-out:
return err;
}
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 5623be6b9ecd..c41d5fbb91d0 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -363,7 +363,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
if (err)
goto err2;
- if (br_netpoll_info(br) && ((err = br_netpoll_enable(p, GFP_KERNEL))))
+ err = br_netpoll_enable(p, GFP_KERNEL);
+ if (err)
goto err3;
err = netdev_master_upper_dev_link(dev, br->dev);
@@ -382,6 +383,9 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
netdev_update_features(br->dev);
+ if (br->dev->needed_headroom < dev->needed_headroom)
+ br->dev->needed_headroom = dev->needed_headroom;
+
spin_lock_bh(&br->lock);
changed_addr = br_stp_recalculate_bridge_id(br);
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 6319c4333c39..85a09bb5ca51 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -9,6 +9,7 @@
#include <net/netlink.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
+#include <net/addrconf.h>
#endif
#include "br_private.h"
@@ -61,7 +62,8 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
for (i = 0; i < mdb->max; i++) {
struct net_bridge_mdb_entry *mp;
- struct net_bridge_port_group *p, **pp;
+ struct net_bridge_port_group *p;
+ struct net_bridge_port_group __rcu **pp;
struct net_bridge_port *port;
hlist_for_each_entry_rcu(mp, &mdb->mhash[i], hlist[mdb->ver]) {
@@ -253,7 +255,7 @@ static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
return false;
#if IS_ENABLED(CONFIG_IPV6)
} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
- if (!ipv6_is_transient_multicast(&entry->addr.u.ip6))
+ if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6))
return false;
#endif
} else
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index bbcb43582496..d1c578630678 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -29,6 +29,7 @@
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
+#include <net/addrconf.h>
#endif
#include "br_private.h"
@@ -724,7 +725,7 @@ static int br_ip6_multicast_add_group(struct net_bridge *br,
{
struct br_ip br_group;
- if (!ipv6_is_transient_multicast(group))
+ if (ipv6_addr_is_ll_all_nodes(group))
return 0;
br_group.u.ip6 = *group;
@@ -1255,7 +1256,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
if (!mld2q->mld2q_nsrcs)
group = &mld2q->mld2q_mca;
- max_delay = max(msecs_to_jiffies(MLDV2_MRC(ntohs(mld2q->mld2q_mrc))), 1UL);
+ max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
}
br_multicast_query_received(br, port, &br->ip6_querier,
@@ -1410,7 +1411,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
&br->ip6_query;
- if (!ipv6_is_transient_multicast(group))
+ if (ipv6_addr_is_ll_all_nodes(group))
return;
br_group.u.ip6 = *group;
@@ -1547,8 +1548,14 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
* - MLD has always Router Alert hop-by-hop option
* - But we do not support jumbograms.
*/
- if (ip6h->version != 6 ||
- ip6h->nexthdr != IPPROTO_HOPOPTS ||
+ if (ip6h->version != 6)
+ return 0;
+
+ /* Prevent flooding this packet if there is no listener present */
+ if (!ipv6_addr_is_ll_all_nodes(&ip6h->daddr))
+ BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
+
+ if (ip6h->nexthdr != IPPROTO_HOPOPTS ||
ip6h->payload_len == 0)
return 0;
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 3a3f371b2841..2998dd1769a0 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -102,6 +102,11 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
case NETDEV_PRE_TYPE_CHANGE:
/* Forbid underlying device to change its type. */
return NOTIFY_BAD;
+
+ case NETDEV_RESEND_IGMP:
+ /* Propagate to master device */
+ call_netdevice_notifiers(event, br->dev);
+ break;
}
/* Events that may cause spanning tree to refresh */
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 263ba9034468..598cb0b333c6 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -352,11 +352,6 @@ extern void br_dev_delete(struct net_device *dev, struct list_head *list);
extern netdev_tx_t br_dev_xmit(struct sk_buff *skb,
struct net_device *dev);
#ifdef CONFIG_NET_POLL_CONTROLLER
-static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br)
-{
- return br->dev->npinfo;
-}
-
static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
struct sk_buff *skb)
{
@@ -369,11 +364,6 @@ static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
extern int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp);
extern void br_netpoll_disable(struct net_bridge_port *p);
#else
-static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br)
-{
- return NULL;
-}
-
static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
struct sk_buff *skb)
{
@@ -494,7 +484,7 @@ extern void br_multicast_free_pg(struct rcu_head *head);
extern struct net_bridge_port_group *br_multicast_new_port_group(
struct net_bridge_port *port,
struct br_ip *group,
- struct net_bridge_port_group *next,
+ struct net_bridge_port_group __rcu *next,
unsigned char state);
extern void br_mdb_init(void);
extern void br_mdb_uninit(void);
@@ -504,16 +494,6 @@ extern void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
#define mlock_dereference(X, br) \
rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
-#if IS_ENABLED(CONFIG_IPV6)
-#include <net/addrconf.h>
-static inline int ipv6_is_transient_multicast(const struct in6_addr *addr)
-{
- if (ipv6_addr_is_multicast(addr) && IPV6_ADDR_MC_FLAG_TRANSIENT(addr))
- return 1;
- return 0;
-}
-#endif
-
static inline bool br_multicast_is_router(struct net_bridge *br)
{
return br->multicast_router == 2 ||
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 2bd4b58f4372..0f455227da83 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -293,9 +293,10 @@ int cfctrl_linkup_request(struct cflayer *layer,
count = cfctrl_cancel_req(&cfctrl->serv.layer,
user_layer);
- if (count != 1)
+ if (count != 1) {
pr_err("Could not remove request (%d)", count);
return -ENODEV;
+ }
}
return 0;
}
diff --git a/net/can/gw.c b/net/can/gw.c
index 2f291f961a17..3f9b0f3a2818 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -146,6 +146,7 @@ struct cgw_job {
/* tbc */
};
u8 gwtype;
+ u8 limit_hops;
u16 flags;
};
@@ -402,6 +403,11 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
/* put the incremented hop counter in the cloned skb */
cgw_hops(nskb) = cgw_hops(skb) + 1;
+
+ /* first processing of this CAN frame -> adjust to private hop limit */
+ if (gwj->limit_hops && cgw_hops(nskb) == 1)
+ cgw_hops(nskb) = max_hops - gwj->limit_hops + 1;
+
nskb->dev = gwj->dst.dev;
/* pointer to modifiable CAN frame */
@@ -509,6 +515,11 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
/* check non default settings of attributes */
+ if (gwj->limit_hops) {
+ if (nla_put_u8(skb, CGW_LIM_HOPS, gwj->limit_hops) < 0)
+ goto cancel;
+ }
+
if (gwj->mod.modtype.and) {
memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf));
mb.modtype = gwj->mod.modtype.and;
@@ -606,11 +617,12 @@ static const struct nla_policy cgw_policy[CGW_MAX+1] = {
[CGW_SRC_IF] = { .type = NLA_U32 },
[CGW_DST_IF] = { .type = NLA_U32 },
[CGW_FILTER] = { .len = sizeof(struct can_filter) },
+ [CGW_LIM_HOPS] = { .type = NLA_U8 },
};
/* check for common and gwtype specific attributes */
static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
- u8 gwtype, void *gwtypeattr)
+ u8 gwtype, void *gwtypeattr, u8 *limhops)
{
struct nlattr *tb[CGW_MAX+1];
struct cgw_frame_mod mb;
@@ -625,6 +637,13 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
if (err < 0)
return err;
+ if (tb[CGW_LIM_HOPS]) {
+ *limhops = nla_get_u8(tb[CGW_LIM_HOPS]);
+
+ if (*limhops < 1 || *limhops > max_hops)
+ return -EINVAL;
+ }
+
/* check for AND/OR/XOR/SET modifications */
if (tb[CGW_MOD_AND]) {
@@ -782,6 +801,7 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct rtcanmsg *r;
struct cgw_job *gwj;
+ u8 limhops = 0;
int err = 0;
if (!capable(CAP_NET_ADMIN))
@@ -808,7 +828,8 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh)
gwj->flags = r->flags;
gwj->gwtype = r->gwtype;
- err = cgw_parse_attr(nlh, &gwj->mod, CGW_TYPE_CAN_CAN, &gwj->ccgw);
+ err = cgw_parse_attr(nlh, &gwj->mod, CGW_TYPE_CAN_CAN, &gwj->ccgw,
+ &limhops);
if (err < 0)
goto out;
@@ -836,6 +857,8 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh)
if (gwj->dst.dev->type != ARPHRD_CAN || gwj->dst.dev->header_ops)
goto put_src_dst_out;
+ gwj->limit_hops = limhops;
+
ASSERT_RTNL();
err = cgw_register_filter(gwj);
@@ -867,13 +890,14 @@ static void cgw_remove_all_jobs(void)
}
}
-static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
+static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct cgw_job *gwj = NULL;
struct hlist_node *nx;
struct rtcanmsg *r;
struct cf_mod mod;
struct can_can_gw ccgw;
+ u8 limhops = 0;
int err = 0;
if (!capable(CAP_NET_ADMIN))
@@ -890,7 +914,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
if (r->gwtype != CGW_TYPE_CAN_CAN)
return -EINVAL;
- err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw);
+ err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
if (err < 0)
return err;
@@ -910,6 +934,9 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
if (gwj->flags != r->flags)
continue;
+ if (gwj->limit_hops != limhops)
+ continue;
+
if (memcmp(&gwj->mod, &mod, sizeof(mod)))
continue;
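
The pre-load arithmetic above bounds per-job routing: assuming frames stop being routed once their hop counter reaches max_hops, starting a first-hop clone at max_hops - limit_hops + 1 leaves exactly limit_hops traversals. A worked example with max_hops = 4:

    /* limit_hops = 1: first processing sees cgw_hops(nskb) == 1 and
     * rewrites it to 4 - 1 + 1 = 4, i.e. the clone is already at the
     * ceiling and is not routed again -- exactly one hop.
     * limit_hops = 3: the counter becomes 4 - 3 + 1 = 2, allowing two
     * further traversals for a total of three.
     */
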
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index eb0a46a49bd4..3be308e14302 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -409,7 +409,7 @@ static void ceph_sock_write_space(struct sock *sk)
* and net/core/stream.c:sk_stream_write_space().
*/
if (con_flag_test(con, CON_FLAG_WRITE_PENDING)) {
- if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
+ if (sk_stream_is_writeable(sk)) {
dout("%s %p queueing write work\n", __func__, con);
clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
queue_con(con);
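
sk_stream_is_writeable() is a small include/net/sock.h helper introduced alongside these conversions (here and in the stream.c/dccp hunks below). Roughly, and hedging on the exact form:

    static inline bool sk_stream_is_writeable(const struct sock *sk)
    {
    	return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) &&
    	       sk_stream_memory_free(sk);
    }

so each call site keeps its old wspace comparison while also honouring the new TCP_NOTSENT_LOWAT limit through sk_stream_memory_free().
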
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 8ab48cd89559..af814e764206 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -48,6 +48,7 @@
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include <linux/pagemap.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
@@ -573,6 +574,77 @@ fault:
}
EXPORT_SYMBOL(skb_copy_datagram_from_iovec);
+/**
+ * zerocopy_sg_from_iovec - Build a zerocopy datagram from an iovec
+ * @skb: buffer to fill
+ * @from: io vector to copy from
+ * @offset: offset in the io vector to start copying from
+ * @count: number of io vector entries to copy from
+ *
+ * The function will first copy up to headlen, and then pin the userspace
+ * pages and build frags through them.
+ *
+ * Returns 0, -EFAULT or -EMSGSIZE.
+ * Note: the iovec is not modified during the copy
+ */
+int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
+ int offset, size_t count)
+{
+ int len = iov_length(from, count) - offset;
+ int copy = min_t(int, skb_headlen(skb), len);
+ int size;
+ int i = 0;
+
+ /* copy up to skb headlen */
+ if (skb_copy_datagram_from_iovec(skb, 0, from, offset, copy))
+ return -EFAULT;
+
+ if (len == copy)
+ return 0;
+
+ offset += copy;
+ while (count--) {
+ struct page *page[MAX_SKB_FRAGS];
+ int num_pages;
+ unsigned long base;
+ unsigned long truesize;
+
+ /* Skip over from offset and copied */
+ if (offset >= from->iov_len) {
+ offset -= from->iov_len;
+ ++from;
+ continue;
+ }
+ len = from->iov_len - offset;
+ base = (unsigned long)from->iov_base + offset;
+ size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
+ if (i + size > MAX_SKB_FRAGS)
+ return -EMSGSIZE;
+ num_pages = get_user_pages_fast(base, size, 0, &page[i]);
+ if (num_pages != size) {
+ release_pages(&page[i], num_pages, 0);
+ return -EFAULT;
+ }
+ truesize = size * PAGE_SIZE;
+ skb->data_len += len;
+ skb->len += len;
+ skb->truesize += truesize;
+ atomic_add(truesize, &skb->sk->sk_wmem_alloc);
+ while (len) {
+ int off = base & ~PAGE_MASK;
+ int size = min_t(int, len, PAGE_SIZE - off);
+ skb_fill_page_desc(skb, i, page[i], off, size);
+ base += size;
+ len -= size;
+ i++;
+ }
+ offset = 0;
+ ++from;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(zerocopy_sg_from_iovec);
+
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
u8 __user *to, int len,
__wsum *csump)
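
Elsewhere in this series tun/macvtap consume zerocopy_sg_from_iovec() when building transmit skbs from user memory. Schematically, with sk, iov, offset, count, hdr_len and noblock taken from the caller's context and error handling trimmed:

    struct sk_buff *skb;
    int err;

    skb = sock_alloc_send_pskb(sk, hdr_len, 0, noblock, &err, 0);
    if (!skb)
    	return ERR_PTR(err);

    /* pins the user pages behind the iovec and attaches them as frags */
    err = zerocopy_sg_from_iovec(skb, iov, offset, count);
    if (err) {
    	kfree_skb(skb);
    	return ERR_PTR(err);
    }
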
diff --git a/net/core/dev.c b/net/core/dev.c
index 26755dd40daa..5c713f2239cc 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -174,7 +174,7 @@ static DEFINE_SPINLOCK(napi_hash_lock);
static unsigned int napi_gen_id;
static DEFINE_HASHTABLE(napi_hash, 8);
-seqcount_t devnet_rename_seq;
+static seqcount_t devnet_rename_seq;
static inline void dev_base_seq_inc(struct net *net)
{
@@ -1691,13 +1691,13 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
kfree_skb(skb);
return NET_RX_DROP;
}
- skb_scrub_packet(skb);
skb->protocol = eth_type_trans(skb, dev);
/* eth_type_trans() can set pkt_type.
- * clear pkt_type _after_ calling eth_type_trans()
+ * call skb_scrub_packet() after it to clear pkt_type _after_ calling
+ * eth_type_trans().
*/
- skb->pkt_type = PACKET_HOST;
+ skb_scrub_packet(skb, true);
return netif_rx(skb);
}
@@ -4367,57 +4367,48 @@ softnet_break:
goto out;
}
-struct netdev_upper {
+struct netdev_adjacent {
struct net_device *dev;
+
+ /* upper master flag, there can only be one master device per list */
bool master;
+
+ /* indicates that this dev is our first-level lower/upper device */
+ bool neighbour;
+
+ /* counter for the number of times this device was added to us */
+ u16 ref_nr;
+
struct list_head list;
struct rcu_head rcu;
- struct list_head search_list;
};
-static void __append_search_uppers(struct list_head *search_list,
- struct net_device *dev)
+static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
+ struct net_device *adj_dev,
+ bool upper)
{
- struct netdev_upper *upper;
+ struct netdev_adjacent *adj;
+ struct list_head *dev_list;
- list_for_each_entry(upper, &dev->upper_dev_list, list) {
- /* check if this upper is not already in search list */
- if (list_empty(&upper->search_list))
- list_add_tail(&upper->search_list, search_list);
+ dev_list = upper ? &dev->upper_dev_list : &dev->lower_dev_list;
+
+ list_for_each_entry(adj, dev_list, list) {
+ if (adj->dev == adj_dev)
+ return adj;
}
+ return NULL;
}
-static bool __netdev_search_upper_dev(struct net_device *dev,
- struct net_device *upper_dev)
+static inline struct netdev_adjacent *__netdev_find_upper(struct net_device *dev,
+ struct net_device *udev)
{
- LIST_HEAD(search_list);
- struct netdev_upper *upper;
- struct netdev_upper *tmp;
- bool ret = false;
-
- __append_search_uppers(&search_list, dev);
- list_for_each_entry(upper, &search_list, search_list) {
- if (upper->dev == upper_dev) {
- ret = true;
- break;
- }
- __append_search_uppers(&search_list, upper->dev);
- }
- list_for_each_entry_safe(upper, tmp, &search_list, search_list)
- INIT_LIST_HEAD(&upper->search_list);
- return ret;
+ return __netdev_find_adj(dev, udev, true);
}
-static struct netdev_upper *__netdev_find_upper(struct net_device *dev,
- struct net_device *upper_dev)
+static inline struct netdev_adjacent *__netdev_find_lower(struct net_device *dev,
+ struct net_device *ldev)
{
- struct netdev_upper *upper;
-
- list_for_each_entry(upper, &dev->upper_dev_list, list) {
- if (upper->dev == upper_dev)
- return upper;
- }
- return NULL;
+ return __netdev_find_adj(dev, ldev, false);
}
/**
@@ -4462,7 +4453,7 @@ EXPORT_SYMBOL(netdev_has_any_upper_dev);
*/
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
- struct netdev_upper *upper;
+ struct netdev_adjacent *upper;
ASSERT_RTNL();
@@ -4470,13 +4461,38 @@ struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
return NULL;
upper = list_first_entry(&dev->upper_dev_list,
- struct netdev_upper, list);
+ struct netdev_adjacent, list);
if (likely(upper->master))
return upper->dev;
return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get);
+/* netdev_upper_get_next_dev_rcu - Get the next dev from upper list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next device from the dev's upper list, starting from iter
+ * position. The caller must hold RCU read lock.
+ */
+struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
+ struct list_head **iter)
+{
+ struct netdev_adjacent *upper;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
+
+ if (&upper->list == &dev->upper_dev_list)
+ return NULL;
+
+ *iter = &upper->list;
+
+ return upper->dev;
+}
+EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
+
/**
* netdev_master_upper_dev_get_rcu - Get master upper device
* @dev: device
@@ -4486,20 +4502,158 @@ EXPORT_SYMBOL(netdev_master_upper_dev_get);
*/
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
{
- struct netdev_upper *upper;
+ struct netdev_adjacent *upper;
upper = list_first_or_null_rcu(&dev->upper_dev_list,
- struct netdev_upper, list);
+ struct netdev_adjacent, list);
if (upper && likely(upper->master))
return upper->dev;
return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
+static int __netdev_adjacent_dev_insert(struct net_device *dev,
+ struct net_device *adj_dev,
+ bool neighbour, bool master,
+ bool upper)
+{
+ struct netdev_adjacent *adj;
+
+ adj = __netdev_find_adj(dev, adj_dev, upper);
+
+ if (adj) {
+ BUG_ON(neighbour);
+ adj->ref_nr++;
+ return 0;
+ }
+
+ adj = kmalloc(sizeof(*adj), GFP_KERNEL);
+ if (!adj)
+ return -ENOMEM;
+
+ adj->dev = adj_dev;
+ adj->master = master;
+ adj->neighbour = neighbour;
+ adj->ref_nr = 1;
+
+ dev_hold(adj_dev);
+ pr_debug("dev_hold for %s, because of %s link added from %s to %s\n",
+ adj_dev->name, upper ? "upper" : "lower", dev->name,
+ adj_dev->name);
+
+ if (!upper) {
+ list_add_tail_rcu(&adj->list, &dev->lower_dev_list);
+ return 0;
+ }
+
+ /* Ensure that master upper link is always the first item in list. */
+ if (master)
+ list_add_rcu(&adj->list, &dev->upper_dev_list);
+ else
+ list_add_tail_rcu(&adj->list, &dev->upper_dev_list);
+
+ return 0;
+}
+
+static inline int __netdev_upper_dev_insert(struct net_device *dev,
+ struct net_device *udev,
+ bool master, bool neighbour)
+{
+ return __netdev_adjacent_dev_insert(dev, udev, neighbour, master,
+ true);
+}
+
+static inline int __netdev_lower_dev_insert(struct net_device *dev,
+ struct net_device *ldev,
+ bool neighbour)
+{
+ return __netdev_adjacent_dev_insert(dev, ldev, neighbour, false,
+ false);
+}
+
+void __netdev_adjacent_dev_remove(struct net_device *dev,
+ struct net_device *adj_dev, bool upper)
+{
+ struct netdev_adjacent *adj;
+
+ if (upper)
+ adj = __netdev_find_upper(dev, adj_dev);
+ else
+ adj = __netdev_find_lower(dev, adj_dev);
+
+ if (!adj)
+ BUG();
+
+ if (adj->ref_nr > 1) {
+ adj->ref_nr--;
+ return;
+ }
+
+ list_del_rcu(&adj->list);
+ pr_debug("dev_put for %s, because of %s link removed from %s to %s\n",
+ adj_dev->name, upper ? "upper" : "lower", dev->name,
+ adj_dev->name);
+ dev_put(adj_dev);
+ kfree_rcu(adj, rcu);
+}
+
+static inline void __netdev_upper_dev_remove(struct net_device *dev,
+ struct net_device *udev)
+{
+ return __netdev_adjacent_dev_remove(dev, udev, true);
+}
+
+static inline void __netdev_lower_dev_remove(struct net_device *dev,
+ struct net_device *ldev)
+{
+ return __netdev_adjacent_dev_remove(dev, ldev, false);
+}
+
+int __netdev_adjacent_dev_insert_link(struct net_device *dev,
+ struct net_device *upper_dev,
+ bool master, bool neighbour)
+{
+ int ret;
+
+ ret = __netdev_upper_dev_insert(dev, upper_dev, master, neighbour);
+ if (ret)
+ return ret;
+
+ ret = __netdev_lower_dev_insert(upper_dev, dev, neighbour);
+ if (ret) {
+ __netdev_upper_dev_remove(dev, upper_dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline int __netdev_adjacent_dev_link(struct net_device *dev,
+ struct net_device *udev)
+{
+ return __netdev_adjacent_dev_insert_link(dev, udev, false, false);
+}
+
+static inline int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
+ struct net_device *udev,
+ bool master)
+{
+ return __netdev_adjacent_dev_insert_link(dev, udev, master, true);
+}
+
+void __netdev_adjacent_dev_unlink(struct net_device *dev,
+ struct net_device *upper_dev)
+{
+ __netdev_upper_dev_remove(dev, upper_dev);
+ __netdev_lower_dev_remove(upper_dev, dev);
+}
+
+
static int __netdev_upper_dev_link(struct net_device *dev,
struct net_device *upper_dev, bool master)
{
- struct netdev_upper *upper;
+ struct netdev_adjacent *i, *j, *to_i, *to_j;
+ int ret = 0;
ASSERT_RTNL();
@@ -4507,7 +4661,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
return -EBUSY;
/* To prevent loops, check if dev is not upper device to upper_dev. */
- if (__netdev_search_upper_dev(upper_dev, dev))
+ if (__netdev_find_upper(upper_dev, dev))
return -EBUSY;
if (__netdev_find_upper(dev, upper_dev))
@@ -4516,22 +4670,76 @@ static int __netdev_upper_dev_link(struct net_device *dev,
if (master && netdev_master_upper_dev_get(dev))
return -EBUSY;
- upper = kmalloc(sizeof(*upper), GFP_KERNEL);
- if (!upper)
- return -ENOMEM;
+ ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, master);
+ if (ret)
+ return ret;
- upper->dev = upper_dev;
- upper->master = master;
- INIT_LIST_HEAD(&upper->search_list);
+ /* Now that we linked these devs, make all the upper_dev's
+ * upper_dev_list visible to every dev's lower_dev_list and vice
+ * versa, and don't forget the devices themselves. All of these
+ * links are non-neighbours.
+ */
+ list_for_each_entry(i, &dev->lower_dev_list, list) {
+ list_for_each_entry(j, &upper_dev->upper_dev_list, list) {
+ ret = __netdev_adjacent_dev_link(i->dev, j->dev);
+ if (ret)
+ goto rollback_mesh;
+ }
+ }
+
+ /* add dev to every upper_dev's upper device */
+ list_for_each_entry(i, &upper_dev->upper_dev_list, list) {
+ ret = __netdev_adjacent_dev_link(dev, i->dev);
+ if (ret)
+ goto rollback_upper_mesh;
+ }
+
+ /* add upper_dev to every dev's lower device */
+ list_for_each_entry(i, &dev->lower_dev_list, list) {
+ ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
+ if (ret)
+ goto rollback_lower_mesh;
+ }
- /* Ensure that master upper link is always the first item in list. */
- if (master)
- list_add_rcu(&upper->list, &dev->upper_dev_list);
- else
- list_add_tail_rcu(&upper->list, &dev->upper_dev_list);
- dev_hold(upper_dev);
call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
return 0;
+
+rollback_lower_mesh:
+ to_i = i;
+ list_for_each_entry(i, &dev->lower_dev_list, list) {
+ if (i == to_i)
+ break;
+ __netdev_adjacent_dev_unlink(i->dev, upper_dev);
+ }
+
+ i = NULL;
+
+rollback_upper_mesh:
+ to_i = i;
+ list_for_each_entry(i, &upper_dev->upper_dev_list, list) {
+ if (i == to_i)
+ break;
+ __netdev_adjacent_dev_unlink(dev, i->dev);
+ }
+
+ i = j = NULL;
+
+rollback_mesh:
+ to_i = i;
+ to_j = j;
+ list_for_each_entry(i, &dev->lower_dev_list, list) {
+ list_for_each_entry(j, &upper_dev->upper_dev_list, list) {
+ if (i == to_i && j == to_j)
+ break;
+ __netdev_adjacent_dev_unlink(i->dev, j->dev);
+ }
+ if (i == to_i)
+ break;
+ }
+
+ __netdev_adjacent_dev_unlink(dev, upper_dev);
+
+ return ret;
}
/**
@@ -4580,16 +4788,28 @@ EXPORT_SYMBOL(netdev_master_upper_dev_link);
void netdev_upper_dev_unlink(struct net_device *dev,
struct net_device *upper_dev)
{
- struct netdev_upper *upper;
-
+ struct netdev_adjacent *i, *j;
ASSERT_RTNL();
- upper = __netdev_find_upper(dev, upper_dev);
- if (!upper)
- return;
- list_del_rcu(&upper->list);
- dev_put(upper_dev);
- kfree_rcu(upper, rcu);
+ __netdev_adjacent_dev_unlink(dev, upper_dev);
+
+ /* Here is the tricky part. We must remove all dev's lower
+ * devices from all upper_dev's upper devices and vice
+ * versa, to maintain the graph relationship.
+ */
+ list_for_each_entry(i, &dev->lower_dev_list, list)
+ list_for_each_entry(j, &upper_dev->upper_dev_list, list)
+ __netdev_adjacent_dev_unlink(i->dev, j->dev);
+
+ /* also remove the devices themselves from the lower/upper
+ * device lists
+ */
+ list_for_each_entry(i, &dev->lower_dev_list, list)
+ __netdev_adjacent_dev_unlink(i->dev, upper_dev);
+
+ list_for_each_entry(i, &upper_dev->upper_dev_list, list)
+ __netdev_adjacent_dev_unlink(dev, i->dev);
+
call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);
@@ -4989,6 +5209,24 @@ int dev_change_carrier(struct net_device *dev, bool new_carrier)
EXPORT_SYMBOL(dev_change_carrier);
/**
+ * dev_get_phys_port_id - Get device physical port ID
+ * @dev: device
+ * @ppid: port ID
+ *
+ * Get device physical port ID
+ */
+int dev_get_phys_port_id(struct net_device *dev,
+ struct netdev_phys_port_id *ppid)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ if (!ops->ndo_get_phys_port_id)
+ return -EOPNOTSUPP;
+ return ops->ndo_get_phys_port_id(dev, ppid);
+}
+EXPORT_SYMBOL(dev_get_phys_port_id);
+
+/**
* dev_new_index - allocate an ifindex
* @net: the applicable net namespace
*
@@ -5832,6 +6070,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
INIT_LIST_HEAD(&dev->unreg_list);
INIT_LIST_HEAD(&dev->link_watch_list);
INIT_LIST_HEAD(&dev->upper_dev_list);
+ INIT_LIST_HEAD(&dev->lower_dev_list);
dev->priv_flags = IFF_XMIT_DST_RELEASE;
setup(dev);
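
With the adjacency lists in place, a driver can enumerate every upper device, not just its direct neighbour, via the new accessor. A minimal sketch of the iteration idiom (hypothetical caller):

    struct net_device *upper;
    struct list_head *iter = &dev->upper_dev_list;

    rcu_read_lock();
    while ((upper = netdev_upper_get_next_dev_rcu(dev, &iter)) != NULL)
    	pr_debug("%s has upper device %s\n", dev->name, upper->name);
    rcu_read_unlock();
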
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 21735440c44a..2e654138433c 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -33,6 +33,9 @@ int fib_default_rule_add(struct fib_rules_ops *ops,
r->flags = flags;
r->fr_net = hold_net(ops->fro_net);
+ r->suppress_prefixlen = -1;
+ r->suppress_ifgroup = -1;
+
/* The lock is not required here, the list is unreachable
* at the moment this function is called */
list_add_tail(&r->list, &ops->rules_list);
@@ -226,6 +229,9 @@ jumped:
else
err = ops->action(rule, fl, flags, arg);
+ if (!err && ops->suppress && ops->suppress(rule, arg))
+ continue;
+
if (err != -EAGAIN) {
if ((arg->flags & FIB_LOOKUP_NOREF) ||
likely(atomic_inc_not_zero(&rule->refcnt))) {
@@ -337,6 +343,15 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
rule->action = frh->action;
rule->flags = frh->flags;
rule->table = frh_get_table(frh, tb);
+ if (tb[FRA_SUPPRESS_PREFIXLEN])
+ rule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]);
+ else
+ rule->suppress_prefixlen = -1;
+
+ if (tb[FRA_SUPPRESS_IFGROUP])
+ rule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]);
+ else
+ rule->suppress_ifgroup = -1;
if (!tb[FRA_PRIORITY] && ops->default_pref)
rule->pref = ops->default_pref(ops);
@@ -523,6 +538,8 @@ static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
+ nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
+ nla_total_size(4) /* FRA_PRIORITY */
+ nla_total_size(4) /* FRA_TABLE */
+ + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
+ + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
+ nla_total_size(4) /* FRA_FWMARK */
+ nla_total_size(4); /* FRA_FWMASK */
@@ -548,6 +565,8 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
frh->table = rule->table;
if (nla_put_u32(skb, FRA_TABLE, rule->table))
goto nla_put_failure;
+ if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
+ goto nla_put_failure;
frh->res1 = 0;
frh->res2 = 0;
frh->action = rule->action;
@@ -580,6 +599,12 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
(rule->target &&
nla_put_u32(skb, FRA_GOTO, rule->target)))
goto nla_put_failure;
+
+ if (rule->suppress_ifgroup != -1) {
+ if (nla_put_u32(skb, FRA_SUPPRESS_IFGROUP, rule->suppress_ifgroup))
+ goto nla_put_failure;
+ }
+
if (ops->fill(rule, skb, frh) < 0)
goto nla_put_failure;
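
After ops->action() succeeds, the new ops->suppress() hook may still veto the result, in which case the lookup simply continues with the next rule. A hedged sketch of a suppressor keyed on the new suppress_prefixlen field (the real fib4/fib6 implementations land elsewhere in this series):

    static bool my_fib_suppress(struct fib_rule *rule,
    			    struct fib_lookup_arg *arg)
    {
    	struct fib_result *result = arg->result;

    	/* veto routes whose prefix is too short, e.g. default routes */
    	if (result->prefixlen <= rule->suppress_prefixlen)
    		return true;	/* keep looking in later rules */

    	return false;		/* accept this result */
    }
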
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index d12e3a9a5356..0ff42f029ace 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -140,7 +140,11 @@ ipv6:
break;
}
case IPPROTO_IPIP:
- goto again;
+ proto = htons(ETH_P_IP);
+ goto ip;
+ case IPPROTO_IPV6:
+ proto = htons(ETH_P_IPV6);
+ goto ipv6;
default:
break;
}
diff --git a/net/core/iovec.c b/net/core/iovec.c
index de178e462682..b77eeecc0011 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -212,3 +212,27 @@ out_fault:
goto out;
}
EXPORT_SYMBOL(csum_partial_copy_fromiovecend);
+
+unsigned long iov_pages(const struct iovec *iov, int offset,
+ unsigned long nr_segs)
+{
+ unsigned long seg, base;
+ int pages = 0, len, size;
+
+ while (nr_segs && (offset >= iov->iov_len)) {
+ offset -= iov->iov_len;
+ ++iov;
+ --nr_segs;
+ }
+
+ for (seg = 0; seg < nr_segs; seg++) {
+ base = (unsigned long)iov[seg].iov_base + offset;
+ len = iov[seg].iov_len - offset;
+ size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
+ pages += size;
+ offset = 0;
+ }
+
+ return pages;
+}
+EXPORT_SYMBOL(iov_pages);
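
The rounding in iov_pages() counts every user page a segment touches, matching the size computation in zerocopy_sg_from_iovec() above. A worked example with PAGE_SIZE = 4096:

    /* a segment of len = 5000 starting at page offset base % 4096 = 100:
     * (100 + 5000 + 4095) >> 12 = 2, i.e. the data spans two pages.
     */
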
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 60533db8b72d..6072610a8672 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2759,13 +2759,11 @@ errout:
rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
-#ifdef CONFIG_ARPD
void neigh_app_ns(struct neighbour *n)
{
__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
}
EXPORT_SYMBOL(neigh_app_ns);
-#endif /* CONFIG_ARPD */
#ifdef CONFIG_SYSCTL
static int zero;
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 707c3134ddf2..3f40ea9de814 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -347,6 +347,28 @@ static ssize_t group_store(struct device *dev, struct device_attribute *attr,
NETDEVICE_SHOW(group, fmt_dec);
static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store);
+static ssize_t phys_port_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ ssize_t ret = -EINVAL;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ if (dev_isalive(netdev)) {
+ struct netdev_phys_port_id ppid;
+
+ ret = dev_get_phys_port_id(netdev, &ppid);
+ if (!ret)
+ ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
+ }
+ rtnl_unlock();
+
+ return ret;
+}
+static DEVICE_ATTR_RO(phys_port_id);
+
static struct attribute *net_class_attrs[] = {
&dev_attr_netdev_group.attr,
&dev_attr_type.attr,
@@ -367,6 +389,7 @@ static struct attribute *net_class_attrs[] = {
&dev_attr_mtu.attr,
&dev_attr_flags.attr,
&dev_attr_tx_queue_len.attr,
+ &dev_attr_phys_port_id.attr,
NULL,
};
ATTRIBUTE_GROUPS(net_class);
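
With the attribute registered, the port ID is readable from plain sysfs; a minimal sketch (interface name illustrative):

    #include <stdio.h>

    int main(void)
    {
    	char buf[128];
    	FILE *f = fopen("/sys/class/net/eth0/phys_port_id", "r");

    	if (!f)
    		return 1;
    	/* the read fails for drivers lacking ndo_get_phys_port_id */
    	if (fgets(buf, sizeof(buf), f))
    		printf("phys_port_id: %s", buf);	/* hex, per %*phN above */
    	fclose(f);
    	return 0;
    }
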
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 9640972ec50e..261357a66300 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -160,6 +160,8 @@
#include <net/net_namespace.h>
#include <net/checksum.h>
#include <net/ipv6.h>
+#include <net/udp.h>
+#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#ifdef CONFIG_XFRM
#include <net/xfrm.h>
@@ -198,6 +200,7 @@
#define F_QUEUE_MAP_RND (1<<13) /* queue map Random */
#define F_QUEUE_MAP_CPU (1<<14) /* queue map mirrors smp_processor_id() */
#define F_NODE (1<<15) /* Node memory alloc*/
+#define F_UDPCSUM (1<<16) /* Include UDP checksum */
/* Thread control flag bits */
#define T_STOP (1<<0) /* Stop run */
@@ -631,6 +634,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
if (pkt_dev->flags & F_UDPDST_RND)
seq_printf(seq, "UDPDST_RND ");
+ if (pkt_dev->flags & F_UDPCSUM)
+ seq_printf(seq, "UDPCSUM ");
+
if (pkt_dev->flags & F_MPLS_RND)
seq_printf(seq, "MPLS_RND ");
@@ -1228,6 +1234,12 @@ static ssize_t pktgen_if_write(struct file *file,
else if (strcmp(f, "!NODE_ALLOC") == 0)
pkt_dev->flags &= ~F_NODE;
+ else if (strcmp(f, "UDPCSUM") == 0)
+ pkt_dev->flags |= F_UDPCSUM;
+
+ else if (strcmp(f, "!UDPCSUM") == 0)
+ pkt_dev->flags &= ~F_UDPCSUM;
+
else {
sprintf(pg_result,
"Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s",
@@ -2733,7 +2745,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
udph->source = htons(pkt_dev->cur_udp_src);
udph->dest = htons(pkt_dev->cur_udp_dst);
udph->len = htons(datalen + 8); /* DATA + udphdr */
- udph->check = 0; /* No checksum */
+ udph->check = 0;
iph->ihl = 5;
iph->version = 4;
@@ -2747,11 +2759,28 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
iph->frag_off = 0;
iplen = 20 + 8 + datalen;
iph->tot_len = htons(iplen);
- iph->check = 0;
- iph->check = ip_fast_csum((void *)iph, iph->ihl);
+ ip_send_check(iph);
skb->protocol = protocol;
skb->dev = odev;
skb->pkt_type = PACKET_HOST;
+
+ if (!(pkt_dev->flags & F_UDPCSUM)) {
+ skb->ip_summed = CHECKSUM_NONE;
+ } else if (odev->features & NETIF_F_V4_CSUM) {
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum = 0;
+ udp4_hwcsum(skb, udph->source, udph->dest);
+ } else {
+ __wsum csum = udp_csum(skb);
+
+ /* add protocol-dependent pseudo-header */
+ udph->check = csum_tcpudp_magic(udph->source, udph->dest,
+ datalen + 8, IPPROTO_UDP, csum);
+
+ if (udph->check == 0)
+ udph->check = CSUM_MANGLED_0;
+ }
+
pktgen_finalize_skb(pkt_dev, skb, datalen);
#ifdef CONFIG_XFRM
@@ -2768,7 +2797,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
struct sk_buff *skb = NULL;
__u8 *eth;
struct udphdr *udph;
- int datalen;
+ int datalen, udplen;
struct ipv6hdr *iph;
__be16 protocol = htons(ETH_P_IPV6);
__be32 *mpls;
@@ -2844,10 +2873,11 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
net_info_ratelimited("increased datalen to %d\n", datalen);
}
+ udplen = datalen + sizeof(struct udphdr);
udph->source = htons(pkt_dev->cur_udp_src);
udph->dest = htons(pkt_dev->cur_udp_dst);
- udph->len = htons(datalen + sizeof(struct udphdr));
- udph->check = 0; /* No checksum */
+ udph->len = htons(udplen);
+ udph->check = 0;
*(__be32 *) iph = htonl(0x60000000); /* Version + flow */
@@ -2858,7 +2888,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
iph->hop_limit = 32;
- iph->payload_len = htons(sizeof(struct udphdr) + datalen);
+ iph->payload_len = htons(udplen);
iph->nexthdr = IPPROTO_UDP;
iph->daddr = pkt_dev->cur_in6_daddr;
@@ -2868,6 +2898,23 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
skb->dev = odev;
skb->pkt_type = PACKET_HOST;
+ if (!(pkt_dev->flags & F_UDPCSUM)) {
+ skb->ip_summed = CHECKSUM_NONE;
+ } else if (odev->features & NETIF_F_V6_CSUM) {
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_offset = offsetof(struct udphdr, check);
+ udph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, 0);
+ } else {
+ __wsum csum = udp_csum(skb);
+
+ /* add protocol-dependent pseudo-header */
+ udph->check = csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, csum);
+
+ if (udph->check == 0)
+ udph->check = CSUM_MANGLED_0;
+ }
+
pktgen_finalize_skb(pkt_dev, skb, datalen);
return skb;
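
The new flag travels through pktgen's usual /proc control files; a minimal sketch (device file name illustrative):

    #include <stdio.h>

    int main(void)
    {
    	FILE *f = fopen("/proc/net/pktgen/eth0", "w");

    	if (!f)
    		return 1;
    	fputs("flag UDPCSUM\n", f);	/* "flag !UDPCSUM" clears it */
    	fclose(f);
    	return 0;
    }
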
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index ca198c1d1d30..2a0e21de3060 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -767,7 +767,8 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
+ rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
+ rtnl_link_get_size(dev) /* IFLA_LINKINFO */
- + rtnl_link_get_af_size(dev); /* IFLA_AF_SPEC */
+ + rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */
+ + nla_total_size(MAX_PHYS_PORT_ID_LEN); /* IFLA_PHYS_PORT_ID */
}
static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
@@ -846,6 +847,24 @@ static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev)
return 0;
}
+static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
+{
+ int err;
+ struct netdev_phys_port_id ppid;
+
+ err = dev_get_phys_port_id(dev, &ppid);
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ return 0;
+ return err;
+ }
+
+ if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
int type, u32 pid, u32 seq, u32 change,
unsigned int flags, u32 ext_filter_mask)
@@ -913,6 +932,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
goto nla_put_failure;
}
+ if (rtnl_phys_port_id_fill(skb, dev))
+ goto nla_put_failure;
+
attr = nla_reserve(skb, IFLA_STATS,
sizeof(struct rtnl_link_stats));
if (attr == NULL)
@@ -1113,6 +1135,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_PROMISCUITY] = { .type = NLA_U32 },
[IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 },
[IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
+ [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_PORT_ID_LEN },
};
EXPORT_SYMBOL(ifla_policy);
@@ -1844,10 +1867,10 @@ replay:
else
err = register_netdevice(dev);
- if (err < 0 && !IS_ERR(dev))
+ if (err < 0) {
free_netdev(dev);
- if (err < 0)
goto out;
+ }
err = rtnl_configure_link(dev, ifm);
if (err < 0)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2c3d0f53d198..d81cff119f73 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3500,17 +3500,22 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
EXPORT_SYMBOL(skb_try_coalesce);
/**
- * skb_scrub_packet - scrub an skb before sending it to another netns
+ * skb_scrub_packet - scrub an skb
*
* @skb: buffer to clean
- *
- * skb_scrub_packet can be used to clean an skb before injecting it in
- * another namespace. We have to clear all information in the skb that
- * could impact namespace isolation.
+ * @xnet: packet is crossing netns
+ *
+ * skb_scrub_packet can be used after encapsulating or decapsulating a packet
+ * into/from a tunnel. Some information has to be cleared during these
+ * operations.
+ * skb_scrub_packet can also be used to clean an skb before injecting it in
+ * another namespace (@xnet == true). We have to clear all information in the
+ * skb that could impact namespace isolation.
*/
-void skb_scrub_packet(struct sk_buff *skb)
+void skb_scrub_packet(struct sk_buff *skb, bool xnet)
{
- skb_orphan(skb);
+ if (xnet)
+ skb_orphan(skb);
skb->tstamp.tv64 = 0;
skb->pkt_type = PACKET_HOST;
skb->skb_iif = 0;
diff --git a/net/core/sock.c b/net/core/sock.c
index 2c097c5a35dd..5b6beba494a3 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -93,6 +93,7 @@
#include <linux/capability.h>
#include <linux/errno.h>
+#include <linux/errqueue.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
@@ -1575,6 +1576,25 @@ void sock_wfree(struct sk_buff *skb)
}
EXPORT_SYMBOL(sock_wfree);
+void skb_orphan_partial(struct sk_buff *skb)
+{
+ /* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
+ * so we do not completely orphan the skb, but transfer all
+ * accounted bytes but one, to avoid unexpected reorders.
+ */
+ if (skb->destructor == sock_wfree
+#ifdef CONFIG_INET
+ || skb->destructor == tcp_wfree
+#endif
+ ) {
+ atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
+ skb->truesize = 1;
+ } else {
+ skb_orphan(skb);
+ }
+}
+EXPORT_SYMBOL(skb_orphan_partial);
+
/*
* Read buffer destructor automatically called from kfree_skb.
*/
@@ -1721,24 +1741,23 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
unsigned long data_len, int noblock,
- int *errcode)
+ int *errcode, int max_page_order)
{
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
+ unsigned long chunk;
gfp_t gfp_mask;
long timeo;
int err;
int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ struct page *page;
+ int i;
err = -EMSGSIZE;
if (npages > MAX_SKB_FRAGS)
goto failure;
- gfp_mask = sk->sk_allocation;
- if (gfp_mask & __GFP_WAIT)
- gfp_mask |= __GFP_REPEAT;
-
timeo = sock_sndtimeo(sk, noblock);
- while (1) {
+ while (!skb) {
err = sock_error(sk);
if (err != 0)
goto failure;
@@ -1747,50 +1766,52 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
if (sk->sk_shutdown & SEND_SHUTDOWN)
goto failure;
- if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
- skb = alloc_skb(header_len, gfp_mask);
- if (skb) {
- int i;
-
- /* No pages, we're done... */
- if (!data_len)
- break;
-
- skb->truesize += data_len;
- skb_shinfo(skb)->nr_frags = npages;
- for (i = 0; i < npages; i++) {
- struct page *page;
-
- page = alloc_pages(sk->sk_allocation, 0);
- if (!page) {
- err = -ENOBUFS;
- skb_shinfo(skb)->nr_frags = i;
- kfree_skb(skb);
- goto failure;
- }
-
- __skb_fill_page_desc(skb, i,
- page, 0,
- (data_len >= PAGE_SIZE ?
- PAGE_SIZE :
- data_len));
- data_len -= PAGE_SIZE;
- }
+ if (atomic_read(&sk->sk_wmem_alloc) >= sk->sk_sndbuf) {
+ set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ err = -EAGAIN;
+ if (!timeo)
+ goto failure;
+ if (signal_pending(current))
+ goto interrupted;
+ timeo = sock_wait_for_wmem(sk, timeo);
+ continue;
+ }
- /* Full success... */
- break;
- }
- err = -ENOBUFS;
+ err = -ENOBUFS;
+ gfp_mask = sk->sk_allocation;
+ if (gfp_mask & __GFP_WAIT)
+ gfp_mask |= __GFP_REPEAT;
+
+ skb = alloc_skb(header_len, gfp_mask);
+ if (!skb)
goto failure;
+
+ skb->truesize += data_len;
+
+ for (i = 0; npages > 0; i++) {
+ int order = max_page_order;
+
+ while (order) {
+ if (npages >= 1 << order) {
+ page = alloc_pages(sk->sk_allocation |
+ __GFP_COMP | __GFP_NOWARN,
+ order);
+ if (page)
+ goto fill_page;
+ }
+ order--;
+ }
+ page = alloc_page(sk->sk_allocation);
+ if (!page)
+ goto failure;
+fill_page:
+ chunk = min_t(unsigned long, data_len,
+ PAGE_SIZE << order);
+ skb_fill_page_desc(skb, i, page, 0, chunk);
+ data_len -= chunk;
+ npages -= 1 << order;
}
- set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
- set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
- err = -EAGAIN;
- if (!timeo)
- goto failure;
- if (signal_pending(current))
- goto interrupted;
- timeo = sock_wait_for_wmem(sk, timeo);
}
skb_set_owner_w(skb, sk);
@@ -1799,6 +1820,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
interrupted:
err = sock_intr_errno(timeo);
failure:
+ kfree_skb(skb);
*errcode = err;
return NULL;
}
@@ -1807,7 +1829,7 @@ EXPORT_SYMBOL(sock_alloc_send_pskb);
struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
int noblock, int *errcode)
{
- return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
+ return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
}
EXPORT_SYMBOL(sock_alloc_send_skb);
@@ -2425,6 +2447,52 @@ void sock_enable_timestamp(struct sock *sk, int flag)
}
}
+int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
+ int level, int type)
+{
+ struct sock_exterr_skb *serr;
+ struct sk_buff *skb, *skb2;
+ int copied, err;
+
+ err = -EAGAIN;
+ skb = skb_dequeue(&sk->sk_error_queue);
+ if (skb == NULL)
+ goto out;
+
+ copied = skb->len;
+ if (copied > len) {
+ msg->msg_flags |= MSG_TRUNC;
+ copied = len;
+ }
+ err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ if (err)
+ goto out_free_skb;
+
+ sock_recv_timestamp(msg, sk, skb);
+
+ serr = SKB_EXT_ERR(skb);
+ put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
+
+ msg->msg_flags |= MSG_ERRQUEUE;
+ err = copied;
+
+ /* Reset and regenerate socket error */
+ spin_lock_bh(&sk->sk_error_queue.lock);
+ sk->sk_err = 0;
+ if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
+ sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
+ spin_unlock_bh(&sk->sk_error_queue.lock);
+ sk->sk_error_report(sk);
+ } else
+ spin_unlock_bh(&sk->sk_error_queue.lock);
+
+out_free_skb:
+ kfree_skb(skb);
+out:
+ return err;
+}
+EXPORT_SYMBOL(sock_recv_errqueue);
+
/*
* Get a socket option on a socket.
*
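
sock_recv_errqueue() is the in-kernel producer half; the user-space consumer is an ordinary recvmsg() with MSG_ERRQUEUE. A minimal sketch:

    #include <string.h>
    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <linux/errqueue.h>

    /* drain one entry from a socket's error queue */
    static int read_one_err(int fd)
    {
    	char data[256], ctrl[512];
    	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
    	struct msghdr msg;
    	struct cmsghdr *cm;

    	memset(&msg, 0, sizeof(msg));
    	msg.msg_iov = &iov;
    	msg.msg_iovlen = 1;
    	msg.msg_control = ctrl;
    	msg.msg_controllen = sizeof(ctrl);

    	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
    		return -1;	/* EAGAIN when the queue is empty */

    	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
    		struct sock_extended_err *ee =
    			(struct sock_extended_err *)CMSG_DATA(cm);
    		/* cmsg level/type are whatever the protocol passed to
    		 * sock_recv_errqueue() above */
    		(void)ee;
    	}
    	return 0;
    }
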
diff --git a/net/core/stream.c b/net/core/stream.c
index f5df85dcd20b..512f0a24269b 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -30,7 +30,7 @@ void sk_stream_write_space(struct sock *sk)
struct socket *sock = sk->sk_socket;
struct socket_wq *wq;
- if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) {
+ if (sk_stream_is_writeable(sk) && sock) {
clear_bit(SOCK_NOSPACE, &sock->flags);
rcu_read_lock();
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 31107abd2783..cca444190907 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -20,6 +20,7 @@
#include <net/sock.h>
#include <net/net_ratelimit.h>
#include <net/busy_poll.h>
+#include <net/pkt_sched.h>
static int zero = 0;
static int one = 1;
@@ -193,6 +194,26 @@ static int flow_limit_table_len_sysctl(struct ctl_table *table, int write,
}
#endif /* CONFIG_NET_FLOW_LIMIT */
+#ifdef CONFIG_NET_SCHED
+static int set_default_qdisc(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ char id[IFNAMSIZ];
+ struct ctl_table tbl = {
+ .data = id,
+ .maxlen = IFNAMSIZ,
+ };
+ int ret;
+
+ qdisc_get_default(id, IFNAMSIZ);
+
+ ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
+ if (write && ret == 0)
+ ret = qdisc_set_default(id);
+ return ret;
+}
+#endif
+
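set_default_qdisc() round-trips the qdisc name through an IFNAMSIZ stack buffer: reads report the current default via qdisc_get_default(), and writes are validated by qdisc_set_default(), which rejects qdiscs unknown to the kernel. It backs the net.core.default_qdisc entry registered below; a hedged userspace sketch of changing the default, which only affects qdiscs created afterwards:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Sketch: select the default qdisc for devices created from now on,
 * e.g. set_default_qdisc_name("fq_codel").  Returns -1 if the sysctl
 * is absent or the qdisc name is rejected. */
static int set_default_qdisc_name(const char *name)
{
	int fd = open("/proc/sys/net/core/default_qdisc", O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, name, strlen(name));
	close(fd);
	return n < 0 ? -1 : 0;
}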
static struct ctl_table net_core_table[] = {
#ifdef CONFIG_NET
{
@@ -315,7 +336,14 @@ static struct ctl_table net_core_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
-#endif
+#endif
+#ifdef CONFIG_NET_SCHED
+ {
+ .procname = "default_qdisc",
+ .mode = 0644,
+ .maxlen = IFNAMSIZ,
+ .proc_handler = set_default_qdisc
+ },
#endif
#endif /* CONFIG_NET */
{
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 6c7c78b83940..ba64750f0387 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -336,7 +336,7 @@ unsigned int dccp_poll(struct file *file, struct socket *sock,
mask |= POLLIN | POLLRDNORM;
if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
- if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
+ if (sk_stream_is_writeable(sk)) {
mask |= POLLOUT | POLLWRNORM;
} else { /* send SIGIO later */
set_bit(SOCK_ASYNC_NOSPACE,
@@ -347,7 +347,7 @@ unsigned int dccp_poll(struct file *file, struct socket *sock,
* wspace test but before the flags are set,
* IO signal will be lost.
*/
- if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
+ if (sk_stream_is_writeable(sk))
mask |= POLLOUT | POLLWRNORM;
}
}
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 6ebd8fbd9285..29d684ebca6a 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -347,7 +347,7 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent,
slave_dev->features = master->vlan_features;
SET_ETHTOOL_OPS(slave_dev, &dsa_slave_ethtool_ops);
- memcpy(slave_dev->dev_addr, master->dev_addr, ETH_ALEN);
+ eth_hw_addr_inherit(slave_dev, master);
slave_dev->tx_queue_len = 0;
switch (ds->dst->tag_protocol) {
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 3b9d5f20bd1c..c85e71e0c7ff 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -67,39 +67,6 @@ static const u8 lowpan_ttl_values[] = {0, 1, 64, 255};
static LIST_HEAD(lowpan_devices);
-/*
- * Uncompression of linklocal:
- * 0 -> 16 bytes from packet
- * 1 -> 2 bytes from prefix - bunch of zeroes and 8 from packet
- * 2 -> 2 bytes from prefix - zeroes + 2 from packet
- * 3 -> 2 bytes from prefix - infer 8 bytes from lladdr
- *
- * NOTE: => the uncompress function does change 0xf to 0x10
- * NOTE: 0x00 => no-autoconfig => unspecified
- */
-static const u8 lowpan_unc_llconf[] = {0x0f, 0x28, 0x22, 0x20};
-
-/*
- * Uncompression of ctx-based:
- * 0 -> 0 bits from packet [unspecified / reserved]
- * 1 -> 8 bytes from prefix - bunch of zeroes and 8 from packet
- * 2 -> 8 bytes from prefix - zeroes + 2 from packet
- * 3 -> 8 bytes from prefix - infer 8 bytes from lladdr
- */
-static const u8 lowpan_unc_ctxconf[] = {0x00, 0x88, 0x82, 0x80};
-
-/*
- * Uncompression of ctx-base
- * 0 -> 0 bits from packet
- * 1 -> 2 bytes from prefix - bunch of zeroes 5 from packet
- * 2 -> 2 bytes from prefix - zeroes + 3 from packet
- * 3 -> 2 bytes from prefix - infer 1 bytes from lladdr
- */
-static const u8 lowpan_unc_mxconf[] = {0x0f, 0x25, 0x23, 0x21};
-
-/* Link local prefix */
-static const u8 lowpan_llprefix[] = {0xfe, 0x80};
-
/* private device info */
struct lowpan_dev_info {
struct net_device *real_dev; /* real WPAN device ptr */
@@ -191,55 +158,177 @@ lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift, const struct in6_addr *ipaddr,
return rol8(val, shift);
}
-static void
-lowpan_uip_ds6_set_addr_iid(struct in6_addr *ipaddr, unsigned char *lladdr)
+/*
+ * Uncompress function for source and
+ * destination addresses (non-multicast).
+ *
+ * address_mode is the SAM or DAM value.
+ */
+static int
+lowpan_uncompress_addr(struct sk_buff *skb,
+ struct in6_addr *ipaddr,
+ const u8 address_mode,
+ const struct ieee802154_addr *lladdr)
{
- memcpy(&ipaddr->s6_addr[8], lladdr, IEEE802154_ADDR_LEN);
- /* second bit-flip (Universe/Local) is done according RFC2464 */
- ipaddr->s6_addr[8] ^= 0x02;
+ bool fail;
+
+ switch (address_mode) {
+ case LOWPAN_IPHC_ADDR_00:
+ /* for global link addresses */
+ fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16);
+ break;
+ case LOWPAN_IPHC_ADDR_01:
+ /* fe:80::XXXX:XXXX:XXXX:XXXX */
+ ipaddr->s6_addr[0] = 0xFE;
+ ipaddr->s6_addr[1] = 0x80;
+ fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[8], 8);
+ break;
+ case LOWPAN_IPHC_ADDR_02:
+ /* fe:80::ff:fe00:XXXX */
+ ipaddr->s6_addr[0] = 0xFE;
+ ipaddr->s6_addr[1] = 0x80;
+ ipaddr->s6_addr[11] = 0xFF;
+ ipaddr->s6_addr[12] = 0xFE;
+ fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[14], 2);
+ break;
+ case LOWPAN_IPHC_ADDR_03:
+ fail = false;
+ switch (lladdr->addr_type) {
+ case IEEE802154_ADDR_LONG:
+ /* fe:80::XXXX:XXXX:XXXX:XXXX
+ * \_________________/
+ * hwaddr
+ */
+ ipaddr->s6_addr[0] = 0xFE;
+ ipaddr->s6_addr[1] = 0x80;
+ memcpy(&ipaddr->s6_addr[8], lladdr->hwaddr,
+ IEEE802154_ADDR_LEN);
+ /* second bit-flip (Universe/Local)
+ * is done according to RFC 2464
+ */
+ ipaddr->s6_addr[8] ^= 0x02;
+ break;
+ case IEEE802154_ADDR_SHORT:
+ /* fe:80::ff:fe00:XXXX
+ * \__/
+ * short_addr
+ *
+ * Universe/Local bit is zero.
+ */
+ ipaddr->s6_addr[0] = 0xFE;
+ ipaddr->s6_addr[1] = 0x80;
+ ipaddr->s6_addr[11] = 0xFF;
+ ipaddr->s6_addr[12] = 0xFE;
+ ipaddr->s6_addr16[7] = htons(lladdr->short_addr);
+ break;
+ default:
+ pr_debug("Invalid addr_type set\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ pr_debug("Invalid address mode value: 0x%x\n", address_mode);
+ return -EINVAL;
+ }
+
+ if (fail) {
+ pr_debug("Failed to fetch skb data\n");
+ return -EIO;
+ }
+
+ lowpan_raw_dump_inline(NULL, "Reconstructed ipv6 addr is:\n",
+ ipaddr->s6_addr, 16);
+
+ return 0;
}
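As a concrete check of the IEEE802154_ADDR_SHORT branch above: short address 0xabcd reconstructs to fe80::ff:fe00:abcd. A standalone sketch of that mapping, with the htons() byte ordering written out by hand:

#include <stdint.h>
#include <string.h>

/* Build fe80::ff:fe00:XXXX from a 16-bit IEEE 802.15.4 short address,
 * mirroring the IEEE802154_ADDR_SHORT case above.  The U/L bit in
 * byte 8 stays zero for short addresses. */
static void short_addr_to_linklocal(uint16_t short_addr, uint8_t ip6[16])
{
	memset(ip6, 0, 16);
	ip6[0]  = 0xFE;
	ip6[1]  = 0x80;
	ip6[11] = 0xFF;
	ip6[12] = 0xFE;
	ip6[14] = short_addr >> 8;	/* high byte first, as htons() stores it */
	ip6[15] = short_addr & 0xFF;
}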
-/*
- * Uncompress addresses based on a prefix and a postfix with zeroes in
- * between. If the postfix is zero in length it will use the link address
- * to configure the IP address (autoconf style).
- * pref_post_count takes a byte where the first nibble specify prefix count
- * and the second postfix count (NOTE: 15/0xf => 16 bytes copy).
+/* Uncompress function for a source context-based
+ * address (non-multicast).
*/
static int
-lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr,
- u8 const *prefix, u8 pref_post_count, unsigned char *lladdr)
+lowpan_uncompress_context_based_src_addr(struct sk_buff *skb,
+ struct in6_addr *ipaddr,
+ const u8 sam)
{
- u8 prefcount = pref_post_count >> 4;
- u8 postcount = pref_post_count & 0x0f;
-
- /* full nibble 15 => 16 */
- prefcount = (prefcount == 15 ? 16 : prefcount);
- postcount = (postcount == 15 ? 16 : postcount);
-
- if (lladdr)
- lowpan_raw_dump_inline(__func__, "linklocal address",
- lladdr, IEEE802154_ADDR_LEN);
- if (prefcount > 0)
- memcpy(ipaddr, prefix, prefcount);
-
- if (prefcount + postcount < 16)
- memset(&ipaddr->s6_addr[prefcount], 0,
- 16 - (prefcount + postcount));
-
- if (postcount > 0) {
- memcpy(&ipaddr->s6_addr[16 - postcount], skb->data, postcount);
- skb_pull(skb, postcount);
- } else if (prefcount > 0) {
- if (lladdr == NULL)
- return -EINVAL;
+ switch (sam) {
+ case LOWPAN_IPHC_ADDR_00:
+ /* unspec address ::
+ * Do nothing, address is already ::
+ */
+ break;
+ case LOWPAN_IPHC_ADDR_01:
+ /* TODO */
+ case LOWPAN_IPHC_ADDR_02:
+ /* TODO */
+ case LOWPAN_IPHC_ADDR_03:
+ /* TODO */
+ netdev_warn(skb->dev, "SAM value 0x%x not supported\n", sam);
+ return -EINVAL;
+ default:
+ pr_debug("Invalid sam value: 0x%x\n", sam);
+ return -EINVAL;
+ }
+
+ lowpan_raw_dump_inline(NULL,
+ "Reconstructed context based ipv6 src addr is:\n",
+ ipaddr->s6_addr, 16);
+
+ return 0;
+}
- /* no IID based configuration if no prefix and no data */
- lowpan_uip_ds6_set_addr_iid(ipaddr, lladdr);
+/* Uncompress function for the multicast destination
+ * address, used when the M bit is set.
+ */
+static int
+lowpan_uncompress_multicast_daddr(struct sk_buff *skb,
+ struct in6_addr *ipaddr,
+ const u8 dam)
+{
+ bool fail;
+
+ switch (dam) {
+ case LOWPAN_IPHC_DAM_00:
+ /* 00: 128 bits. The full address
+ * is carried in-line.
+ */
+ fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16);
+ break;
+ case LOWPAN_IPHC_DAM_01:
+ /* 01: 48 bits. The address takes
+ * the form ffXX::00XX:XXXX:XXXX.
+ */
+ ipaddr->s6_addr[0] = 0xFF;
+ fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 1);
+ fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[11], 5);
+ break;
+ case LOWPAN_IPHC_DAM_10:
+ /* 10: 32 bits. The address takes
+ * the form ffXX::00XX:XXXX.
+ */
+ ipaddr->s6_addr[0] = 0xFF;
+ fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 1);
+ fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[13], 3);
+ break;
+ case LOWPAN_IPHC_DAM_11:
+ /* 11: 8 bits. The address takes
+ * the form ff02::00XX.
+ */
+ ipaddr->s6_addr[0] = 0xFF;
+ ipaddr->s6_addr[1] = 0x02;
+ fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[15], 1);
+ break;
+ default:
+ pr_debug("DAM value has a wrong value: 0x%x\n", dam);
+ return -EINVAL;
+ }
+
+ if (fail) {
+ pr_debug("Failed to fetch skb data\n");
+ return -EIO;
}
- pr_debug("uncompressing %d + %d => ", prefcount, postcount);
- lowpan_raw_dump_inline(NULL, NULL, ipaddr->s6_addr, 16);
+ lowpan_raw_dump_inline(NULL, "Reconstructed ipv6 multicast addr is:\n",
+ ipaddr->s6_addr, 16);
return 0;
}
@@ -702,6 +791,12 @@ lowpan_alloc_new_frame(struct sk_buff *skb, u16 len, u16 tag)
skb_reserve(frame->skb, sizeof(struct ipv6hdr));
skb_put(frame->skb, frame->length);
+ /* copy the first fragment's control block to
+ * preserve the link-layer addresses in case of
+ * a link-local compressed address
+ */
+ memcpy(frame->skb->cb, skb->cb, sizeof(skb->cb));
+
init_timer(&frame->timer);
/* time out is the same as for ipv6 - 60 sec */
frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
@@ -723,9 +818,9 @@ frame_err:
static int
lowpan_process_data(struct sk_buff *skb)
{
- struct ipv6hdr hdr;
+ struct ipv6hdr hdr = {};
u8 tmp, iphc0, iphc1, num_context = 0;
- u8 *_saddr, *_daddr;
+ const struct ieee802154_addr *_saddr, *_daddr;
int err;
lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data,
@@ -828,8 +923,8 @@ lowpan_process_data(struct sk_buff *skb)
if (lowpan_fetch_skb_u8(skb, &iphc1))
goto drop;
- _saddr = mac_cb(skb)->sa.hwaddr;
- _daddr = mac_cb(skb)->da.hwaddr;
+ _saddr = &mac_cb(skb)->sa;
+ _daddr = &mac_cb(skb)->da;
pr_debug("iphc0 = %02x, iphc1 = %02x\n", iphc0, iphc1);
@@ -868,8 +963,6 @@ lowpan_process_data(struct sk_buff *skb)
hdr.priority = ((tmp >> 2) & 0x0f);
hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30);
- hdr.flow_lbl[1] = 0;
- hdr.flow_lbl[2] = 0;
break;
/*
* Flow Label carried in-line
@@ -885,10 +978,6 @@ lowpan_process_data(struct sk_buff *skb)
break;
/* Traffic Class and Flow Label are elided */
case 3: /* 11b */
- hdr.priority = 0;
- hdr.flow_lbl[0] = 0;
- hdr.flow_lbl[1] = 0;
- hdr.flow_lbl[2] = 0;
break;
default:
break;
@@ -915,10 +1004,18 @@ lowpan_process_data(struct sk_buff *skb)
/* Extract SAM to the tmp variable */
tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03;
- /* Source address uncompression */
- pr_debug("source address stateless compression\n");
- err = lowpan_uncompress_addr(skb, &hdr.saddr, lowpan_llprefix,
- lowpan_unc_llconf[tmp], skb->data);
+ if (iphc1 & LOWPAN_IPHC_SAC) {
+ /* Source address context based uncompression */
+ pr_debug("SAC bit is set. Handle context based source address.\n");
+ err = lowpan_uncompress_context_based_src_addr(
+ skb, &hdr.saddr, tmp);
+ } else {
+ /* Source address uncompression */
+ pr_debug("source address stateless compression\n");
+ err = lowpan_uncompress_addr(skb, &hdr.saddr, tmp, _saddr);
+ }
+
+ /* check for an error from either branch above */
if (err)
goto drop;
@@ -931,23 +1028,14 @@ lowpan_process_data(struct sk_buff *skb)
pr_debug("dest: context-based mcast compression\n");
/* TODO: implement this */
} else {
- u8 prefix[] = {0xff, 0x02};
-
- pr_debug("dest: non context-based mcast compression\n");
- if (0 < tmp && tmp < 3) {
- if (lowpan_fetch_skb_u8(skb, &prefix[1]))
- goto drop;
- }
-
- err = lowpan_uncompress_addr(skb, &hdr.daddr, prefix,
- lowpan_unc_mxconf[tmp], NULL);
+ err = lowpan_uncompress_multicast_daddr(
+ skb, &hdr.daddr, tmp);
if (err)
goto drop;
}
} else {
pr_debug("dest: stateless compression\n");
- err = lowpan_uncompress_addr(skb, &hdr.daddr, lowpan_llprefix,
- lowpan_unc_llconf[tmp], skb->data);
+ err = lowpan_uncompress_addr(skb, &hdr.daddr, tmp, _daddr);
if (err)
goto drop;
}
diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h
index 4b8f917658b5..2869c0526dad 100644
--- a/net/ieee802154/6lowpan.h
+++ b/net/ieee802154/6lowpan.h
@@ -193,10 +193,12 @@
/* Values of fields within the IPHC encoding second byte */
#define LOWPAN_IPHC_CID 0x80
+#define LOWPAN_IPHC_ADDR_00 0x00
+#define LOWPAN_IPHC_ADDR_01 0x01
+#define LOWPAN_IPHC_ADDR_02 0x02
+#define LOWPAN_IPHC_ADDR_03 0x03
+
#define LOWPAN_IPHC_SAC 0x40
-#define LOWPAN_IPHC_SAM_00 0x00
-#define LOWPAN_IPHC_SAM_01 0x10
-#define LOWPAN_IPHC_SAM_10 0x20
#define LOWPAN_IPHC_SAM 0x30
#define LOWPAN_IPHC_SAM_BIT 4
@@ -230,4 +232,16 @@
dest = 16 bit inline */
#define LOWPAN_NHC_UDP_CS_P_11 0xF3 /* source & dest = 0xF0B + 4bit inline */
+static inline bool lowpan_fetch_skb(struct sk_buff *skb,
+ void *data, const unsigned int len)
+{
+ if (unlikely(!pskb_may_pull(skb, len)))
+ return true;
+
+ skb_copy_from_linear_data(skb, data, len);
+ skb_pull(skb, len);
+
+ return false;
+}
+
#endif /* __6LOWPAN_H__ */
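Note the inverted convention: lowpan_fetch_skb() returns true on failure so call sites can OR several fetches together and test once, as the multicast DAM_01/DAM_10 cases in 6lowpan.c do. A minimal standalone illustration of the idiom, where fetch() is a hypothetical stub for lowpan_fetch_skb():

#include <stdbool.h>

/* Hypothetical stand-in for lowpan_fetch_skb(): like the real helper,
 * true means failure. */
static bool fetch(void *dst, unsigned int len)
{
	(void)dst;
	(void)len;
	return false;
}

static int parse_two_fields(unsigned char *flags, unsigned char *addr)
{
	bool fail;

	fail  = fetch(flags, 1);	/* accumulate failures... */
	fail |= fetch(addr, 5);		/* ...and test once at the end */
	return fail ? -1 : 0;
}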
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 37cf1a6ea3ad..05c57f0fcabe 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -259,22 +259,6 @@ config IP_PIMSM_V2
gated-5). This routing protocol is not used widely, so say N unless
you want to play with it.
-config ARPD
- bool "IP: ARP daemon support"
- ---help---
- The kernel maintains an internal cache which maps IP addresses to
- hardware addresses on the local network, so that Ethernet
- frames are sent to the proper address on the physical networking
- layer. Normally, kernel uses the ARP protocol to resolve these
- mappings.
-
- Saying Y here adds support to have an user space daemon to do this
- resolution instead. This is useful for implementing an alternate
- address resolution protocol (e.g. NHRP on mGRE tunnels) and also for
- testing purposes.
-
- If unsure, say N.
-
config SYN_COOKIES
bool "IP: TCP syncookie support"
---help---
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index b4d0be2b7ce9..7a1874b7b8fd 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1532,18 +1532,6 @@ int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align)
}
EXPORT_SYMBOL_GPL(snmp_mib_init);
-void snmp_mib_free(void __percpu *ptr[SNMP_ARRAY_SZ])
-{
- int i;
-
- BUG_ON(ptr == NULL);
- for (i = 0; i < SNMP_ARRAY_SZ; i++) {
- free_percpu(ptr[i]);
- ptr[i] = NULL;
- }
-}
-EXPORT_SYMBOL_GPL(snmp_mib_free);
-
#ifdef CONFIG_IP_MULTICAST
static const struct net_protocol igmp_protocol = {
.handler = igmp_rcv,
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 4429b013f269..7808093cede6 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -368,9 +368,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
} else {
probes -= neigh->parms->app_probes;
if (probes < 0) {
-#ifdef CONFIG_ARPD
neigh_app_ns(neigh);
-#endif
return;
}
}
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 34ca6d5a3a4b..a1b5bcbd04ae 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -73,6 +73,8 @@ static struct ipv4_devconf ipv4_devconf = {
[IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
[IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
[IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
+ [IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
+ [IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/,
},
};
@@ -83,6 +85,8 @@ static struct ipv4_devconf ipv4_devconf_dflt = {
[IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
[IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
[IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE - 1] = 1,
+ [IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
+ [IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/,
},
};
@@ -1126,10 +1130,7 @@ static int inet_gifconf(struct net_device *dev, char __user *buf, int len)
if (len < (int) sizeof(ifr))
break;
memset(&ifr, 0, sizeof(struct ifreq));
- if (ifa->ifa_label)
- strcpy(ifr.ifr_name, ifa->ifa_label);
- else
- strcpy(ifr.ifr_name, dev->name);
+ strcpy(ifr.ifr_name, ifa->ifa_label);
(*(struct sockaddr_in *)&ifr.ifr_addr).sin_family = AF_INET;
(*(struct sockaddr_in *)&ifr.ifr_addr).sin_addr.s_addr =
@@ -2097,11 +2098,15 @@ static struct devinet_sysctl_table {
DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),
+ DEVINET_SYSCTL_RW_ENTRY(FORCE_IGMP_VERSION,
+ "force_igmp_version"),
+ DEVINET_SYSCTL_RW_ENTRY(IGMPV2_UNSOLICITED_REPORT_INTERVAL,
+ "igmpv2_unsolicited_report_interval"),
+ DEVINET_SYSCTL_RW_ENTRY(IGMPV3_UNSOLICITED_REPORT_INTERVAL,
+ "igmpv3_unsolicited_report_interval"),
DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
- DEVINET_SYSCTL_FLUSHING_ENTRY(FORCE_IGMP_VERSION,
- "force_igmp_version"),
DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES,
"promote_secondaries"),
DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET,
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 26aa65d1fce4..523be38e37de 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -101,6 +101,30 @@ errout:
return err;
}
+static bool fib4_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg)
+{
+ struct fib_result *result = (struct fib_result *) arg->result;
+ struct net_device *dev = result->fi->fib_dev;
+
+ /* do not accept result if the route does
+ * not meet the required prefix length
+ */
+ if (result->prefixlen <= rule->suppress_prefixlen)
+ goto suppress_route;
+
+ /* do not accept result if the route uses a device
+ * belonging to a forbidden interface group
+ */
+ if (rule->suppress_ifgroup != -1 && dev && dev->group == rule->suppress_ifgroup)
+ goto suppress_route;
+
+ return false;
+
+suppress_route:
+ if (!(arg->flags & FIB_LOOKUP_NOREF))
+ fib_info_put(result->fi);
+ return true;
+}
static int fib4_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
@@ -267,6 +291,7 @@ static const struct fib_rules_ops __net_initconst fib4_rules_ops_template = {
.rule_size = sizeof(struct fib4_rule),
.addr_size = sizeof(u32),
.action = fib4_rule_action,
+ .suppress = fib4_rule_suppress,
.match = fib4_rule_match,
.configure = fib4_rule_configure,
.delete = fib4_rule_delete,
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index cd71190d2962..d6c0e64ec97f 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -88,6 +88,7 @@
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <linux/times.h>
+#include <linux/pkt_sched.h>
#include <net/net_namespace.h>
#include <net/arp.h>
@@ -113,7 +114,8 @@
#define IGMP_V1_Router_Present_Timeout (400*HZ)
#define IGMP_V2_Router_Present_Timeout (400*HZ)
-#define IGMP_Unsolicited_Report_Interval (10*HZ)
+#define IGMP_V2_Unsolicited_Report_Interval (10*HZ)
+#define IGMP_V3_Unsolicited_Report_Interval (1*HZ)
#define IGMP_Query_Response_Interval (10*HZ)
#define IGMP_Unsolicited_Report_Count 2
@@ -138,6 +140,29 @@
((in_dev)->mr_v2_seen && \
time_before(jiffies, (in_dev)->mr_v2_seen)))
+static int unsolicited_report_interval(struct in_device *in_dev)
+{
+ int interval_ms, interval_jiffies;
+
+ if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
+ interval_ms = IN_DEV_CONF_GET(
+ in_dev,
+ IGMPV2_UNSOLICITED_REPORT_INTERVAL);
+ else /* v3 */
+ interval_ms = IN_DEV_CONF_GET(
+ in_dev,
+ IGMPV3_UNSOLICITED_REPORT_INTERVAL);
+
+ interval_jiffies = msecs_to_jiffies(interval_ms);
+
+ /* _timer functions can't handle a delay of 0 jiffies so ensure
+ * we always return a positive value.
+ */
+ if (interval_jiffies <= 0)
+ interval_jiffies = 1;
+ return interval_jiffies;
+}
+
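The clamp at the end exists because, as the comment says, the _timer helpers cannot take a 0-jiffy delay, so even tiny sysctl values must still arm the timer. A simplified model of the conversion, assuming HZ=100 and ignoring that the real msecs_to_jiffies() rounds up:

#include <stdio.h>

#define HZ 100	/* assumed tick rate for this example */

/* Simplified model; the real msecs_to_jiffies() rounds up. */
static long report_interval_jiffies(int interval_ms)
{
	long j = (long)interval_ms * HZ / 1000;

	return j > 0 ? j : 1;	/* never hand a timer a 0-jiffy delay */
}

int main(void)
{
	printf("%ld\n", report_interval_jiffies(10000));	/* v2 default: 1000 */
	printf("%ld\n", report_interval_jiffies(1000));		/* v3 default: 100 */
	printf("%ld\n", report_interval_jiffies(3));		/* clamped to 1 */
	return 0;
}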
static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im);
static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr);
static void igmpv3_clear_delrec(struct in_device *in_dev);
@@ -315,6 +340,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
if (size < 256)
return NULL;
}
+ skb->priority = TC_PRIO_CONTROL;
igmp_skb_size(skb) = size;
rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0,
@@ -670,6 +696,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
ip_rt_put(rt);
return -1;
}
+ skb->priority = TC_PRIO_CONTROL;
skb_dst_set(skb, &rt->dst);
@@ -719,7 +746,8 @@ static void igmp_ifc_timer_expire(unsigned long data)
igmpv3_send_cr(in_dev);
if (in_dev->mr_ifc_count) {
in_dev->mr_ifc_count--;
- igmp_ifc_start_timer(in_dev, IGMP_Unsolicited_Report_Interval);
+ igmp_ifc_start_timer(in_dev,
+ unsolicited_report_interval(in_dev));
}
__in_dev_put(in_dev);
}
@@ -744,7 +772,7 @@ static void igmp_timer_expire(unsigned long data)
if (im->unsolicit_count) {
im->unsolicit_count--;
- igmp_start_timer(im, IGMP_Unsolicited_Report_Interval);
+ igmp_start_timer(im, unsolicited_report_interval(in_dev));
}
im->reporter = 1;
spin_unlock(&im->lock);
@@ -1323,16 +1351,17 @@ out:
EXPORT_SYMBOL(ip_mc_inc_group);
/*
- * Resend IGMP JOIN report; used for bonding.
- * Called with rcu_read_lock()
+ * Resend IGMP JOIN report; used by netdev notifier.
*/
-void ip_mc_rejoin_groups(struct in_device *in_dev)
+static void ip_mc_rejoin_groups(struct in_device *in_dev)
{
#ifdef CONFIG_IP_MULTICAST
struct ip_mc_list *im;
int type;
- for_each_pmc_rcu(in_dev, im) {
+ ASSERT_RTNL();
+
+ for_each_pmc_rtnl(in_dev, im) {
if (im->multiaddr == IGMP_ALL_HOSTS)
continue;
@@ -1349,7 +1378,6 @@ void ip_mc_rejoin_groups(struct in_device *in_dev)
}
#endif
}
-EXPORT_SYMBOL(ip_mc_rejoin_groups);
/*
* A socket has left a multicast group on device dev
@@ -2735,8 +2763,42 @@ static struct pernet_operations igmp_net_ops = {
.exit = igmp_net_exit,
};
+static int igmp_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct in_device *in_dev;
+
+ switch (event) {
+ case NETDEV_RESEND_IGMP:
+ in_dev = __in_dev_get_rtnl(dev);
+ if (in_dev)
+ ip_mc_rejoin_groups(in_dev);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block igmp_notifier = {
+ .notifier_call = igmp_netdev_event,
+};
+
int __init igmp_mc_proc_init(void)
{
- return register_pernet_subsys(&igmp_net_ops);
+ int err;
+
+ err = register_pernet_subsys(&igmp_net_ops);
+ if (err)
+ return err;
+ err = register_netdevice_notifier(&igmp_notifier);
+ if (err)
+ goto reg_notif_fail;
+ return 0;
+
+reg_notif_fail:
+ unregister_pernet_subsys(&igmp_net_ops);
+ return err;
}
#endif
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 8d6939eeb492..d7aea4c5b940 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -534,7 +534,7 @@ static int __net_init ipgre_init_net(struct net *net)
static void __net_exit ipgre_exit_net(struct net *net)
{
struct ip_tunnel_net *itn = net_generic(net, ipgre_net_id);
- ip_tunnel_delete_net(itn);
+ ip_tunnel_delete_net(itn, &ipgre_link_ops);
}
static struct pernet_operations ipgre_net_ops = {
@@ -767,7 +767,7 @@ static int __net_init ipgre_tap_init_net(struct net *net)
static void __net_exit ipgre_tap_exit_net(struct net *net)
{
struct ip_tunnel_net *itn = net_generic(net, gre_tap_net_id);
- ip_tunnel_delete_net(itn);
+ ip_tunnel_delete_net(itn, &ipgre_tap_ops);
}
static struct pernet_operations ipgre_tap_net_ops = {
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 15e3e683adec..054a3e97d822 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -141,6 +141,7 @@
#include <net/icmp.h>
#include <net/raw.h>
#include <net/checksum.h>
+#include <net/inet_ecn.h>
#include <linux/netfilter_ipv4.h>
#include <net/xfrm.h>
#include <linux/mroute.h>
@@ -410,6 +411,13 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
if (iph->ihl < 5 || iph->version != 4)
goto inhdr_error;
+ BUILD_BUG_ON(IPSTATS_MIB_ECT1PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_1);
+ BUILD_BUG_ON(IPSTATS_MIB_ECT0PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_0);
+ BUILD_BUG_ON(IPSTATS_MIB_CEPKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_CE);
+ IP_ADD_STATS_BH(dev_net(dev),
+ IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK),
+ max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
+
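The BUILD_BUG_ONs pin down the layout this hunk depends on: the four ECN counters are consecutive and in codepoint order, so the low two TOS bits index the right slot directly, with gso_segs clamped to at least 1. A standalone sketch of the same indexing trick:

/* Counters laid out in INET_ECN_* codepoint order (0..3), so the low
 * two TOS bits pick the slot without a switch statement. */
enum { ECN_NOECT, ECN_ECT_1, ECN_ECT_0, ECN_CE };

static unsigned long ecn_pkts[4];

static void count_ecn(unsigned char tos, unsigned int gso_segs)
{
	/* count every GSO segment, but at least one, like max_t() above */
	ecn_pkts[tos & 0x3] += gso_segs ? gso_segs : 1;	/* 0x3 == INET_ECN_MASK */
}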
if (!pskb_may_pull(skb, iph->ihl*4))
goto inhdr_error;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index ca1cb2d5f6e2..ac9fabe0300f 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -350,7 +350,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
struct flowi4 fl4;
struct rtable *rt;
- rt = ip_route_output_tunnel(dev_net(dev), &fl4,
+ rt = ip_route_output_tunnel(tunnel->net, &fl4,
tunnel->parms.iph.protocol,
iph->daddr, iph->saddr,
tunnel->parms.o_key,
@@ -365,7 +365,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
}
if (!tdev && tunnel->parms.link)
- tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
+ tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);
if (tdev) {
hlen = tdev->hard_header_len + tdev->needed_headroom;
@@ -454,15 +454,15 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
tstats->rx_bytes += skb->len;
u64_stats_update_end(&tstats->syncp);
- if (tunnel->net != dev_net(tunnel->dev))
- skb_scrub_packet(skb);
-
if (tunnel->dev->type == ARPHRD_ETHER) {
skb->protocol = eth_type_trans(skb, tunnel->dev);
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
} else {
skb->dev = tunnel->dev;
}
+
+ skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
+
gro_cells_receive(&tunnel->gro_cells, skb);
return 0;
@@ -613,9 +613,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
goto tx_error;
}
- if (tunnel->net != dev_net(dev))
- skb_scrub_packet(skb);
-
if (tunnel->err_count > 0) {
if (time_before(jiffies,
tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
@@ -653,9 +650,9 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
}
}
- err = iptunnel_xmit(dev_net(dev), rt, skb,
- fl4.saddr, fl4.daddr, protocol,
- ip_tunnel_ecn_encap(tos, inner_iph, skb), ttl, df);
+ err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, protocol,
+ ip_tunnel_ecn_encap(tos, inner_iph, skb), ttl, df,
+ !net_eq(tunnel->net, dev_net(dev)));
iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
return;
@@ -820,11 +817,10 @@ static void ip_tunnel_dev_free(struct net_device *dev)
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
{
- struct net *net = dev_net(dev);
struct ip_tunnel *tunnel = netdev_priv(dev);
struct ip_tunnel_net *itn;
- itn = net_generic(net, tunnel->ip_tnl_net_id);
+ itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id);
if (itn->fb_tunnel_dev != dev) {
ip_tunnel_del(netdev_priv(dev));
@@ -838,56 +834,68 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
{
struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
struct ip_tunnel_parm parms;
+ unsigned int i;
- itn->tunnels = kzalloc(IP_TNL_HASH_SIZE * sizeof(struct hlist_head), GFP_KERNEL);
- if (!itn->tunnels)
- return -ENOMEM;
+ for (i = 0; i < IP_TNL_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&itn->tunnels[i]);
if (!ops) {
itn->fb_tunnel_dev = NULL;
return 0;
}
+
memset(&parms, 0, sizeof(parms));
if (devname)
strlcpy(parms.name, devname, IFNAMSIZ);
rtnl_lock();
itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms);
+ /* FB netdevice is special: we have one, and only one per netns.
+ * Moving it to another netns is clearly unsafe.
+ */
+ if (!IS_ERR(itn->fb_tunnel_dev))
+ itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
rtnl_unlock();
- if (IS_ERR(itn->fb_tunnel_dev)) {
- kfree(itn->tunnels);
- return PTR_ERR(itn->fb_tunnel_dev);
- }
- return 0;
+ return PTR_RET(itn->fb_tunnel_dev);
}
EXPORT_SYMBOL_GPL(ip_tunnel_init_net);
-static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head)
+static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head,
+ struct rtnl_link_ops *ops)
{
+ struct net *net = dev_net(itn->fb_tunnel_dev);
+ struct net_device *dev, *aux;
int h;
+ for_each_netdev_safe(net, dev, aux)
+ if (dev->rtnl_link_ops == ops)
+ unregister_netdevice_queue(dev, head);
+
for (h = 0; h < IP_TNL_HASH_SIZE; h++) {
struct ip_tunnel *t;
struct hlist_node *n;
struct hlist_head *thead = &itn->tunnels[h];
hlist_for_each_entry_safe(t, n, thead, hash_node)
- unregister_netdevice_queue(t->dev, head);
+ /* If dev is in the same netns, it has already
+ * been added to the list by the previous loop.
+ */
+ if (!net_eq(dev_net(t->dev), net))
+ unregister_netdevice_queue(t->dev, head);
}
if (itn->fb_tunnel_dev)
unregister_netdevice_queue(itn->fb_tunnel_dev, head);
}
-void ip_tunnel_delete_net(struct ip_tunnel_net *itn)
+void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops)
{
LIST_HEAD(list);
rtnl_lock();
- ip_tunnel_destroy(itn, &list);
+ ip_tunnel_destroy(itn, &list, ops);
unregister_netdevice_many(&list);
rtnl_unlock();
- kfree(itn->tunnels);
}
EXPORT_SYMBOL_GPL(ip_tunnel_delete_net);
@@ -929,23 +937,21 @@ EXPORT_SYMBOL_GPL(ip_tunnel_newlink);
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_parm *p)
{
- struct ip_tunnel *t, *nt;
- struct net *net = dev_net(dev);
+ struct ip_tunnel *t;
struct ip_tunnel *tunnel = netdev_priv(dev);
+ struct net *net = tunnel->net;
struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);
if (dev == itn->fb_tunnel_dev)
return -EINVAL;
- nt = netdev_priv(dev);
-
t = ip_tunnel_find(itn, p, dev->type);
if (t) {
if (t->dev != dev)
return -EEXIST;
} else {
- t = nt;
+ t = tunnel;
if (dev->type != ARPHRD_ETHER) {
unsigned int nflags = 0;
@@ -984,6 +990,7 @@ int ip_tunnel_init(struct net_device *dev)
}
tunnel->dev = dev;
+ tunnel->net = dev_net(dev);
strcpy(tunnel->parms.name, dev->name);
iph->version = 4;
iph->ihl = 5;
@@ -994,8 +1001,8 @@ EXPORT_SYMBOL_GPL(ip_tunnel_init);
void ip_tunnel_uninit(struct net_device *dev)
{
- struct net *net = dev_net(dev);
struct ip_tunnel *tunnel = netdev_priv(dev);
+ struct net *net = tunnel->net;
struct ip_tunnel_net *itn;
itn = net_generic(net, tunnel->ip_tnl_net_id);
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 850525b34899..d6c856b17fd4 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -46,19 +46,17 @@
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
-int iptunnel_xmit(struct net *net, struct rtable *rt,
- struct sk_buff *skb,
+int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 proto,
- __u8 tos, __u8 ttl, __be16 df)
+ __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
int pkt_len = skb->len;
struct iphdr *iph;
int err;
- nf_reset(skb);
- secpath_reset(skb);
+ skb_scrub_packet(skb, xnet);
+
skb->rxhash = 0;
- skb_dst_drop(skb);
skb_dst_set(skb, &rt->dst);
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 17cc0ffa8c0d..e805e7b3030e 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -44,176 +44,10 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
-#define HASH_SIZE 16
-#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&(HASH_SIZE-1))
-
static struct rtnl_link_ops vti_link_ops __read_mostly;
static int vti_net_id __read_mostly;
-struct vti_net {
- struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
- struct ip_tunnel __rcu *tunnels_r[HASH_SIZE];
- struct ip_tunnel __rcu *tunnels_l[HASH_SIZE];
- struct ip_tunnel __rcu *tunnels_wc[1];
- struct ip_tunnel __rcu **tunnels[4];
-
- struct net_device *fb_tunnel_dev;
-};
-
-static int vti_fb_tunnel_init(struct net_device *dev);
static int vti_tunnel_init(struct net_device *dev);
-static void vti_tunnel_setup(struct net_device *dev);
-static void vti_dev_free(struct net_device *dev);
-static int vti_tunnel_bind_dev(struct net_device *dev);
-
-#define VTI_XMIT(stats1, stats2) do { \
- int err; \
- int pkt_len = skb->len; \
- err = dst_output(skb); \
- if (net_xmit_eval(err) == 0) { \
- u64_stats_update_begin(&(stats1)->syncp); \
- (stats1)->tx_bytes += pkt_len; \
- (stats1)->tx_packets++; \
- u64_stats_update_end(&(stats1)->syncp); \
- } else { \
- (stats2)->tx_errors++; \
- (stats2)->tx_aborted_errors++; \
- } \
-} while (0)
-
-
-static struct ip_tunnel *vti_tunnel_lookup(struct net *net,
- __be32 remote, __be32 local)
-{
- unsigned h0 = HASH(remote);
- unsigned h1 = HASH(local);
- struct ip_tunnel *t;
- struct vti_net *ipn = net_generic(net, vti_net_id);
-
- for_each_ip_tunnel_rcu(t, ipn->tunnels_r_l[h0 ^ h1])
- if (local == t->parms.iph.saddr &&
- remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
- return t;
- for_each_ip_tunnel_rcu(t, ipn->tunnels_r[h0])
- if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
- return t;
-
- for_each_ip_tunnel_rcu(t, ipn->tunnels_l[h1])
- if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP))
- return t;
-
- for_each_ip_tunnel_rcu(t, ipn->tunnels_wc[0])
- if (t && (t->dev->flags&IFF_UP))
- return t;
- return NULL;
-}
-
-static struct ip_tunnel __rcu **__vti_bucket(struct vti_net *ipn,
- struct ip_tunnel_parm *parms)
-{
- __be32 remote = parms->iph.daddr;
- __be32 local = parms->iph.saddr;
- unsigned h = 0;
- int prio = 0;
-
- if (remote) {
- prio |= 2;
- h ^= HASH(remote);
- }
- if (local) {
- prio |= 1;
- h ^= HASH(local);
- }
- return &ipn->tunnels[prio][h];
-}
-
-static inline struct ip_tunnel __rcu **vti_bucket(struct vti_net *ipn,
- struct ip_tunnel *t)
-{
- return __vti_bucket(ipn, &t->parms);
-}
-
-static void vti_tunnel_unlink(struct vti_net *ipn, struct ip_tunnel *t)
-{
- struct ip_tunnel __rcu **tp;
- struct ip_tunnel *iter;
-
- for (tp = vti_bucket(ipn, t);
- (iter = rtnl_dereference(*tp)) != NULL;
- tp = &iter->next) {
- if (t == iter) {
- rcu_assign_pointer(*tp, t->next);
- break;
- }
- }
-}
-
-static void vti_tunnel_link(struct vti_net *ipn, struct ip_tunnel *t)
-{
- struct ip_tunnel __rcu **tp = vti_bucket(ipn, t);
-
- rcu_assign_pointer(t->next, rtnl_dereference(*tp));
- rcu_assign_pointer(*tp, t);
-}
-
-static struct ip_tunnel *vti_tunnel_locate(struct net *net,
- struct ip_tunnel_parm *parms,
- int create)
-{
- __be32 remote = parms->iph.daddr;
- __be32 local = parms->iph.saddr;
- struct ip_tunnel *t, *nt;
- struct ip_tunnel __rcu **tp;
- struct net_device *dev;
- char name[IFNAMSIZ];
- struct vti_net *ipn = net_generic(net, vti_net_id);
-
- for (tp = __vti_bucket(ipn, parms);
- (t = rtnl_dereference(*tp)) != NULL;
- tp = &t->next) {
- if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
- return t;
- }
- if (!create)
- return NULL;
-
- if (parms->name[0])
- strlcpy(name, parms->name, IFNAMSIZ);
- else
- strcpy(name, "vti%d");
-
- dev = alloc_netdev(sizeof(*t), name, vti_tunnel_setup);
- if (dev == NULL)
- return NULL;
-
- dev_net_set(dev, net);
-
- nt = netdev_priv(dev);
- nt->parms = *parms;
- dev->rtnl_link_ops = &vti_link_ops;
-
- vti_tunnel_bind_dev(dev);
-
- if (register_netdevice(dev) < 0)
- goto failed_free;
-
- dev_hold(dev);
- vti_tunnel_link(ipn, nt);
- return nt;
-
-failed_free:
- free_netdev(dev);
- return NULL;
-}
-
-static void vti_tunnel_uninit(struct net_device *dev)
-{
- struct net *net = dev_net(dev);
- struct vti_net *ipn = net_generic(net, vti_net_id);
-
- vti_tunnel_unlink(ipn, netdev_priv(dev));
- dev_put(dev);
-}
static int vti_err(struct sk_buff *skb, u32 info)
{
@@ -222,6 +56,8 @@ static int vti_err(struct sk_buff *skb, u32 info)
* 8 bytes of packet payload. It means, that precise relaying of
* ICMP in the real Internet is absolutely infeasible.
*/
+ struct net *net = dev_net(skb->dev);
+ struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
struct iphdr *iph = (struct iphdr *)skb->data;
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
@@ -252,7 +88,8 @@ static int vti_err(struct sk_buff *skb, u32 info)
err = -ENOENT;
- t = vti_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr);
+ t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
+ iph->daddr, iph->saddr, 0);
if (t == NULL)
goto out;
@@ -281,8 +118,11 @@ static int vti_rcv(struct sk_buff *skb)
{
struct ip_tunnel *tunnel;
const struct iphdr *iph = ip_hdr(skb);
+ struct net *net = dev_net(skb->dev);
+ struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
- tunnel = vti_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr);
+ tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
+ iph->saddr, iph->daddr, 0);
if (tunnel != NULL) {
struct pcpu_tstats *tstats;
@@ -311,7 +151,6 @@ static int vti_rcv(struct sk_buff *skb)
static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- struct pcpu_tstats *tstats;
struct iphdr *tiph = &tunnel->parms.iph;
u8 tos;
struct rtable *rt; /* Route to the other host */
@@ -319,6 +158,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
struct iphdr *old_iph = ip_hdr(skb);
__be32 dst = tiph->daddr;
struct flowi4 fl4;
+ int err;
if (skb->protocol != htons(ETH_P_IP))
goto tx_error;
@@ -367,8 +207,10 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
nf_reset(skb);
skb->dev = skb_dst(skb)->dev;
- tstats = this_cpu_ptr(dev->tstats);
- VTI_XMIT(tstats, &dev->stats);
+ err = dst_output(skb);
+ if (net_xmit_eval(err) == 0)
+ err = skb->len;
+ iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
return NETDEV_TX_OK;
tx_error_icmp:
@@ -379,198 +221,57 @@ tx_error:
return NETDEV_TX_OK;
}
-static int vti_tunnel_bind_dev(struct net_device *dev)
-{
- struct net_device *tdev = NULL;
- struct ip_tunnel *tunnel;
- struct iphdr *iph;
-
- tunnel = netdev_priv(dev);
- iph = &tunnel->parms.iph;
-
- if (iph->daddr) {
- struct rtable *rt;
- struct flowi4 fl4;
- memset(&fl4, 0, sizeof(fl4));
- flowi4_init_output(&fl4, tunnel->parms.link,
- be32_to_cpu(tunnel->parms.i_key),
- RT_TOS(iph->tos), RT_SCOPE_UNIVERSE,
- IPPROTO_IPIP, 0,
- iph->daddr, iph->saddr, 0, 0);
- rt = ip_route_output_key(dev_net(dev), &fl4);
- if (!IS_ERR(rt)) {
- tdev = rt->dst.dev;
- ip_rt_put(rt);
- }
- dev->flags |= IFF_POINTOPOINT;
- }
-
- if (!tdev && tunnel->parms.link)
- tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
-
- if (tdev) {
- dev->hard_header_len = tdev->hard_header_len +
- sizeof(struct iphdr);
- dev->mtu = tdev->mtu;
- }
- dev->iflink = tunnel->parms.link;
- return dev->mtu;
-}
-
static int
vti_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
int err = 0;
struct ip_tunnel_parm p;
- struct ip_tunnel *t;
- struct net *net = dev_net(dev);
- struct vti_net *ipn = net_generic(net, vti_net_id);
-
- switch (cmd) {
- case SIOCGETTUNNEL:
- t = NULL;
- if (dev == ipn->fb_tunnel_dev) {
- if (copy_from_user(&p, ifr->ifr_ifru.ifru_data,
- sizeof(p))) {
- err = -EFAULT;
- break;
- }
- t = vti_tunnel_locate(net, &p, 0);
- }
- if (t == NULL)
- t = netdev_priv(dev);
- memcpy(&p, &t->parms, sizeof(p));
- p.i_flags |= GRE_KEY | VTI_ISVTI;
- p.o_flags |= GRE_KEY;
- if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
- err = -EFAULT;
- break;
-
- case SIOCADDTUNNEL:
- case SIOCCHGTUNNEL:
- err = -EPERM;
- if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
- goto done;
- err = -EFAULT;
- if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
- goto done;
+ if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
+ return -EFAULT;
- err = -EINVAL;
+ if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
p.iph.ihl != 5)
- goto done;
-
- t = vti_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);
-
- if (dev != ipn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
- if (t != NULL) {
- if (t->dev != dev) {
- err = -EEXIST;
- break;
- }
- } else {
- if (((dev->flags&IFF_POINTOPOINT) &&
- !p.iph.daddr) ||
- (!(dev->flags&IFF_POINTOPOINT) &&
- p.iph.daddr)) {
- err = -EINVAL;
- break;
- }
- t = netdev_priv(dev);
- vti_tunnel_unlink(ipn, t);
- synchronize_net();
- t->parms.iph.saddr = p.iph.saddr;
- t->parms.iph.daddr = p.iph.daddr;
- t->parms.i_key = p.i_key;
- t->parms.o_key = p.o_key;
- t->parms.iph.protocol = IPPROTO_IPIP;
- memcpy(dev->dev_addr, &p.iph.saddr, 4);
- memcpy(dev->broadcast, &p.iph.daddr, 4);
- vti_tunnel_link(ipn, t);
- netdev_state_change(dev);
- }
- }
-
- if (t) {
- err = 0;
- if (cmd == SIOCCHGTUNNEL) {
- t->parms.i_key = p.i_key;
- t->parms.o_key = p.o_key;
- if (t->parms.link != p.link) {
- t->parms.link = p.link;
- vti_tunnel_bind_dev(dev);
- netdev_state_change(dev);
- }
- }
- p.i_flags |= GRE_KEY | VTI_ISVTI;
- p.o_flags |= GRE_KEY;
- if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms,
- sizeof(p)))
- err = -EFAULT;
- } else
- err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
- break;
+ return -EINVAL;
+ }
- case SIOCDELTUNNEL:
- err = -EPERM;
- if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
- goto done;
-
- if (dev == ipn->fb_tunnel_dev) {
- err = -EFAULT;
- if (copy_from_user(&p, ifr->ifr_ifru.ifru_data,
- sizeof(p)))
- goto done;
- err = -ENOENT;
-
- t = vti_tunnel_locate(net, &p, 0);
- if (t == NULL)
- goto done;
- err = -EPERM;
- if (t->dev == ipn->fb_tunnel_dev)
- goto done;
- dev = t->dev;
- }
- unregister_netdevice(dev);
- err = 0;
- break;
+ err = ip_tunnel_ioctl(dev, &p, cmd);
+ if (err)
+ return err;
- default:
- err = -EINVAL;
+ if (cmd != SIOCDELTUNNEL) {
+ p.i_flags |= GRE_KEY | VTI_ISVTI;
+ p.o_flags |= GRE_KEY;
}
-done:
- return err;
-}
-
-static int vti_tunnel_change_mtu(struct net_device *dev, int new_mtu)
-{
- if (new_mtu < 68 || new_mtu > 0xFFF8)
- return -EINVAL;
- dev->mtu = new_mtu;
+ if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
+ return -EFAULT;
return 0;
}
static const struct net_device_ops vti_netdev_ops = {
.ndo_init = vti_tunnel_init,
- .ndo_uninit = vti_tunnel_uninit,
+ .ndo_uninit = ip_tunnel_uninit,
.ndo_start_xmit = vti_tunnel_xmit,
.ndo_do_ioctl = vti_tunnel_ioctl,
- .ndo_change_mtu = vti_tunnel_change_mtu,
+ .ndo_change_mtu = ip_tunnel_change_mtu,
.ndo_get_stats64 = ip_tunnel_get_stats64,
};
-static void vti_dev_free(struct net_device *dev)
+static void vti_tunnel_setup(struct net_device *dev)
{
- free_percpu(dev->tstats);
- free_netdev(dev);
+ dev->netdev_ops = &vti_netdev_ops;
+ ip_tunnel_setup(dev, vti_net_id);
}
-static void vti_tunnel_setup(struct net_device *dev)
+static int vti_tunnel_init(struct net_device *dev)
{
- dev->netdev_ops = &vti_netdev_ops;
- dev->destructor = vti_dev_free;
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+ struct iphdr *iph = &tunnel->parms.iph;
+
+ memcpy(dev->dev_addr, &iph->saddr, 4);
+ memcpy(dev->broadcast, &iph->daddr, 4);
dev->type = ARPHRD_TUNNEL;
dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
@@ -581,38 +282,18 @@ static void vti_tunnel_setup(struct net_device *dev)
dev->features |= NETIF_F_NETNS_LOCAL;
dev->features |= NETIF_F_LLTX;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
-}
-static int vti_tunnel_init(struct net_device *dev)
-{
- struct ip_tunnel *tunnel = netdev_priv(dev);
-
- tunnel->dev = dev;
- strcpy(tunnel->parms.name, dev->name);
-
- memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
- memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
-
- dev->tstats = alloc_percpu(struct pcpu_tstats);
- if (!dev->tstats)
- return -ENOMEM;
-
- return 0;
+ return ip_tunnel_init(dev);
}
-static int __net_init vti_fb_tunnel_init(struct net_device *dev)
+static void __net_init vti_fb_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph;
- struct vti_net *ipn = net_generic(dev_net(dev), vti_net_id);
iph->version = 4;
iph->protocol = IPPROTO_IPIP;
iph->ihl = 5;
-
- dev_hold(dev);
- rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
- return 0;
}
static struct xfrm_tunnel vti_handler __read_mostly = {
@@ -621,76 +302,30 @@ static struct xfrm_tunnel vti_handler __read_mostly = {
.priority = 1,
};
-static void vti_destroy_tunnels(struct vti_net *ipn, struct list_head *head)
-{
- int prio;
-
- for (prio = 1; prio < 4; prio++) {
- int h;
- for (h = 0; h < HASH_SIZE; h++) {
- struct ip_tunnel *t;
-
- t = rtnl_dereference(ipn->tunnels[prio][h]);
- while (t != NULL) {
- unregister_netdevice_queue(t->dev, head);
- t = rtnl_dereference(t->next);
- }
- }
- }
-}
-
static int __net_init vti_init_net(struct net *net)
{
int err;
- struct vti_net *ipn = net_generic(net, vti_net_id);
-
- ipn->tunnels[0] = ipn->tunnels_wc;
- ipn->tunnels[1] = ipn->tunnels_l;
- ipn->tunnels[2] = ipn->tunnels_r;
- ipn->tunnels[3] = ipn->tunnels_r_l;
-
- ipn->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel),
- "ip_vti0",
- vti_tunnel_setup);
- if (!ipn->fb_tunnel_dev) {
- err = -ENOMEM;
- goto err_alloc_dev;
- }
- dev_net_set(ipn->fb_tunnel_dev, net);
-
- err = vti_fb_tunnel_init(ipn->fb_tunnel_dev);
- if (err)
- goto err_reg_dev;
- ipn->fb_tunnel_dev->rtnl_link_ops = &vti_link_ops;
+ struct ip_tunnel_net *itn;
- err = register_netdev(ipn->fb_tunnel_dev);
+ err = ip_tunnel_init_net(net, vti_net_id, &vti_link_ops, "ip_vti0");
if (err)
- goto err_reg_dev;
+ return err;
+ itn = net_generic(net, vti_net_id);
+ vti_fb_tunnel_init(itn->fb_tunnel_dev);
return 0;
-
-err_reg_dev:
- vti_dev_free(ipn->fb_tunnel_dev);
-err_alloc_dev:
- /* nothing */
- return err;
}
static void __net_exit vti_exit_net(struct net *net)
{
- struct vti_net *ipn = net_generic(net, vti_net_id);
- LIST_HEAD(list);
-
- rtnl_lock();
- vti_destroy_tunnels(ipn, &list);
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
+ ip_tunnel_delete_net(itn, &vti_link_ops);
}
static struct pernet_operations vti_net_ops = {
.init = vti_init_net,
.exit = vti_exit_net,
.id = &vti_net_id,
- .size = sizeof(struct vti_net),
+ .size = sizeof(struct ip_tunnel_net),
};
static int vti_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -728,78 +363,19 @@ static void vti_netlink_parms(struct nlattr *data[],
static int vti_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
- struct ip_tunnel *nt;
- struct net *net = dev_net(dev);
- struct vti_net *ipn = net_generic(net, vti_net_id);
- int mtu;
- int err;
-
- nt = netdev_priv(dev);
- vti_netlink_parms(data, &nt->parms);
-
- if (vti_tunnel_locate(net, &nt->parms, 0))
- return -EEXIST;
+ struct ip_tunnel_parm parms;
- mtu = vti_tunnel_bind_dev(dev);
- if (!tb[IFLA_MTU])
- dev->mtu = mtu;
-
- err = register_netdevice(dev);
- if (err)
- goto out;
-
- dev_hold(dev);
- vti_tunnel_link(ipn, nt);
-
-out:
- return err;
+ vti_netlink_parms(data, &parms);
+ return ip_tunnel_newlink(dev, tb, &parms);
}
static int vti_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[])
{
- struct ip_tunnel *t, *nt;
- struct net *net = dev_net(dev);
- struct vti_net *ipn = net_generic(net, vti_net_id);
struct ip_tunnel_parm p;
- int mtu;
-
- if (dev == ipn->fb_tunnel_dev)
- return -EINVAL;
- nt = netdev_priv(dev);
vti_netlink_parms(data, &p);
-
- t = vti_tunnel_locate(net, &p, 0);
-
- if (t) {
- if (t->dev != dev)
- return -EEXIST;
- } else {
- t = nt;
-
- vti_tunnel_unlink(ipn, t);
- t->parms.iph.saddr = p.iph.saddr;
- t->parms.iph.daddr = p.iph.daddr;
- t->parms.i_key = p.i_key;
- t->parms.o_key = p.o_key;
- if (dev->type != ARPHRD_ETHER) {
- memcpy(dev->dev_addr, &p.iph.saddr, 4);
- memcpy(dev->broadcast, &p.iph.daddr, 4);
- }
- vti_tunnel_link(ipn, t);
- netdev_state_change(dev);
- }
-
- if (t->parms.link != p.link) {
- t->parms.link = p.link;
- mtu = vti_tunnel_bind_dev(dev);
- if (!tb[IFLA_MTU])
- dev->mtu = mtu;
- netdev_state_change(dev);
- }
-
- return 0;
+ return ip_tunnel_changelink(dev, tb, &p);
}
static size_t vti_get_size(const struct net_device *dev)
@@ -865,7 +441,7 @@ static int __init vti_init(void)
err = xfrm4_mode_tunnel_input_register(&vti_handler);
if (err < 0) {
unregister_pernet_device(&vti_net_ops);
- pr_info(KERN_INFO "vti init: can't register tunnel\n");
+ pr_info("vti init: can't register tunnel\n");
}
err = rtnl_link_register(&vti_link_ops);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index b3ac3c3f6219..7f80fb4b82d3 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -285,7 +285,6 @@ static void ipip_tunnel_setup(struct net_device *dev)
dev->flags = IFF_NOARP;
dev->iflink = 0;
dev->addr_len = 4;
- dev->features |= NETIF_F_NETNS_LOCAL;
dev->features |= NETIF_F_LLTX;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
@@ -436,7 +435,7 @@ static int __net_init ipip_init_net(struct net *net)
static void __net_exit ipip_exit_net(struct net *net)
{
struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
- ip_tunnel_delete_net(itn);
+ ip_tunnel_delete_net(itn, &ipip_link_ops);
}
static struct pernet_operations ipip_net_ops = {
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 132a09664704..9ae54b09254f 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -127,9 +127,9 @@ static struct kmem_cache *mrt_cachep __read_mostly;
static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);
-static int ip_mr_forward(struct net *net, struct mr_table *mrt,
- struct sk_buff *skb, struct mfc_cache *cache,
- int local);
+static void ip_mr_forward(struct net *net, struct mr_table *mrt,
+ struct sk_buff *skb, struct mfc_cache *cache,
+ int local);
static int ipmr_cache_report(struct mr_table *mrt,
struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
@@ -1795,9 +1795,9 @@ static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
/* "local" means that we should preserve one skb (for local delivery) */
-static int ip_mr_forward(struct net *net, struct mr_table *mrt,
- struct sk_buff *skb, struct mfc_cache *cache,
- int local)
+static void ip_mr_forward(struct net *net, struct mr_table *mrt,
+ struct sk_buff *skb, struct mfc_cache *cache,
+ int local)
{
int psend = -1;
int vif, ct;
@@ -1903,14 +1903,13 @@ last_forward:
ipmr_queue_xmit(net, mrt, skb2, cache, psend);
} else {
ipmr_queue_xmit(net, mrt, skb, cache, psend);
- return 0;
+ return;
}
}
dont_forward:
if (!local)
kfree_skb(skb);
- return 0;
}
static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
@@ -2068,9 +2067,8 @@ static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
skb_reset_network_header(skb);
skb->protocol = htons(ETH_P_IP);
skb->ip_summed = CHECKSUM_NONE;
- skb->pkt_type = PACKET_HOST;
- skb_tunnel_rx(skb, reg_dev);
+ skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
netif_rx(skb);
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 4e9028017428..1657e39b291f 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -110,6 +110,19 @@ config IP_NF_TARGET_REJECT
To compile it as a module, choose M here. If unsure, say N.
+config IP_NF_TARGET_SYNPROXY
+ tristate "SYNPROXY target support"
+ depends on NF_CONNTRACK && NETFILTER_ADVANCED
+ select NETFILTER_SYNPROXY
+ select SYN_COOKIES
+ help
+ The SYNPROXY target allows you to intercept TCP connections and
+ establish them using syncookies before they are passed on to the
+ server. This avoids conntrack and server resource usage during
+ SYN-flood attacks.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config IP_NF_TARGET_ULOG
tristate "ULOG target support (obsolete)"
default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 007b128eecc9..3622b248b6dd 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o
obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
+obj-$(CONFIG_IP_NF_TARGET_SYNPROXY) += ipt_SYNPROXY.o
obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
# generic ARP tables
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index 30e4de940567..00352ce0f0de 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -118,7 +118,7 @@ static int masq_device_event(struct notifier_block *this,
NF_CT_ASSERT(dev->ifindex != 0);
nf_ct_iterate_cleanup(net, device_cmp,
- (void *)(long)dev->ifindex);
+ (void *)(long)dev->ifindex, 0, 0);
}
return NOTIFY_DONE;
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 04b18c1ac345..b969131ad1c1 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -119,7 +119,26 @@ static void send_reset(struct sk_buff *oldskb, int hook)
nf_ct_attach(nskb, oldskb);
- ip_local_out(nskb);
+#ifdef CONFIG_BRIDGE_NETFILTER
+ /* If we use ip_local_out for bridged traffic, the MAC source on
+ * the RST will be ours, instead of the destination's. This confuses
+ * some routers/firewalls, and they drop the packet. So we need to
+ * build the eth header using the original destination's MAC as the
+ * source, and send the RST packet directly.
+ */
+ if (oldskb->nf_bridge) {
+ struct ethhdr *oeth = eth_hdr(oldskb);
+ nskb->dev = oldskb->nf_bridge->physindev;
+ niph->tot_len = htons(nskb->len);
+ ip_send_check(niph);
+ if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
+ oeth->h_source, oeth->h_dest, nskb->len) < 0)
+ goto free_nskb;
+ dev_queue_xmit(nskb);
+ } else
+#endif
+ ip_local_out(nskb);
+
return;
free_nskb:
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
new file mode 100644
index 000000000000..67e17dcda65e
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -0,0 +1,476 @@
+/*
+ * Copyright (c) 2013 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_SYNPROXY.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
+#include <net/netfilter/nf_conntrack_synproxy.h>
+
+static struct iphdr *
+synproxy_build_ip(struct sk_buff *skb, u32 saddr, u32 daddr)
+{
+ struct iphdr *iph;
+
+ skb_reset_network_header(skb);
+ iph = (struct iphdr *)skb_put(skb, sizeof(*iph));
+ iph->version = 4;
+ iph->ihl = sizeof(*iph) / 4;
+ iph->tos = 0;
+ iph->id = 0;
+ iph->frag_off = htons(IP_DF);
+ iph->ttl = sysctl_ip_default_ttl;
+ iph->protocol = IPPROTO_TCP;
+ iph->check = 0;
+ iph->saddr = saddr;
+ iph->daddr = daddr;
+
+ return iph;
+}
+
+static void
+synproxy_send_tcp(const struct sk_buff *skb, struct sk_buff *nskb,
+ struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
+ struct iphdr *niph, struct tcphdr *nth,
+ unsigned int tcp_hdr_size)
+{
+ nth->check = ~tcp_v4_check(tcp_hdr_size, niph->saddr, niph->daddr, 0);
+ nskb->ip_summed = CHECKSUM_PARTIAL;
+ nskb->csum_start = (unsigned char *)nth - nskb->head;
+ nskb->csum_offset = offsetof(struct tcphdr, check);
+
+ skb_dst_set_noref(nskb, skb_dst(skb));
+ nskb->protocol = htons(ETH_P_IP);
+ if (ip_route_me_harder(nskb, RTN_UNSPEC))
+ goto free_nskb;
+
+ if (nfct) {
+ nskb->nfct = nfct;
+ nskb->nfctinfo = ctinfo;
+ nf_conntrack_get(nfct);
+ }
+
+ ip_local_out(nskb);
+ return;
+
+free_nskb:
+ kfree_skb(nskb);
+}
+
+static void
+synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th,
+ const struct synproxy_options *opts)
+{
+ struct sk_buff *nskb;
+ struct iphdr *iph, *niph;
+ struct tcphdr *nth;
+ unsigned int tcp_hdr_size;
+ u16 mss = opts->mss;
+
+ iph = ip_hdr(skb);
+
+ tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
+ nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
+ GFP_ATOMIC);
+ if (nskb == NULL)
+ return;
+ skb_reserve(nskb, MAX_TCP_HEADER);
+
+ niph = synproxy_build_ip(nskb, iph->daddr, iph->saddr);
+
+ skb_reset_transport_header(nskb);
+ nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
+ nth->source = th->dest;
+ nth->dest = th->source;
+ nth->seq = htonl(__cookie_v4_init_sequence(iph, th, &mss));
+ nth->ack_seq = htonl(ntohl(th->seq) + 1);
+ tcp_flag_word(nth) = TCP_FLAG_SYN | TCP_FLAG_ACK;
+ if (opts->options & XT_SYNPROXY_OPT_ECN)
+ tcp_flag_word(nth) |= TCP_FLAG_ECE;
+ nth->doff = tcp_hdr_size / 4;
+ nth->window = 0;
+ nth->check = 0;
+ nth->urg_ptr = 0;
+
+ synproxy_build_options(nth, opts);
+
+ synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+ niph, nth, tcp_hdr_size);
+}
+
+static void
+synproxy_send_server_syn(const struct synproxy_net *snet,
+ const struct sk_buff *skb, const struct tcphdr *th,
+ const struct synproxy_options *opts, u32 recv_seq)
+{
+ struct sk_buff *nskb;
+ struct iphdr *iph, *niph;
+ struct tcphdr *nth;
+ unsigned int tcp_hdr_size;
+
+ iph = ip_hdr(skb);
+
+ tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
+ nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
+ GFP_ATOMIC);
+ if (nskb == NULL)
+ return;
+ skb_reserve(nskb, MAX_TCP_HEADER);
+
+ niph = synproxy_build_ip(nskb, iph->saddr, iph->daddr);
+
+ skb_reset_transport_header(nskb);
+ nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
+ nth->source = th->source;
+ nth->dest = th->dest;
+ nth->seq = htonl(recv_seq - 1);
+ /* ack_seq is used to relay our ISN to the synproxy hook to initialize
+ * sequence number translation once a connection tracking entry exists.
+ */
+ nth->ack_seq = htonl(ntohl(th->ack_seq) - 1);
+ tcp_flag_word(nth) = TCP_FLAG_SYN;
+ if (opts->options & XT_SYNPROXY_OPT_ECN)
+ tcp_flag_word(nth) |= TCP_FLAG_ECE | TCP_FLAG_CWR;
+ nth->doff = tcp_hdr_size / 4;
+ nth->window = th->window;
+ nth->check = 0;
+ nth->urg_ptr = 0;
+
+ synproxy_build_options(nth, opts);
+
+ synproxy_send_tcp(skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
+ niph, nth, tcp_hdr_size);
+}
+
+static void
+synproxy_send_server_ack(const struct synproxy_net *snet,
+ const struct ip_ct_tcp *state,
+ const struct sk_buff *skb, const struct tcphdr *th,
+ const struct synproxy_options *opts)
+{
+ struct sk_buff *nskb;
+ struct iphdr *iph, *niph;
+ struct tcphdr *nth;
+ unsigned int tcp_hdr_size;
+
+ iph = ip_hdr(skb);
+
+ tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
+ nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
+ GFP_ATOMIC);
+ if (nskb == NULL)
+ return;
+ skb_reserve(nskb, MAX_TCP_HEADER);
+
+ niph = synproxy_build_ip(nskb, iph->daddr, iph->saddr);
+
+ skb_reset_transport_header(nskb);
+ nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
+ nth->source = th->dest;
+ nth->dest = th->source;
+ nth->seq = htonl(ntohl(th->ack_seq));
+ nth->ack_seq = htonl(ntohl(th->seq) + 1);
+ tcp_flag_word(nth) = TCP_FLAG_ACK;
+ nth->doff = tcp_hdr_size / 4;
+ nth->window = htons(state->seen[IP_CT_DIR_ORIGINAL].td_maxwin);
+ nth->check = 0;
+ nth->urg_ptr = 0;
+
+ synproxy_build_options(nth, opts);
+
+ synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+}
+
+static void
+synproxy_send_client_ack(const struct synproxy_net *snet,
+ const struct sk_buff *skb, const struct tcphdr *th,
+ const struct synproxy_options *opts)
+{
+ struct sk_buff *nskb;
+ struct iphdr *iph, *niph;
+ struct tcphdr *nth;
+ unsigned int tcp_hdr_size;
+
+ iph = ip_hdr(skb);
+
+ tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
+ nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
+ GFP_ATOMIC);
+ if (nskb == NULL)
+ return;
+ skb_reserve(nskb, MAX_TCP_HEADER);
+
+ niph = synproxy_build_ip(nskb, iph->saddr, iph->daddr);
+
+ skb_reset_transport_header(nskb);
+ nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
+ nth->source = th->source;
+ nth->dest = th->dest;
+ nth->seq = htonl(ntohl(th->seq) + 1);
+ nth->ack_seq = th->ack_seq;
+ tcp_flag_word(nth) = TCP_FLAG_ACK;
+ nth->doff = tcp_hdr_size / 4;
+ nth->window = htons(ntohs(th->window) >> opts->wscale);
+ nth->check = 0;
+ nth->urg_ptr = 0;
+
+ synproxy_build_options(nth, opts);
+
+ synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+}
+
+static bool
+synproxy_recv_client_ack(const struct synproxy_net *snet,
+ const struct sk_buff *skb, const struct tcphdr *th,
+ struct synproxy_options *opts, u32 recv_seq)
+{
+ int mss;
+
+ mss = __cookie_v4_check(ip_hdr(skb), th, ntohl(th->ack_seq) - 1);
+ if (mss == 0) {
+ this_cpu_inc(snet->stats->cookie_invalid);
+ return false;
+ }
+
+ this_cpu_inc(snet->stats->cookie_valid);
+ opts->mss = mss;
+
+ if (opts->options & XT_SYNPROXY_OPT_TIMESTAMP)
+ synproxy_check_timestamp_cookie(opts);
+
+ synproxy_send_server_syn(snet, skb, th, opts, recv_seq);
+ return true;
+}
+
+static unsigned int
+synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ const struct xt_synproxy_info *info = par->targinfo;
+ struct synproxy_net *snet = synproxy_pernet(dev_net(par->in));
+ struct synproxy_options opts = {};
+ struct tcphdr *th, _th;
+
+ if (nf_ip_checksum(skb, par->hooknum, par->thoff, IPPROTO_TCP))
+ return NF_DROP;
+
+ th = skb_header_pointer(skb, par->thoff, sizeof(_th), &_th);
+ if (th == NULL)
+ return NF_DROP;
+
+ synproxy_parse_options(skb, par->thoff, th, &opts);
+
+ if (th->syn && !(th->ack || th->fin || th->rst)) {
+ /* Initial SYN from client */
+ this_cpu_inc(snet->stats->syn_received);
+
+ if (th->ece && th->cwr)
+ opts.options |= XT_SYNPROXY_OPT_ECN;
+
+ opts.options &= info->options;
+ if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
+ synproxy_init_timestamp_cookie(info, &opts);
+ else
+ opts.options &= ~(XT_SYNPROXY_OPT_WSCALE |
+ XT_SYNPROXY_OPT_SACK_PERM |
+ XT_SYNPROXY_OPT_ECN);
+
+ synproxy_send_client_synack(skb, th, &opts);
+ return NF_DROP;
+
+ } else if (th->ack && !(th->fin || th->rst || th->syn)) {
+ /* ACK from client */
+ synproxy_recv_client_ack(snet, skb, th, &opts, ntohl(th->seq));
+ return NF_DROP;
+ }
+
+ return XT_CONTINUE;
+}
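
The target thus completes the client handshake entirely statelessly: the first SYN is answered with a cookie-encoded SYN/ACK and dropped, and only a client ACK whose (ack_seq - 1) decodes to a valid cookie triggers the SYN toward the server. A toy round trip of that encode/validate step, with a stand-in hash instead of secure_tcp_syn_cookie() and no timestamp counter (illustrative only, not from the patch):

    /* Toy illustration of the stateless cookie round trip: the
     * SYN/ACK's ISN encodes the client's MSS, and the final ACK must
     * decode back to a valid MSS index.
     */
    #include <stdint.h>
    #include <stdio.h>

    static const uint16_t msstab[] = { 536, 1300, 1440, 1460 };

    static uint32_t toy_hash(uint32_t x)       /* NOT cryptographic */
    {
        x ^= x >> 16; x *= 0x45d9f3b; x ^= x >> 16;
        return x;
    }

    static uint32_t cookie_init_sequence(uint32_t seed, unsigned int mssind)
    {
        return (toy_hash(seed) & ~3u) | mssind; /* 2 bits carry the MSS */
    }

    static int cookie_check(uint32_t seed, uint32_t isn)
    {
        if ((isn & ~3u) != (toy_hash(seed) & ~3u))
            return 0;                          /* forged or stale ACK */
        return msstab[isn & 3u];
    }

    int main(void)
    {
        uint32_t seed = 0x12345678;    /* stands in for addrs/ports/secret */
        uint32_t isn = cookie_init_sequence(seed, 3);

        printf("decoded mss=%d\n", cookie_check(seed, isn)); /* 1460 */
        return 0;
    }
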
+
+static unsigned int ipv4_synproxy_hook(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ struct synproxy_net *snet = synproxy_pernet(dev_net(in ? : out));
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct;
+ struct nf_conn_synproxy *synproxy;
+ struct synproxy_options opts = {};
+ const struct ip_ct_tcp *state;
+ struct tcphdr *th, _th;
+ unsigned int thoff;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (ct == NULL)
+ return NF_ACCEPT;
+
+ synproxy = nfct_synproxy(ct);
+ if (synproxy == NULL)
+ return NF_ACCEPT;
+
+ if (nf_is_loopback_packet(skb))
+ return NF_ACCEPT;
+
+ thoff = ip_hdrlen(skb);
+ th = skb_header_pointer(skb, thoff, sizeof(_th), &_th);
+ if (th == NULL)
+ return NF_DROP;
+
+ state = &ct->proto.tcp;
+ switch (state->state) {
+ case TCP_CONNTRACK_CLOSE:
+ if (th->rst && !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
+ nf_ct_seqadj_init(ct, ctinfo, synproxy->isn -
+ ntohl(th->seq) + 1);
+ break;
+ }
+
+ if (!th->syn || th->ack ||
+ CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
+ break;
+
+ /* Reopened connection: reset the sequence number and timestamp
+ * adjustments; they will be reinitialized once the connection is
+ * reestablished.
+ */
+ nf_ct_seqadj_init(ct, ctinfo, 0);
+ synproxy->tsoff = 0;
+ this_cpu_inc(snet->stats->conn_reopened);
+
+ /* fall through */
+ case TCP_CONNTRACK_SYN_SENT:
+ synproxy_parse_options(skb, thoff, th, &opts);
+
+ if (!th->syn && th->ack &&
+ CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
+ /* Keep-Alives are sent with SEG.SEQ = SND.NXT-1,
+ * therefore we need to add 1 to make the SYN sequence
+ * number match the one of the first SYN.
+ */
+ if (synproxy_recv_client_ack(snet, skb, th, &opts,
+ ntohl(th->seq) + 1))
+ this_cpu_inc(snet->stats->cookie_retrans);
+
+ return NF_DROP;
+ }
+
+ synproxy->isn = ntohl(th->ack_seq);
+ if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
+ synproxy->its = opts.tsecr;
+ break;
+ case TCP_CONNTRACK_SYN_RECV:
+ if (!th->syn || !th->ack)
+ break;
+
+ synproxy_parse_options(skb, thoff, th, &opts);
+ if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
+ synproxy->tsoff = opts.tsval - synproxy->its;
+
+ opts.options &= ~(XT_SYNPROXY_OPT_MSS |
+ XT_SYNPROXY_OPT_WSCALE |
+ XT_SYNPROXY_OPT_SACK_PERM);
+
+ swap(opts.tsval, opts.tsecr);
+ synproxy_send_server_ack(snet, state, skb, th, &opts);
+
+ nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq));
+
+ swap(opts.tsval, opts.tsecr);
+ synproxy_send_client_ack(snet, skb, th, &opts);
+
+ consume_skb(skb);
+ return NF_STOLEN;
+ default:
+ break;
+ }
+
+ synproxy_tstamp_adjust(skb, thoff, th, ct, ctinfo, synproxy);
+ return NF_ACCEPT;
+}
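
The NF_STOLEN path above splices the two half-connections together: the proxy spoofed ISN p to the client while the server chose its own ISN s, so conntrack's seqadj must shift the server-to-client stream by (p - s), which is what synproxy->isn - ntohl(th->seq) records. A numeric sketch of that modular arithmetic (illustrative userspace C, values made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t p = 0x9abcdef0;      /* cookie ISN sent to the client */
        uint32_t s = 0x11112222;      /* server's real ISN */
        uint32_t off = p - s;         /* wraps safely, mod 2^32 */
        uint32_t server_seq = s + 1;  /* first data byte from server */

        printf("client sees seq %#x\n", server_seq + off);  /* p + 1 */
        return 0;
    }
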
+
+static int synproxy_tg4_check(const struct xt_tgchk_param *par)
+{
+ const struct ipt_entry *e = par->entryinfo;
+
+ if (e->ip.proto != IPPROTO_TCP ||
+ e->ip.invflags & XT_INV_PROTO)
+ return -EINVAL;
+
+ return nf_ct_l3proto_try_module_get(par->family);
+}
+
+static void synproxy_tg4_destroy(const struct xt_tgdtor_param *par)
+{
+ nf_ct_l3proto_module_put(par->family);
+}
+
+static struct xt_target synproxy_tg4_reg __read_mostly = {
+ .name = "SYNPROXY",
+ .family = NFPROTO_IPV4,
+ .target = synproxy_tg4,
+ .targetsize = sizeof(struct xt_synproxy_info),
+ .checkentry = synproxy_tg4_check,
+ .destroy = synproxy_tg4_destroy,
+ .me = THIS_MODULE,
+};
+
+static struct nf_hook_ops ipv4_synproxy_ops[] __read_mostly = {
+ {
+ .hook = ipv4_synproxy_hook,
+ .owner = THIS_MODULE,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_LOCAL_IN,
+ .priority = NF_IP_PRI_CONNTRACK_CONFIRM - 1,
+ },
+ {
+ .hook = ipv4_synproxy_hook,
+ .owner = THIS_MODULE,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_POST_ROUTING,
+ .priority = NF_IP_PRI_CONNTRACK_CONFIRM - 1,
+ },
+};
+
+static int __init synproxy_tg4_init(void)
+{
+ int err;
+
+ err = nf_register_hooks(ipv4_synproxy_ops,
+ ARRAY_SIZE(ipv4_synproxy_ops));
+ if (err < 0)
+ goto err1;
+
+ err = xt_register_target(&synproxy_tg4_reg);
+ if (err < 0)
+ goto err2;
+
+ return 0;
+
+err2:
+ nf_unregister_hooks(ipv4_synproxy_ops, ARRAY_SIZE(ipv4_synproxy_ops));
+err1:
+ return err;
+}
+
+static void __exit synproxy_tg4_exit(void)
+{
+ xt_unregister_target(&synproxy_tg4_reg);
+ nf_unregister_hooks(ipv4_synproxy_ops, ARRAY_SIZE(ipv4_synproxy_ops));
+}
+
+module_init(synproxy_tg4_init);
+module_exit(synproxy_tg4_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 0a2e0e3e95ba..86f5b34a4ed1 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -25,6 +25,7 @@
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
@@ -136,11 +137,7 @@ static unsigned int ipv4_confirm(unsigned int hooknum,
/* adjust seqs for loopback traffic only in outgoing direction */
if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
!nf_is_loopback_packet(skb)) {
- typeof(nf_nat_seq_adjust_hook) seq_adjust;
-
- seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook);
- if (!seq_adjust ||
- !seq_adjust(skb, ct, ctinfo, ip_hdrlen(skb))) {
+ if (!nf_ct_seq_adjust(skb, ct, ctinfo, ip_hdrlen(skb))) {
NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
return NF_DROP;
}
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 746427c9e719..d7d9882d4cae 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -1082,7 +1082,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
__u16 srcp = ntohs(inet->inet_sport);
seq_printf(f, "%5d: %08X:%04X %08X:%04X"
- " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d%n",
+ " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d%n",
bucket, src, srcp, dest, destp, sp->sk_state,
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 463bd1273346..4a0335854b89 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -111,7 +111,7 @@ static const struct snmp_mib snmp4_ipstats_list[] = {
SNMP_MIB_SENTINEL
};
-/* Following RFC4293 items are displayed in /proc/net/netstat */
+/* Following items are displayed in /proc/net/netstat */
static const struct snmp_mib snmp4_ipextstats_list[] = {
SNMP_MIB_ITEM("InNoRoutes", IPSTATS_MIB_INNOROUTES),
SNMP_MIB_ITEM("InTruncatedPkts", IPSTATS_MIB_INTRUNCATEDPKTS),
@@ -125,7 +125,12 @@ static const struct snmp_mib snmp4_ipextstats_list[] = {
SNMP_MIB_ITEM("OutMcastOctets", IPSTATS_MIB_OUTMCASTOCTETS),
SNMP_MIB_ITEM("InBcastOctets", IPSTATS_MIB_INBCASTOCTETS),
SNMP_MIB_ITEM("OutBcastOctets", IPSTATS_MIB_OUTBCASTOCTETS),
+ /* Non RFC4293 fields */
SNMP_MIB_ITEM("InCsumErrors", IPSTATS_MIB_CSUMERRORS),
+ SNMP_MIB_ITEM("InNoECTPkts", IPSTATS_MIB_NOECTPKTS),
+ SNMP_MIB_ITEM("InECT1Pkts", IPSTATS_MIB_ECT1PKTS),
+ SNMP_MIB_ITEM("InECT0Pkts", IPSTATS_MIB_ECT0PKTS),
+ SNMP_MIB_ITEM("InCEPkts", IPSTATS_MIB_CEPKTS),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 61e60d67adca..a86c7ae71881 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -988,7 +988,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
srcp = inet->inet_num;
seq_printf(seq, "%4d: %08X:%04X %08X:%04X"
- " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n",
+ " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d\n",
i, src, srcp, dest, destp, sp->sk_state,
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index a9a54a236832..727f4365bcdf 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -112,7 +112,8 @@
#define RT_FL_TOS(oldflp4) \
((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
-#define IP_MAX_MTU 0xFFF0
+/* IPv4 datagram length is stored into 16bit field (tot_len) */
+#define IP_MAX_MTU 0xFFFF
#define RT_GC_TIMEOUT (300*HZ)
@@ -435,12 +436,12 @@ static inline int ip_rt_proc_init(void)
static inline bool rt_is_expired(const struct rtable *rth)
{
- return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
+ return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
}
void rt_cache_flush(struct net *net)
{
- rt_genid_bump(net);
+ rt_genid_bump_ipv4(net);
}
static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
@@ -1227,10 +1228,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
mtu = 576;
}
- if (mtu > IP_MAX_MTU)
- mtu = IP_MAX_MTU;
-
- return mtu;
+ return min_t(unsigned int, mtu, IP_MAX_MTU);
}
static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
@@ -1458,7 +1456,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
#endif
rth->dst.output = ip_rt_bug;
- rth->rt_genid = rt_genid(dev_net(dev));
+ rth->rt_genid = rt_genid_ipv4(dev_net(dev));
rth->rt_flags = RTCF_MULTICAST;
rth->rt_type = RTN_MULTICAST;
rth->rt_is_input= 1;
@@ -1589,7 +1587,7 @@ static int __mkroute_input(struct sk_buff *skb,
goto cleanup;
}
- rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
+ rth->rt_genid = rt_genid_ipv4(dev_net(rth->dst.dev));
rth->rt_flags = flags;
rth->rt_type = res->type;
rth->rt_is_input = 1;
@@ -1760,7 +1758,7 @@ local_input:
rth->dst.tclassid = itag;
#endif
- rth->rt_genid = rt_genid(net);
+ rth->rt_genid = rt_genid_ipv4(net);
rth->rt_flags = flags|RTCF_LOCAL;
rth->rt_type = res.type;
rth->rt_is_input = 1;
@@ -1945,7 +1943,7 @@ add:
rth->dst.output = ip_output;
- rth->rt_genid = rt_genid(dev_net(dev_out));
+ rth->rt_genid = rt_genid_ipv4(dev_net(dev_out));
rth->rt_flags = flags;
rth->rt_type = type;
rth->rt_is_input = 0;
@@ -2227,7 +2225,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
rt->rt_iif = ort->rt_iif;
rt->rt_pmtu = ort->rt_pmtu;
- rt->rt_genid = rt_genid(net);
+ rt->rt_genid = rt_genid_ipv4(net);
rt->rt_flags = ort->rt_flags;
rt->rt_type = ort->rt_type;
rt->rt_gateway = ort->rt_gateway;
@@ -2665,7 +2663,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
static __net_init int rt_genid_init(struct net *net)
{
- atomic_set(&net->rt_genid, 0);
+ atomic_set(&net->ipv4.rt_genid, 0);
atomic_set(&net->fnhe_genid, 0);
get_random_bytes(&net->ipv4.dev_addr_genid,
sizeof(net->ipv4.dev_addr_genid));
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index b05c96e7af8b..14a15c49129d 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -160,26 +160,33 @@ static __u16 const msstab[] = {
* Generate a syncookie. mssp points to the mss, which is returned
* rounded down to the value encoded in the cookie.
*/
-__u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
+u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
+ u16 *mssp)
{
- const struct iphdr *iph = ip_hdr(skb);
- const struct tcphdr *th = tcp_hdr(skb);
int mssind;
const __u16 mss = *mssp;
- tcp_synq_overflow(sk);
-
for (mssind = ARRAY_SIZE(msstab) - 1; mssind ; mssind--)
if (mss >= msstab[mssind])
break;
*mssp = msstab[mssind];
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
-
return secure_tcp_syn_cookie(iph->saddr, iph->daddr,
th->source, th->dest, ntohl(th->seq),
jiffies / (HZ * 60), mssind);
}
+EXPORT_SYMBOL_GPL(__cookie_v4_init_sequence);
+
+__u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ const struct tcphdr *th = tcp_hdr(skb);
+
+ tcp_synq_overflow(sk);
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
+
+ return __cookie_v4_init_sequence(iph, th, mssp);
+}
/*
* This (misnamed) value is the age of syncookie which is permitted.
@@ -192,10 +199,9 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
 * Check if an ACK sequence number is a valid syncookie.
* Return the decoded mss if it is, or 0 if not.
*/
-static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
+int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
+ u32 cookie)
{
- const struct iphdr *iph = ip_hdr(skb);
- const struct tcphdr *th = tcp_hdr(skb);
__u32 seq = ntohl(th->seq) - 1;
__u32 mssind = check_tcp_syn_cookie(cookie, iph->saddr, iph->daddr,
th->source, th->dest, seq,
@@ -204,6 +210,7 @@ static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
}
+EXPORT_SYMBOL_GPL(__cookie_v4_check);
static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
@@ -284,7 +291,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
goto out;
if (tcp_synq_no_recent_overflow(sk) ||
- (mss = cookie_check(skb, cookie)) == 0) {
+ (mss = __cookie_v4_check(ip_hdr(skb), th, cookie)) == 0) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
goto out;
}
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 610e324348d1..540279f4c531 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -29,6 +29,7 @@
static int zero;
static int one = 1;
static int four = 4;
+static int gso_max_segs = GSO_MAX_SEGS;
static int tcp_retr1_max = 255;
static int ip_local_port_range_min[] = { 1, 1 };
static int ip_local_port_range_max[] = { 65535, 65535 };
@@ -559,6 +560,13 @@ static struct ctl_table ipv4_table[] = {
.extra1 = &one,
},
{
+ .procname = "tcp_notsent_lowat",
+ .data = &sysctl_tcp_notsent_lowat,
+ .maxlen = sizeof(sysctl_tcp_notsent_lowat),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
.procname = "tcp_rmem",
.data = &sysctl_tcp_rmem,
.maxlen = sizeof(sysctl_tcp_rmem),
@@ -754,6 +762,15 @@ static struct ctl_table ipv4_table[] = {
.extra2 = &four,
},
{
+ .procname = "tcp_min_tso_segs",
+ .data = &sysctl_tcp_min_tso_segs,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &gso_max_segs,
+ },
+ {
.procname = "udp_mem",
.data = &sysctl_udp_mem,
.maxlen = sizeof(sysctl_udp_mem),
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b2f6c74861af..6e5617b9f9db 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -283,6 +283,8 @@
int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
+int sysctl_tcp_min_tso_segs __read_mostly = 2;
+
struct percpu_counter tcp_orphan_count;
EXPORT_SYMBOL_GPL(tcp_orphan_count);
@@ -410,10 +412,6 @@ void tcp_init_sock(struct sock *sk)
icsk->icsk_sync_mss = tcp_sync_mss;
- /* Presumed zeroed, in order of appearance:
- * cookie_in_always, cookie_out_never,
- * s_data_constant, s_data_in, s_data_out
- */
sk->sk_sndbuf = sysctl_tcp_wmem[1];
sk->sk_rcvbuf = sysctl_tcp_rmem[1];
@@ -499,7 +497,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
mask |= POLLIN | POLLRDNORM;
if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
- if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
+ if (sk_stream_is_writeable(sk)) {
mask |= POLLOUT | POLLWRNORM;
} else { /* send SIGIO later */
set_bit(SOCK_ASYNC_NOSPACE,
@@ -510,7 +508,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
* wspace test but before the flags are set,
* IO signal will be lost.
*/
- if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
+ if (sk_stream_is_writeable(sk))
mask |= POLLOUT | POLLWRNORM;
}
} else
@@ -789,12 +787,28 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
xmit_size_goal = mss_now;
if (large_allowed && sk_can_gso(sk)) {
- xmit_size_goal = ((sk->sk_gso_max_size - 1) -
- inet_csk(sk)->icsk_af_ops->net_header_len -
- inet_csk(sk)->icsk_ext_hdr_len -
- tp->tcp_header_len);
+ u32 gso_size, hlen;
+
+ /* Maybe we should/could use sk->sk_prot->max_header here ? */
+ hlen = inet_csk(sk)->icsk_af_ops->net_header_len +
+ inet_csk(sk)->icsk_ext_hdr_len +
+ tp->tcp_header_len;
+
+ /* Goal is to send at least one packet per ms,
+ * not one big TSO packet every 100 ms.
+ * This preserves ACK clocking and is consistent
+ * with tcp_tso_should_defer() heuristic.
+ */
+ gso_size = sk->sk_pacing_rate / (2 * MSEC_PER_SEC);
+ gso_size = max_t(u32, gso_size,
+ sysctl_tcp_min_tso_segs * mss_now);
+
+ xmit_size_goal = min_t(u32, gso_size,
+ sk->sk_gso_max_size - 1 - hlen);
- /* TSQ : try to have two TSO segments in flight */
+ /* TSQ : try to have at least two segments in flight
+ * (one in NIC TX ring, another in Qdisc)
+ */
xmit_size_goal = min_t(u32, xmit_size_goal,
sysctl_tcp_limit_output_bytes >> 1);
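
With defaults assumed below (tcp_min_tso_segs = 2, mss = 1448, gso_max_size = 65536, 52 bytes of headers), the computation reduces to "half a millisecond's worth of bytes, clamped from both sides". A sketch with hypothetical rates (illustrative userspace C, not from the patch):

    #include <stdio.h>

    static unsigned int size_goal(unsigned int rate)   /* bytes/sec */
    {
        unsigned int mss = 1448, hlen = 52;
        unsigned int gso = rate / (2 * 1000);  /* bytes sent per 0.5 ms */

        if (gso < 2 * mss)
            gso = 2 * mss;             /* sysctl_tcp_min_tso_segs floor */
        if (gso > 65536 - 1 - hlen)
            gso = 65536 - 1 - hlen;    /* sk_gso_max_size ceiling */
        return gso;
    }

    int main(void)
    {
        printf("16 Mbit/s -> %u\n", size_goal(2000000));   /* 2896 */
        printf("1 Gbit/s  -> %u\n", size_goal(125000000)); /* 62500 */
        return 0;
    }
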
@@ -2454,10 +2468,11 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
case TCP_THIN_DUPACK:
if (val < 0 || val > 1)
err = -EINVAL;
- else
+ else {
tp->thin_dupack = val;
if (tp->thin_dupack)
tcp_disable_early_retrans(tp);
+ }
break;
case TCP_REPAIR:
@@ -2638,6 +2653,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
else
tp->tsoffset = val - tcp_time_stamp;
break;
+ case TCP_NOTSENT_LOWAT:
+ tp->notsent_lowat = val;
+ sk->sk_write_space(sk);
+ break;
default:
err = -ENOPROTOOPT;
break;
@@ -2854,6 +2873,9 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
case TCP_TIMESTAMP:
val = tcp_time_stamp + tp->tsoffset;
break;
+ case TCP_NOTSENT_LOWAT:
+ val = tp->notsent_lowat;
+ break;
default:
return -ENOPROTOOPT;
}
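
A minimal userspace sketch of driving the new option (not part of the patch; TCP_NOTSENT_LOWAT is 25 in the uapi headers, defined locally in case the toolchain headers predate it):

    /* TCP_NOTSENT_LOWAT caps how much not-yet-sent data may sit in the
     * write queue before poll() stops reporting POLLOUT, bounding the
     * per-socket kernel memory queued for transmission.
     */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>

    #ifndef TCP_NOTSENT_LOWAT
    #define TCP_NOTSENT_LOWAT 25    /* include/uapi/linux/tcp.h */
    #endif

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        unsigned int lowat = 128 * 1024;    /* at most 128 KB unsent */

        if (setsockopt(fd, IPPROTO_TCP, TCP_NOTSENT_LOWAT,
                       &lowat, sizeof(lowat)) < 0)
            perror("TCP_NOTSENT_LOWAT");
        close(fd);
        return 0;
    }
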
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 8f7ef0ad80e5..ab7bd35bb312 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -58,23 +58,22 @@ error: kfree(ctx);
return err;
}
-/* Computes the fastopen cookie for the peer.
- * The peer address is a 128 bits long (pad with zeros for IPv4).
+/* Computes the fastopen cookie for the IP path.
+ * The path is 128 bits long (padded with zeros for IPv4).
*
* The caller must check foc->len to determine if a valid cookie
* has been generated successfully.
*/
-void tcp_fastopen_cookie_gen(__be32 addr, struct tcp_fastopen_cookie *foc)
+void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
+ struct tcp_fastopen_cookie *foc)
{
- __be32 peer_addr[4] = { addr, 0, 0, 0 };
+ __be32 path[4] = { src, dst, 0, 0 };
struct tcp_fastopen_context *ctx;
rcu_read_lock();
ctx = rcu_dereference(tcp_fastopen_ctx);
if (ctx) {
- crypto_cipher_encrypt_one(ctx->tfm,
- foc->val,
- (__u8 *)peer_addr);
+ crypto_cipher_encrypt_one(ctx->tfm, foc->val, (__u8 *)path);
foc->len = TCP_FASTOPEN_COOKIE_SIZE;
}
rcu_read_unlock();
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3ca2139a130b..1969e16d936d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -688,6 +688,34 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
}
}
+/* Set the sk_pacing_rate to allow proper sizing of TSO packets.
+ * Note: the TCP stack does not implement pacing itself yet.
+ * The FQ packet scheduler can be used to implement cheap but
+ * effective TCP pacing, smoothing the burst on large writes when
+ * the number of packets in flight is significantly lower than
+ * cwnd (or rwin).
+ */
+static void tcp_update_pacing_rate(struct sock *sk)
+{
+ const struct tcp_sock *tp = tcp_sk(sk);
+ u64 rate;
+
+ /* set sk_pacing_rate to 200% of current rate (mss * cwnd / srtt) */
+ rate = (u64)tp->mss_cache * 2 * (HZ << 3);
+
+ rate *= max(tp->snd_cwnd, tp->packets_out);
+
+ /* Correction for small srtt: the minimum srtt is 8 (1 jiffy << 3),
+ * so be conservative and assume srtt = 1 (125 us instead of 1.25 ms).
+ * We probably need usec resolution in the future.
+ * Note: this also takes care of the possible srtt=0 case,
+ * when tcp_rtt_estimator() was not yet called.
+ */
+ if (tp->srtt > 8 + 2)
+ do_div(rate, tp->srtt);
+
+ sk->sk_pacing_rate = min_t(u64, rate, ~0U);
+}
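
A worked instance of the formula, assuming HZ=1000 so srtt is stored as milliseconds left-shifted by 3 (illustrative userspace C, not from the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const unsigned int HZ = 1000;
        uint64_t mss = 1448, cwnd = 10, srtt = 100 << 3;  /* 100 ms */
        uint64_t rate = mss * 2 * (HZ << 3);  /* 200%, in srtt units */

        rate *= cwnd;
        rate /= srtt;                          /* bytes per second */
        printf("pacing rate = %llu B/s\n", (unsigned long long)rate);
        /* 1448 * 10 / 0.1 s = 144800 B/s; doubled -> prints 289600 */
        return 0;
    }
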
+
/* Calculate rto without backoff. This is the second half of Van Jacobson's
* routine referred to above.
*/
@@ -1048,6 +1076,7 @@ struct tcp_sacktag_state {
int reord;
int fack_count;
int flag;
+ s32 rtt; /* RTT measured by SACKing never-retransmitted data */
};
/* Check if skb is fully within the SACK block. In presence of GSO skbs,
@@ -1108,7 +1137,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
static u8 tcp_sacktag_one(struct sock *sk,
struct tcp_sacktag_state *state, u8 sacked,
u32 start_seq, u32 end_seq,
- bool dup_sack, int pcount)
+ int dup_sack, int pcount, u32 xmit_time)
{
struct tcp_sock *tp = tcp_sk(sk);
int fack_count = state->fack_count;
@@ -1148,6 +1177,9 @@ static u8 tcp_sacktag_one(struct sock *sk,
state->reord);
if (!after(end_seq, tp->high_seq))
state->flag |= FLAG_ORIG_SACK_ACKED;
+ /* Pick the earliest sequence sacked for RTT */
+ if (state->rtt < 0)
+ state->rtt = tcp_time_stamp - xmit_time;
}
if (sacked & TCPCB_LOST) {
@@ -1205,7 +1237,8 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
* tcp_highest_sack_seq() when skb is highest_sack.
*/
tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
- start_seq, end_seq, dup_sack, pcount);
+ start_seq, end_seq, dup_sack, pcount,
+ TCP_SKB_CB(skb)->when);
if (skb == tp->lost_skb_hint)
tp->lost_cnt_hint += pcount;
@@ -1479,7 +1512,8 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
TCP_SKB_CB(skb)->seq,
TCP_SKB_CB(skb)->end_seq,
dup_sack,
- tcp_skb_pcount(skb));
+ tcp_skb_pcount(skb),
+ TCP_SKB_CB(skb)->when);
if (!before(TCP_SKB_CB(skb)->seq,
tcp_highest_sack_seq(tp)))
@@ -1536,7 +1570,7 @@ static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_bl
static int
tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
- u32 prior_snd_una)
+ u32 prior_snd_una, s32 *sack_rtt)
{
struct tcp_sock *tp = tcp_sk(sk);
const unsigned char *ptr = (skb_transport_header(ack_skb) +
@@ -1554,6 +1588,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
state.flag = 0;
state.reord = tp->packets_out;
+ state.rtt = -1;
if (!tp->sacked_out) {
if (WARN_ON(tp->fackets_out))
@@ -1737,6 +1772,7 @@ out:
WARN_ON((int)tp->retrans_out < 0);
WARN_ON((int)tcp_packets_in_flight(tp) < 0);
#endif
+ *sack_rtt = state.rtt;
return state.flag;
}
@@ -1869,8 +1905,13 @@ void tcp_enter_loss(struct sock *sk, int how)
}
tcp_verify_left_out(tp);
- tp->reordering = min_t(unsigned int, tp->reordering,
- sysctl_tcp_reordering);
+ /* Timeout in disordered state after receiving substantial DUPACKs
+ * suggests that the degree of reordering is over-estimated.
+ */
+ if (icsk->icsk_ca_state <= TCP_CA_Disorder &&
+ tp->sacked_out >= sysctl_tcp_reordering)
+ tp->reordering = min_t(unsigned int, tp->reordering,
+ sysctl_tcp_reordering);
tcp_set_ca_state(sk, TCP_CA_Loss);
tp->high_seq = tp->snd_nxt;
TCP_ECN_queue_cwr(tp);
@@ -2472,8 +2513,6 @@ static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked)
if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
tcp_try_keep_open(sk);
- if (inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
- tcp_moderate_cwnd(tp);
} else {
tcp_cwnd_reduction(sk, prior_unsacked, 0);
}
@@ -2792,65 +2831,51 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
tcp_xmit_retransmit_queue(sk);
}
-void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt)
+static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
+ s32 seq_rtt, s32 sack_rtt)
{
- tcp_rtt_estimator(sk, seq_rtt);
- tcp_set_rto(sk);
- inet_csk(sk)->icsk_backoff = 0;
-}
-EXPORT_SYMBOL(tcp_valid_rtt_meas);
+ const struct tcp_sock *tp = tcp_sk(sk);
+
+ /* Prefer RTT measured from ACK's timing to TS-ECR. This is because
+ * broken middle-boxes or peers may corrupt TS-ECR fields. But
+ * Karn's algorithm forbids taking RTT if some retransmitted data
+ * is acked (RFC6298).
+ */
+ if (flag & FLAG_RETRANS_DATA_ACKED)
+ seq_rtt = -1;
+
+ if (seq_rtt < 0)
+ seq_rtt = sack_rtt;
-/* Read draft-ietf-tcplw-high-performance before mucking
- * with this code. (Supersedes RFC1323)
- */
-static void tcp_ack_saw_tstamp(struct sock *sk, int flag)
-{
/* RTTM Rule: A TSecr value received in a segment is used to
* update the averaged RTT measurement only if the segment
* acknowledges some new data, i.e., only if it advances the
* left edge of the send window.
- *
* See draft-ietf-tcplw-high-performance-00, section 3.3.
- * 1998/04/10 Andrey V. Savochkin <saw@msu.ru>
- *
- * Changed: reset backoff as soon as we see the first valid sample.
- * If we do not, we get strongly overestimated rto. With timestamps
- * samples are accepted even from very old segments: f.e., when rtt=1
- * increases to 8, we retransmit 5 times and after 8 seconds delayed
- * answer arrives rto becomes 120 seconds! If at least one of segments
- * in window is lost... Voila. --ANK (010210)
*/
- struct tcp_sock *tp = tcp_sk(sk);
-
- tcp_valid_rtt_meas(sk, tcp_time_stamp - tp->rx_opt.rcv_tsecr);
-}
+ if (seq_rtt < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
+ seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
-static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag)
-{
- /* We don't have a timestamp. Can only use
- * packets that are not retransmitted to determine
- * rtt estimates. Also, we must not reset the
- * backoff for rto until we get a non-retransmitted
- * packet. This allows us to deal with a situation
- * where the network delay has increased suddenly.
- * I.e. Karn's algorithm. (SIGCOMM '87, p5.)
- */
+ if (seq_rtt < 0)
+ return false;
- if (flag & FLAG_RETRANS_DATA_ACKED)
- return;
+ tcp_rtt_estimator(sk, seq_rtt);
+ tcp_set_rto(sk);
- tcp_valid_rtt_meas(sk, seq_rtt);
+ /* RFC6298: only reset backoff on valid RTT measurement. */
+ inet_csk(sk)->icsk_backoff = 0;
+ return true;
}
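
The resulting sampling preference — ACK timing unless Karn's rule voids it, then SACK timing, then TS-ECR as the last resort — condenses to a few lines. A sketch (plain C, function and flag names hypothetical):

    #include <stdio.h>

    #define FLAG_RETRANS_DATA_ACKED 0x1

    static int pick_rtt(int flag, int seq_rtt, int sack_rtt,
                        int tsecr_rtt, int have_tsecr)
    {
        if (flag & FLAG_RETRANS_DATA_ACKED)    /* Karn: ambiguous ACK */
            seq_rtt = -1;
        if (seq_rtt < 0)
            seq_rtt = sack_rtt;
        if (seq_rtt < 0 && have_tsecr)
            seq_rtt = tsecr_rtt;
        return seq_rtt;                        /* -1: take no sample */
    }

    int main(void)
    {
        /* Retransmitted data was acked, but a SACK timed fresh data: */
        printf("%d\n", pick_rtt(FLAG_RETRANS_DATA_ACKED,
                                40, 37, 55, 1));  /* prints 37 */
        return 0;
    }
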
-static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
- const s32 seq_rtt)
+/* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS. */
+static void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req)
{
- const struct tcp_sock *tp = tcp_sk(sk);
- /* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */
- if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
- tcp_ack_saw_tstamp(sk, flag);
- else if (seq_rtt >= 0)
- tcp_ack_no_tstamp(sk, seq_rtt, flag);
+ struct tcp_sock *tp = tcp_sk(sk);
+ s32 seq_rtt = -1;
+
+ if (tp->lsndtime && !tp->total_retrans)
+ seq_rtt = tcp_time_stamp - tp->lsndtime;
+ tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt, -1);
}
static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
@@ -2939,7 +2964,7 @@ static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
* arrived at the other end.
*/
static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
- u32 prior_snd_una)
+ u32 prior_snd_una, s32 sack_rtt)
{
struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2978,8 +3003,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
if (sacked & TCPCB_SACKED_RETRANS)
tp->retrans_out -= acked_pcount;
flag |= FLAG_RETRANS_DATA_ACKED;
- ca_seq_rtt = -1;
- seq_rtt = -1;
} else {
ca_seq_rtt = now - scb->when;
last_ackt = skb->tstamp;
@@ -3031,6 +3054,10 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
flag |= FLAG_SACK_RENEGING;
+ if (tcp_ack_update_rtt(sk, flag, seq_rtt, sack_rtt) ||
+ (flag & FLAG_ACKED))
+ tcp_rearm_rto(sk);
+
if (flag & FLAG_ACKED) {
const struct tcp_congestion_ops *ca_ops
= inet_csk(sk)->icsk_ca_ops;
@@ -3040,9 +3067,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
tcp_mtup_probe_success(sk);
}
- tcp_ack_update_rtt(sk, flag, seq_rtt);
- tcp_rearm_rto(sk);
-
if (tcp_is_reno(tp)) {
tcp_remove_reno_sacks(sk, pkts_acked);
} else {
@@ -3130,11 +3154,24 @@ static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag)
inet_csk(sk)->icsk_ca_state != TCP_CA_Open;
}
+/* Decide whether to run the increase function of congestion control. */
static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
{
- const struct tcp_sock *tp = tcp_sk(sk);
- return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) &&
- !tcp_in_cwnd_reduction(sk);
+ if (tcp_in_cwnd_reduction(sk))
+ return false;
+
+ /* If reordering is high then always grow cwnd whenever data is
+ * delivered regardless of its ordering. Otherwise stay conservative
+ * and only grow cwnd on in-order delivery in Open state, and retain
+ * cwnd in Disordered state (RFC5681). A stretched ACK with
+ * new SACK or ECE mark may first advance cwnd here and later reduce
+ * cwnd in tcp_fastretrans_alert() based on more states.
+ */
+ if (tcp_sk(sk)->reordering > sysctl_tcp_reordering)
+ return flag & FLAG_FORWARD_PROGRESS;
+
+ return inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
+ flag & FLAG_DATA_ACKED;
}
/* Check that window update is acceptable.
@@ -3269,11 +3306,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
u32 ack_seq = TCP_SKB_CB(skb)->seq;
u32 ack = TCP_SKB_CB(skb)->ack_seq;
bool is_dupack = false;
- u32 prior_in_flight;
+ u32 prior_in_flight, prior_cwnd = tp->snd_cwnd, prior_rtt = tp->srtt;
u32 prior_fackets;
int prior_packets = tp->packets_out;
const int prior_unsacked = tp->packets_out - tp->sacked_out;
int acked = 0; /* Number of packets newly acked */
+ s32 sack_rtt = -1;
/* If the ack is older than previous acks
* then we can probably ignore it.
@@ -3330,7 +3368,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
if (TCP_SKB_CB(skb)->sacked)
- flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
+ flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
+ &sack_rtt);
if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb)))
flag |= FLAG_ECE;
@@ -3349,21 +3388,18 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
/* See if we can take anything off of the retransmit queue. */
acked = tp->packets_out;
- flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
+ flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, sack_rtt);
acked -= tp->packets_out;
+ /* Advance cwnd if state allows */
+ if (tcp_may_raise_cwnd(sk, flag))
+ tcp_cong_avoid(sk, ack, prior_in_flight);
+
if (tcp_ack_is_dubious(sk, flag)) {
- /* Advance CWND, if state allows this. */
- if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(sk, flag))
- tcp_cong_avoid(sk, ack, prior_in_flight);
is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
tcp_fastretrans_alert(sk, acked, prior_unsacked,
is_dupack, flag);
- } else {
- if (flag & FLAG_DATA_ACKED)
- tcp_cong_avoid(sk, ack, prior_in_flight);
}
-
if (tp->tlp_high_seq)
tcp_process_tlp_ack(sk, ack, flag);
@@ -3375,6 +3411,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
if (icsk->icsk_pending == ICSK_TIME_RETRANS)
tcp_schedule_loss_probe(sk);
+ if (tp->srtt != prior_rtt || tp->snd_cwnd != prior_cwnd)
+ tcp_update_pacing_rate(sk);
return 1;
no_queue:
@@ -3402,7 +3440,8 @@ old_ack:
* If data was DSACKed, see if we can undo a cwnd reduction.
*/
if (TCP_SKB_CB(skb)->sacked) {
- flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
+ flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
+ &sack_rtt);
tcp_fastretrans_alert(sk, acked, prior_unsacked,
is_dupack, flag);
}
@@ -5013,8 +5052,8 @@ discard:
* the rest is checked inline. Fast processing is turned on in
* tcp_data_queue when everything is OK.
*/
-int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
- const struct tcphdr *th, unsigned int len)
+void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
+ const struct tcphdr *th, unsigned int len)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -5091,7 +5130,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
tcp_ack(sk, skb, 0);
__kfree_skb(skb);
tcp_data_snd_check(sk);
- return 0;
+ return;
} else { /* Header too small */
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
goto discard;
@@ -5184,7 +5223,7 @@ no_ack:
if (eaten)
kfree_skb_partial(skb, fragstolen);
sk->sk_data_ready(sk, 0);
- return 0;
+ return;
}
}
@@ -5200,7 +5239,7 @@ slow_path:
*/
if (!tcp_validate_incoming(sk, skb, th, 1))
- return 0;
+ return;
step5:
if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0)
@@ -5216,7 +5255,7 @@ step5:
tcp_data_snd_check(sk);
tcp_ack_snd_check(sk);
- return 0;
+ return;
csum_error:
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
@@ -5224,7 +5263,6 @@ csum_error:
discard:
__kfree_skb(skb);
- return 0;
}
EXPORT_SYMBOL(tcp_rcv_established);
@@ -5627,9 +5665,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
* so release it.
*/
if (req) {
- tcp_synack_rtt_meas(sk, req);
tp->total_retrans = req->num_retrans;
-
reqsk_fastopen_remove(sk, req, false);
} else {
/* Make sure socket is routed, for correct metrics. */
@@ -5654,6 +5690,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale;
tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
+ tcp_synack_rtt_meas(sk, req);
if (tp->rx_opt.tstamp_ok)
tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index b299da5ff499..b14266bb91eb 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -821,8 +821,7 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
*/
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
struct request_sock *req,
- u16 queue_mapping,
- bool nocache)
+ u16 queue_mapping)
{
const struct inet_request_sock *ireq = inet_rsk(req);
struct flowi4 fl4;
@@ -852,7 +851,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
{
- int res = tcp_v4_send_synack(sk, NULL, req, 0, false);
+ int res = tcp_v4_send_synack(sk, NULL, req, 0);
if (!res)
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
@@ -890,7 +889,7 @@ bool tcp_syn_flood_action(struct sock *sk,
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
- if (!lopt->synflood_warned) {
+ if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
lopt->synflood_warned = 1;
pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
proto, ntohs(tcp_hdr(skb)->dest), msg);
@@ -1316,9 +1315,11 @@ static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
return true;
}
+
if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
- tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
+ tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, valid_foc);
if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
memcmp(&foc->val[0], &valid_foc->val[0],
TCP_FASTOPEN_COOKIE_SIZE) != 0)
@@ -1329,14 +1330,16 @@ static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
return true;
} else if (foc->len == 0) { /* Client requesting a cookie */
- tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
+ tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, valid_foc);
NET_INC_STATS_BH(sock_net(sk),
LINUX_MIB_TCPFASTOPENCOOKIEREQD);
} else {
/* Client sent a cookie with wrong size. Treat it
* the same as invalid and return a valid one.
*/
- tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
+ tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, valid_foc);
}
return false;
}
@@ -1462,7 +1465,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
* limitations, they conserve resources and peer is
* evidently real one.
*/
- if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
+ if ((sysctl_tcp_syncookies == 2 ||
+ inet_csk_reqsk_queue_is_full(sk)) && !isn) {
want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
if (!want_cookie)
goto drop;
@@ -1671,8 +1675,6 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
tcp_initialize_rcv_mss(newsk);
- tcp_synack_rtt_meas(newsk, req);
- newtp->total_retrans = req->num_retrans;
#ifdef CONFIG_TCP_MD5SIG
/* Copy over the MD5 key from the original socket */
@@ -1797,10 +1799,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
sk->sk_rx_dst = NULL;
}
}
- if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
- rsk = sk;
- goto reset;
- }
+ tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
return 0;
}
@@ -2605,7 +2604,7 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
long delta = req->expires - jiffies;
seq_printf(f, "%4d: %08X:%04X %08X:%04X"
- " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
+ " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK%n",
i,
ireq->loc_addr,
ntohs(inet_sk(sk)->inet_sport),
@@ -2663,7 +2662,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
- "%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
+ "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d%n",
i, src, srcp, dest, destp, sk->sk_state,
tp->write_seq - tp->snd_una,
rx_queue,
@@ -2802,6 +2801,7 @@ struct proto tcp_prot = {
.unhash = inet_unhash,
.get_port = inet_csk_get_port,
.enter_memory_pressure = tcp_enter_memory_pressure,
+ .stream_memory_free = tcp_stream_memory_free,
.sockets_allocated = &tcp_sockets_allocated,
.orphan_count = &tcp_orphan_count,
.memory_allocated = &tcp_memory_allocated,
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index f6a005c485a9..4a22f3e715df 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -443,7 +443,7 @@ void tcp_init_metrics(struct sock *sk)
struct dst_entry *dst = __sk_dst_get(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_metrics_block *tm;
- u32 val;
+ u32 val, crtt = 0; /* cached RTT scaled by 8 */
if (dst == NULL)
goto reset;
@@ -478,15 +478,19 @@ void tcp_init_metrics(struct sock *sk)
tp->reordering = val;
}
- val = tcp_metric_get(tm, TCP_METRIC_RTT);
- if (val == 0 || tp->srtt == 0) {
- rcu_read_unlock();
- goto reset;
- }
- /* Initial rtt is determined from SYN,SYN-ACK.
- * The segment is small and rtt may appear much
- * less than real one. Use per-dst memory
- * to make it more realistic.
+ crtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
+ rcu_read_unlock();
+reset:
+ /* The initial RTT measurement from the SYN/SYN-ACK is not ideal
+ * to seed the RTO for later data packets because SYN packets are
+ * small. Use the per-dst cached values to seed the RTO but keep
+ * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
+ * Later the RTO will be updated immediately upon obtaining the first
+ * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
+ * influences the first RTO but not later RTT estimation.
+ *
+ * But if RTT is not available from the SYN (due to retransmits or
+ * SYN cookies) or the cache, force a conservative 3-second timeout.
*
* A bit of theory. RTT is time passed after "normal" sized packet
* is sent until it is ACKed. In normal circumstances sending small
@@ -497,21 +501,9 @@ void tcp_init_metrics(struct sock *sk)
* to low value, and then abruptly stops to do it and starts to delay
* ACKs, wait for troubles.
*/
- val = msecs_to_jiffies(val);
- if (val > tp->srtt) {
- tp->srtt = val;
- tp->rtt_seq = tp->snd_nxt;
- }
- val = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
- if (val > tp->mdev) {
- tp->mdev = val;
- tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
- }
- rcu_read_unlock();
-
- tcp_set_rto(sk);
-reset:
- if (tp->srtt == 0) {
+ if (crtt > tp->srtt) {
+ /* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
+ crtt >>= 3;
+ inet_csk(sk)->icsk_rto = crtt + max(crtt >> 2, tcp_rto_min(sk));
+ } else if (tp->srtt == 0) {
/* RFC6298: 5.7 We've failed to get a valid RTT sample from
* 3WHS. This is most likely due to retransmission,
* including spurious ones. Reset the RTO back to 3 secs
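
A worked instance of the cached-RTT seeding above, assuming HZ=1000 and the default tcp_rto_min() of 200 ms (illustrative userspace C, not from the patch):

    #include <stdio.h>

    int main(void)
    {
        unsigned int rto_min = 200;       /* TCP_RTO_MIN = HZ/5 */
        unsigned int crtt = 200 << 3;     /* cached 200 ms, scaled by 8 */

        crtt >>= 3;                       /* back to plain jiffies */
        printf("rto = %u ms\n",
               crtt + (crtt >> 2 > rto_min ? crtt >> 2 : rto_min));
        /* 200 + max(50, 200) -> prints 400 */
        return 0;
    }
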
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index ab1c08658528..58a3e69aef64 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -411,6 +411,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
tcp_enable_early_retrans(newtp);
newtp->tlp_high_seq = 0;
+ newtp->lsndtime = treq->snt_synack;
+ newtp->total_retrans = req->num_retrans;
/* So many TCP implementations out there (incorrectly) count the
* initial SYN frame in their delayed-ACK and congestion control
@@ -666,12 +668,6 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
if (!(flg & TCP_FLAG_ACK))
return NULL;
- /* Got ACK for our SYNACK, so update baseline for SYNACK RTT sample. */
- if (tmp_opt.saw_tstamp && tmp_opt.rcv_tsecr)
- tcp_rsk(req)->snt_synack = tmp_opt.rcv_tsecr;
- else if (req->num_retrans) /* don't take RTT sample if retrans && ~TS */
- tcp_rsk(req)->snt_synack = 0;
-
/* For Fast Open no more processing is needed (sk is the
* child socket).
*/
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 170737a9d56d..7c83cb8bf137 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -65,6 +65,9 @@ int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS;
/* By default, RFC2861 behavior. */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
+unsigned int sysctl_tcp_notsent_lowat __read_mostly = UINT_MAX;
+EXPORT_SYMBOL(sysctl_tcp_notsent_lowat);
+
static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
int push_one, gfp_t gfp);
@@ -1628,7 +1631,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
/* If a full-sized TSO skb can be sent, do it. */
if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
- sk->sk_gso_max_segs * tp->mss_cache))
+ tp->xmit_size_goal_segs * tp->mss_cache))
goto send_now;
/* Middle in queue won't get any more data, full sendable already? */
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index d4943f67aff2..611beab38a00 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -46,6 +46,10 @@ static unsigned int bufsize __read_mostly = 4096;
MODULE_PARM_DESC(bufsize, "Log buffer size in packets (4096)");
module_param(bufsize, uint, 0);
+static unsigned int fwmark __read_mostly = 0;
+MODULE_PARM_DESC(fwmark, "skb mark to match (0=no mark)");
+module_param(fwmark, uint, 0);
+
static int full __read_mostly;
MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)");
module_param(full, int, 0);
@@ -54,12 +58,16 @@ static const char procname[] = "tcpprobe";
struct tcp_log {
ktime_t tstamp;
- __be32 saddr, daddr;
- __be16 sport, dport;
+ union {
+ struct sockaddr raw;
+ struct sockaddr_in v4;
+ struct sockaddr_in6 v6;
+ } src, dst;
u16 length;
u32 snd_nxt;
u32 snd_una;
u32 snd_wnd;
+ u32 rcv_wnd;
u32 snd_cwnd;
u32 ssthresh;
u32 srtt;
@@ -86,19 +94,45 @@ static inline int tcp_probe_avail(void)
return bufsize - tcp_probe_used() - 1;
}
+#define tcp_probe_copy_fl_to_si4(inet, si4, mem) \
+ do { \
+ si4.sin_family = AF_INET; \
+ si4.sin_port = inet->inet_##mem##port; \
+ si4.sin_addr.s_addr = inet->inet_##mem##addr; \
+ } while (0) \
+
+#if IS_ENABLED(CONFIG_IPV6)
+#define tcp_probe_copy_fl_to_si6(inet, si6, mem) \
+ do { \
+ struct ipv6_pinfo *pi6 = inet->pinet6; \
+ si6.sin6_family = AF_INET6; \
+ si6.sin6_port = inet->inet_##mem##port; \
+ si6.sin6_addr = pi6->mem##addr; \
+ si6.sin6_flowinfo = 0; /* No need here. */ \
+ si6.sin6_scope_id = 0; /* No need here. */ \
+ } while (0)
+#else
+#define tcp_probe_copy_fl_to_si6(fl, si6, mem) \
+ do { \
+ memset(&si6, 0, sizeof(si6)); \
+ } while (0)
+#endif
+
/*
* Hook inserted to be called before each receive packet.
* Note: arguments must match tcp_rcv_established()!
*/
-static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
- struct tcphdr *th, unsigned int len)
+static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
+ const struct tcphdr *th, unsigned int len)
{
const struct tcp_sock *tp = tcp_sk(sk);
const struct inet_sock *inet = inet_sk(sk);
- /* Only update if port matches */
- if ((port == 0 || ntohs(inet->inet_dport) == port ||
- ntohs(inet->inet_sport) == port) &&
+ /* Only update if port or skb mark matches */
+ if (((port == 0 && fwmark == 0) ||
+ ntohs(inet->inet_dport) == port ||
+ ntohs(inet->inet_sport) == port ||
+ (fwmark > 0 && skb->mark == fwmark)) &&
(full || tp->snd_cwnd != tcp_probe.lastcwnd)) {
spin_lock(&tcp_probe.lock);
@@ -107,15 +141,25 @@ static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
struct tcp_log *p = tcp_probe.log + tcp_probe.head;
p->tstamp = ktime_get();
- p->saddr = inet->inet_saddr;
- p->sport = inet->inet_sport;
- p->daddr = inet->inet_daddr;
- p->dport = inet->inet_dport;
+ switch (sk->sk_family) {
+ case AF_INET:
+ tcp_probe_copy_fl_to_si4(inet, p->src.v4, s);
+ tcp_probe_copy_fl_to_si4(inet, p->dst.v4, d);
+ break;
+ case AF_INET6:
+ tcp_probe_copy_fl_to_si6(inet, p->src.v6, s);
+ tcp_probe_copy_fl_to_si6(inet, p->dst.v6, d);
+ break;
+ default:
+ BUG();
+ }
+
p->length = skb->len;
p->snd_nxt = tp->snd_nxt;
p->snd_una = tp->snd_una;
p->snd_cwnd = tp->snd_cwnd;
p->snd_wnd = tp->snd_wnd;
+ p->rcv_wnd = tp->rcv_wnd;
p->ssthresh = tcp_current_ssthresh(sk);
p->srtt = tp->srtt >> 3;
@@ -128,7 +172,6 @@ static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
}
jprobe_return();
- return 0;
}
static struct jprobe tcp_jprobe = {
@@ -157,13 +200,11 @@ static int tcpprobe_sprint(char *tbuf, int n)
= ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start));
return scnprintf(tbuf, n,
- "%lu.%09lu %pI4:%u %pI4:%u %d %#x %#x %u %u %u %u\n",
+ "%lu.%09lu %pISpc %pISpc %d %#x %#x %u %u %u %u %u\n",
(unsigned long) tv.tv_sec,
(unsigned long) tv.tv_nsec,
- &p->saddr, ntohs(p->sport),
- &p->daddr, ntohs(p->dport),
- p->length, p->snd_nxt, p->snd_una,
- p->snd_cwnd, p->ssthresh, p->snd_wnd, p->srtt);
+ &p->src, &p->dst, p->length, p->snd_nxt, p->snd_una,
+ p->snd_cwnd, p->ssthresh, p->snd_wnd, p->srtt, p->rcv_wnd);
}
static ssize_t tcpprobe_read(struct file *file, char __user *buf,
@@ -176,7 +217,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
return -EINVAL;
while (cnt < len) {
- char tbuf[164];
+ char tbuf[256];
int width;
/* Wait for data in buffer */
@@ -223,6 +264,13 @@ static __init int tcpprobe_init(void)
{
int ret = -ENOMEM;
+ /* Warning: if the function signature of tcp_rcv_established
+ * has been changed, you also have to change the signature of
+ * jtcp_rcv_established, otherwise you end up right here!
+ */
+ BUILD_BUG_ON(__same_type(tcp_rcv_established,
+ jtcp_rcv_established) == 0);
+
init_waitqueue_head(&tcp_probe.wait);
spin_lock_init(&tcp_probe.lock);
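
The BUILD_BUG_ON(__same_type(...)) guard fails the build, rather than the boot, when the probed signature drifts. A compilable GCC/clang sketch of the same trick, with stand-in macro names:

    /* BUILD_BUG_ON() turns a true condition into a negative array
     * size, and __same_type() is __builtin_types_compatible_p() on
     * typeof().
     */
    #define my_same_type(a, b) \
        __builtin_types_compatible_p(typeof(a), typeof(b))
    #define my_build_bug_on(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

    static void real_fn(int a, long b) { (void)a; (void)b; }
    static void probe_ok(int a, long b) { (void)a; (void)b; }
    static void probe_bad(int a, int b) { (void)a; (void)b; }

    int main(void)
    {
        my_build_bug_on(my_same_type(real_fn, probe_ok) == 0);  /* fine */
        /* my_build_bug_on(my_same_type(real_fn, probe_bad) == 0);
         *     -> error: negative array size, caught at build time */
        return 0;
    }
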
@@ -241,7 +289,8 @@ static __init int tcpprobe_init(void)
if (ret)
goto err1;
- pr_info("probe registered (port=%d) bufsize=%u\n", port, bufsize);
+ pr_info("probe registered (port=%d/fwmark=%u) bufsize=%u\n",
+ port, fwmark, bufsize);
return 0;
err1:
remove_proc_entry(procname, init_net.proc_net);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 766e6bab9113..74d2c95db57f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -704,7 +704,7 @@ EXPORT_SYMBOL(udp_flush_pending_frames);
* @src: source IP address
* @dst: destination IP address
*/
-static void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
+void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{
struct udphdr *uh = udp_hdr(skb);
struct sk_buff *frags = skb_shinfo(skb)->frag_list;
@@ -740,6 +740,7 @@ static void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
uh->check = CSUM_MANGLED_0;
}
}
+EXPORT_SYMBOL_GPL(udp4_hwcsum);
static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
{
@@ -2158,7 +2159,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
__u16 srcp = ntohs(inet->inet_sport);
seq_printf(f, "%5d: %08X:%04X %08X:%04X"
- " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d%n",
+ " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d%n",
bucket, src, srcp, dest, destp, sp->sk_state,
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
@@ -2336,7 +2337,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
uh->len = htons(skb->len - udp_offset);
/* csum segment if tunnel sets skb with csum. */
- if (unlikely(uh->check)) {
+ if (protocol == htons(ETH_P_IP) && unlikely(uh->check)) {
struct iphdr *iph = ip_hdr(skb);
uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
@@ -2347,7 +2348,18 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
+ } else if (protocol == htons(ETH_P_IPV6)) {
+ struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ u32 len = skb->len - udp_offset;
+
+ uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
+ len, IPPROTO_UDP, 0);
+ uh->check = csum_fold(skb_checksum(skb, udp_offset, len, 0));
+ if (uh->check == 0)
+ uh->check = CSUM_MANGLED_0;
+ skb->ip_summed = CHECKSUM_NONE;
}
+
skb->protocol = protocol;
} while ((skb = skb->next));
out:
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 498ea99194af..d6ff12617f36 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -99,9 +99,9 @@
#define ACONF_DEBUG 2
#if ACONF_DEBUG >= 3
-#define ADBG(x) printk x
+#define ADBG(fmt, ...) printk(fmt, ##__VA_ARGS__)
#else
-#define ADBG(x)
+#define ADBG(fmt, ...) do { if (0) printk(fmt, ##__VA_ARGS__); } while (0)
#endif
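
The conversion drops the old double-parenthesized call style while keeping the disabled variant type-checked. A small before/after sketch (GNU C, since ##__VA_ARGS__ is a GCC extension; macro names hypothetical):

    #include <stdio.h>

    #define DEBUG_OLD_ON(x)     printf x
    #define DEBUG_OLD_OFF(x)    /* args discarded, never even parsed */

    #define DEBUG_NEW(fmt, ...) \
        do { if (0) printf(fmt, ##__VA_ARGS__); } while (0)

    int main(void)
    {
        DEBUG_OLD_ON(("dev=%s\n", "eth0"));  /* note the extra parens */
        DEBUG_NEW("dev=%s\n", "eth0");       /* compiled, never run */
        return 0;
    }
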
#define INFINITY_LIFE_TIME 0xFFFFFFFF
@@ -177,6 +177,8 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.accept_redirects = 1,
.autoconf = 1,
.force_mld_version = 0,
+ .mldv1_unsolicited_report_interval = 10 * HZ,
+ .mldv2_unsolicited_report_interval = HZ,
.dad_transmits = 1,
.rtr_solicits = MAX_RTR_SOLICITATIONS,
.rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
@@ -202,6 +204,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.accept_source_route = 0, /* we do not accept RH0 by default. */
.disable_ipv6 = 0,
.accept_dad = 1,
+ .suppress_frag_ndisc = 1,
};
static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -211,6 +214,9 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
.accept_ra = 1,
.accept_redirects = 1,
.autoconf = 1,
+ .force_mld_version = 0,
+ .mldv1_unsolicited_report_interval = 10 * HZ,
+ .mldv2_unsolicited_report_interval = HZ,
.dad_transmits = 1,
.rtr_solicits = MAX_RTR_SOLICITATIONS,
.rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
@@ -236,17 +242,9 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
.accept_source_route = 0, /* we do not accept RH0 by default. */
.disable_ipv6 = 0,
.accept_dad = 1,
+ .suppress_frag_ndisc = 1,
};
-/* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */
-const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT;
-const struct in6_addr in6addr_loopback = IN6ADDR_LOOPBACK_INIT;
-const struct in6_addr in6addr_linklocal_allnodes = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
-const struct in6_addr in6addr_linklocal_allrouters = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT;
-const struct in6_addr in6addr_interfacelocal_allnodes = IN6ADDR_INTERFACELOCAL_ALLNODES_INIT;
-const struct in6_addr in6addr_interfacelocal_allrouters = IN6ADDR_INTERFACELOCAL_ALLROUTERS_INIT;
-const struct in6_addr in6addr_sitelocal_allrouters = IN6ADDR_SITELOCAL_ALLROUTERS_INIT;
-
/* Check if a valid qdisc is available */
static inline bool addrconf_qdisc_ok(const struct net_device *dev)
{
@@ -306,36 +304,6 @@ err_ip:
return -ENOMEM;
}
-static void snmp6_free_dev(struct inet6_dev *idev)
-{
- kfree(idev->stats.icmpv6msgdev);
- kfree(idev->stats.icmpv6dev);
- snmp_mib_free((void __percpu **)idev->stats.ipv6);
-}
-
-/* Nobody refers to this device, we may destroy it. */
-
-void in6_dev_finish_destroy(struct inet6_dev *idev)
-{
- struct net_device *dev = idev->dev;
-
- WARN_ON(!list_empty(&idev->addr_list));
- WARN_ON(idev->mc_list != NULL);
- WARN_ON(timer_pending(&idev->rs_timer));
-
-#ifdef NET_REFCNT_DEBUG
- pr_debug("%s: %s\n", __func__, dev ? dev->name : "NIL");
-#endif
- dev_put(dev);
- if (!idev->dead) {
- pr_warn("Freeing alive inet6 device %p\n", idev);
- return;
- }
- snmp6_free_dev(idev);
- kfree_rcu(idev, rcu);
-}
-EXPORT_SYMBOL(in6_dev_finish_destroy);
-
static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
{
struct inet6_dev *ndev;
@@ -369,9 +337,9 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
dev_hold(dev);
if (snmp6_alloc_dev(ndev) < 0) {
- ADBG((KERN_WARNING
+ ADBG(KERN_WARNING
"%s: cannot allocate memory for statistics; dev=%s.\n",
- __func__, dev->name));
+ __func__, dev->name);
neigh_parms_release(&nd_tbl, ndev->nd_parms);
dev_put(dev);
kfree(ndev);
@@ -379,9 +347,9 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
}
if (snmp6_register_dev(ndev) < 0) {
- ADBG((KERN_WARNING
+ ADBG(KERN_WARNING
"%s: cannot create /proc/net/dev_snmp6/%s\n",
- __func__, dev->name));
+ __func__, dev->name);
neigh_parms_release(&nd_tbl, ndev->nd_parms);
ndev->dead = 1;
in6_dev_finish_destroy(ndev);
@@ -844,7 +812,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
/* Ignore adding duplicate addresses on an interface */
if (ipv6_chk_same_addr(dev_net(idev->dev), addr, idev->dev)) {
- ADBG(("ipv6_add_addr: already assigned\n"));
+ ADBG("ipv6_add_addr: already assigned\n");
err = -EEXIST;
goto out;
}
@@ -852,7 +820,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
ifa = kzalloc(sizeof(struct inet6_ifaddr), GFP_ATOMIC);
if (ifa == NULL) {
- ADBG(("ipv6_add_addr: malloc failed\n"));
+ ADBG("ipv6_add_addr: malloc failed\n");
err = -ENOBUFS;
goto out;
}
@@ -1054,7 +1022,6 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i
unsigned long regen_advance;
int tmp_plen;
int ret = 0;
- int max_addresses;
u32 addr_flags;
unsigned long now = jiffies;
@@ -1100,7 +1067,6 @@ retry:
idev->cnf.temp_prefered_lft + age -
idev->cnf.max_desync_factor);
tmp_plen = ifp->prefix_len;
- max_addresses = idev->cnf.max_addresses;
tmp_tstamp = ifp->tstamp;
spin_unlock_bh(&ifp->lock);
@@ -1807,6 +1773,16 @@ static int addrconf_ifid_gre(u8 *eui, struct net_device *dev)
return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
}
+static int addrconf_ifid_ip6tnl(u8 *eui, struct net_device *dev)
+{
+ memcpy(eui, dev->perm_addr, 3);
+ memcpy(eui + 5, dev->perm_addr + 3, 3);
+ eui[3] = 0xFF;
+ eui[4] = 0xFE;
+ eui[0] ^= 2;
+ return 0;
+}
+
static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
{
switch (dev->type) {
@@ -1825,6 +1801,8 @@ static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
return addrconf_ifid_eui64(eui, dev);
case ARPHRD_IEEE1394:
return addrconf_ifid_ieee1394(eui, dev);
+ case ARPHRD_TUNNEL6:
+ return addrconf_ifid_ip6tnl(eui, dev);
}
return -1;
}
@@ -2050,7 +2028,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
pinfo = (struct prefix_info *) opt;
if (len < sizeof(struct prefix_info)) {
- ADBG(("addrconf: prefix option too short\n"));
+ ADBG("addrconf: prefix option too short\n");
return;
}
@@ -2702,7 +2680,8 @@ static void addrconf_dev_config(struct net_device *dev)
(dev->type != ARPHRD_ARCNET) &&
(dev->type != ARPHRD_INFINIBAND) &&
(dev->type != ARPHRD_IEEE802154) &&
- (dev->type != ARPHRD_IEEE1394)) {
+ (dev->type != ARPHRD_IEEE1394) &&
+ (dev->type != ARPHRD_TUNNEL6)) {
/* Alas, we support only Ethernet autoconfiguration. */
return;
}
@@ -2788,44 +2767,6 @@ ipv6_inherit_linklocal(struct inet6_dev *idev, struct net_device *link_dev)
return -1;
}
-static void ip6_tnl_add_linklocal(struct inet6_dev *idev)
-{
- struct net_device *link_dev;
- struct net *net = dev_net(idev->dev);
-
- /* first try to inherit the link-local address from the link device */
- if (idev->dev->iflink &&
- (link_dev = __dev_get_by_index(net, idev->dev->iflink))) {
- if (!ipv6_inherit_linklocal(idev, link_dev))
- return;
- }
- /* then try to inherit it from any device */
- for_each_netdev(net, link_dev) {
- if (!ipv6_inherit_linklocal(idev, link_dev))
- return;
- }
- pr_debug("init ip6-ip6: add_linklocal failed\n");
-}
-
-/*
- * Autoconfigure tunnel with a link-local address so routing protocols,
- * DHCPv6, MLD etc. can be run over the virtual link
- */
-
-static void addrconf_ip6_tnl_config(struct net_device *dev)
-{
- struct inet6_dev *idev;
-
- ASSERT_RTNL();
-
- idev = addrconf_add_dev(dev);
- if (IS_ERR(idev)) {
- pr_debug("init ip6-ip6: add_dev failed\n");
- return;
- }
- ip6_tnl_add_linklocal(idev);
-}
-
static int addrconf_notify(struct notifier_block *this, unsigned long event,
void *ptr)
{
@@ -2893,9 +2834,6 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
addrconf_gre_config(dev);
break;
#endif
- case ARPHRD_TUNNEL6:
- addrconf_ip6_tnl_config(dev);
- break;
case ARPHRD_LOOPBACK:
init_loopback(dev);
break;
@@ -3120,6 +3058,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
static void addrconf_rs_timer(unsigned long data)
{
struct inet6_dev *idev = (struct inet6_dev *)data;
+ struct net_device *dev = idev->dev;
struct in6_addr lladdr;
write_lock(&idev->lock);
@@ -3134,12 +3073,14 @@ static void addrconf_rs_timer(unsigned long data)
goto out;
if (idev->rs_probes++ < idev->cnf.rtr_solicits) {
- if (!__ipv6_get_lladdr(idev, &lladdr, IFA_F_TENTATIVE))
- ndisc_send_rs(idev->dev, &lladdr,
+ write_unlock(&idev->lock);
+ if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
+ ndisc_send_rs(dev, &lladdr,
&in6addr_linklocal_allrouters);
else
- goto out;
+ goto put;
+ write_lock(&idev->lock);
/* The wait after the last probe can be shorter */
addrconf_mod_rs_timer(idev, (idev->rs_probes ==
idev->cnf.rtr_solicits) ?
@@ -3155,6 +3096,7 @@ static void addrconf_rs_timer(unsigned long data)
out:
write_unlock(&idev->lock);
+put:
in6_dev_put(idev);
}
@@ -3630,8 +3572,8 @@ restart:
if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX))
next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX;
- ADBG((KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
- now, next, next_sec, next_sched));
+ ADBG(KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
+ now, next, next_sec, next_sched);
addr_chk_timer.expires = next_sched;
add_timer(&addr_chk_timer);
@@ -4177,6 +4119,10 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
array[DEVCONF_RTR_SOLICIT_DELAY] =
jiffies_to_msecs(cnf->rtr_solicit_delay);
array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
+ array[DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL] =
+ jiffies_to_msecs(cnf->mldv1_unsolicited_report_interval);
+ array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] =
+ jiffies_to_msecs(cnf->mldv2_unsolicited_report_interval);
#ifdef CONFIG_IPV6_PRIVACY
array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr;
array[DEVCONF_TEMP_VALID_LFT] = cnf->temp_valid_lft;
@@ -4207,6 +4153,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
+ array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
}
static inline size_t inet6_ifla6_size(void)
@@ -4652,6 +4599,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
break;
}
atomic_inc(&net->ipv6.dev_addr_genid);
+ rt_genid_bump_ipv6(net);
}
static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
@@ -4859,6 +4807,22 @@ static struct addrconf_sysctl_table
.mode = 0644,
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "mldv1_unsolicited_report_interval",
+ .data =
+ &ipv6_devconf.mldv1_unsolicited_report_interval,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_ms_jiffies,
+ },
+ {
+ .procname = "mldv2_unsolicited_report_interval",
+ .data =
+ &ipv6_devconf.mldv2_unsolicited_report_interval,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_ms_jiffies,
+ },
#ifdef CONFIG_IPV6_PRIVACY
{
.procname = "use_tempaddr",
@@ -5004,6 +4968,13 @@ static struct addrconf_sysctl_table
.proc_handler = proc_dointvec
},
{
+ .procname = "suppress_frag_ndisc",
+ .data = &ipv6_devconf.suppress_frag_ndisc,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
/* sentinel */
}
},
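addrconf_ifid_ip6tnl(), added further up in this file, derives a modified EUI-64 interface identifier from the tunnel's random perm_addr: the 0xFF,0xFE filler is spliced into the middle of the six address bytes and the universal/local bit of the first octet is flipped. A standalone re-creation of the same steps with an invented address:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* mirrors addrconf_ifid_ip6tnl() */
    static void ifid_from_addr(uint8_t eui[8], const uint8_t addr[6])
    {
        memcpy(eui, addr, 3);
        memcpy(eui + 5, addr + 3, 3);
        eui[3] = 0xFF;
        eui[4] = 0xFE;
        eui[0] ^= 2;            /* toggle the universal/local bit */
    }

    int main(void)
    {
        const uint8_t addr[6] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
        uint8_t eui[8];
        int i;

        ifid_from_addr(eui, addr);
        /* prints a8bb:ccff:fedd:eeff, i.e. fe80::a8bb:ccff:fedd:eeff */
        for (i = 0; i < 8; i += 2)
            printf("%02x%02x%s", eui[i], eui[i + 1], i < 6 ? ":" : "\n");
        return 0;
    }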
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index d2f87427244b..4c11cbcf8308 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -6,6 +6,7 @@
#include <linux/export.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
+#include <net/ip.h>
#define IPV6_ADDR_SCOPE_TYPE(scope) ((scope) << 16)
@@ -98,3 +99,52 @@ int inet6addr_notifier_call_chain(unsigned long val, void *v)
return atomic_notifier_call_chain(&inet6addr_chain, val, v);
}
EXPORT_SYMBOL(inet6addr_notifier_call_chain);
+
+const struct ipv6_stub *ipv6_stub __read_mostly;
+EXPORT_SYMBOL_GPL(ipv6_stub);
+
+/* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */
+const struct in6_addr in6addr_loopback = IN6ADDR_LOOPBACK_INIT;
+EXPORT_SYMBOL(in6addr_loopback);
+const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT;
+EXPORT_SYMBOL(in6addr_any);
+const struct in6_addr in6addr_linklocal_allnodes = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
+EXPORT_SYMBOL(in6addr_linklocal_allnodes);
+const struct in6_addr in6addr_linklocal_allrouters = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT;
+EXPORT_SYMBOL(in6addr_linklocal_allrouters);
+const struct in6_addr in6addr_interfacelocal_allnodes = IN6ADDR_INTERFACELOCAL_ALLNODES_INIT;
+EXPORT_SYMBOL(in6addr_interfacelocal_allnodes);
+const struct in6_addr in6addr_interfacelocal_allrouters = IN6ADDR_INTERFACELOCAL_ALLROUTERS_INIT;
+EXPORT_SYMBOL(in6addr_interfacelocal_allrouters);
+const struct in6_addr in6addr_sitelocal_allrouters = IN6ADDR_SITELOCAL_ALLROUTERS_INIT;
+EXPORT_SYMBOL(in6addr_sitelocal_allrouters);
+
+static void snmp6_free_dev(struct inet6_dev *idev)
+{
+ kfree(idev->stats.icmpv6msgdev);
+ kfree(idev->stats.icmpv6dev);
+ snmp_mib_free((void __percpu **)idev->stats.ipv6);
+}
+
+/* Nobody refers to this device, we may destroy it. */
+
+void in6_dev_finish_destroy(struct inet6_dev *idev)
+{
+ struct net_device *dev = idev->dev;
+
+ WARN_ON(!list_empty(&idev->addr_list));
+ WARN_ON(idev->mc_list != NULL);
+ WARN_ON(timer_pending(&idev->rs_timer));
+
+#ifdef NET_REFCNT_DEBUG
+ pr_debug("%s: %s\n", __func__, dev ? dev->name : "NIL");
+#endif
+ dev_put(dev);
+ if (!idev->dead) {
+ pr_warn("Freeing alive inet6 device %p\n", idev);
+ return;
+ }
+ snmp6_free_dev(idev);
+ kfree_rcu(idev, rcu);
+}
+EXPORT_SYMBOL(in6_dev_finish_destroy);
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index f083a583a05c..b30ad3741b46 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -251,38 +251,36 @@ static struct ip6addrlbl_entry *ip6addrlbl_alloc(struct net *net,
/* add a label */
static int __ip6addrlbl_add(struct ip6addrlbl_entry *newp, int replace)
{
+ struct hlist_node *n;
+ struct ip6addrlbl_entry *last = NULL, *p = NULL;
int ret = 0;
- ADDRLABEL(KERN_DEBUG "%s(newp=%p, replace=%d)\n",
- __func__,
- newp, replace);
+ ADDRLABEL(KERN_DEBUG "%s(newp=%p, replace=%d)\n", __func__, newp,
+ replace);
- if (hlist_empty(&ip6addrlbl_table.head)) {
- hlist_add_head_rcu(&newp->list, &ip6addrlbl_table.head);
- } else {
- struct hlist_node *n;
- struct ip6addrlbl_entry *p = NULL;
- hlist_for_each_entry_safe(p, n,
- &ip6addrlbl_table.head, list) {
- if (p->prefixlen == newp->prefixlen &&
- net_eq(ip6addrlbl_net(p), ip6addrlbl_net(newp)) &&
- p->ifindex == newp->ifindex &&
- ipv6_addr_equal(&p->prefix, &newp->prefix)) {
- if (!replace) {
- ret = -EEXIST;
- goto out;
- }
- hlist_replace_rcu(&p->list, &newp->list);
- ip6addrlbl_put(p);
- goto out;
- } else if ((p->prefixlen == newp->prefixlen && !p->ifindex) ||
- (p->prefixlen < newp->prefixlen)) {
- hlist_add_before_rcu(&newp->list, &p->list);
+ hlist_for_each_entry_safe(p, n, &ip6addrlbl_table.head, list) {
+ if (p->prefixlen == newp->prefixlen &&
+ net_eq(ip6addrlbl_net(p), ip6addrlbl_net(newp)) &&
+ p->ifindex == newp->ifindex &&
+ ipv6_addr_equal(&p->prefix, &newp->prefix)) {
+ if (!replace) {
+ ret = -EEXIST;
goto out;
}
+ hlist_replace_rcu(&p->list, &newp->list);
+ ip6addrlbl_put(p);
+ goto out;
+ } else if ((p->prefixlen == newp->prefixlen && !p->ifindex) ||
+ (p->prefixlen < newp->prefixlen)) {
+ hlist_add_before_rcu(&newp->list, &p->list);
+ goto out;
}
- hlist_add_after_rcu(&p->list, &newp->list);
+ last = p;
}
+ if (last)
+ hlist_add_after_rcu(&last->list, &newp->list);
+ else
+ hlist_add_head_rcu(&newp->list, &ip6addrlbl_table.head);
out:
if (!ret)
ip6addrlbl_table.seq++;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index a5ac969aeefe..136fe55c1a47 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -56,6 +56,7 @@
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
+#include <net/ndisc.h>
#ifdef CONFIG_IPV6_TUNNEL
#include <net/ip6_tunnel.h>
#endif
@@ -766,6 +767,7 @@ static int __net_init inet6_net_init(struct net *net)
net->ipv6.sysctl.bindv6only = 0;
net->ipv6.sysctl.icmpv6_time = 1*HZ;
+ atomic_set(&net->ipv6.rt_genid, 0);
err = ipv6_init_mibs(net);
if (err)
@@ -809,6 +811,15 @@ static struct pernet_operations inet6_net_ops = {
.exit = inet6_net_exit,
};
+static const struct ipv6_stub ipv6_stub_impl = {
+ .ipv6_sock_mc_join = ipv6_sock_mc_join,
+ .ipv6_sock_mc_drop = ipv6_sock_mc_drop,
+ .ipv6_dst_lookup = ip6_dst_lookup,
+ .udpv6_encap_enable = udpv6_encap_enable,
+ .ndisc_send_na = ndisc_send_na,
+ .nd_tbl = &nd_tbl,
+};
+
static int __init inet6_init(void)
{
struct list_head *r;
@@ -883,6 +894,9 @@ static int __init inet6_init(void)
err = igmp6_init();
if (err)
goto igmp_fail;
+
+ ipv6_stub = &ipv6_stub_impl;
+
err = ipv6_netfilter_init();
if (err)
goto netfilter_fail;
@@ -1039,6 +1053,7 @@ static void __exit inet6_exit(void)
raw6_proc_exit();
#endif
ipv6_netfilter_fini();
+ ipv6_stub = NULL;
igmp6_cleanup();
ndisc_cleanup();
ip6_mr_cleanup();
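The new ipv6_stub gives always-built callers (vxlan being the motivating one in this merge) a way to reach ipv6 services without a hard symbol dependency: the pointer lives in the built-in addrconf_core.o, and af_inet6 aims it at ipv6_stub_impl on init and clears it on exit, as the hunks above show. A hedged sketch of the consumer side; the helper and its error policy are invented for illustration, and the ->ipv6_dst_lookup signature mirrors ip6_dst_lookup() in this tree:

    #if IS_ENABLED(CONFIG_IPV6)
    /* e.g. in a driver that must also work while ipv6.ko is not loaded */
    static int tunnel_route_v6(struct sock *sk, struct flowi6 *fl6,
                               struct dst_entry **dst)
    {
        if (!ipv6_stub)         /* ipv6 module absent */
            return -EAFNOSUPPORT;
        return ipv6_stub->ipv6_dst_lookup(sk, dst, fl6);
    }
    #endif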
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index bb02e176cb70..73784c3d4642 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -628,7 +628,7 @@ static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
return;
if (type == NDISC_REDIRECT)
- ip6_redirect(skb, net, 0, 0);
+ ip6_redirect(skb, net, skb->dev->ifindex, 0);
else
ip6_update_pmtu(skb, net, info, 0, 0);
xfrm_state_put(x);
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 197e6f4a2b74..48b6bd2a9a14 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -890,7 +890,7 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
src = &np->rcv_saddr;
seq_printf(seq,
"%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
- "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n",
+ "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d\n",
bucket,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3], srcp,
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index aeac0dc3635d..d3618a78fcac 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -447,7 +447,7 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
return;
if (type == NDISC_REDIRECT)
- ip6_redirect(skb, net, 0, 0);
+ ip6_redirect(skb, net, skb->dev->ifindex, 0);
else
ip6_update_pmtu(skb, net, info, 0, 0);
xfrm_state_put(x);
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 2e1a432867c0..a6c58ce43d34 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -55,26 +55,33 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
struct fib6_table *table;
struct net *net = rule->fr_net;
pol_lookup_t lookup = arg->lookup_ptr;
+ int err = 0;
switch (rule->action) {
case FR_ACT_TO_TBL:
break;
case FR_ACT_UNREACHABLE:
+ err = -ENETUNREACH;
rt = net->ipv6.ip6_null_entry;
goto discard_pkt;
default:
case FR_ACT_BLACKHOLE:
+ err = -EINVAL;
rt = net->ipv6.ip6_blk_hole_entry;
goto discard_pkt;
case FR_ACT_PROHIBIT:
+ err = -EACCES;
rt = net->ipv6.ip6_prohibit_entry;
goto discard_pkt;
}
table = fib6_get_table(net, rule->table);
- if (table)
- rt = lookup(net, table, flp6, flags);
+ if (!table) {
+ err = -EAGAIN;
+ goto out;
+ }
+ rt = lookup(net, table, flp6, flags);
if (rt != net->ipv6.ip6_null_entry) {
struct fib6_rule *r = (struct fib6_rule *)rule;
@@ -101,6 +108,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
}
again:
ip6_rt_put(rt);
+ err = -EAGAIN;
rt = NULL;
goto out;
@@ -108,9 +116,31 @@ discard_pkt:
dst_hold(&rt->dst);
out:
arg->result = rt;
- return rt == NULL ? -EAGAIN : 0;
+ return err;
}
+static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg)
+{
+ struct rt6_info *rt = (struct rt6_info *) arg->result;
+ struct net_device *dev = rt->rt6i_idev->dev;
+ /* do not accept result if the route does
+ * not meet the required prefix length
+ */
+ if (rt->rt6i_dst.plen <= rule->suppress_prefixlen)
+ goto suppress_route;
+
+ /* do not accept result if the route uses a device
+ * belonging to a forbidden interface group
+ */
+ if (rule->suppress_ifgroup != -1 && dev && dev->group == rule->suppress_ifgroup)
+ goto suppress_route;
+
+ return false;
+
+suppress_route:
+ ip6_rt_put(rt);
+ return true;
+}
static int fib6_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
@@ -244,6 +274,7 @@ static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = {
.addr_size = sizeof(struct in6_addr),
.action = fib6_rule_action,
.match = fib6_rule_match,
+ .suppress = fib6_rule_suppress,
.configure = fib6_rule_configure,
.compare = fib6_rule_compare,
.fill = fib6_rule_fill,
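fib6_rule_suppress() above is the ipv6 side of the new ->suppress operation: returning true throws away the result of a rule whose action already succeeded (dropping the route reference), and the lookup continues as if the rule had never matched. A condensed sketch of how the core in net/core/fib_rules.c consumes it, not the verbatim source:

    list_for_each_entry_rcu(rule, &ops->rules_list, list) {
        if (!fib_rule_match(rule, ops, fl, flags))
            continue;

        err = ops->action(rule, fl, flags, arg);
        if (!err && ops->suppress && ops->suppress(rule, arg))
            continue;       /* result vetoed, evaluate the next rule */

        if (err != -EAGAIN)
            break;          /* done: use arg->result */
    }

From userspace this pairs with e.g. `ip -6 rule add table main suppress_prefixlength 0`, which lets a default-route answer (prefix length 0) from the main table fall through to later rules.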
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 7cfc8d284870..eef8d945b362 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -92,7 +92,7 @@ static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (type == ICMPV6_PKT_TOOBIG)
ip6_update_pmtu(skb, net, info, 0, 0);
else if (type == NDISC_REDIRECT)
- ip6_redirect(skb, net, 0, 0);
+ ip6_redirect(skb, net, skb->dev->ifindex, 0);
if (!(type & ICMPV6_INFOMSG_MASK))
if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST)
@@ -940,6 +940,14 @@ static const struct icmp6_err {
.err = ECONNREFUSED,
.fatal = 1,
},
+ { /* POLICY_FAIL */
+ .err = EACCES,
+ .fatal = 1,
+ },
+ { /* REJECT_ROUTE */
+ .err = EACCES,
+ .fatal = 1,
+ },
};
int icmpv6_err_convert(u8 type, u8 code, int *err)
@@ -951,7 +959,7 @@ int icmpv6_err_convert(u8 type, u8 code, int *err)
switch (type) {
case ICMPV6_DEST_UNREACH:
fatal = 1;
- if (code <= ICMPV6_PORT_UNREACH) {
+ if (code < ARRAY_SIZE(tab_unreach)) {
*err = tab_unreach[code].err;
fatal = tab_unreach[code].fatal;
}
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index c4ff5bbb45c4..73db48eba1c4 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -425,8 +425,8 @@ out:
* node.
*/
-static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
- int addrlen, int plen,
+static struct fib6_node *fib6_add_1(struct fib6_node *root,
+ struct in6_addr *addr, int plen,
int offset, int allow_create,
int replace_required)
{
@@ -543,7 +543,7 @@ insert_above:
but if it is >= plen, the value is ignored in any case.
*/
- bit = __ipv6_addr_diff(addr, &key->addr, addrlen);
+ bit = __ipv6_addr_diff(addr, &key->addr, sizeof(*addr));
/*
* (intermediate)[in]
@@ -822,9 +822,9 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
if (!allow_create && !replace_required)
pr_warn("RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n");
- fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr),
- rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst),
- allow_create, replace_required);
+ fn = fib6_add_1(root, &rt->rt6i_dst.addr, rt->rt6i_dst.plen,
+ offsetof(struct rt6_info, rt6i_dst), allow_create,
+ replace_required);
if (IS_ERR(fn)) {
err = PTR_ERR(fn);
@@ -863,7 +863,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
/* Now add the first leaf node to new subtree */
sn = fib6_add_1(sfn, &rt->rt6i_src.addr,
- sizeof(struct in6_addr), rt->rt6i_src.plen,
+ rt->rt6i_src.plen,
offsetof(struct rt6_info, rt6i_src),
allow_create, replace_required);
@@ -882,7 +882,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
fn->subtree = sfn;
} else {
sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr,
- sizeof(struct in6_addr), rt->rt6i_src.plen,
+ rt->rt6i_src.plen,
offsetof(struct rt6_info, rt6i_src),
allow_create, replace_required);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 90747f1973fe..6b26e9feafb9 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -335,6 +335,7 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
dev->rtnl_link_ops = &ip6gre_link_ops;
nt->dev = dev;
+ nt->net = dev_net(dev);
ip6gre_tnl_link_config(nt, 1);
if (register_netdevice(dev) < 0)
@@ -508,8 +509,6 @@ static int ip6gre_rcv(struct sk_buff *skb)
goto drop;
}
- secpath_reset(skb);
-
skb->protocol = gre_proto;
/* WCCP version 1 and 2 protocol decoding.
* - Change protocol to IP
@@ -524,7 +523,6 @@ static int ip6gre_rcv(struct sk_buff *skb)
skb->mac_header = skb->network_header;
__pskb_pull(skb, offset);
skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
- skb->pkt_type = PACKET_HOST;
if (((flags&GRE_CSUM) && csum) ||
(!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
@@ -556,7 +554,7 @@ static int ip6gre_rcv(struct sk_buff *skb)
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
}
- __skb_tunnel_rx(skb, tunnel->dev);
+ __skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
skb_reset_network_header(skb);
@@ -693,6 +691,8 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
tunnel->err_count = 0;
}
+ skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
+
max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len;
if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
@@ -709,8 +709,6 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
skb = new_skb;
}
- skb_dst_drop(skb);
-
if (fl6->flowi6_mark) {
skb_dst_set(skb, dst);
ndst = NULL;
@@ -1260,6 +1258,7 @@ static int ip6gre_tunnel_init(struct net_device *dev)
tunnel = netdev_priv(dev);
tunnel->dev = dev;
+ tunnel->net = dev_net(dev);
strcpy(tunnel->parms.name, dev->name);
memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
@@ -1280,6 +1279,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
struct ip6_tnl *tunnel = netdev_priv(dev);
tunnel->dev = dev;
+ tunnel->net = dev_net(dev);
strcpy(tunnel->parms.name, dev->name);
tunnel->hlen = sizeof(struct ipv6hdr) + 4;
@@ -1455,6 +1455,7 @@ static int ip6gre_tap_init(struct net_device *dev)
tunnel = netdev_priv(dev);
tunnel->dev = dev;
+ tunnel->net = dev_net(dev);
strcpy(tunnel->parms.name, dev->name);
ip6gre_tnl_link_config(tunnel, 1);
@@ -1506,6 +1507,7 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
eth_hw_addr_random(dev);
nt->dev = dev;
+ nt->net = dev_net(dev);
ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
/* Can use a lockless transmit, unless we generate output sequences */
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 2bab2aa59745..302d6fb1ff2b 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -44,7 +44,7 @@
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/xfrm.h>
-
+#include <net/inet_ecn.h>
int ip6_rcv_finish(struct sk_buff *skb)
@@ -109,6 +109,10 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
if (hdr->version != 6)
goto err;
+ IP6_ADD_STATS_BH(dev_net(dev), idev,
+ IPSTATS_MIB_NOECTPKTS +
+ (ipv6_get_dsfield(hdr) & INET_ECN_MASK),
+ max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
/*
* RFC4291 2.5.3
* A packet received on an interface with a destination address
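The counter bump added to ipv6_rcv() indexes the new MIB entries directly with the ECN bits of the Traffic Class: IPSTATS_MIB_NOECTPKTS, ECT1, ECT0 and CE sit consecutively, matching ECN codepoints 0 through 3, and a GSO aggregate counts as max(1, gso_segs) packets. A small demonstration of the indexing, with invented traffic-class bytes:

    #include <stdio.h>

    enum { INET_ECN_MASK = 3 };   /* low two bits of the DS field */

    /* laid out in codepoint order, like IPSTATS_MIB_NOECTPKTS.. */
    static const char *mib[] = { "NoECTPkts", "ECT1Pkts", "ECT0Pkts", "CEPkts" };

    int main(void)
    {
        const unsigned char dsfield[] = { 0x00, 0xb8, 0x2a, 0x03 };
        unsigned i;

        for (i = 0; i < sizeof(dsfield); i++)
            printf("dsfield 0x%02x -> %s\n", dsfield[i],
                   mib[dsfield[i] & INET_ECN_MASK]);
        return 0;
    }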
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index a263b990ee11..d82de7228100 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -91,6 +91,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
unsigned int unfrag_ip6hlen;
u8 *prevhdr;
int offset = 0;
+ bool tunnel;
if (unlikely(skb_shinfo(skb)->gso_type &
~(SKB_GSO_UDP |
@@ -106,6 +107,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
goto out;
+ tunnel = skb->encapsulation;
ipv6h = ipv6_hdr(skb);
__skb_pull(skb, sizeof(*ipv6h));
segs = ERR_PTR(-EPROTONOSUPPORT);
@@ -126,7 +128,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
ipv6h = ipv6_hdr(skb);
ipv6h->payload_len = htons(skb->len - skb->mac_len -
sizeof(*ipv6h));
- if (proto == IPPROTO_UDP) {
+ if (!tunnel && proto == IPPROTO_UDP) {
unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
fptr = (struct frag_hdr *)(skb_network_header(skb) +
unfrag_ip6hlen);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index e7ceb6c871d1..3a692d529163 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -56,31 +56,6 @@
#include <net/checksum.h>
#include <linux/mroute6.h>
-int __ip6_local_out(struct sk_buff *skb)
-{
- int len;
-
- len = skb->len - sizeof(struct ipv6hdr);
- if (len > IPV6_MAXPLEN)
- len = 0;
- ipv6_hdr(skb)->payload_len = htons(len);
-
- return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
- skb_dst(skb)->dev, dst_output);
-}
-
-int ip6_local_out(struct sk_buff *skb)
-{
- int err;
-
- err = __ip6_local_out(skb);
- if (likely(err == 1))
- err = dst_output(skb);
-
- return err;
-}
-EXPORT_SYMBOL_GPL(ip6_local_out);
-
static int ip6_finish_output2(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 46ba243605a3..61355f7f4da5 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -41,6 +41,7 @@
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
#include <linux/hash.h>
+#include <linux/etherdevice.h>
#include <asm/uaccess.h>
#include <linux/atomic.h>
@@ -315,6 +316,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
t = netdev_priv(dev);
t->parms = *p;
+ t->net = dev_net(dev);
err = ip6_tnl_create2(dev);
if (err < 0)
goto failed_free;
@@ -374,7 +376,7 @@ static void
ip6_tnl_dev_uninit(struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
- struct net *net = dev_net(dev);
+ struct net *net = t->net;
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
if (dev == ip6n->fb_tnl_dev)
@@ -741,7 +743,7 @@ int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
{
struct __ip6_tnl_parm *p = &t->parms;
int ret = 0;
- struct net *net = dev_net(t->dev);
+ struct net *net = t->net;
if ((p->flags & IP6_TNL_F_CAP_RCV) ||
((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
@@ -800,14 +802,12 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
rcu_read_unlock();
goto discard;
}
- secpath_reset(skb);
skb->mac_header = skb->network_header;
skb_reset_network_header(skb);
skb->protocol = htons(protocol);
- skb->pkt_type = PACKET_HOST;
memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
- __skb_tunnel_rx(skb, t->dev);
+ __skb_tunnel_rx(skb, t->dev, t->net);
err = dscp_ecn_decapsulate(t, ipv6h, skb);
if (unlikely(err)) {
@@ -895,7 +895,7 @@ int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
{
struct __ip6_tnl_parm *p = &t->parms;
int ret = 0;
- struct net *net = dev_net(t->dev);
+ struct net *net = t->net;
if (p->flags & IP6_TNL_F_CAP_XMIT) {
struct net_device *ldev = NULL;
@@ -945,8 +945,8 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
int encap_limit,
__u32 *pmtu)
{
- struct net *net = dev_net(dev);
struct ip6_tnl *t = netdev_priv(dev);
+ struct net *net = t->net;
struct net_device_stats *stats = &t->dev->stats;
struct ipv6hdr *ipv6h = ipv6_hdr(skb);
struct ipv6_tel_txoption opt;
@@ -996,6 +996,8 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
goto tx_err_dst_release;
}
+ skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
+
/*
* Okay, now see if we can stuff it in the buffer as-is.
*/
@@ -1013,7 +1015,6 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
consume_skb(skb);
skb = new_skb;
}
- skb_dst_drop(skb);
if (fl6->flowi6_mark) {
skb_dst_set(skb, dst);
ndst = NULL;
@@ -1208,7 +1209,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
int strict = (ipv6_addr_type(&p->raddr) &
(IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
- struct rt6_info *rt = rt6_lookup(dev_net(dev),
+ struct rt6_info *rt = rt6_lookup(t->net,
&p->raddr, &p->laddr,
p->link, strict);
@@ -1257,7 +1258,7 @@ ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
- struct net *net = dev_net(t->dev);
+ struct net *net = t->net;
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
int err;
@@ -1469,8 +1470,10 @@ static void ip6_tnl_dev_setup(struct net_device *dev)
dev->mtu-=8;
dev->flags |= IFF_NOARP;
dev->addr_len = sizeof(struct in6_addr);
- dev->features |= NETIF_F_NETNS_LOCAL;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ /* This perm addr will be used as interface identifier by IPv6 */
+ dev->addr_assign_type = NET_ADDR_RANDOM;
+ eth_random_addr(dev->perm_addr);
}
@@ -1485,6 +1488,7 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
struct ip6_tnl *t = netdev_priv(dev);
t->dev = dev;
+ t->net = dev_net(dev);
dev->tstats = alloc_percpu(struct pcpu_tstats);
if (!dev->tstats)
return -ENOMEM;
@@ -1602,9 +1606,9 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[])
{
- struct ip6_tnl *t;
+ struct ip6_tnl *t = netdev_priv(dev);
struct __ip6_tnl_parm p;
- struct net *net = dev_net(dev);
+ struct net *net = t->net;
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
if (dev == ip6n->fb_tnl_dev)
@@ -1705,14 +1709,24 @@ static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
{
+ struct net *net = dev_net(ip6n->fb_tnl_dev);
+ struct net_device *dev, *aux;
int h;
struct ip6_tnl *t;
LIST_HEAD(list);
+ for_each_netdev_safe(net, dev, aux)
+ if (dev->rtnl_link_ops == &ip6_link_ops)
+ unregister_netdevice_queue(dev, &list);
+
for (h = 0; h < HASH_SIZE; h++) {
t = rtnl_dereference(ip6n->tnls_r_l[h]);
while (t != NULL) {
- unregister_netdevice_queue(t->dev, &list);
+ /* If dev is in the same netns, it has already
+ * been added to the list by the previous loop.
+ */
+ if (!net_eq(dev_net(t->dev), net))
+ unregister_netdevice_queue(t->dev, &list);
t = rtnl_dereference(t->next);
}
}
@@ -1738,6 +1752,10 @@ static int __net_init ip6_tnl_init_net(struct net *net)
if (!ip6n->fb_tnl_dev)
goto err_alloc_dev;
dev_net_set(ip6n->fb_tnl_dev, net);
+ /* FB netdevice is special: we have one, and only one per netns.
+	 * Allowing it to be moved to another netns is clearly unsafe.
+ */
+ ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL;
err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
if (err < 0)
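The `t->net = dev_net(dev)` assignments scattered through this file pin down the namespace a tunnel was created in: dev_net() changes when the device is moved with `ip link set ... netns`, while t->net does not, which is what makes cross-netns tunnels possible. The xmit path then scrubs per-packet state exactly when the two disagree. A sketch of that decision; the helper name is invented:

    /* sketch only: the namespace-crossing check on transmit */
    static void tunnel_tx_scrub(struct ip6_tnl *t, struct sk_buff *skb)
    {
        bool xnet = !net_eq(t->net, dev_net(t->dev));

        /* skb_scrub_packet() resets per-packet state (timestamp, dst,
         * secpath, netfilter info, ...), with xnet gating the parts
         * that only matter when changing namespaces */
        skb_scrub_packet(skb, xnet);
    }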
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 03986d31fa41..f365310bfcca 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -110,8 +110,8 @@ static struct kmem_cache *mrt_cachep __read_mostly;
static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
static void ip6mr_free_table(struct mr6_table *mrt);
-static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
- struct sk_buff *skb, struct mfc6_cache *cache);
+static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
+ struct sk_buff *skb, struct mfc6_cache *cache);
static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
mifi_t mifi, int assert);
static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
@@ -672,9 +672,8 @@ static int pim6_rcv(struct sk_buff *skb)
skb_reset_network_header(skb);
skb->protocol = htons(ETH_P_IPV6);
skb->ip_summed = CHECKSUM_NONE;
- skb->pkt_type = PACKET_HOST;
- skb_tunnel_rx(skb, reg_dev);
+ skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
netif_rx(skb);
@@ -2074,8 +2073,8 @@ static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
return ct;
}
-static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
- struct sk_buff *skb, struct mfc6_cache *cache)
+static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
+ struct sk_buff *skb, struct mfc6_cache *cache)
{
int psend = -1;
int vif, ct;
@@ -2156,12 +2155,11 @@ forward:
last_forward:
if (psend != -1) {
ip6mr_forward2(net, mrt, skb, cache, psend);
- return 0;
+ return;
}
dont_forward:
kfree_skb(skb);
- return 0;
}
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 7af5aee75d98..5636a912074a 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -76,7 +76,7 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
return;
if (type == NDISC_REDIRECT)
- ip6_redirect(skb, net, 0, 0);
+ ip6_redirect(skb, net, skb->dev->ifindex, 0);
else
ip6_update_pmtu(skb, net, info, 0, 0);
xfrm_state_put(x);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 99cd65c715cd..096cd67b737c 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -44,6 +44,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/pkt_sched.h>
#include <net/mld.h>
#include <linux/netfilter.h>
@@ -94,6 +95,7 @@ static void mld_ifc_event(struct inet6_dev *idev);
static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *addr);
static void mld_clear_delrec(struct inet6_dev *idev);
+static bool mld_in_v1_mode(const struct inet6_dev *idev);
static int sf_setstate(struct ifmcaddr6 *pmc);
static void sf_markstate(struct ifmcaddr6 *pmc);
static void ip6_mc_clear_src(struct ifmcaddr6 *pmc);
@@ -106,14 +108,15 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
struct inet6_dev *idev);
-
-#define IGMP6_UNSOLICITED_IVAL (10*HZ)
#define MLD_QRV_DEFAULT 2
+/* RFC3810, 9.2. Query Interval */
+#define MLD_QI_DEFAULT (125 * HZ)
+/* RFC3810, 9.3. Query Response Interval */
+#define MLD_QRI_DEFAULT (10 * HZ)
-#define MLD_V1_SEEN(idev) (dev_net((idev)->dev)->ipv6.devconf_all->force_mld_version == 1 || \
- (idev)->cnf.force_mld_version == 1 || \
- ((idev)->mc_v1_seen && \
- time_before(jiffies, (idev)->mc_v1_seen)))
+/* RFC3810, 8.1 Query Version Distinctions */
+#define MLD_V1_QUERY_LEN 24
+#define MLD_V2_QUERY_LEN_MIN 28
#define IPV6_MLD_MAX_MSF 64
@@ -128,6 +131,18 @@ int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
pmc != NULL; \
pmc = rcu_dereference(pmc->next))
+static int unsolicited_report_interval(struct inet6_dev *idev)
+{
+ int iv;
+
+ if (mld_in_v1_mode(idev))
+ iv = idev->cnf.mldv1_unsolicited_report_interval;
+ else
+ iv = idev->cnf.mldv2_unsolicited_report_interval;
+
+ return iv > 0 ? iv : 1;
+}
+
int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
{
struct net_device *dev = NULL;
@@ -676,7 +691,7 @@ static void igmp6_group_added(struct ifmcaddr6 *mc)
if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT))
return;
- if (MLD_V1_SEEN(mc->idev)) {
+ if (mld_in_v1_mode(mc->idev)) {
igmp6_join_group(mc);
return;
}
@@ -984,29 +999,49 @@ bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
static void mld_gq_start_timer(struct inet6_dev *idev)
{
- int tv = net_random() % idev->mc_maxdelay;
+ unsigned long tv = net_random() % idev->mc_maxdelay;
idev->mc_gq_running = 1;
if (!mod_timer(&idev->mc_gq_timer, jiffies+tv+2))
in6_dev_hold(idev);
}
-static void mld_ifc_start_timer(struct inet6_dev *idev, int delay)
+static void mld_gq_stop_timer(struct inet6_dev *idev)
{
- int tv = net_random() % delay;
+ idev->mc_gq_running = 0;
+ if (del_timer(&idev->mc_gq_timer))
+ __in6_dev_put(idev);
+}
+
+static void mld_ifc_start_timer(struct inet6_dev *idev, unsigned long delay)
+{
+ unsigned long tv = net_random() % delay;
if (!mod_timer(&idev->mc_ifc_timer, jiffies+tv+2))
in6_dev_hold(idev);
}
-static void mld_dad_start_timer(struct inet6_dev *idev, int delay)
+static void mld_ifc_stop_timer(struct inet6_dev *idev)
{
- int tv = net_random() % delay;
+ idev->mc_ifc_count = 0;
+ if (del_timer(&idev->mc_ifc_timer))
+ __in6_dev_put(idev);
+}
+
+static void mld_dad_start_timer(struct inet6_dev *idev, unsigned long delay)
+{
+ unsigned long tv = net_random() % delay;
if (!mod_timer(&idev->mc_dad_timer, jiffies+tv+2))
in6_dev_hold(idev);
}
+static void mld_dad_stop_timer(struct inet6_dev *idev)
+{
+ if (del_timer(&idev->mc_dad_timer))
+ __in6_dev_put(idev);
+}
+
/*
* IGMP handling (alias multicast ICMPv6 messages)
*/
@@ -1025,12 +1060,9 @@ static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
delay = ma->mca_timer.expires - jiffies;
}
- if (delay >= resptime) {
- if (resptime)
- delay = net_random() % resptime;
- else
- delay = 1;
- }
+ if (delay >= resptime)
+ delay = net_random() % resptime;
+
ma->mca_timer.expires = jiffies + delay;
if (!mod_timer(&ma->mca_timer, jiffies + delay))
atomic_inc(&ma->mca_refcnt);
@@ -1097,6 +1129,158 @@ static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
return true;
}
+static int mld_force_mld_version(const struct inet6_dev *idev)
+{
+	/* Normally, both are 0 here. If enforcement of a particular version
+	 * is in use, the 'all' device setting (.../conf/all/force_mld_version)
+	 * takes precedence over the individual device setting.
+ */
+
+ if (dev_net(idev->dev)->ipv6.devconf_all->force_mld_version != 0)
+ return dev_net(idev->dev)->ipv6.devconf_all->force_mld_version;
+ else
+ return idev->cnf.force_mld_version;
+}
+
+static bool mld_in_v2_mode_only(const struct inet6_dev *idev)
+{
+ return mld_force_mld_version(idev) == 2;
+}
+
+static bool mld_in_v1_mode_only(const struct inet6_dev *idev)
+{
+ return mld_force_mld_version(idev) == 1;
+}
+
+static bool mld_in_v1_mode(const struct inet6_dev *idev)
+{
+ if (mld_in_v2_mode_only(idev))
+ return false;
+ if (mld_in_v1_mode_only(idev))
+ return true;
+ if (idev->mc_v1_seen && time_before(jiffies, idev->mc_v1_seen))
+ return true;
+
+ return false;
+}
+
+static void mld_set_v1_mode(struct inet6_dev *idev)
+{
+ /* RFC3810, relevant sections:
+ * - 9.1. Robustness Variable
+ * - 9.2. Query Interval
+ * - 9.3. Query Response Interval
+ * - 9.12. Older Version Querier Present Timeout
+ */
+ unsigned long switchback;
+
+ switchback = (idev->mc_qrv * idev->mc_qi) + idev->mc_qri;
+
+ idev->mc_v1_seen = jiffies + switchback;
+}
+
+static void mld_update_qrv(struct inet6_dev *idev,
+ const struct mld2_query *mlh2)
+{
+ /* RFC3810, relevant sections:
+ * - 5.1.8. QRV (Querier's Robustness Variable)
+ * - 9.1. Robustness Variable
+ */
+
+ /* The value of the Robustness Variable MUST NOT be zero,
+ * and SHOULD NOT be one. Catch this here if we ever run
+	 * into such a case in the future.
+ */
+ WARN_ON(idev->mc_qrv == 0);
+
+ if (mlh2->mld2q_qrv > 0)
+ idev->mc_qrv = mlh2->mld2q_qrv;
+
+ if (unlikely(idev->mc_qrv < 2)) {
+ net_warn_ratelimited("IPv6: MLD: clamping QRV from %u to %u!\n",
+ idev->mc_qrv, MLD_QRV_DEFAULT);
+ idev->mc_qrv = MLD_QRV_DEFAULT;
+ }
+}
+
+static void mld_update_qi(struct inet6_dev *idev,
+ const struct mld2_query *mlh2)
+{
+ /* RFC3810, relevant sections:
+ * - 5.1.9. QQIC (Querier's Query Interval Code)
+ * - 9.2. Query Interval
+ * - 9.12. Older Version Querier Present Timeout
+ * (the [Query Interval] in the last Query received)
+ */
+ unsigned long mc_qqi;
+
+ if (mlh2->mld2q_qqic < 128) {
+ mc_qqi = mlh2->mld2q_qqic;
+ } else {
+ unsigned long mc_man, mc_exp;
+
+ mc_exp = MLDV2_QQIC_EXP(mlh2->mld2q_qqic);
+ mc_man = MLDV2_QQIC_MAN(mlh2->mld2q_qqic);
+
+ mc_qqi = (mc_man | 0x10) << (mc_exp + 3);
+ }
+
+ idev->mc_qi = mc_qqi * HZ;
+}
+
+static void mld_update_qri(struct inet6_dev *idev,
+ const struct mld2_query *mlh2)
+{
+ /* RFC3810, relevant sections:
+ * - 5.1.3. Maximum Response Code
+ * - 9.3. Query Response Interval
+ */
+ idev->mc_qri = msecs_to_jiffies(mldv2_mrc(mlh2));
+}
+
+static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld,
+ unsigned long *max_delay)
+{
+ unsigned long mldv1_md;
+
+ /* Ignore v1 queries */
+ if (mld_in_v2_mode_only(idev))
+ return -EINVAL;
+
+ /* MLDv1 router present */
+ mldv1_md = ntohs(mld->mld_maxdelay);
+ *max_delay = max(msecs_to_jiffies(mldv1_md), 1UL);
+
+ mld_set_v1_mode(idev);
+
+ /* cancel MLDv2 report timer */
+ mld_gq_stop_timer(idev);
+ /* cancel the interface change timer */
+ mld_ifc_stop_timer(idev);
+ /* clear deleted report items */
+ mld_clear_delrec(idev);
+
+ return 0;
+}
+
+static int mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld,
+ unsigned long *max_delay)
+{
+ /* hosts need to stay in MLDv1 mode, discard MLDv2 queries */
+ if (mld_in_v1_mode(idev))
+ return -EINVAL;
+
+ *max_delay = max(msecs_to_jiffies(mldv2_mrc(mld)), 1UL);
+
+ mld_update_qrv(idev, mld);
+ mld_update_qi(idev, mld);
+ mld_update_qri(idev, mld);
+
+ idev->mc_maxdelay = *max_delay;
+
+ return 0;
+}
+
/* called with rcu_read_lock() */
int igmp6_event_query(struct sk_buff *skb)
{
@@ -1108,7 +1292,7 @@ int igmp6_event_query(struct sk_buff *skb)
struct mld_msg *mld;
int group_type;
int mark = 0;
- int len;
+ int len, err;
if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
return -EINVAL;
@@ -1122,7 +1306,6 @@ int igmp6_event_query(struct sk_buff *skb)
return -EINVAL;
idev = __in6_dev_get(skb->dev);
-
if (idev == NULL)
return 0;
@@ -1134,35 +1317,23 @@ int igmp6_event_query(struct sk_buff *skb)
!(group_type&IPV6_ADDR_MULTICAST))
return -EINVAL;
- if (len == 24) {
- int switchback;
- /* MLDv1 router present */
-
- /* Translate milliseconds to jiffies */
- max_delay = (ntohs(mld->mld_maxdelay)*HZ)/1000;
-
- switchback = (idev->mc_qrv + 1) * max_delay;
- idev->mc_v1_seen = jiffies + switchback;
-
- /* cancel the interface change timer */
- idev->mc_ifc_count = 0;
- if (del_timer(&idev->mc_ifc_timer))
- __in6_dev_put(idev);
- /* clear deleted report items */
- mld_clear_delrec(idev);
- } else if (len >= 28) {
+ if (len == MLD_V1_QUERY_LEN) {
+ err = mld_process_v1(idev, mld, &max_delay);
+ if (err < 0)
+ return err;
+ } else if (len >= MLD_V2_QUERY_LEN_MIN) {
int srcs_offset = sizeof(struct mld2_query) -
sizeof(struct icmp6hdr);
+
if (!pskb_may_pull(skb, srcs_offset))
return -EINVAL;
mlh2 = (struct mld2_query *)skb_transport_header(skb);
- max_delay = (MLDV2_MRC(ntohs(mlh2->mld2q_mrc))*HZ)/1000;
- if (!max_delay)
- max_delay = 1;
- idev->mc_maxdelay = max_delay;
- if (mlh2->mld2q_qrv)
- idev->mc_qrv = mlh2->mld2q_qrv;
+
+ err = mld_process_v2(idev, mlh2, &max_delay);
+ if (err < 0)
+ return err;
+
if (group_type == IPV6_ADDR_ANY) { /* general query */
if (mlh2->mld2q_nsrcs)
return -EINVAL; /* no sources allowed */
@@ -1376,6 +1547,7 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size)
if (!skb)
return NULL;
+ skb->priority = TC_PRIO_CONTROL;
skb_reserve(skb, hlen);
if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
@@ -1769,7 +1941,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
rcu_read_unlock();
return;
}
-
+ skb->priority = TC_PRIO_CONTROL;
skb_reserve(skb, hlen);
if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
@@ -1827,7 +1999,7 @@ err_out:
static void mld_resend_report(struct inet6_dev *idev)
{
- if (MLD_V1_SEEN(idev)) {
+ if (mld_in_v1_mode(idev)) {
struct ifmcaddr6 *mcaddr;
read_lock_bh(&idev->lock);
for (mcaddr = idev->mc_list; mcaddr; mcaddr = mcaddr->next) {
@@ -1891,7 +2063,7 @@ static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
else
pmc->mca_sources = psf->sf_next;
if (psf->sf_oldin && !(pmc->mca_flags & MAF_NOREPORT) &&
- !MLD_V1_SEEN(idev)) {
+ !mld_in_v1_mode(idev)) {
psf->sf_crcount = idev->mc_qrv;
psf->sf_next = pmc->mca_tomb;
pmc->mca_tomb = psf;
@@ -2156,7 +2328,7 @@ static void igmp6_join_group(struct ifmcaddr6 *ma)
igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
- delay = net_random() % IGMP6_UNSOLICITED_IVAL;
+ delay = net_random() % unsolicited_report_interval(ma->idev);
spin_lock_bh(&ma->mca_lock);
if (del_timer(&ma->mca_timer)) {
@@ -2191,7 +2363,7 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
static void igmp6_leave_group(struct ifmcaddr6 *ma)
{
- if (MLD_V1_SEEN(ma->idev)) {
+ if (mld_in_v1_mode(ma->idev)) {
if (ma->mca_flags & MAF_LAST_REPORTER)
igmp6_send(&ma->mca_addr, ma->idev->dev,
ICMPV6_MGM_REDUCTION);
@@ -2225,7 +2397,7 @@ static void mld_ifc_timer_expire(unsigned long data)
static void mld_ifc_event(struct inet6_dev *idev)
{
- if (MLD_V1_SEEN(idev))
+ if (mld_in_v1_mode(idev))
return;
idev->mc_ifc_count = idev->mc_qrv;
mld_ifc_start_timer(idev, 1);
@@ -2236,7 +2408,7 @@ static void igmp6_timer_handler(unsigned long data)
{
struct ifmcaddr6 *ma = (struct ifmcaddr6 *) data;
- if (MLD_V1_SEEN(ma->idev))
+ if (mld_in_v1_mode(ma->idev))
igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
else
mld_send_report(ma->idev, ma);
@@ -2276,14 +2448,9 @@ void ipv6_mc_down(struct inet6_dev *idev)
/* Withdraw multicast list */
read_lock_bh(&idev->lock);
- idev->mc_ifc_count = 0;
- if (del_timer(&idev->mc_ifc_timer))
- __in6_dev_put(idev);
- idev->mc_gq_running = 0;
- if (del_timer(&idev->mc_gq_timer))
- __in6_dev_put(idev);
- if (del_timer(&idev->mc_dad_timer))
- __in6_dev_put(idev);
+ mld_ifc_stop_timer(idev);
+ mld_gq_stop_timer(idev);
+ mld_dad_stop_timer(idev);
for (i = idev->mc_list; i; i=i->next)
igmp6_group_dropped(i);
@@ -2322,8 +2489,12 @@ void ipv6_mc_init_dev(struct inet6_dev *idev)
(unsigned long)idev);
setup_timer(&idev->mc_dad_timer, mld_dad_timer_expire,
(unsigned long)idev);
+
idev->mc_qrv = MLD_QRV_DEFAULT;
- idev->mc_maxdelay = IGMP6_UNSOLICITED_IVAL;
+ idev->mc_qi = MLD_QI_DEFAULT;
+ idev->mc_qri = MLD_QRI_DEFAULT;
+
+ idev->mc_maxdelay = unsolicited_report_interval(idev);
idev->mc_v1_seen = 0;
write_unlock_bh(&idev->lock);
}
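mld_update_qi() above decodes the QQIC octet per RFC 3810, 5.1.9: values under 128 are literal seconds; otherwise the octet is a tiny float with a 4-bit mantissa and 3-bit exponent, giving (man | 0x10) << (exp + 3). The resulting QI also feeds the Older Version Querier Present Timeout, QRV * QI + QRI, applied by mld_set_v1_mode(). A standalone decoder with a few sample codes:

    #include <stdio.h>

    #define MLDV2_QQIC_EXP(v) (((v) >> 4) & 0x07)
    #define MLDV2_QQIC_MAN(v) ((v) & 0x0f)

    static unsigned long qqic_to_secs(unsigned char qqic)
    {
        if (qqic < 128)
            return qqic;            /* literal value */
        return (unsigned long)(MLDV2_QQIC_MAN(qqic) | 0x10)
                << (MLDV2_QQIC_EXP(qqic) + 3);
    }

    int main(void)
    {
        /* 125 is the RFC 3810 default; the others exercise the float form */
        const unsigned char samples[] = { 125, 0x80, 0x8f, 0xff };
        unsigned i;

        /* prints 125, 128, 248 and 31744 seconds respectively */
        for (i = 0; i < sizeof(samples); i++)
            printf("QQIC 0x%02x -> QI %lus\n", samples[i],
                   qqic_to_secs(samples[i]));
        return 0;
    }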
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 04d31c2fbef1..12179457b2cd 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -372,14 +372,11 @@ static struct sk_buff *ndisc_alloc_skb(struct net_device *dev,
int tlen = dev->needed_tailroom;
struct sock *sk = dev_net(dev)->ipv6.ndisc_sk;
struct sk_buff *skb;
- int err;
- skb = sock_alloc_send_skb(sk,
- hlen + sizeof(struct ipv6hdr) + len + tlen,
- 1, &err);
+ skb = alloc_skb(hlen + sizeof(struct ipv6hdr) + len + tlen, GFP_ATOMIC);
if (!skb) {
- ND_PRINTK(0, err, "ndisc: %s failed to allocate an skb, err=%d\n",
- __func__, err);
+ ND_PRINTK(0, err, "ndisc: %s failed to allocate an skb\n",
+ __func__);
return NULL;
}
@@ -389,6 +386,11 @@ static struct sk_buff *ndisc_alloc_skb(struct net_device *dev,
skb_reserve(skb, hlen + sizeof(struct ipv6hdr));
skb_reset_transport_header(skb);
+ /* Manually assign socket ownership as we avoid calling
+ * sock_alloc_send_pskb() to bypass wmem buffer limits
+ */
+ skb_set_owner_w(skb, sk);
+
return skb;
}
@@ -428,7 +430,6 @@ static void ndisc_send_skb(struct sk_buff *skb,
type = icmp6h->icmp6_type;
if (!dst) {
- struct sock *sk = net->ipv6.ndisc_sk;
struct flowi6 fl6;
icmpv6_flow_init(sk, &fl6, type, saddr, daddr, skb->dev->ifindex);
@@ -462,10 +463,10 @@ static void ndisc_send_skb(struct sk_buff *skb,
rcu_read_unlock();
}
-static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
- const struct in6_addr *daddr,
- const struct in6_addr *solicited_addr,
- bool router, bool solicited, bool override, bool inc_opt)
+void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
+ const struct in6_addr *daddr,
+ const struct in6_addr *solicited_addr,
+ bool router, bool solicited, bool override, bool inc_opt)
{
struct sk_buff *skb;
struct in6_addr tmpaddr;
@@ -663,9 +664,7 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
}
ndisc_send_ns(dev, neigh, target, target, saddr);
} else if ((probes -= neigh->parms->app_probes) < 0) {
-#ifdef CONFIG_ARPD
neigh_app_ns(neigh);
-#endif
} else {
addrconf_addr_solict_mult(target, &mcaddr);
ndisc_send_ns(dev, NULL, target, &mcaddr, saddr);
@@ -1370,7 +1369,8 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
return;
if (!ndopts.nd_opts_rh) {
- ip6_redirect_no_header(skb, dev_net(skb->dev), 0, 0);
+ ip6_redirect_no_header(skb, dev_net(skb->dev),
+ skb->dev->ifindex, 0);
return;
}
@@ -1519,10 +1519,27 @@ static void pndisc_redo(struct sk_buff *skb)
kfree_skb(skb);
}
+static bool ndisc_suppress_frag_ndisc(struct sk_buff *skb)
+{
+ struct inet6_dev *idev = __in6_dev_get(skb->dev);
+
+ if (!idev)
+ return true;
+ if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED &&
+ idev->cnf.suppress_frag_ndisc) {
+ net_warn_ratelimited("Received fragmented ndisc packet. Carefully consider disabling suppress_frag_ndisc.\n");
+ return true;
+ }
+ return false;
+}
+
int ndisc_rcv(struct sk_buff *skb)
{
struct nd_msg *msg;
+ if (ndisc_suppress_frag_ndisc(skb))
+ return 0;
+
if (skb_linearize(skb))
return 0;
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 4433ab40e7de..a7f842b29b67 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -153,6 +153,19 @@ config IP6_NF_TARGET_REJECT
To compile it as a module, choose M here. If unsure, say N.
+config IP6_NF_TARGET_SYNPROXY
+ tristate "SYNPROXY target support"
+ depends on NF_CONNTRACK && NETFILTER_ADVANCED
+ select NETFILTER_SYNPROXY
+ select SYN_COOKIES
+ help
+ The SYNPROXY target allows you to intercept TCP connections and
+ establish them using syncookies before they are passed on to the
	  server. This avoids conntrack and server resource usage
+ during SYN-flood attacks.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config IP6_NF_MANGLE
tristate "Packet mangling"
default m if NETFILTER_ADVANCED=n
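A typical ruleset for the new target, adapted from the netfilter example for its IPv4 counterpart (interface and port are placeholders): untracked SYNs are answered from syncookies, only cookie-validated connections are relayed, and everything else is dropped before it can create conntrack state.

    ip6tables -t raw -A PREROUTING -i eth0 -p tcp --dport 80 --syn \
            -j CT --notrack
    ip6tables -A INPUT -i eth0 -p tcp --dport 80 \
            -m state --state INVALID,UNTRACKED \
            -j SYNPROXY --sack-perm --timestamp --wscale 7 --mss 1460
    ip6tables -A INPUT -i eth0 -p tcp --dport 80 -m state --state INVALID -j DROP
    echo 0 > /proc/sys/net/netfilter/nf_conntrack_tcp_loose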
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index 2d11fcc2cf3c..2b53738f798c 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -14,7 +14,7 @@ obj-$(CONFIG_NF_NAT_IPV6) += ip6table_nat.o
nf_conntrack_ipv6-y := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o
# l3 independent conntrack
-obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o nf_defrag_ipv6.o
+obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o
nf_nat_ipv6-y := nf_nat_l3proto_ipv6.o nf_nat_proto_icmpv6.o
obj-$(CONFIG_NF_NAT_IPV6) += nf_nat_ipv6.o
@@ -37,3 +37,4 @@ obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o
obj-$(CONFIG_IP6_NF_TARGET_MASQUERADE) += ip6t_MASQUERADE.o
obj-$(CONFIG_IP6_NF_TARGET_NPT) += ip6t_NPT.o
obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o
+obj-$(CONFIG_IP6_NF_TARGET_SYNPROXY) += ip6t_SYNPROXY.o
diff --git a/net/ipv6/netfilter/ip6t_MASQUERADE.c b/net/ipv6/netfilter/ip6t_MASQUERADE.c
index 47bff6107519..3e4e92d5e157 100644
--- a/net/ipv6/netfilter/ip6t_MASQUERADE.c
+++ b/net/ipv6/netfilter/ip6t_MASQUERADE.c
@@ -76,7 +76,7 @@ static int masq_device_event(struct notifier_block *this,
if (event == NETDEV_DOWN)
nf_ct_iterate_cleanup(net, device_cmp,
- (void *)(long)dev->ifindex);
+ (void *)(long)dev->ifindex, 0, 0);
return NOTIFY_DONE;
}
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 70f9abc0efe9..56eef30ee5f6 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -169,7 +169,25 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
nf_ct_attach(nskb, oldskb);
- ip6_local_out(nskb);
+#ifdef CONFIG_BRIDGE_NETFILTER
+ /* If we use ip6_local_out for bridged traffic, the MAC source on
+ * the RST will be ours, instead of the destination's. This confuses
+ * some routers/firewalls, and they drop the packet. So we need to
+ * build the eth header using the original destination's MAC as the
+ * source, and send the RST packet directly.
+ */
+ if (oldskb->nf_bridge) {
+ struct ethhdr *oeth = eth_hdr(oldskb);
+ nskb->dev = oldskb->nf_bridge->physindev;
+ nskb->protocol = htons(ETH_P_IPV6);
+ ip6h->payload_len = htons(sizeof(struct tcphdr));
+ if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
+ oeth->h_source, oeth->h_dest, nskb->len) < 0)
+ return;
+ dev_queue_xmit(nskb);
+ } else
+#endif
+ ip6_local_out(nskb);
}
static inline void
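The new ip6t_SYNPROXY.c below is the IPv6 twin of the IPv4 target added in the same series. As a reading aid, a rough map of the handshake its helpers relay (a sketch, not kernel text):

    /*
     * client                  SYNPROXY                   server
     *   |--- SYN ----------------->|                        |
     *   |<-- SYN/ACK (syncookie) --| synproxy_send_client_synack()
     *   |--- ACK ----------------->| checked in synproxy_recv_client_ack()
     *   |                          |--- SYN --------------->| synproxy_send_server_syn()
     *   |                          |<-- SYN/ACK ------------|
     *   |                          |--- ACK --------------->| synproxy_send_server_ack()
     *   |<-- ACK (window update) --| synproxy_send_client_ack()
     *   |   ...seq/ack offsets rewritten via nf_ct_seqadj from here on...
     */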
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
new file mode 100644
index 000000000000..19cfea8dbcaa
--- /dev/null
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -0,0 +1,499 @@
+/*
+ * Copyright (c) 2013 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <net/ip6_checksum.h>
+#include <net/ip6_route.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_SYNPROXY.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
+#include <net/netfilter/nf_conntrack_synproxy.h>
+
+static struct ipv6hdr *
+synproxy_build_ip(struct sk_buff *skb, const struct in6_addr *saddr,
+ const struct in6_addr *daddr)
+{
+ struct ipv6hdr *iph;
+
+ skb_reset_network_header(skb);
+ iph = (struct ipv6hdr *)skb_put(skb, sizeof(*iph));
+ ip6_flow_hdr(iph, 0, 0);
+ iph->hop_limit = 64; //XXX
+ iph->nexthdr = IPPROTO_TCP;
+ iph->saddr = *saddr;
+ iph->daddr = *daddr;
+
+ return iph;
+}
+
+static void
+synproxy_send_tcp(const struct sk_buff *skb, struct sk_buff *nskb,
+ struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
+ struct ipv6hdr *niph, struct tcphdr *nth,
+ unsigned int tcp_hdr_size)
+{
+ struct net *net = nf_ct_net((struct nf_conn *)nfct);
+ struct dst_entry *dst;
+ struct flowi6 fl6;
+
+ nth->check = ~tcp_v6_check(tcp_hdr_size, &niph->saddr, &niph->daddr, 0);
+ nskb->ip_summed = CHECKSUM_PARTIAL;
+ nskb->csum_start = (unsigned char *)nth - nskb->head;
+ nskb->csum_offset = offsetof(struct tcphdr, check);
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_proto = IPPROTO_TCP;
+ fl6.saddr = niph->saddr;
+ fl6.daddr = niph->daddr;
+ fl6.fl6_sport = nth->source;
+ fl6.fl6_dport = nth->dest;
+ security_skb_classify_flow((struct sk_buff *)skb, flowi6_to_flowi(&fl6));
+ dst = ip6_route_output(net, NULL, &fl6);
+ if (dst == NULL || dst->error) {
+ dst_release(dst);
+ goto free_nskb;
+ }
+ dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
+ if (IS_ERR(dst))
+ goto free_nskb;
+
+ skb_dst_set(nskb, dst);
+
+ if (nfct) {
+ nskb->nfct = nfct;
+ nskb->nfctinfo = ctinfo;
+ nf_conntrack_get(nfct);
+ }
+
+ ip6_local_out(nskb);
+ return;
+
+free_nskb:
+ kfree_skb(nskb);
+}
+
+static void
+synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th,
+ const struct synproxy_options *opts)
+{
+ struct sk_buff *nskb;
+ struct ipv6hdr *iph, *niph;
+ struct tcphdr *nth;
+ unsigned int tcp_hdr_size;
+ u16 mss = opts->mss;
+
+ iph = ipv6_hdr(skb);
+
+ tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
+ nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
+ GFP_ATOMIC);
+ if (nskb == NULL)
+ return;
+ skb_reserve(nskb, MAX_TCP_HEADER);
+
+ niph = synproxy_build_ip(nskb, &iph->daddr, &iph->saddr);
+
+ skb_reset_transport_header(nskb);
+ nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
+ nth->source = th->dest;
+ nth->dest = th->source;
+ nth->seq = htonl(__cookie_v6_init_sequence(iph, th, &mss));
+ nth->ack_seq = htonl(ntohl(th->seq) + 1);
+ tcp_flag_word(nth) = TCP_FLAG_SYN | TCP_FLAG_ACK;
+ if (opts->options & XT_SYNPROXY_OPT_ECN)
+ tcp_flag_word(nth) |= TCP_FLAG_ECE;
+ nth->doff = tcp_hdr_size / 4;
+ nth->window = 0;
+ nth->check = 0;
+ nth->urg_ptr = 0;
+
+ synproxy_build_options(nth, opts);
+
+ synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+ niph, nth, tcp_hdr_size);
+}
+
+static void
+synproxy_send_server_syn(const struct synproxy_net *snet,
+ const struct sk_buff *skb, const struct tcphdr *th,
+ const struct synproxy_options *opts, u32 recv_seq)
+{
+ struct sk_buff *nskb;
+ struct ipv6hdr *iph, *niph;
+ struct tcphdr *nth;
+ unsigned int tcp_hdr_size;
+
+ iph = ipv6_hdr(skb);
+
+ tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
+ nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
+ GFP_ATOMIC);
+ if (nskb == NULL)
+ return;
+ skb_reserve(nskb, MAX_TCP_HEADER);
+
+ niph = synproxy_build_ip(nskb, &iph->saddr, &iph->daddr);
+
+ skb_reset_transport_header(nskb);
+ nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
+ nth->source = th->source;
+ nth->dest = th->dest;
+ nth->seq = htonl(recv_seq - 1);
+ /* ack_seq is used to relay our ISN to the synproxy hook to initialize
+ * sequence number translation once a connection tracking entry exists.
+ */
+ nth->ack_seq = htonl(ntohl(th->ack_seq) - 1);
+ tcp_flag_word(nth) = TCP_FLAG_SYN;
+ if (opts->options & XT_SYNPROXY_OPT_ECN)
+ tcp_flag_word(nth) |= TCP_FLAG_ECE | TCP_FLAG_CWR;
+ nth->doff = tcp_hdr_size / 4;
+ nth->window = th->window;
+ nth->check = 0;
+ nth->urg_ptr = 0;
+
+ synproxy_build_options(nth, opts);
+
+ synproxy_send_tcp(skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
+ niph, nth, tcp_hdr_size);
+}
+
+static void
+synproxy_send_server_ack(const struct synproxy_net *snet,
+ const struct ip_ct_tcp *state,
+ const struct sk_buff *skb, const struct tcphdr *th,
+ const struct synproxy_options *opts)
+{
+ struct sk_buff *nskb;
+ struct ipv6hdr *iph, *niph;
+ struct tcphdr *nth;
+ unsigned int tcp_hdr_size;
+
+ iph = ipv6_hdr(skb);
+
+ tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
+ nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
+ GFP_ATOMIC);
+ if (nskb == NULL)
+ return;
+ skb_reserve(nskb, MAX_TCP_HEADER);
+
+ niph = synproxy_build_ip(nskb, &iph->daddr, &iph->saddr);
+
+ skb_reset_transport_header(nskb);
+ nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
+ nth->source = th->dest;
+ nth->dest = th->source;
+ nth->seq = htonl(ntohl(th->ack_seq));
+ nth->ack_seq = htonl(ntohl(th->seq) + 1);
+ tcp_flag_word(nth) = TCP_FLAG_ACK;
+ nth->doff = tcp_hdr_size / 4;
+ nth->window = htons(state->seen[IP_CT_DIR_ORIGINAL].td_maxwin);
+ nth->check = 0;
+ nth->urg_ptr = 0;
+
+ synproxy_build_options(nth, opts);
+
+ synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+}
+
+static void
+synproxy_send_client_ack(const struct synproxy_net *snet,
+ const struct sk_buff *skb, const struct tcphdr *th,
+ const struct synproxy_options *opts)
+{
+ struct sk_buff *nskb;
+ struct ipv6hdr *iph, *niph;
+ struct tcphdr *nth;
+ unsigned int tcp_hdr_size;
+
+ iph = ipv6_hdr(skb);
+
+ tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
+ nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
+ GFP_ATOMIC);
+ if (nskb == NULL)
+ return;
+ skb_reserve(nskb, MAX_TCP_HEADER);
+
+ niph = synproxy_build_ip(nskb, &iph->saddr, &iph->daddr);
+
+ skb_reset_transport_header(nskb);
+ nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
+ nth->source = th->source;
+ nth->dest = th->dest;
+ nth->seq = htonl(ntohl(th->seq) + 1);
+ nth->ack_seq = th->ack_seq;
+ tcp_flag_word(nth) = TCP_FLAG_ACK;
+ nth->doff = tcp_hdr_size / 4;
+ nth->window = htons(ntohs(th->window) >> opts->wscale);
+ nth->check = 0;
+ nth->urg_ptr = 0;
+
+ synproxy_build_options(nth, opts);
+
+ synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+}
+
+static bool
+synproxy_recv_client_ack(const struct synproxy_net *snet,
+ const struct sk_buff *skb, const struct tcphdr *th,
+ struct synproxy_options *opts, u32 recv_seq)
+{
+ int mss;
+
+ mss = __cookie_v6_check(ipv6_hdr(skb), th, ntohl(th->ack_seq) - 1);
+ if (mss == 0) {
+ this_cpu_inc(snet->stats->cookie_invalid);
+ return false;
+ }
+
+ this_cpu_inc(snet->stats->cookie_valid);
+ opts->mss = mss;
+
+ if (opts->options & XT_SYNPROXY_OPT_TIMESTAMP)
+ synproxy_check_timestamp_cookie(opts);
+
+ synproxy_send_server_syn(snet, skb, th, opts, recv_seq);
+ return true;
+}
+
+static unsigned int
+synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ const struct xt_synproxy_info *info = par->targinfo;
+ struct synproxy_net *snet = synproxy_pernet(dev_net(par->in));
+ struct synproxy_options opts = {};
+ struct tcphdr *th, _th;
+
+ if (nf_ip6_checksum(skb, par->hooknum, par->thoff, IPPROTO_TCP))
+ return NF_DROP;
+
+ th = skb_header_pointer(skb, par->thoff, sizeof(_th), &_th);
+ if (th == NULL)
+ return NF_DROP;
+
+ synproxy_parse_options(skb, par->thoff, th, &opts);
+
+ if (th->syn && !(th->ack || th->fin || th->rst)) {
+ /* Initial SYN from client */
+ this_cpu_inc(snet->stats->syn_received);
+
+ if (th->ece && th->cwr)
+ opts.options |= XT_SYNPROXY_OPT_ECN;
+
+ opts.options &= info->options;
+ if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
+ synproxy_init_timestamp_cookie(info, &opts);
+ else
+ opts.options &= ~(XT_SYNPROXY_OPT_WSCALE |
+ XT_SYNPROXY_OPT_SACK_PERM |
+ XT_SYNPROXY_OPT_ECN);
+
+ synproxy_send_client_synack(skb, th, &opts);
+ return NF_DROP;
+
+ } else if (th->ack && !(th->fin || th->rst || th->syn)) {
+ /* ACK from client */
+ synproxy_recv_client_ack(snet, skb, th, &opts, ntohl(th->seq));
+ return NF_DROP;
+ }
+
+ return XT_CONTINUE;
+}
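
The target only ever intercepts two shapes of packet: a pure SYN, which starts the cookie handshake, and a pure ACK, which answers it; everything else falls through to XT_CONTINUE. A tiny sketch of that classification, using an invented flags struct in place of the struct tcphdr bitfields:

	#include <stdbool.h>
	#include <stdio.h>

	struct tcp_flags { bool syn, ack, fin, rst; };

	/* Initial SYN from the client: SYN set, no ACK/FIN/RST. */
	static bool is_client_syn(struct tcp_flags f)
	{
		return f.syn && !(f.ack || f.fin || f.rst);
	}

	/* ACK completing the cookie handshake: ACK set, no SYN/FIN/RST. */
	static bool is_client_ack(struct tcp_flags f)
	{
		return f.ack && !(f.fin || f.rst || f.syn);
	}

	int main(void)
	{
		struct tcp_flags syn = { .syn = true };
		struct tcp_flags ack = { .ack = true };

		printf("%d %d\n", is_client_syn(syn), is_client_ack(ack)); /* 1 1 */
		return 0;
	}
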
+
+static unsigned int ipv6_synproxy_hook(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ struct synproxy_net *snet = synproxy_pernet(dev_net(in ? : out));
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct;
+ struct nf_conn_synproxy *synproxy;
+ struct synproxy_options opts = {};
+ const struct ip_ct_tcp *state;
+ struct tcphdr *th, _th;
+ __be16 frag_off;
+ u8 nexthdr;
+ int thoff;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (ct == NULL)
+ return NF_ACCEPT;
+
+ synproxy = nfct_synproxy(ct);
+ if (synproxy == NULL)
+ return NF_ACCEPT;
+
+ if (nf_is_loopback_packet(skb))
+ return NF_ACCEPT;
+
+ nexthdr = ipv6_hdr(skb)->nexthdr;
+ thoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
+ &frag_off);
+ if (thoff < 0)
+ return NF_ACCEPT;
+
+ th = skb_header_pointer(skb, thoff, sizeof(_th), &_th);
+ if (th == NULL)
+ return NF_DROP;
+
+ state = &ct->proto.tcp;
+ switch (state->state) {
+ case TCP_CONNTRACK_CLOSE:
+ if (th->rst && !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
+ nf_ct_seqadj_init(ct, ctinfo, synproxy->isn -
+ ntohl(th->seq) + 1);
+ break;
+ }
+
+ if (!th->syn || th->ack ||
+ CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
+ break;
+
+ /* Reopened connection - reset the sequence number and timestamp
+ * adjustments; they will be initialized again once the connection
+ * is reestablished.
+ */
+ nf_ct_seqadj_init(ct, ctinfo, 0);
+ synproxy->tsoff = 0;
+ this_cpu_inc(snet->stats->conn_reopened);
+
+ /* fall through */
+ case TCP_CONNTRACK_SYN_SENT:
+ synproxy_parse_options(skb, thoff, th, &opts);
+
+ if (!th->syn && th->ack &&
+ CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
+ /* Keep-Alives are sent with SEG.SEQ = SND.NXT-1;
+ * therefore we need to add 1 to make the SYN sequence
+ * number match that of the first SYN.
+ */
+ if (synproxy_recv_client_ack(snet, skb, th, &opts,
+ ntohl(th->seq) + 1))
+ this_cpu_inc(snet->stats->cookie_retrans);
+
+ return NF_DROP;
+ }
+
+ synproxy->isn = ntohl(th->ack_seq);
+ if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
+ synproxy->its = opts.tsecr;
+ break;
+ case TCP_CONNTRACK_SYN_RECV:
+ if (!th->syn || !th->ack)
+ break;
+
+ synproxy_parse_options(skb, thoff, th, &opts);
+ if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
+ synproxy->tsoff = opts.tsval - synproxy->its;
+
+ opts.options &= ~(XT_SYNPROXY_OPT_MSS |
+ XT_SYNPROXY_OPT_WSCALE |
+ XT_SYNPROXY_OPT_SACK_PERM);
+
+ swap(opts.tsval, opts.tsecr);
+ synproxy_send_server_ack(snet, state, skb, th, &opts);
+
+ nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq));
+
+ swap(opts.tsval, opts.tsecr);
+ synproxy_send_client_ack(snet, skb, th, &opts);
+
+ consume_skb(skb);
+ return NF_STOLEN;
+ default:
+ break;
+ }
+
+ synproxy_tstamp_adjust(skb, thoff, th, ct, ctinfo, synproxy);
+ return NF_ACCEPT;
+}
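
Once the server's SYN/ACK arrives, nf_ct_seqadj_init() records the difference between the ISN the proxy promised the client (recovered from the cookie ACK) and the server's real ISN; every later server sequence number is shifted by that offset. A hedged userspace sketch of the arithmetic, with invented constants:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t proxy_isn  = 0x11111111;	/* ISN promised to the client */
		uint32_t server_isn = 0x22222222;	/* ISN in the server SYN/ACK  */
		uint32_t off = proxy_isn - server_isn;	/* mod-2^32 offset */

		/* A byte the server sends at seq = server_isn + 1 must appear
		 * to the client at proxy_isn + 1: */
		uint32_t server_seq = server_isn + 1;
		printf("translated seq = 0x%08x\n",
		       (unsigned)(server_seq + off));	/* -> 0x11111112 */
		return 0;
	}
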
+
+static int synproxy_tg6_check(const struct xt_tgchk_param *par)
+{
+ const struct ip6t_entry *e = par->entryinfo;
+
+ if (!(e->ipv6.flags & IP6T_F_PROTO) ||
+ e->ipv6.proto != IPPROTO_TCP ||
+ e->ipv6.invflags & XT_INV_PROTO)
+ return -EINVAL;
+
+ return nf_ct_l3proto_try_module_get(par->family);
+}
+
+static void synproxy_tg6_destroy(const struct xt_tgdtor_param *par)
+{
+ nf_ct_l3proto_module_put(par->family);
+}
+
+static struct xt_target synproxy_tg6_reg __read_mostly = {
+ .name = "SYNPROXY",
+ .family = NFPROTO_IPV6,
+ .target = synproxy_tg6,
+ .targetsize = sizeof(struct xt_synproxy_info),
+ .checkentry = synproxy_tg6_check,
+ .destroy = synproxy_tg6_destroy,
+ .me = THIS_MODULE,
+};
+
+static struct nf_hook_ops ipv6_synproxy_ops[] __read_mostly = {
+ {
+ .hook = ipv6_synproxy_hook,
+ .owner = THIS_MODULE,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_LOCAL_IN,
+ .priority = NF_IP_PRI_CONNTRACK_CONFIRM - 1,
+ },
+ {
+ .hook = ipv6_synproxy_hook,
+ .owner = THIS_MODULE,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_POST_ROUTING,
+ .priority = NF_IP_PRI_CONNTRACK_CONFIRM - 1,
+ },
+};
+
+static int __init synproxy_tg6_init(void)
+{
+ int err;
+
+ err = nf_register_hooks(ipv6_synproxy_ops,
+ ARRAY_SIZE(ipv6_synproxy_ops));
+ if (err < 0)
+ goto err1;
+
+ err = xt_register_target(&synproxy_tg6_reg);
+ if (err < 0)
+ goto err2;
+
+ return 0;
+
+err2:
+ nf_unregister_hooks(ipv6_synproxy_ops, ARRAY_SIZE(ipv6_synproxy_ops));
+err1:
+ return err;
+}
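
The init path above uses the usual one-error-label-per-acquired-resource unwind, releasing in reverse order of acquisition. A standalone sketch of the idiom, with invented placeholder steps:

	#include <stdio.h>

	static int step_a(void) { return 0; }
	static void undo_a(void) { }
	static int step_b(void) { return -1; }	/* pretend this step fails */

	static int init(void)
	{
		int err;

		err = step_a();
		if (err < 0)
			goto err1;
		err = step_b();
		if (err < 0)
			goto err2;
		return 0;

	err2:
		undo_a();	/* undo only what already succeeded */
	err1:
		return err;
	}

	int main(void)
	{
		printf("init() = %d\n", init());	/* -> -1, step_a undone */
		return 0;
	}
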
+
+static void __exit synproxy_tg6_exit(void)
+{
+ xt_unregister_target(&synproxy_tg6_reg);
+ nf_unregister_hooks(ipv6_synproxy_ops, ARRAY_SIZE(ipv6_synproxy_ops));
+}
+
+module_init(synproxy_tg6_init);
+module_exit(synproxy_tg6_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index c9b6a6e6a1e8..d6e4dd8b58df 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -28,6 +28,7 @@
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
@@ -158,11 +159,7 @@ static unsigned int ipv6_confirm(unsigned int hooknum,
/* adjust seqs for loopback traffic only in outgoing direction */
if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
!nf_is_loopback_packet(skb)) {
- typeof(nf_nat_seq_adjust_hook) seq_adjust;
-
- seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook);
- if (!seq_adjust ||
- !seq_adjust(skb, ct, ctinfo, protoff)) {
+ if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) {
NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
return NF_DROP;
}
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index ab92a3673fbb..827f795209cf 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -5,6 +5,7 @@
#include <linux/export.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
+#include <net/addrconf.h>
void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
{
@@ -75,3 +76,50 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
return offset;
}
EXPORT_SYMBOL(ip6_find_1stfragopt);
+
+#if IS_ENABLED(CONFIG_IPV6)
+int ip6_dst_hoplimit(struct dst_entry *dst)
+{
+ int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
+ if (hoplimit == 0) {
+ struct net_device *dev = dst->dev;
+ struct inet6_dev *idev;
+
+ rcu_read_lock();
+ idev = __in6_dev_get(dev);
+ if (idev)
+ hoplimit = idev->cnf.hop_limit;
+ else
+ hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit;
+ rcu_read_unlock();
+ }
+ return hoplimit;
+}
+EXPORT_SYMBOL(ip6_dst_hoplimit);
+#endif
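
ip6_dst_hoplimit(), moved here (its removal from route.c appears further down), picks the hop limit in a fixed precedence order: an explicit route metric wins, then the egress device's per-device setting when the device has IPv6 state, else the namespace-wide default. A sketch of that fallback with invented parameters:

	#include <stdio.h>

	static int pick_hoplimit(int route_metric, int have_idev,
				 int dev_conf, int ns_default)
	{
		if (route_metric)		/* RTAX_HOPLIMIT metric, 0 = unset */
			return route_metric;
		/* idev->cnf.hop_limit if the device has IPv6 state,
		 * else devconf_all->hop_limit */
		return have_idev ? dev_conf : ns_default;
	}

	int main(void)
	{
		printf("%d\n", pick_hoplimit(0, 0, 0, 64));	/* -> 64 */
		return 0;
	}
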
+
+int __ip6_local_out(struct sk_buff *skb)
+{
+ int len;
+
+ len = skb->len - sizeof(struct ipv6hdr);
+ if (len > IPV6_MAXPLEN)
+ len = 0;
+ ipv6_hdr(skb)->payload_len = htons(len);
+
+ return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
+ skb_dst(skb)->dev, dst_output);
+}
+EXPORT_SYMBOL_GPL(__ip6_local_out);
+
+int ip6_local_out(struct sk_buff *skb)
+{
+ int err;
+
+ err = __ip6_local_out(skb);
+ if (likely(err == 1))
+ err = dst_output(skb);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ip6_local_out);
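
__ip6_local_out() writes the 16-bit payload-length field, zeroing it for jumbograms (RFC 2675), and ip6_local_out() treats a return of 1 from the LOCAL_OUT hook as "accepted, continue to dst_output". A sketch of the payload-length rule alone, with the header size hard-coded for illustration:

	#include <stdio.h>
	#include <stddef.h>

	#define IPV6_MAXPLEN	65535
	#define IPV6_HDR_LEN	40	/* sizeof(struct ipv6hdr) */

	/* The field is 16 bits wide; larger packets are jumbograms and
	 * carry 0 here, with the real length in a hop-by-hop option. */
	static unsigned int ipv6_payload_len(size_t skb_len)
	{
		size_t len = skb_len - IPV6_HDR_LEN;

		return len > IPV6_MAXPLEN ? 0 : (unsigned int)len;
	}

	int main(void)
	{
		printf("%u\n", ipv6_payload_len(1540));		/* -> 1500 */
		printf("%u\n", ipv6_payload_len(100000));	/* -> 0 (jumbogram) */
		return 0;
	}
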
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 51c3285b5d9b..091d066a57b3 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -91,6 +91,10 @@ static const struct snmp_mib snmp6_ipstats_list[] = {
SNMP_MIB_ITEM("Ip6InBcastOctets", IPSTATS_MIB_INBCASTOCTETS),
SNMP_MIB_ITEM("Ip6OutBcastOctets", IPSTATS_MIB_OUTBCASTOCTETS),
/* IPSTATS_MIB_CSUMERRORS is not relevant in IPv6 (no checksum) */
+ SNMP_MIB_ITEM("Ip6InNoECTPkts", IPSTATS_MIB_NOECTPKTS),
+ SNMP_MIB_ITEM("Ip6InECT1Pkts", IPSTATS_MIB_ECT1PKTS),
+ SNMP_MIB_ITEM("Ip6InECT0Pkts", IPSTATS_MIB_ECT0PKTS),
+ SNMP_MIB_ITEM("Ip6InCEPkts", IPSTATS_MIB_CEPKTS),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index cdaed47ba932..58916bbb1728 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -63,6 +63,8 @@
#include <linux/seq_file.h>
#include <linux/export.h>
+#define ICMPV6_HDRLEN 4 /* ICMPv6 header, RFC 4443 Section 2.1 */
+
static struct raw_hashinfo raw_v6_hashinfo = {
.lock = __RW_LOCK_UNLOCKED(raw_v6_hashinfo.lock),
};
@@ -108,11 +110,14 @@ found:
*/
static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
{
- struct icmp6hdr *_hdr;
+ struct icmp6hdr _hdr;
const struct icmp6hdr *hdr;
+ /* We require only the four bytes of the ICMPv6 header, not any
+ * additional bytes of message body in "struct icmp6hdr".
+ */
hdr = skb_header_pointer(skb, skb_transport_offset(skb),
- sizeof(_hdr), &_hdr);
+ ICMPV6_HDRLEN, &_hdr);
if (hdr) {
const __u32 *data = &raw6_sk(sk)->filter.data[0];
unsigned int type = hdr->icmp6_type;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 8d9a93ed9c59..c979dd96d82a 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -283,9 +283,8 @@ static inline struct rt6_info *ip6_dst_alloc(struct net *net,
memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers);
- rt->rt6i_genid = rt_genid(net);
+ rt->rt6i_genid = rt_genid_ipv6(net);
INIT_LIST_HEAD(&rt->rt6i_siblings);
- rt->rt6i_nsiblings = 0;
}
return rt;
}
@@ -1062,7 +1061,7 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
* DST_OBSOLETE_FORCE_CHK which forces validation calls down
* into this function always.
*/
- if (rt->rt6i_genid != rt_genid(dev_net(rt->dst.dev)))
+ if (rt->rt6i_genid != rt_genid_ipv6(dev_net(rt->dst.dev)))
return NULL;
if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
@@ -1157,6 +1156,77 @@ void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
}
EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
+/* Handle redirects */
+struct ip6rd_flowi {
+ struct flowi6 fl6;
+ struct in6_addr gateway;
+};
+
+static struct rt6_info *__ip6_route_redirect(struct net *net,
+ struct fib6_table *table,
+ struct flowi6 *fl6,
+ int flags)
+{
+ struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
+ struct rt6_info *rt;
+ struct fib6_node *fn;
+
+ /* Get the "current" route for this destination and
+ * check if the redirect has come from the appropriate router.
+ *
+ * RFC 4861 specifies that redirects should only be
+ * accepted if they come from the nexthop to the target.
+ * Due to the way the routes are chosen, this notion
+ * is a bit fuzzy and one might need to check all possible
+ * routes.
+ */
+
+ read_lock_bh(&table->tb6_lock);
+ fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
+restart:
+ for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
+ if (rt6_check_expired(rt))
+ continue;
+ if (rt->dst.error)
+ break;
+ if (!(rt->rt6i_flags & RTF_GATEWAY))
+ continue;
+ if (fl6->flowi6_oif != rt->dst.dev->ifindex)
+ continue;
+ if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
+ continue;
+ break;
+ }
+
+ if (!rt)
+ rt = net->ipv6.ip6_null_entry;
+ else if (rt->dst.error) {
+ rt = net->ipv6.ip6_null_entry;
+ goto out;
+ }
+ BACKTRACK(net, &fl6->saddr);
+out:
+ dst_hold(&rt->dst);
+
+ read_unlock_bh(&table->tb6_lock);
+
+ return rt;
+}
+
+static struct dst_entry *ip6_route_redirect(struct net *net,
+ const struct flowi6 *fl6,
+ const struct in6_addr *gateway)
+{
+ int flags = RT6_LOOKUP_F_HAS_SADDR;
+ struct ip6rd_flowi rdfl;
+
+ rdfl.fl6 = *fl6;
+ rdfl.gateway = *gateway;
+
+ return fib6_rule_lookup(net, &rdfl.fl6,
+ flags, __ip6_route_redirect);
+}
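
The lookup loop accepts a candidate only when it is a gateway route out of the interface the redirect arrived on and its gateway equals the redirect's source, which is the RFC 4861 "comes from the current next hop" rule. A hedged sketch of that predicate with invented types:

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>
	#include <netinet/in.h>

	struct route {
		bool has_gateway;	/* stands in for RTF_GATEWAY        */
		int ifindex;		/* stands in for rt->dst.dev->ifindex */
		struct in6_addr gateway;/* stands in for rt->rt6i_gateway   */
	};

	static bool redirect_route_ok(const struct route *rt, int oif,
				      const struct in6_addr *redirect_src)
	{
		return rt->has_gateway &&
		       rt->ifindex == oif &&
		       memcmp(&rt->gateway, redirect_src,
			      sizeof(*redirect_src)) == 0;
	}

	int main(void)
	{
		struct route rt;
		struct in6_addr src;

		memset(&rt, 0, sizeof(rt));
		memset(&src, 0, sizeof(src));
		rt.has_gateway = true;
		rt.ifindex = 2;
		/* gateway left all-zero to match the all-zero source */
		printf("%d\n", redirect_route_ok(&rt, 2, &src));	/* -> 1 */
		return 0;
	}
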
+
void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
{
const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
@@ -1171,9 +1241,8 @@ void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
fl6.saddr = iph->saddr;
fl6.flowlabel = ip6_flowinfo(iph);
- dst = ip6_route_output(net, NULL, &fl6);
- if (!dst->error)
- rt6_do_redirect(dst, NULL, skb);
+ dst = ip6_route_redirect(net, &fl6, &ipv6_hdr(skb)->saddr);
+ rt6_do_redirect(dst, NULL, skb);
dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_redirect);
@@ -1193,9 +1262,8 @@ void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
fl6.daddr = msg->dest;
fl6.saddr = iph->daddr;
- dst = ip6_route_output(net, NULL, &fl6);
- if (!dst->error)
- rt6_do_redirect(dst, NULL, skb);
+ dst = ip6_route_redirect(net, &fl6, &iph->saddr);
+ rt6_do_redirect(dst, NULL, skb);
dst_release(dst);
}
@@ -1355,25 +1423,6 @@ out:
return entries > rt_max_size;
}
-int ip6_dst_hoplimit(struct dst_entry *dst)
-{
- int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
- if (hoplimit == 0) {
- struct net_device *dev = dst->dev;
- struct inet6_dev *idev;
-
- rcu_read_lock();
- idev = __in6_dev_get(dev);
- if (idev)
- hoplimit = idev->cnf.hop_limit;
- else
- hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit;
- rcu_read_unlock();
- }
- return hoplimit;
-}
-EXPORT_SYMBOL(ip6_dst_hoplimit);
-
/*
*
*/
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 21b25dd8466b..7ee5cb96db34 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -581,12 +581,10 @@ static int ipip6_rcv(struct sk_buff *skb)
tunnel->parms.iph.protocol != 0)
goto out;
- secpath_reset(skb);
skb->mac_header = skb->network_header;
skb_reset_network_header(skb);
IPCB(skb)->flags = 0;
skb->protocol = htons(ETH_P_IPV6);
- skb->pkt_type = PACKET_HOST;
if (tunnel->dev->priv_flags & IFF_ISATAP) {
if (!isatap_chksrc(skb, iph, tunnel)) {
@@ -603,7 +601,7 @@ static int ipip6_rcv(struct sk_buff *skb)
}
}
- __skb_tunnel_rx(skb, tunnel->dev);
+ __skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
err = IP_ECN_decapsulate(iph, skb);
if (unlikely(err)) {
@@ -621,8 +619,6 @@ static int ipip6_rcv(struct sk_buff *skb)
tstats->rx_packets++;
tstats->rx_bytes += skb->len;
- if (tunnel->net != dev_net(tunnel->dev))
- skb_scrub_packet(skb);
netif_rx(skb);
return 0;
@@ -858,9 +854,6 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
tunnel->err_count = 0;
}
- if (tunnel->net != dev_net(dev))
- skb_scrub_packet(skb);
-
/*
* Okay, now see if we can stuff it in the buffer as-is.
*/
@@ -891,8 +884,8 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
skb->encapsulation = 1;
}
- err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, fl4.daddr,
- IPPROTO_IPV6, tos, ttl, df);
+ err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos,
+ ttl, df, !net_eq(tunnel->net, dev_net(dev)));
iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
return NETDEV_TX_OK;
@@ -1592,7 +1585,7 @@ static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_hea
/* If dev is in the same netns, it has already
* been added to the list by the previous loop.
*/
- if (dev_net(t->dev) != net)
+ if (!net_eq(dev_net(t->dev), net))
unregister_netdevice_queue(t->dev,
head);
t = rtnl_dereference(t->next);
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index d5dda20bd717..bf63ac8a49b9 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -112,32 +112,38 @@ static __u32 check_tcp_syn_cookie(__u32 cookie, const struct in6_addr *saddr,
& COOKIEMASK;
}
-__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb, __u16 *mssp)
+u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
+ const struct tcphdr *th, __u16 *mssp)
{
- const struct ipv6hdr *iph = ipv6_hdr(skb);
- const struct tcphdr *th = tcp_hdr(skb);
int mssind;
const __u16 mss = *mssp;
- tcp_synq_overflow(sk);
-
for (mssind = ARRAY_SIZE(msstab) - 1; mssind ; mssind--)
if (mss >= msstab[mssind])
break;
*mssp = msstab[mssind];
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
-
return secure_tcp_syn_cookie(&iph->saddr, &iph->daddr, th->source,
th->dest, ntohl(th->seq),
jiffies / (HZ * 60), mssind);
}
+EXPORT_SYMBOL_GPL(__cookie_v6_init_sequence);
-static inline int cookie_check(const struct sk_buff *skb, __u32 cookie)
+__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb, __u16 *mssp)
{
const struct ipv6hdr *iph = ipv6_hdr(skb);
const struct tcphdr *th = tcp_hdr(skb);
+
+ tcp_synq_overflow(sk);
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
+
+ return __cookie_v6_init_sequence(iph, th, mssp);
+}
+
+int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
+ __u32 cookie)
+{
__u32 seq = ntohl(th->seq) - 1;
__u32 mssind = check_tcp_syn_cookie(cookie, &iph->saddr, &iph->daddr,
th->source, th->dest, seq,
@@ -145,6 +151,7 @@ static inline int cookie_check(const struct sk_buff *skb, __u32 cookie)
return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
}
+EXPORT_SYMBOL_GPL(__cookie_v6_check);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
{
@@ -167,7 +174,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
goto out;
if (tcp_synq_no_recent_overflow(sk) ||
- (mss = cookie_check(skb, cookie)) == 0) {
+ (mss = __cookie_v6_check(ipv6_hdr(skb), th, cookie)) == 0) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
goto out;
}
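
The split-out __cookie_v6_init_sequence()/__cookie_v6_check() pair is exported so SYNPROXY can mint and verify cookies without a listening socket. The cookie encodes nothing but an index into msstab, so the MSS recovered from the ACK is the largest table entry not exceeding what the client offered. A userspace sketch of that encode/decode; the table values here are illustrative, not the kernel's:

	#include <stdio.h>

	static const unsigned short msstab[] = { 1280, 1480, 1500, 9000 };
	#define TABSZ (sizeof(msstab) / sizeof(msstab[0]))

	static unsigned int mss_encode(unsigned short mss)
	{
		unsigned int i;

		for (i = TABSZ - 1; i; i--)	/* largest entry <= mss */
			if (mss >= msstab[i])
				break;
		return i;
	}

	int main(void)
	{
		unsigned int idx = mss_encode(1460);

		printf("mssind=%u -> mss=%u\n", idx, msstab[idx]); /* 0 -> 1280 */
		return 0;
	}
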
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 6e1649d58533..5c71501fc917 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -963,7 +963,8 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
if (!ipv6_unicast_destination(skb))
goto drop;
- if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
+ if ((sysctl_tcp_syncookies == 2 ||
+ inet_csk_reqsk_queue_is_full(sk)) && !isn) {
want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
if (!want_cookie)
goto drop;
@@ -1237,8 +1238,6 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
tcp_initialize_rcv_mss(newsk);
- tcp_synack_rtt_meas(newsk, req);
- newtp->total_retrans = req->num_retrans;
newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
@@ -1361,8 +1360,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
}
}
- if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
- goto reset;
+ tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
if (opt_skb)
goto ipv6_pktoptions;
return 0;
@@ -1427,7 +1425,7 @@ ipv6_pktoptions:
if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
if (np->rxopt.bits.rxtclass)
- np->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(skb));
+ np->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(opt_skb));
if (ipv6_opt_accepted(sk, opt_skb)) {
skb_set_owner_r(opt_skb, sk);
opt_skb = xchg(&np->pktoptions, opt_skb);
@@ -1732,7 +1730,7 @@ static void get_openreq6(struct seq_file *seq,
seq_printf(seq,
"%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
- "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
+ "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
i,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3],
@@ -1783,7 +1781,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
seq_printf(seq,
"%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
- "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
+ "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
i,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3], srcp,
@@ -1926,6 +1924,7 @@ struct proto tcpv6_prot = {
.unhash = inet_unhash,
.get_port = inet_csk_get_port,
.enter_memory_pressure = tcp_enter_memory_pressure,
+ .stream_memory_free = tcp_stream_memory_free,
.sockets_allocated = &tcp_sockets_allocated,
.memory_allocated = &tcp_memory_allocated,
.memory_pressure = &tcp_memory_pressure,
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 5d1b8d7ac993..60559511bd9c 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -21,26 +21,25 @@ static int udp6_ufo_send_check(struct sk_buff *skb)
const struct ipv6hdr *ipv6h;
struct udphdr *uh;
- /* UDP Tunnel offload on ipv6 is not yet supported. */
- if (skb->encapsulation)
- return -EINVAL;
-
if (!pskb_may_pull(skb, sizeof(*uh)))
return -EINVAL;
- ipv6h = ipv6_hdr(skb);
- uh = udp_hdr(skb);
+ if (likely(!skb->encapsulation)) {
+ ipv6h = ipv6_hdr(skb);
+ uh = udp_hdr(skb);
+
+ uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
+ IPPROTO_UDP, 0);
+ skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_offset = offsetof(struct udphdr, check);
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ }
- uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
- IPPROTO_UDP, 0);
- skb->csum_start = skb_transport_header(skb) - skb->head;
- skb->csum_offset = offsetof(struct udphdr, check);
- skb->ip_summed = CHECKSUM_PARTIAL;
return 0;
}
static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
- netdev_features_t features)
+ netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
unsigned int mss;
@@ -75,47 +74,51 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
goto out;
}
- /* Do software UFO. Complete and fill in the UDP checksum as HW cannot
- * do checksum of UDP packets sent as multiple IP fragments.
- */
- offset = skb_checksum_start_offset(skb);
- csum = skb_checksum(skb, offset, skb->len - offset, 0);
- offset += skb->csum_offset;
- *(__sum16 *)(skb->data + offset) = csum_fold(csum);
- skb->ip_summed = CHECKSUM_NONE;
-
- /* Check if there is enough headroom to insert fragment header. */
- tnl_hlen = skb_tnl_header_len(skb);
- if (skb_headroom(skb) < (tnl_hlen + frag_hdr_sz)) {
- if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
- goto out;
+ if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL)
+ segs = skb_udp_tunnel_segment(skb, features);
+ else {
+ /* Do software UFO. Complete and fill in the UDP checksum as HW cannot
+ * do checksum of UDP packets sent as multiple IP fragments.
+ */
+ offset = skb_checksum_start_offset(skb);
+ csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ offset += skb->csum_offset;
+ *(__sum16 *)(skb->data + offset) = csum_fold(csum);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Check if there is enough headroom to insert fragment header. */
+ tnl_hlen = skb_tnl_header_len(skb);
+ if (skb_headroom(skb) < (tnl_hlen + frag_hdr_sz)) {
+ if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
+ goto out;
+ }
+
+ /* Find the unfragmentable header and shift it left by frag_hdr_sz
+ * bytes to insert fragment header.
+ */
+ unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
+ nexthdr = *prevhdr;
+ *prevhdr = NEXTHDR_FRAGMENT;
+ unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
+ unfrag_ip6hlen + tnl_hlen;
+ packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset;
+ memmove(packet_start-frag_hdr_sz, packet_start, unfrag_len);
+
+ SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz;
+ skb->mac_header -= frag_hdr_sz;
+ skb->network_header -= frag_hdr_sz;
+
+ fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
+ fptr->nexthdr = nexthdr;
+ fptr->reserved = 0;
+ ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb));
+
+ /* Fragment the skb. ipv6 header and the remaining fields of the
+ * fragment header are updated in ipv6_gso_segment()
+ */
+ segs = skb_segment(skb, features);
}
- /* Find the unfragmentable header and shift it left by frag_hdr_sz
- * bytes to insert fragment header.
- */
- unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
- nexthdr = *prevhdr;
- *prevhdr = NEXTHDR_FRAGMENT;
- unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
- unfrag_ip6hlen + tnl_hlen;
- packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset;
- memmove(packet_start-frag_hdr_sz, packet_start, unfrag_len);
-
- SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz;
- skb->mac_header -= frag_hdr_sz;
- skb->network_header -= frag_hdr_sz;
-
- fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
- fptr->nexthdr = nexthdr;
- fptr->reserved = 0;
- ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb));
-
- /* Fragment the skb. ipv6 header and the remaining fields of the
- * fragment header are updated in ipv6_gso_segment()
- */
- segs = skb_segment(skb, features);
-
out:
return segs;
}
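
The software-UFO branch shifts the unfragmentable headers left to open a gap for the fragment header before segmenting. A toy sketch of that memmove with a fake buffer layout (headroom, then stand-in headers, then payload); nothing here is an skb API:

	#include <stdio.h>
	#include <string.h>

	#define FRAG_HDR_SZ 8

	int main(void)
	{
		/* 8 bytes of headroom, "MACNETHDR" standing in for the
		 * unfragmentable headers, then payload. */
		char buf[64] = "________MACNETHDRpayload";
		size_t unfrag_len = strlen("MACNETHDR");
		char *start = buf + 8;

		/* Shift the unfragmentable part left into the headroom ... */
		memmove(start - FRAG_HDR_SZ, start, unfrag_len);
		/* ... and fill the gap where the fragment header goes. */
		memset(start - FRAG_HDR_SZ + unfrag_len, 'F', FRAG_HDR_SZ);
		printf("%s\n", buf);	/* MACNETHDRFFFFFFFFpayload */
		return 0;
	}
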
diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
index 65e8833a2510..e15c16a517e7 100644
--- a/net/ipx/ipx_proc.c
+++ b/net/ipx/ipx_proc.c
@@ -213,7 +213,7 @@ static int ipx_seq_socket_show(struct seq_file *seq, void *v)
ntohs(ipxs->dest_addr.sock));
}
- seq_printf(seq, "%08X %08X %02X %03d\n",
+ seq_printf(seq, "%08X %08X %02X %03u\n",
sk_wmem_alloc_get(s),
sk_rmem_alloc_get(s),
s->sk_state,
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index ae43c62f9045..85372cfa7b9f 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -75,7 +75,7 @@ static pi_minor_info_t pi_minor_call_table[] = {
{ NULL, 0 }, /* 0x00 */
{ irttp_param_max_sdu_size, PV_INTEGER | PV_BIG_ENDIAN } /* 0x01 */
};
-static pi_major_info_t pi_major_call_table[] = {{ pi_minor_call_table, 2 }};
+static pi_major_info_t pi_major_call_table[] = { { pi_minor_call_table, 2 } };
static pi_param_info_t param_info = { pi_major_call_table, 1, 0x0f, 4 };
/************************ GLOBAL PROCEDURES ************************/
@@ -205,7 +205,7 @@ static void irttp_todo_expired(unsigned long data)
*/
static void irttp_flush_queues(struct tsap_cb *self)
{
- struct sk_buff* skb;
+ struct sk_buff *skb;
IRDA_DEBUG(4, "%s()\n", __func__);
@@ -400,7 +400,7 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
/* The IrLMP spec (IrLMP 1.1 p10) says that we have the right to
* use only 0x01-0x6F. Of course, we can use LSAP_ANY as well.
* JeanII */
- if((stsap_sel != LSAP_ANY) &&
+ if ((stsap_sel != LSAP_ANY) &&
((stsap_sel < 0x01) || (stsap_sel >= 0x70))) {
IRDA_DEBUG(0, "%s(), invalid tsap!\n", __func__);
return NULL;
@@ -427,7 +427,7 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
ttp_notify.data_indication = irttp_data_indication;
ttp_notify.udata_indication = irttp_udata_indication;
ttp_notify.flow_indication = irttp_flow_indication;
- if(notify->status_indication != NULL)
+ if (notify->status_indication != NULL)
ttp_notify.status_indication = irttp_status_indication;
ttp_notify.instance = self;
strncpy(ttp_notify.name, notify->name, NOTIFY_MAX_NAME);
@@ -639,8 +639,7 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
*/
if ((self->tx_max_sdu_size != 0) &&
(self->tx_max_sdu_size != TTP_SAR_UNBOUND) &&
- (skb->len > self->tx_max_sdu_size))
- {
+ (skb->len > self->tx_max_sdu_size)) {
IRDA_ERROR("%s: SAR enabled, but data is larger than TxMaxSduSize!\n",
__func__);
ret = -EMSGSIZE;
@@ -733,8 +732,7 @@ static void irttp_run_tx_queue(struct tsap_cb *self)
* poll us through irttp_flow_indication() - Jean II */
while ((self->send_credit > 0) &&
(!irlmp_lap_tx_queue_full(self->lsap)) &&
- (skb = skb_dequeue(&self->tx_queue)))
- {
+ (skb = skb_dequeue(&self->tx_queue))) {
/*
* Since we can transmit and receive frames concurrently,
* the code below is a critical region and we must assure that
@@ -798,8 +796,7 @@ static void irttp_run_tx_queue(struct tsap_cb *self)
* where we can spend a bit of time doing stuff. - Jean II */
if ((self->tx_sdu_busy) &&
(skb_queue_len(&self->tx_queue) < TTP_TX_LOW_THRESHOLD) &&
- (!self->close_pend))
- {
+ (!self->close_pend)) {
if (self->notify.flow_indication)
self->notify.flow_indication(self->notify.instance,
self, FLOW_START);
@@ -892,7 +889,7 @@ static int irttp_udata_indication(void *instance, void *sap,
/* Just pass data to layer above */
if (self->notify.udata_indication) {
err = self->notify.udata_indication(self->notify.instance,
- self,skb);
+ self, skb);
/* Same comment as in irttp_do_data_indication() */
if (!err)
return 0;
@@ -1057,7 +1054,7 @@ static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
* to do that. Jean II */
/* If we need to send disconnect. try to do it now */
- if(self->disconnect_pend)
+ if (self->disconnect_pend)
irttp_start_todo_timer(self, 0);
}
@@ -1116,7 +1113,7 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -EBADR;);
if (self->connected) {
- if(userdata)
+ if (userdata)
dev_kfree_skb(userdata);
return -EISCONN;
}
@@ -1137,7 +1134,7 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
* headers
*/
IRDA_ASSERT(skb_headroom(userdata) >= TTP_MAX_HEADER,
- { dev_kfree_skb(userdata); return -1; } );
+ { dev_kfree_skb(userdata); return -1; });
}
/* Initialize connection parameters */
@@ -1157,7 +1154,7 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
* Give away max 127 credits for now
*/
if (n > 127) {
- self->avail_credit=n-127;
+ self->avail_credit = n - 127;
n = 127;
}
@@ -1166,10 +1163,10 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
/* SAR enabled? */
if (max_sdu_size > 0) {
IRDA_ASSERT(skb_headroom(tx_skb) >= (TTP_MAX_HEADER + TTP_SAR_HEADER),
- { dev_kfree_skb(tx_skb); return -1; } );
+ { dev_kfree_skb(tx_skb); return -1; });
/* Insert SAR parameters */
- frame = skb_push(tx_skb, TTP_HEADER+TTP_SAR_HEADER);
+ frame = skb_push(tx_skb, TTP_HEADER + TTP_SAR_HEADER);
frame[0] = TTP_PARAMETERS | n;
frame[1] = 0x04; /* Length */
@@ -1386,7 +1383,7 @@ int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
* headers
*/
IRDA_ASSERT(skb_headroom(userdata) >= TTP_MAX_HEADER,
- { dev_kfree_skb(userdata); return -1; } );
+ { dev_kfree_skb(userdata); return -1; });
}
self->avail_credit = 0;
@@ -1409,10 +1406,10 @@ int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
/* SAR enabled? */
if (max_sdu_size > 0) {
IRDA_ASSERT(skb_headroom(tx_skb) >= (TTP_MAX_HEADER + TTP_SAR_HEADER),
- { dev_kfree_skb(tx_skb); return -1; } );
+ { dev_kfree_skb(tx_skb); return -1; });
/* Insert TTP header with SAR parameters */
- frame = skb_push(tx_skb, TTP_HEADER+TTP_SAR_HEADER);
+ frame = skb_push(tx_skb, TTP_HEADER + TTP_SAR_HEADER);
frame[0] = TTP_PARAMETERS | n;
frame[1] = 0x04; /* Length */
@@ -1522,7 +1519,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
* function may be called from various context, like user, timer
* for following a disconnect_indication() (i.e. net_bh).
* Jean II */
- if(test_and_set_bit(0, &self->disconnect_pend)) {
+ if (test_and_set_bit(0, &self->disconnect_pend)) {
IRDA_DEBUG(0, "%s(), disconnect already pending\n",
__func__);
if (userdata)
@@ -1627,7 +1624,7 @@ static void irttp_disconnect_indication(void *instance, void *sap,
* Jean II */
/* No need to notify the client if has already tried to disconnect */
- if(self->notify.disconnect_indication)
+ if (self->notify.disconnect_indication)
self->notify.disconnect_indication(self->notify.instance, self,
reason, skb);
else
@@ -1738,8 +1735,7 @@ static void irttp_run_rx_queue(struct tsap_cb *self)
* This is the last fragment, so time to reassemble!
*/
if ((self->rx_sdu_size <= self->rx_max_sdu_size) ||
- (self->rx_max_sdu_size == TTP_SAR_UNBOUND))
- {
+ (self->rx_max_sdu_size == TTP_SAR_UNBOUND)) {
/*
* A little optimizing. Only queue the fragment if
* there are other fragments. Since if this is the
@@ -1860,7 +1856,7 @@ static int irttp_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "dtsap_sel: %02x\n",
self->dtsap_sel);
seq_printf(seq, " connected: %s, ",
- self->connected? "TRUE":"FALSE");
+ self->connected ? "TRUE" : "FALSE");
seq_printf(seq, "avail credit: %d, ",
self->avail_credit);
seq_printf(seq, "remote credit: %d, ",
@@ -1876,9 +1872,9 @@ static int irttp_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "rx_queue len: %u\n",
skb_queue_len(&self->rx_queue));
seq_printf(seq, " tx_sdu_busy: %s, ",
- self->tx_sdu_busy? "TRUE":"FALSE");
+ self->tx_sdu_busy ? "TRUE" : "FALSE");
seq_printf(seq, "rx_sdu_busy: %s\n",
- self->rx_sdu_busy? "TRUE":"FALSE");
+ self->rx_sdu_busy ? "TRUE" : "FALSE");
seq_printf(seq, " max_seg_size: %u, ",
self->max_seg_size);
seq_printf(seq, "tx_max_sdu_size: %u, ",
diff --git a/net/key/af_key.c b/net/key/af_key.c
index ab8bd2cabfa0..9d585370c5b4 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -45,7 +45,7 @@ struct netns_pfkey {
static DEFINE_MUTEX(pfkey_mutex);
#define DUMMY_MARK 0
-static struct xfrm_mark dummy_mark = {0, 0};
+static const struct xfrm_mark dummy_mark = {0, 0};
struct pfkey_sock {
/* struct sock must be the first member of struct pfkey_sock */
struct sock sk;
@@ -338,7 +338,7 @@ static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk)
return 0;
}
-static u8 sadb_ext_min_len[] = {
+static const u8 sadb_ext_min_len[] = {
[SADB_EXT_RESERVED] = (u8) 0,
[SADB_EXT_SA] = (u8) sizeof(struct sadb_sa),
[SADB_EXT_LIFETIME_CURRENT] = (u8) sizeof(struct sadb_lifetime),
@@ -1196,10 +1196,6 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
x->props.family = pfkey_sadb_addr2xfrm_addr((struct sadb_address *) ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
&x->props.saddr);
- if (!x->props.family) {
- err = -EAFNOSUPPORT;
- goto out;
- }
pfkey_sadb_addr2xfrm_addr((struct sadb_address *) ext_hdrs[SADB_EXT_ADDRESS_DST-1],
&x->id.daddr);
@@ -2205,10 +2201,6 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_
sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1];
xp->family = pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.saddr);
- if (!xp->family) {
- err = -EINVAL;
- goto out;
- }
xp->selector.family = xp->family;
xp->selector.prefixlen_s = sa->sadb_address_prefixlen;
xp->selector.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
@@ -2737,7 +2729,7 @@ static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, const struct sad
typedef int (*pfkey_handler)(struct sock *sk, struct sk_buff *skb,
const struct sadb_msg *hdr, void * const *ext_hdrs);
-static pfkey_handler pfkey_funcs[SADB_MAX + 1] = {
+static const pfkey_handler pfkey_funcs[SADB_MAX + 1] = {
[SADB_RESERVED] = pfkey_reserved,
[SADB_GETSPI] = pfkey_getspi,
[SADB_UPDATE] = pfkey_add,
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 48aaa89253e0..6cba486353e8 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -321,12 +321,12 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
if (llc->dev) {
if (!addr->sllc_arphrd)
addr->sllc_arphrd = llc->dev->type;
- if (llc_mac_null(addr->sllc_mac))
+ if (is_zero_ether_addr(addr->sllc_mac))
memcpy(addr->sllc_mac, llc->dev->dev_addr,
IFHWADDRLEN);
if (addr->sllc_arphrd != llc->dev->type ||
- !llc_mac_match(addr->sllc_mac,
- llc->dev->dev_addr)) {
+ !ether_addr_equal(addr->sllc_mac,
+ llc->dev->dev_addr)) {
rc = -EINVAL;
llc->dev = NULL;
}
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 0d0d416dfab6..cd8724177965 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -478,8 +478,8 @@ static inline bool llc_estab_match(const struct llc_sap *sap,
return llc->laddr.lsap == laddr->lsap &&
llc->daddr.lsap == daddr->lsap &&
- llc_mac_match(llc->laddr.mac, laddr->mac) &&
- llc_mac_match(llc->daddr.mac, daddr->mac);
+ ether_addr_equal(llc->laddr.mac, laddr->mac) &&
+ ether_addr_equal(llc->daddr.mac, daddr->mac);
}
/**
@@ -550,7 +550,7 @@ static inline bool llc_listener_match(const struct llc_sap *sap,
return sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN &&
llc->laddr.lsap == laddr->lsap &&
- llc_mac_match(llc->laddr.mac, laddr->mac);
+ ether_addr_equal(llc->laddr.mac, laddr->mac);
}
static struct sock *__llc_lookup_listener(struct llc_sap *sap,
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index 7b4799cfbf8d..1a3c7e0f5d0d 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -147,7 +147,7 @@ static int llc_seq_socket_show(struct seq_file *seq, void *v)
}
seq_printf(seq, "@%02X ", llc->sap->laddr.lsap);
llc_ui_format_mac(seq, llc->daddr.mac);
- seq_printf(seq, "@%02X %8d %8d %2d %3d %4d\n", llc->daddr.lsap,
+ seq_printf(seq, "@%02X %8d %8d %2d %3u %4d\n", llc->daddr.lsap,
sk_wmem_alloc_get(sk),
sk_rmem_alloc_get(sk) - llc->copied_seq,
sk->sk_state,
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index 78be45cda5c1..e5850699098e 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -302,7 +302,7 @@ static inline bool llc_dgram_match(const struct llc_sap *sap,
return sk->sk_type == SOCK_DGRAM &&
llc->laddr.lsap == laddr->lsap &&
- llc_mac_match(llc->laddr.mac, laddr->mac);
+ ether_addr_equal(llc->laddr.mac, laddr->mac);
}
/**
@@ -425,7 +425,7 @@ void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb)
llc_pdu_decode_da(skb, laddr.mac);
llc_pdu_decode_dsap(skb, &laddr.lsap);
- if (llc_mac_multicast(laddr.mac)) {
+ if (is_multicast_ether_addr(laddr.mac)) {
llc_sap_mcast(sap, &laddr, skb);
kfree_skb(skb);
} else {
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 43dd7525bfcb..2e7855a1b10d 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -395,9 +395,13 @@ void sta_set_rate_info_tx(struct sta_info *sta,
rinfo->nss = ieee80211_rate_get_vht_nss(rate);
} else {
struct ieee80211_supported_band *sband;
+ int shift = ieee80211_vif_get_shift(&sta->sdata->vif);
+ u16 brate;
+
sband = sta->local->hw.wiphy->bands[
ieee80211_get_sdata_band(sta->sdata)];
- rinfo->legacy = sband->bitrates[rate->idx].bitrate;
+ brate = sband->bitrates[rate->idx].bitrate;
+ rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
}
if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
rinfo->flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
@@ -422,11 +426,13 @@ void sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
rinfo->mcs = sta->last_rx_rate_idx;
} else {
struct ieee80211_supported_band *sband;
+ int shift = ieee80211_vif_get_shift(&sta->sdata->vif);
+ u16 brate;
sband = sta->local->hw.wiphy->bands[
ieee80211_get_sdata_band(sta->sdata)];
- rinfo->legacy =
- sband->bitrates[sta->last_rx_rate_idx].bitrate;
+ brate = sband->bitrates[sta->last_rx_rate_idx].bitrate;
+ rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
}
if (sta->last_rx_rate_flag & RX_FLAG_40MHZ)
@@ -856,8 +862,8 @@ static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
return 0;
}
-static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
- struct cfg80211_beacon_data *params)
+int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_beacon_data *params)
{
struct beacon_data *new, *old;
int new_head_len, new_tail_len;
@@ -1020,6 +1026,12 @@ static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ /* don't allow changing the beacon while CSA is in place - the
+ * offset of the channel switch counter may change
+ */
+ if (sdata->vif.csa_active)
+ return -EBUSY;
+
old = rtnl_dereference(sdata->u.ap.beacon);
if (!old)
return -ENOENT;
@@ -1044,6 +1056,10 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
return -ENOENT;
old_probe_resp = rtnl_dereference(sdata->u.ap.probe_resp);
+ /* abort any running channel switch */
+ sdata->vif.csa_active = false;
+ cancel_work_sync(&sdata->csa_finalize_work);
+
/* turn off carrier for this interface and dependent VLANs */
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
netif_carrier_off(vlan->dev);
@@ -1192,8 +1208,6 @@ static int sta_apply_parameters(struct ieee80211_local *local,
struct station_parameters *params)
{
int ret = 0;
- u32 rates;
- int i, j;
struct ieee80211_supported_band *sband;
struct ieee80211_sub_if_data *sdata = sta->sdata;
enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
@@ -1286,16 +1300,10 @@ static int sta_apply_parameters(struct ieee80211_local *local,
sta->listen_interval = params->listen_interval;
if (params->supported_rates) {
- rates = 0;
-
- for (i = 0; i < params->supported_rates_len; i++) {
- int rate = (params->supported_rates[i] & 0x7f) * 5;
- for (j = 0; j < sband->n_bitrates; j++) {
- if (sband->bitrates[j].bitrate == rate)
- rates |= BIT(j);
- }
- }
- sta->sta.supp_rates[band] = rates;
+ ieee80211_parse_bitrates(&sdata->vif.bss_conf.chandef,
+ sband, params->supported_rates,
+ params->supported_rates_len,
+ &sta->sta.supp_rates[band]);
}
if (params->ht_capa)
@@ -1958,18 +1966,11 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
}
if (params->basic_rates) {
- int i, j;
- u32 rates = 0;
- struct ieee80211_supported_band *sband = wiphy->bands[band];
-
- for (i = 0; i < params->basic_rates_len; i++) {
- int rate = (params->basic_rates[i] & 0x7f) * 5;
- for (j = 0; j < sband->n_bitrates; j++) {
- if (sband->bitrates[j].bitrate == rate)
- rates |= BIT(j);
- }
- }
- sdata->vif.bss_conf.basic_rates = rates;
+ ieee80211_parse_bitrates(&sdata->vif.bss_conf.chandef,
+ wiphy->bands[band],
+ params->basic_rates,
+ params->basic_rates_len,
+ &sdata->vif.bss_conf.basic_rates);
changed |= BSS_CHANGED_BASIC_RATES;
}
@@ -2301,14 +2302,25 @@ static void ieee80211_rfkill_poll(struct wiphy *wiphy)
}
#ifdef CONFIG_NL80211_TESTMODE
-static int ieee80211_testmode_cmd(struct wiphy *wiphy, void *data, int len)
+static int ieee80211_testmode_cmd(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ void *data, int len)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
+ struct ieee80211_vif *vif = NULL;
if (!local->ops->testmode_cmd)
return -EOPNOTSUPP;
- return local->ops->testmode_cmd(&local->hw, data, len);
+ if (wdev) {
+ struct ieee80211_sub_if_data *sdata;
+
+ sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+ if (sdata->flags & IEEE80211_SDATA_IN_DRIVER)
+ vif = &sdata->vif;
+ }
+
+ return local->ops->testmode_cmd(&local->hw, vif, data, len);
}
static int ieee80211_testmode_dump(struct wiphy *wiphy,
@@ -2786,6 +2798,178 @@ static int ieee80211_start_radar_detection(struct wiphy *wiphy,
return 0;
}
+static struct cfg80211_beacon_data *
+cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
+{
+ struct cfg80211_beacon_data *new_beacon;
+ u8 *pos;
+ int len;
+
+ len = beacon->head_len + beacon->tail_len + beacon->beacon_ies_len +
+ beacon->proberesp_ies_len + beacon->assocresp_ies_len +
+ beacon->probe_resp_len;
+
+ new_beacon = kzalloc(sizeof(*new_beacon) + len, GFP_KERNEL);
+ if (!new_beacon)
+ return NULL;
+
+ pos = (u8 *)(new_beacon + 1);
+ if (beacon->head_len) {
+ new_beacon->head_len = beacon->head_len;
+ new_beacon->head = pos;
+ memcpy(pos, beacon->head, beacon->head_len);
+ pos += beacon->head_len;
+ }
+ if (beacon->tail_len) {
+ new_beacon->tail_len = beacon->tail_len;
+ new_beacon->tail = pos;
+ memcpy(pos, beacon->tail, beacon->tail_len);
+ pos += beacon->tail_len;
+ }
+ if (beacon->beacon_ies_len) {
+ new_beacon->beacon_ies_len = beacon->beacon_ies_len;
+ new_beacon->beacon_ies = pos;
+ memcpy(pos, beacon->beacon_ies, beacon->beacon_ies_len);
+ pos += beacon->beacon_ies_len;
+ }
+ if (beacon->proberesp_ies_len) {
+ new_beacon->proberesp_ies_len = beacon->proberesp_ies_len;
+ new_beacon->proberesp_ies = pos;
+ memcpy(pos, beacon->proberesp_ies, beacon->proberesp_ies_len);
+ pos += beacon->proberesp_ies_len;
+ }
+ if (beacon->assocresp_ies_len) {
+ new_beacon->assocresp_ies_len = beacon->assocresp_ies_len;
+ new_beacon->assocresp_ies = pos;
+ memcpy(pos, beacon->assocresp_ies, beacon->assocresp_ies_len);
+ pos += beacon->assocresp_ies_len;
+ }
+ if (beacon->probe_resp_len) {
+ new_beacon->probe_resp_len = beacon->probe_resp_len;
+ new_beacon->probe_resp = pos;
+ memcpy(pos, beacon->probe_resp, beacon->probe_resp_len);
+ pos += beacon->probe_resp_len;
+ }
+
+ return new_beacon;
+}
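
cfg80211_beacon_dup() uses a single allocation holding the struct followed by every variable-length blob, fixing each pointer up to land in the trailing region; every fix-up must target the new copy, never the source. A generic userspace sketch of the pattern, with invented types:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct blob_pair {
		unsigned char *a, *b;
		size_t a_len, b_len;
	};

	static struct blob_pair *blob_pair_dup(const struct blob_pair *src)
	{
		struct blob_pair *dst;
		unsigned char *pos;

		/* One allocation: the struct plus room for both blobs. */
		dst = calloc(1, sizeof(*dst) + src->a_len + src->b_len);
		if (!dst)
			return NULL;

		pos = (unsigned char *)(dst + 1);	/* trailing storage */
		if (src->a_len) {
			dst->a_len = src->a_len;
			dst->a = pos;		/* note: dst->a, never src->a */
			memcpy(pos, src->a, src->a_len);
			pos += src->a_len;
		}
		if (src->b_len) {
			dst->b_len = src->b_len;
			dst->b = pos;
			memcpy(pos, src->b, src->b_len);
		}
		return dst;
	}

	int main(void)
	{
		unsigned char a[] = "head", b[] = "tail";
		struct blob_pair src = { a, b, sizeof(a), sizeof(b) };
		struct blob_pair *copy = blob_pair_dup(&src);

		if (copy) {
			printf("%s %s\n", copy->a, copy->b);	/* head tail */
			free(copy);	/* one free releases everything */
		}
		return 0;
	}
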
+
+void ieee80211_csa_finalize_work(struct work_struct *work)
+{
+ struct ieee80211_sub_if_data *sdata =
+ container_of(work, struct ieee80211_sub_if_data,
+ csa_finalize_work);
+ struct ieee80211_local *local = sdata->local;
+ int err, changed;
+
+ if (!ieee80211_sdata_running(sdata))
+ return;
+
+ if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP))
+ return;
+
+ sdata->radar_required = sdata->csa_radar_required;
+ err = ieee80211_vif_change_channel(sdata, &local->csa_chandef,
+ &changed);
+ if (WARN_ON(err < 0))
+ return;
+
+ err = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon);
+ if (err < 0)
+ return;
+
+ changed |= err;
+ kfree(sdata->u.ap.next_beacon);
+ sdata->u.ap.next_beacon = NULL;
+ sdata->vif.csa_active = false;
+
+ ieee80211_wake_queues_by_reason(&sdata->local->hw,
+ IEEE80211_MAX_QUEUE_MAP,
+ IEEE80211_QUEUE_STOP_REASON_CSA);
+
+ ieee80211_bss_info_change_notify(sdata, changed);
+
+ cfg80211_ch_switch_notify(sdata->dev, &local->csa_chandef);
+}
+
+static int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_csa_settings *params)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ struct ieee80211_chanctx *chanctx;
+ int err, num_chanctx;
+
+ if (!list_empty(&local->roc_list) || local->scanning)
+ return -EBUSY;
+
+ if (sdata->wdev.cac_started)
+ return -EBUSY;
+
+ if (cfg80211_chandef_identical(&params->chandef,
+ &sdata->vif.bss_conf.chandef))
+ return -EINVAL;
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ if (!chanctx_conf) {
+ rcu_read_unlock();
+ return -EBUSY;
+ }
+
+ /* don't handle multi-VIF cases */
+ chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf);
+ if (chanctx->refcount > 1) {
+ rcu_read_unlock();
+ return -EBUSY;
+ }
+ num_chanctx = 0;
+ list_for_each_entry_rcu(chanctx, &local->chanctx_list, list)
+ num_chanctx++;
+ rcu_read_unlock();
+
+ if (num_chanctx > 1)
+ return -EBUSY;
+
+ /* don't allow another channel switch if one is already active. */
+ if (sdata->vif.csa_active)
+ return -EBUSY;
+
+ /* only handle AP for now. */
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_AP:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ sdata->u.ap.next_beacon = cfg80211_beacon_dup(&params->beacon_after);
+ if (!sdata->u.ap.next_beacon)
+ return -ENOMEM;
+
+ sdata->csa_counter_offset_beacon = params->counter_offset_beacon;
+ sdata->csa_counter_offset_presp = params->counter_offset_presp;
+ sdata->csa_radar_required = params->radar_required;
+
+ if (params->block_tx)
+ ieee80211_stop_queues_by_reason(&local->hw,
+ IEEE80211_MAX_QUEUE_MAP,
+ IEEE80211_QUEUE_STOP_REASON_CSA);
+
+ err = ieee80211_assign_beacon(sdata, &params->beacon_csa);
+ if (err < 0)
+ return err;
+
+ local->csa_chandef = params->chandef;
+ sdata->vif.csa_active = true;
+
+ ieee80211_bss_info_change_notify(sdata, err);
+ drv_channel_switch_beacon(sdata, &params->chandef);
+
+ return 0;
+}
+
static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct ieee80211_channel *chan, bool offchan,
unsigned int wait, const u8 *buf, size_t len,
@@ -3503,4 +3687,5 @@ struct cfg80211_ops mac80211_config_ops = {
.get_et_strings = ieee80211_get_et_strings,
.get_channel = ieee80211_cfg_get_channel,
.start_radar_detection = ieee80211_start_radar_detection,
+ .channel_switch = ieee80211_channel_switch,
};
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 03e8d2e3270e..3a4764b2869e 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -410,6 +410,64 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
return ret;
}
+int ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
+ const struct cfg80211_chan_def *chandef,
+ u32 *changed)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_chanctx_conf *conf;
+ struct ieee80211_chanctx *ctx;
+ int ret;
+ u32 chanctx_changed = 0;
+
+ /* should never be called if not performing a channel switch. */
+ if (WARN_ON(!sdata->vif.csa_active))
+ return -EINVAL;
+
+ if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
+ IEEE80211_CHAN_DISABLED))
+ return -EINVAL;
+
+ mutex_lock(&local->chanctx_mtx);
+ conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+ lockdep_is_held(&local->chanctx_mtx));
+ if (!conf) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ctx = container_of(conf, struct ieee80211_chanctx, conf);
+ if (ctx->refcount != 1) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (sdata->vif.bss_conf.chandef.width != chandef->width) {
+ chanctx_changed = IEEE80211_CHANCTX_CHANGE_WIDTH;
+ *changed |= BSS_CHANGED_BANDWIDTH;
+ }
+
+ sdata->vif.bss_conf.chandef = *chandef;
+ ctx->conf.def = *chandef;
+
+ chanctx_changed |= IEEE80211_CHANCTX_CHANGE_CHANNEL;
+ drv_change_chanctx(local, ctx, chanctx_changed);
+
+ if (!local->use_chanctx) {
+ local->_oper_chandef = *chandef;
+ ieee80211_hw_config(local, 0);
+ }
+
+ ieee80211_recalc_chanctx_chantype(local, ctx);
+ ieee80211_recalc_smps_chanctx(local, ctx);
+ ieee80211_recalc_radar_chanctx(local, ctx);
+
+ ret = 0;
+ out:
+ mutex_unlock(&local->chanctx_mtx);
+ return ret;
+}
+
int ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
const struct cfg80211_chan_def *chandef,
u32 *changed)
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 44e201d60a13..19c54a44ed47 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -455,6 +455,15 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
DEBUGFS_ADD_COUNTER(tx_retry_count, tx_retry_count);
DEBUGFS_ADD_COUNTER(wep_weak_iv_count, wep_weak_iv_count);
+ if (sizeof(sta->driver_buffered_tids) == sizeof(u32))
+ debugfs_create_x32("driver_buffered_tids", 0400,
+ sta->debugfs.dir,
+ (u32 *)&sta->driver_buffered_tids);
+ else
+ debugfs_create_x64("driver_buffered_tids", 0400,
+ sta->debugfs.dir,
+ (u64 *)&sta->driver_buffered_tids);
+
drv_sta_add_debugfs(local, sdata, &sta->sta, sta->debugfs.dir);
}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index b931c96a596f..b3ea11f3d526 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -1072,4 +1072,17 @@ static inline void drv_ipv6_addr_change(struct ieee80211_local *local,
}
#endif
+static inline void
+drv_channel_switch_beacon(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_chan_def *chandef)
+{
+ struct ieee80211_local *local = sdata->local;
+
+ if (local->ops->channel_switch_beacon) {
+ trace_drv_channel_switch_beacon(local, sdata, chandef);
+ local->ops->channel_switch_beacon(&local->hw, &sdata->vif,
+ chandef);
+ }
+}
+
#endif /* __MAC80211_DRIVER_OPS */
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index f83534f6a2ee..529bf58bc145 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -19,13 +19,14 @@
#include "ieee80211_i.h"
#include "rate.h"
-static void __check_htcap_disable(struct ieee80211_sub_if_data *sdata,
+static void __check_htcap_disable(struct ieee80211_ht_cap *ht_capa,
+ struct ieee80211_ht_cap *ht_capa_mask,
struct ieee80211_sta_ht_cap *ht_cap,
u16 flag)
{
__le16 le_flag = cpu_to_le16(flag);
- if (sdata->u.mgd.ht_capa_mask.cap_info & le_flag) {
- if (!(sdata->u.mgd.ht_capa.cap_info & le_flag))
+ if (ht_capa_mask->cap_info & le_flag) {
+ if (!(ht_capa->cap_info & le_flag))
ht_cap->cap &= ~flag;
}
}
@@ -33,13 +34,30 @@ static void __check_htcap_disable(struct ieee80211_sub_if_data *sdata,
void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta_ht_cap *ht_cap)
{
- u8 *scaps = (u8 *)(&sdata->u.mgd.ht_capa.mcs.rx_mask);
- u8 *smask = (u8 *)(&sdata->u.mgd.ht_capa_mask.mcs.rx_mask);
+ struct ieee80211_ht_cap *ht_capa, *ht_capa_mask;
+ u8 *scaps, *smask;
int i;
if (!ht_cap->ht_supported)
return;
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_STATION:
+ ht_capa = &sdata->u.mgd.ht_capa;
+ ht_capa_mask = &sdata->u.mgd.ht_capa_mask;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ht_capa = &sdata->u.ibss.ht_capa;
+ ht_capa_mask = &sdata->u.ibss.ht_capa_mask;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ scaps = (u8 *)(&ht_capa->mcs.rx_mask);
+ smask = (u8 *)(&ht_capa_mask->mcs.rx_mask);
+
/* NOTE: If you add more over-rides here, update register_hw
* ht_capa_mod_msk logic in main.c as well.
* And, if this method can ever change ht_cap.ht_supported, fix
@@ -55,28 +73,32 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
}
/* Force removal of HT-40 capabilities? */
- __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SUP_WIDTH_20_40);
- __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SGI_40);
+ __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap,
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40);
+ __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap,
+ IEEE80211_HT_CAP_SGI_40);
/* Allow user to disable SGI-20 (SGI-40 is handled above) */
- __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SGI_20);
+ __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap,
+ IEEE80211_HT_CAP_SGI_20);
/* Allow user to disable the max-AMSDU bit. */
- __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_MAX_AMSDU);
+ __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap,
+ IEEE80211_HT_CAP_MAX_AMSDU);
/* Allow user to decrease AMPDU factor */
- if (sdata->u.mgd.ht_capa_mask.ampdu_params_info &
+ if (ht_capa_mask->ampdu_params_info &
IEEE80211_HT_AMPDU_PARM_FACTOR) {
- u8 n = sdata->u.mgd.ht_capa.ampdu_params_info
- & IEEE80211_HT_AMPDU_PARM_FACTOR;
+ u8 n = ht_capa->ampdu_params_info &
+ IEEE80211_HT_AMPDU_PARM_FACTOR;
if (n < ht_cap->ampdu_factor)
ht_cap->ampdu_factor = n;
}
/* Allow the user to increase AMPDU density. */
- if (sdata->u.mgd.ht_capa_mask.ampdu_params_info &
+ if (ht_capa_mask->ampdu_params_info &
IEEE80211_HT_AMPDU_PARM_DENSITY) {
- u8 n = (sdata->u.mgd.ht_capa.ampdu_params_info &
+ u8 n = (ht_capa->ampdu_params_info &
IEEE80211_HT_AMPDU_PARM_DENSITY)
>> IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT;
if (n > ht_cap->ampdu_density)
@@ -112,7 +134,8 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
* we advertised a restricted capability set to. Override
* our own capabilities and then use those below.
*/
- if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+ if ((sdata->vif.type == NL80211_IFTYPE_STATION ||
+ sdata->vif.type == NL80211_IFTYPE_ADHOC) &&
!test_sta_flag(sta, WLAN_STA_TDLS_PEER))
ieee80211_apply_htcap_overrides(sdata, &own_cap);
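
The ht.c refactor above threads the ht_capa/ht_capa_mask pair through explicitly, so the same override logic now serves both managed (sdata->u.mgd) and IBSS (sdata->u.ibss) interfaces. A minimal userspace sketch of the mask semantics — the struct and flag value are illustrative stand-ins, not the kernel's __le16 layout — showing that a capability is cleared only when the user both marked the bit valid in the mask and left it unset in the override:

#include <stdio.h>
#include <stdint.h>

#define HT_CAP_SGI_40 0x0040 /* illustrative flag value */

struct ht_override {
	uint16_t cap_info; /* user-requested capability bits */
};

/* clear 'flag' in *cap iff the mask marks it valid and the override
 * leaves it unset -- mirrors __check_htcap_disable() above */
static void check_htcap_disable(const struct ht_override *capa,
				const struct ht_override *mask,
				uint16_t *cap, uint16_t flag)
{
	if (mask->cap_info & flag) {
		if (!(capa->cap_info & flag))
			*cap &= ~flag;
	}
}

int main(void)
{
	struct ht_override capa = { .cap_info = 0 };             /* SGI-40 off */
	struct ht_override mask = { .cap_info = HT_CAP_SGI_40 }; /* bit valid */
	uint16_t cap = HT_CAP_SGI_40; /* hardware supports SGI-40 */

	check_htcap_disable(&capa, &mask, &cap, HT_CAP_SGI_40);
	printf("cap after override: 0x%04x\n", (unsigned)cap); /* 0x0000 */
	return 0;
}
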
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 2d45643c964e..a12afe77bb26 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -30,78 +30,27 @@
#define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ)
#define IEEE80211_IBSS_INACTIVITY_LIMIT (60 * HZ)
+#define IEEE80211_IBSS_RSN_INACTIVITY_LIMIT (10 * HZ)
#define IEEE80211_IBSS_MAX_STA_ENTRIES 128
-
-static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
- const u8 *bssid, const int beacon_int,
- struct cfg80211_chan_def *req_chandef,
- const u32 basic_rates,
- const u16 capability, u64 tsf,
- bool creator)
+static struct beacon_data *
+ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata,
+ const int beacon_int, const u32 basic_rates,
+ const u16 capability, u64 tsf,
+ struct cfg80211_chan_def *chandef,
+ bool *have_higher_than_11mbit)
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
struct ieee80211_local *local = sdata->local;
- int rates, i;
+ int rates_n = 0, i, ri;
struct ieee80211_mgmt *mgmt;
u8 *pos;
struct ieee80211_supported_band *sband;
- struct cfg80211_bss *bss;
- u32 bss_change;
- u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
- struct cfg80211_chan_def chandef;
- struct ieee80211_channel *chan;
+ u32 rate_flags, rates = 0, rates_added = 0;
struct beacon_data *presp;
int frame_len;
-
- sdata_assert_lock(sdata);
-
- /* Reset own TSF to allow time synchronization work. */
- drv_reset_tsf(local, sdata);
-
- if (!ether_addr_equal(ifibss->bssid, bssid))
- sta_info_flush(sdata);
-
- /* if merging, indicate to driver that we leave the old IBSS */
- if (sdata->vif.bss_conf.ibss_joined) {
- sdata->vif.bss_conf.ibss_joined = false;
- sdata->vif.bss_conf.ibss_creator = false;
- sdata->vif.bss_conf.enable_beacon = false;
- netif_carrier_off(sdata->dev);
- ieee80211_bss_info_change_notify(sdata,
- BSS_CHANGED_IBSS |
- BSS_CHANGED_BEACON_ENABLED);
- }
-
- presp = rcu_dereference_protected(ifibss->presp,
- lockdep_is_held(&sdata->wdev.mtx));
- rcu_assign_pointer(ifibss->presp, NULL);
- if (presp)
- kfree_rcu(presp, rcu_head);
-
- sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0;
-
- /* make a copy of the chandef, it could be modified below. */
- chandef = *req_chandef;
- chan = chandef.chan;
- if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) {
- chandef.width = NL80211_CHAN_WIDTH_20;
- chandef.center_freq1 = chan->center_freq;
- }
-
- ieee80211_vif_release_channel(sdata);
- if (ieee80211_vif_use_channel(sdata, &chandef,
- ifibss->fixed_channel ?
- IEEE80211_CHANCTX_SHARED :
- IEEE80211_CHANCTX_EXCLUSIVE)) {
- sdata_info(sdata, "Failed to join IBSS, no channel context\n");
- return;
- }
-
- memcpy(ifibss->bssid, bssid, ETH_ALEN);
-
- sband = local->hw.wiphy->bands[chan->band];
+ int shift;
/* Build IBSS probe response */
frame_len = sizeof(struct ieee80211_hdr_3addr) +
@@ -116,7 +65,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
ifibss->ie_len;
presp = kzalloc(sizeof(*presp) + frame_len, GFP_KERNEL);
if (!presp)
- return;
+ return NULL;
presp->head = (void *)(presp + 1);
@@ -137,21 +86,47 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
memcpy(pos, ifibss->ssid, ifibss->ssid_len);
pos += ifibss->ssid_len;
- rates = min_t(int, 8, sband->n_bitrates);
+ sband = local->hw.wiphy->bands[chandef->chan->band];
+ rate_flags = ieee80211_chandef_rate_flags(chandef);
+ shift = ieee80211_chandef_get_shift(chandef);
+ rates_n = 0;
+ if (have_higher_than_11mbit)
+ *have_higher_than_11mbit = false;
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ continue;
+ if (sband->bitrates[i].bitrate > 110 &&
+ have_higher_than_11mbit)
+ *have_higher_than_11mbit = true;
+
+ rates |= BIT(i);
+ rates_n++;
+ }
+
*pos++ = WLAN_EID_SUPP_RATES;
- *pos++ = rates;
- for (i = 0; i < rates; i++) {
- int rate = sband->bitrates[i].bitrate;
+ *pos++ = min_t(int, 8, rates_n);
+ for (ri = 0; ri < sband->n_bitrates; ri++) {
+ int rate = DIV_ROUND_UP(sband->bitrates[ri].bitrate,
+ 5 * (1 << shift));
u8 basic = 0;
- if (basic_rates & BIT(i))
+ if (!(rates & BIT(ri)))
+ continue;
+
+ if (basic_rates & BIT(ri))
basic = 0x80;
- *pos++ = basic | (u8) (rate / 5);
+ *pos++ = basic | (u8) rate;
+ if (++rates_added == 8) {
+ ri++; /* continue at next rate for EXT_SUPP_RATES */
+ break;
+ }
}
if (sband->band == IEEE80211_BAND_2GHZ) {
*pos++ = WLAN_EID_DS_PARAMS;
*pos++ = 1;
- *pos++ = ieee80211_frequency_to_channel(chan->center_freq);
+ *pos++ = ieee80211_frequency_to_channel(
+ chandef->chan->center_freq);
}
*pos++ = WLAN_EID_IBSS_PARAMS;
@@ -160,15 +135,20 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
*pos++ = 0;
*pos++ = 0;
- if (sband->n_bitrates > 8) {
+ /* put the remaining rates in WLAN_EID_EXT_SUPP_RATES */
+ if (rates_n > 8) {
*pos++ = WLAN_EID_EXT_SUPP_RATES;
- *pos++ = sband->n_bitrates - 8;
- for (i = 8; i < sband->n_bitrates; i++) {
- int rate = sband->bitrates[i].bitrate;
+ *pos++ = rates_n - 8;
+ for (; ri < sband->n_bitrates; ri++) {
+ int rate = DIV_ROUND_UP(sband->bitrates[ri].bitrate,
+ 5 * (1 << shift));
u8 basic = 0;
- if (basic_rates & BIT(i))
+ if (!(rates & BIT(ri)))
+ continue;
+
+ if (basic_rates & BIT(ri))
basic = 0x80;
- *pos++ = basic | (u8) (rate / 5);
+ *pos++ = basic | (u8) rate;
}
}
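
The rewritten rate loops above encode each surviving rate in 500 kbit/s units, scaled by the channel shift (DIV_ROUND_UP(bitrate, 5 * (1 << shift))), set 0x80 on basic rates, and split the list after eight entries between WLAN_EID_SUPP_RATES and WLAN_EID_EXT_SUPP_RATES. A simplified standalone version, assuming every band rate passes the rate_flags filter (bitrates in 100 kbit/s units, as in struct ieee80211_rate):

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define WLAN_EID_SUPP_RATES     1
#define WLAN_EID_EXT_SUPP_RATES 50

int main(void)
{
	const int bitrates[] = { 10, 20, 55, 110, 60, 90, 120, 180,
				 240, 360, 480, 540 };
	const int n = sizeof(bitrates) / sizeof(bitrates[0]);
	uint32_t basic_rates = 0x0f;	/* say the four CCK rates are basic */
	int shift = 0;			/* 20 MHz: rates are not scaled */
	uint8_t ie[32], *pos = ie;
	int i, added = 0;

	*pos++ = WLAN_EID_SUPP_RATES;
	*pos++ = n < 8 ? n : 8;
	for (i = 0; i < n; i++) {
		uint8_t basic = (basic_rates & (1u << i)) ? 0x80 : 0;
		*pos++ = basic | (uint8_t)DIV_ROUND_UP(bitrates[i],
						       5 * (1 << shift));
		if (++added == 8) {
			i++;	/* remaining rates go to the extended IE */
			break;
		}
	}
	if (n > 8) {
		*pos++ = WLAN_EID_EXT_SUPP_RATES;
		*pos++ = n - 8;
		for (; i < n; i++) {
			uint8_t basic = (basic_rates & (1u << i)) ? 0x80 : 0;
			*pos++ = basic | (uint8_t)DIV_ROUND_UP(bitrates[i],
							       5 * (1 << shift));
		}
	}
	for (i = 0; i < pos - ie; i++)
		printf("%02x ", ie[i]);
	printf("\n");
	return 0;
}
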
@@ -178,19 +158,23 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
}
/* add HT capability and information IEs */
- if (chandef.width != NL80211_CHAN_WIDTH_20_NOHT &&
- chandef.width != NL80211_CHAN_WIDTH_5 &&
- chandef.width != NL80211_CHAN_WIDTH_10 &&
+ if (chandef->width != NL80211_CHAN_WIDTH_20_NOHT &&
+ chandef->width != NL80211_CHAN_WIDTH_5 &&
+ chandef->width != NL80211_CHAN_WIDTH_10 &&
sband->ht_cap.ht_supported) {
- pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
- sband->ht_cap.cap);
+ struct ieee80211_sta_ht_cap ht_cap;
+
+ memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
+ ieee80211_apply_htcap_overrides(sdata, &ht_cap);
+
+ pos = ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap);
/*
* Note: According to 802.11n-2009 9.13.3.1, HT Protection
* field and RIFS Mode are reserved in IBSS mode, therefore
* keep them at 0
*/
pos = ieee80211_ie_build_ht_oper(pos, &sband->ht_cap,
- &chandef, 0);
+ chandef, 0);
}
if (local->hw.queues >= IEEE80211_NUM_ACS) {
@@ -207,9 +191,97 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
presp->head_len = pos - presp->head;
if (WARN_ON(presp->head_len > frame_len))
+ goto error;
+
+ return presp;
+error:
+ kfree(presp);
+ return NULL;
+}
+
+static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
+ const u8 *bssid, const int beacon_int,
+ struct cfg80211_chan_def *req_chandef,
+ const u32 basic_rates,
+ const u16 capability, u64 tsf,
+ bool creator)
+{
+ struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_mgmt *mgmt;
+ struct cfg80211_bss *bss;
+ u32 bss_change;
+ struct cfg80211_chan_def chandef;
+ struct ieee80211_channel *chan;
+ struct beacon_data *presp;
+ enum nl80211_bss_scan_width scan_width;
+ bool have_higher_than_11mbit;
+
+ sdata_assert_lock(sdata);
+
+ /* Reset own TSF to allow time synchronization work. */
+ drv_reset_tsf(local, sdata);
+
+ if (!ether_addr_equal(ifibss->bssid, bssid))
+ sta_info_flush(sdata);
+
+ /* if merging, indicate to driver that we leave the old IBSS */
+ if (sdata->vif.bss_conf.ibss_joined) {
+ sdata->vif.bss_conf.ibss_joined = false;
+ sdata->vif.bss_conf.ibss_creator = false;
+ sdata->vif.bss_conf.enable_beacon = false;
+ netif_carrier_off(sdata->dev);
+ ieee80211_bss_info_change_notify(sdata,
+ BSS_CHANGED_IBSS |
+ BSS_CHANGED_BEACON_ENABLED);
+ }
+
+ presp = rcu_dereference_protected(ifibss->presp,
+ lockdep_is_held(&sdata->wdev.mtx));
+ rcu_assign_pointer(ifibss->presp, NULL);
+ if (presp)
+ kfree_rcu(presp, rcu_head);
+
+ sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0;
+
+ /* make a copy of the chandef, it could be modified below. */
+ chandef = *req_chandef;
+ chan = chandef.chan;
+ if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) {
+ if (chandef.width == NL80211_CHAN_WIDTH_5 ||
+ chandef.width == NL80211_CHAN_WIDTH_10 ||
+ chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ chandef.width == NL80211_CHAN_WIDTH_20) {
+ sdata_info(sdata,
+ "Failed to join IBSS, beacons forbidden\n");
+ return;
+ }
+ chandef.width = NL80211_CHAN_WIDTH_20;
+ chandef.center_freq1 = chan->center_freq;
+ }
+
+ ieee80211_vif_release_channel(sdata);
+ if (ieee80211_vif_use_channel(sdata, &chandef,
+ ifibss->fixed_channel ?
+ IEEE80211_CHANCTX_SHARED :
+ IEEE80211_CHANCTX_EXCLUSIVE)) {
+ sdata_info(sdata, "Failed to join IBSS, no channel context\n");
+ return;
+ }
+
+ memcpy(ifibss->bssid, bssid, ETH_ALEN);
+
+ sband = local->hw.wiphy->bands[chan->band];
+
+ presp = ieee80211_ibss_build_presp(sdata, beacon_int, basic_rates,
+ capability, tsf, &chandef,
+ &have_higher_than_11mbit);
+ if (!presp)
return;
rcu_assign_pointer(ifibss->presp, presp);
+ mgmt = (void *)presp->head;
sdata->vif.bss_conf.enable_beacon = true;
sdata->vif.bss_conf.beacon_int = beacon_int;
@@ -239,18 +311,26 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
sdata->vif.bss_conf.use_short_slot = chan->band == IEEE80211_BAND_5GHZ;
bss_change |= BSS_CHANGED_ERP_SLOT;
+ /* cf. IEEE 802.11 9.2.12 */
+ if (chan->band == IEEE80211_BAND_2GHZ && have_higher_than_11mbit)
+ sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
+ else
+ sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
+
sdata->vif.bss_conf.ibss_joined = true;
sdata->vif.bss_conf.ibss_creator = creator;
ieee80211_bss_info_change_notify(sdata, bss_change);
- ieee80211_sta_def_wmm_params(sdata, sband->n_bitrates, supp_rates);
+ ieee80211_set_wmm_default(sdata, true);
ifibss->state = IEEE80211_IBSS_MLME_JOINED;
mod_timer(&ifibss->timer,
round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
- bss = cfg80211_inform_bss_frame(local->hw.wiphy, chan,
- mgmt, presp->head_len, 0, GFP_KERNEL);
+ scan_width = cfg80211_chandef_to_scan_width(&chandef);
+ bss = cfg80211_inform_bss_width_frame(local->hw.wiphy, chan,
+ scan_width, mgmt,
+ presp->head_len, 0, GFP_KERNEL);
cfg80211_put_bss(local->hw.wiphy, bss);
netif_carrier_on(sdata->dev);
cfg80211_ibss_joined(sdata->dev, ifibss->bssid, GFP_KERNEL);
@@ -269,6 +349,8 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
const struct cfg80211_bss_ies *ies;
enum nl80211_channel_type chan_type;
u64 tsf;
+ u32 rate_flags;
+ int shift;
sdata_assert_lock(sdata);
@@ -296,15 +378,24 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
}
sband = sdata->local->hw.wiphy->bands[cbss->channel->band];
+ rate_flags = ieee80211_chandef_rate_flags(&sdata->u.ibss.chandef);
+ shift = ieee80211_vif_get_shift(&sdata->vif);
basic_rates = 0;
for (i = 0; i < bss->supp_rates_len; i++) {
- int rate = (bss->supp_rates[i] & 0x7f) * 5;
+ int rate = bss->supp_rates[i] & 0x7f;
bool is_basic = !!(bss->supp_rates[i] & 0x80);
for (j = 0; j < sband->n_bitrates; j++) {
- if (sband->bitrates[j].bitrate == rate) {
+ int brate;
+ if ((rate_flags & sband->bitrates[j].flags)
+ != rate_flags)
+ continue;
+
+ brate = DIV_ROUND_UP(sband->bitrates[j].bitrate,
+ 5 * (1 << shift));
+ if (brate == rate) {
if (is_basic)
basic_rates |= BIT(j);
break;
@@ -360,6 +451,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid,
struct sta_info *sta;
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_supported_band *sband;
+ enum nl80211_bss_scan_width scan_width;
int band;
/*
@@ -388,6 +480,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid,
if (WARN_ON_ONCE(!chanctx_conf))
return NULL;
band = chanctx_conf->def.chan->band;
+ scan_width = cfg80211_chandef_to_scan_width(&chanctx_conf->def);
rcu_read_unlock();
sta = sta_info_alloc(sdata, addr, GFP_KERNEL);
@@ -401,7 +494,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid,
/* make sure mandatory rates are always added */
sband = local->hw.wiphy->bands[band];
sta->sta.supp_rates[band] = supp_rates |
- ieee80211_mandatory_rates(sband);
+ ieee80211_mandatory_rates(sband, scan_width);
return ieee80211_ibss_finish_sta(sta);
}
@@ -465,6 +558,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
u64 beacon_timestamp, rx_timestamp;
u32 supp_rates = 0;
enum ieee80211_band band = rx_status->band;
+ enum nl80211_bss_scan_width scan_width;
struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
bool rates_updated = false;
@@ -486,16 +580,22 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
sta = sta_info_get(sdata, mgmt->sa);
if (elems->supp_rates) {
- supp_rates = ieee80211_sta_get_rates(local, elems,
+ supp_rates = ieee80211_sta_get_rates(sdata, elems,
band, NULL);
if (sta) {
u32 prev_rates;
prev_rates = sta->sta.supp_rates[band];
/* make sure mandatory rates are always added */
- sta->sta.supp_rates[band] = supp_rates |
- ieee80211_mandatory_rates(sband);
+ scan_width = NL80211_BSS_CHAN_WIDTH_20;
+ if (rx_status->flag & RX_FLAG_5MHZ)
+ scan_width = NL80211_BSS_CHAN_WIDTH_5;
+ if (rx_status->flag & RX_FLAG_10MHZ)
+ scan_width = NL80211_BSS_CHAN_WIDTH_10;
+ sta->sta.supp_rates[band] = supp_rates |
+ ieee80211_mandatory_rates(sband,
+ scan_width);
if (sta->sta.supp_rates[band] != prev_rates) {
ibss_dbg(sdata,
"updated supp_rates set for %pM based on beacon/probe_resp (0x%x -> 0x%x)\n",
@@ -610,7 +710,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
"beacon TSF higher than local TSF - IBSS merge with BSSID %pM\n",
mgmt->bssid);
ieee80211_sta_join_ibss(sdata, bss);
- supp_rates = ieee80211_sta_get_rates(local, elems, band, NULL);
+ supp_rates = ieee80211_sta_get_rates(sdata, elems, band, NULL);
ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
supp_rates);
rcu_read_unlock();
@@ -629,6 +729,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta;
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_supported_band *sband;
+ enum nl80211_bss_scan_width scan_width;
int band;
/*
@@ -654,6 +755,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
return;
}
band = chanctx_conf->def.chan->band;
+ scan_width = cfg80211_chandef_to_scan_width(&chanctx_conf->def);
rcu_read_unlock();
sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
@@ -665,7 +767,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
/* make sure mandatory rates are always added */
sband = local->hw.wiphy->bands[band];
sta->sta.supp_rates[band] = supp_rates |
- ieee80211_mandatory_rates(sband);
+ ieee80211_mandatory_rates(sband, scan_width);
spin_lock(&ifibss->incomplete_lock);
list_add(&sta->list, &ifibss->incomplete_stations);
@@ -697,6 +799,33 @@ static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
return active;
}
+static void ieee80211_ibss_sta_expire(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta, *tmp;
+ unsigned long exp_time = IEEE80211_IBSS_INACTIVITY_LIMIT;
+ unsigned long exp_rsn_time = IEEE80211_IBSS_RSN_INACTIVITY_LIMIT;
+
+ mutex_lock(&local->sta_mtx);
+
+ list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
+ if (sdata != sta->sdata)
+ continue;
+
+ if (time_after(jiffies, sta->last_rx + exp_time) ||
+ (time_after(jiffies, sta->last_rx + exp_rsn_time) &&
+ sta->sta_state != IEEE80211_STA_AUTHORIZED)) {
+ sta_dbg(sta->sdata, "expiring inactive %sSTA %pM\n",
+ sta->sta_state != IEEE80211_STA_AUTHORIZED ?
+ "not authorized " : "", sta->sta.addr);
+
+ WARN_ON(__sta_info_destroy(sta));
+ }
+ }
+
+ mutex_unlock(&local->sta_mtx);
+}
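
ieee80211_ibss_sta_expire() replaces the generic ieee80211_sta_expire() call so that RSN peers which never complete the key handshake (and therefore never reach IEEE80211_STA_AUTHORIZED) are dropped after the new 10 s limit instead of lingering for the full 60 s. A standalone check of the predicate, using the kernel's wrap-safe jiffies comparison:

#include <stdio.h>

/* kernel-style jiffies comparison, safe across wraparound */
#define time_after(a, b) ((long)((b) - (a)) < 0)

#define HZ 100
#define INACTIVITY_LIMIT     (60 * HZ)
#define RSN_INACTIVITY_LIMIT (10 * HZ)

int main(void)
{
	unsigned long jiffies = 10000;
	unsigned long last_rx = jiffies - 15 * HZ; /* idle for 15 s */
	int authorized = 0; /* RSN STA that never finished the handshake */

	int expire = time_after(jiffies, last_rx + INACTIVITY_LIMIT) ||
		     (time_after(jiffies, last_rx + RSN_INACTIVITY_LIMIT) &&
		      !authorized);

	printf("expire: %d\n", expire); /* 1: 15 s exceeds the 10 s RSN limit */
	return 0;
}
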
+
/*
* This function is called with state == IEEE80211_IBSS_MLME_JOINED
*/
@@ -704,13 +833,14 @@ static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+ enum nl80211_bss_scan_width scan_width;
sdata_assert_lock(sdata);
mod_timer(&ifibss->timer,
round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
- ieee80211_sta_expire(sdata, IEEE80211_IBSS_INACTIVITY_LIMIT);
+ ieee80211_ibss_sta_expire(sdata);
if (time_before(jiffies, ifibss->last_scan_completed +
IEEE80211_IBSS_MERGE_INTERVAL))
@@ -725,8 +855,9 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
sdata_info(sdata,
"No active IBSS STAs - trying to scan for other IBSS networks with same SSID (merge)\n");
+ scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef);
ieee80211_request_ibss_scan(sdata, ifibss->ssid, ifibss->ssid_len,
- NULL);
+ NULL, scan_width);
}
static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
@@ -776,6 +907,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
struct cfg80211_bss *cbss;
struct ieee80211_channel *chan = NULL;
const u8 *bssid = NULL;
+ enum nl80211_bss_scan_width scan_width;
int active_ibss;
u16 capability;
@@ -817,6 +949,17 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
return;
}
+ /* if a fixed bssid and a fixed freq have been provided, create the IBSS
+ * directly and do not waste time scanning
+ */
+ if (ifibss->fixed_bssid && ifibss->fixed_channel) {
+ sdata_info(sdata, "Created IBSS using preconfigured BSSID %pM\n",
+ bssid);
+ ieee80211_sta_create_ibss(sdata);
+ return;
+ }
+
+
ibss_dbg(sdata, "sta_find_ibss: did not try to join ibss\n");
/* Selected IBSS not found in current scan results - try to scan */
@@ -824,8 +967,10 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
IEEE80211_SCAN_INTERVAL)) {
sdata_info(sdata, "Trigger new scan to find an IBSS to join\n");
+ scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef);
ieee80211_request_ibss_scan(sdata, ifibss->ssid,
- ifibss->ssid_len, chan);
+ ifibss->ssid_len, chan,
+ scan_width);
} else {
int interval = IEEE80211_SCAN_INTERVAL;
@@ -1045,6 +1190,9 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
struct cfg80211_ibss_params *params)
{
u32 changed = 0;
+ u32 rate_flags;
+ struct ieee80211_supported_band *sband;
+ int i;
if (params->bssid) {
memcpy(sdata->u.ibss.bssid, params->bssid, ETH_ALEN);
@@ -1055,6 +1203,14 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
sdata->u.ibss.privacy = params->privacy;
sdata->u.ibss.control_port = params->control_port;
sdata->u.ibss.basic_rates = params->basic_rates;
+
+ /* fix basic_rates if channel does not support these rates */
+ rate_flags = ieee80211_chandef_rate_flags(&params->chandef);
+ sband = sdata->local->hw.wiphy->bands[params->chandef.chan->band];
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ sdata->u.ibss.basic_rates &= ~BIT(i);
+ }
memcpy(sdata->vif.bss_conf.mcast_rate, params->mcast_rate,
sizeof(params->mcast_rate));
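
The fixup loop above removes any user-supplied basic rate whose ieee80211_rate flags do not include everything the chandef demands, e.g. a CCK rate on a 5/10 MHz channel. A standalone sketch — the flag bit values here are illustrative; only the filter expression matches the kernel code:

#include <stdio.h>
#include <stdint.h>

#define RATE_SUPPORTS_5MHZ  0x1000 /* illustrative bit values */
#define RATE_SUPPORTS_10MHZ 0x2000

struct rate { int bitrate; uint32_t flags; };

int main(void)
{
	const struct rate bitrates[] = {
		{ 10,  0 },				/* CCK: 20 MHz only */
		{ 60,  RATE_SUPPORTS_5MHZ | RATE_SUPPORTS_10MHZ },
		{ 540, RATE_SUPPORTS_5MHZ | RATE_SUPPORTS_10MHZ },
	};
	uint32_t basic_rates = 0x7;	/* user asked for all three */
	/* on a 10 MHz chandef every usable rate must carry this flag */
	uint32_t rate_flags = RATE_SUPPORTS_10MHZ;
	int i;

	for (i = 0; i < 3; i++)
		if ((rate_flags & bitrates[i].flags) != rate_flags)
			basic_rates &= ~(1u << i);

	printf("basic_rates after fixup: 0x%x\n", basic_rates); /* 0x6 */
	return 0;
}
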
@@ -1076,6 +1232,11 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
memcpy(sdata->u.ibss.ssid, params->ssid, params->ssid_len);
sdata->u.ibss.ssid_len = params->ssid_len;
+ memcpy(&sdata->u.ibss.ht_capa, &params->ht_capa,
+ sizeof(sdata->u.ibss.ht_capa));
+ memcpy(&sdata->u.ibss.ht_capa_mask, &params->ht_capa_mask,
+ sizeof(sdata->u.ibss.ht_capa_mask));
+
/*
* 802.11n-2009 9.13.3.1: In an IBSS, the HT Protection field is
* reserved, but an HT STA shall protect HT transmissions as though
@@ -1156,6 +1317,11 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
presp = rcu_dereference_protected(ifibss->presp,
lockdep_is_held(&sdata->wdev.mtx));
RCU_INIT_POINTER(sdata->u.ibss.presp, NULL);
+
+ /* on the next join, re-program HT parameters */
+ memset(&ifibss->ht_capa, 0, sizeof(ifibss->ht_capa));
+ memset(&ifibss->ht_capa_mask, 0, sizeof(ifibss->ht_capa_mask));
+
sdata->vif.bss_conf.ibss_joined = false;
sdata->vif.bss_conf.ibss_creator = false;
sdata->vif.bss_conf.enable_beacon = false;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 8412a303993a..b6186517ec56 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -53,9 +53,6 @@ struct ieee80211_local;
* increased memory use (about 2 kB of RAM per entry). */
#define IEEE80211_FRAGMENT_MAX 4
-#define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024))
-#define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x))
-
/* power level hasn't been configured (or set to automatic) */
#define IEEE80211_UNSET_POWER_LEVEL INT_MIN
@@ -259,6 +256,8 @@ struct ieee80211_if_ap {
struct beacon_data __rcu *beacon;
struct probe_resp __rcu *probe_resp;
+ /* to be used after channel switch. */
+ struct cfg80211_beacon_data *next_beacon;
struct list_head vlans;
struct ps_data ps;
@@ -509,6 +508,9 @@ struct ieee80211_if_ibss {
/* probe response/beacon for IBSS */
struct beacon_data __rcu *presp;
+ struct ieee80211_ht_cap ht_capa; /* configured ht-cap overrides */
+ struct ieee80211_ht_cap ht_capa_mask; /* Valid parts of ht_capa */
+
spinlock_t incomplete_lock;
struct list_head incomplete_stations;
@@ -713,6 +715,11 @@ struct ieee80211_sub_if_data {
struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS];
+ struct work_struct csa_finalize_work;
+ int csa_counter_offset_beacon;
+ int csa_counter_offset_presp;
+ bool csa_radar_required;
+
/* used to reconfigure hardware SM PS */
struct work_struct recalc_smps;
@@ -809,6 +816,34 @@ ieee80211_get_sdata_band(struct ieee80211_sub_if_data *sdata)
return band;
}
+static inline int
+ieee80211_chandef_get_shift(struct cfg80211_chan_def *chandef)
+{
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_5:
+ return 2;
+ case NL80211_CHAN_WIDTH_10:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static inline int
+ieee80211_vif_get_shift(struct ieee80211_vif *vif)
+{
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ int shift = 0;
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ if (chanctx_conf)
+ shift = ieee80211_chandef_get_shift(&chanctx_conf->def);
+ rcu_read_unlock();
+
+ return shift;
+}
+
enum sdata_queue_type {
IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0,
IEEE80211_SDATA_QUEUE_AGG_START = 1,
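
The two shift helpers above centralize the 5/10 MHz scaling rule: a 5 MHz channel runs at a quarter of the nominal rate (shift 2) and a 10 MHz channel at half (shift 1), so the value placed in a rate IE becomes DIV_ROUND_UP(bitrate, 5 * (1 << shift)). A quick standalone check for a nominal 54.0 Mbit/s rate:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* mirrors ieee80211_chandef_get_shift(): 5 MHz -> 2, 10 MHz -> 1 */
static int chan_width_shift(int width_mhz)
{
	switch (width_mhz) {
	case 5:  return 2;
	case 10: return 1;
	default: return 0;
	}
}

int main(void)
{
	int bitrate = 540; /* 54.0 Mbit/s in 100 kbit/s units */
	int widths[] = { 20, 10, 5 };

	for (int i = 0; i < 3; i++) {
		int shift = chan_width_shift(widths[i]);
		int units = DIV_ROUND_UP(bitrate, 5 * (1 << shift));
		printf("%2d MHz: shift %d, IE rate field %3d (%d.%d Mbit/s)\n",
		       widths[i], shift, units, units / 2, (units % 2) * 5);
	}
	return 0;
}
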
@@ -1026,7 +1061,7 @@ struct ieee80211_local {
struct cfg80211_ssid scan_ssid;
struct cfg80211_scan_request *int_scan_req;
struct cfg80211_scan_request *scan_req, *hw_scan_req;
- struct ieee80211_channel *scan_channel;
+ struct cfg80211_chan_def scan_chandef;
enum ieee80211_band hw_scan_band;
int scan_channel_idx;
int scan_ies_len;
@@ -1063,7 +1098,6 @@ struct ieee80211_local {
u32 dot11TransmittedFrameCount;
#ifdef CONFIG_MAC80211_LEDS
- int tx_led_counter, rx_led_counter;
struct led_trigger *tx_led, *rx_led, *assoc_led, *radio_led;
struct tpt_led_trigger *tpt_led_trigger;
char tx_led_name[32], rx_led_name[32],
@@ -1306,7 +1340,8 @@ void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
void ieee80211_scan_work(struct work_struct *work);
int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
const u8 *ssid, u8 ssid_len,
- struct ieee80211_channel *chan);
+ struct ieee80211_channel *chan,
+ enum nl80211_bss_scan_width scan_width);
int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
struct cfg80211_scan_request *req);
void ieee80211_scan_cancel(struct ieee80211_local *local);
@@ -1341,6 +1376,9 @@ void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free);
void ieee80211_sw_roc_work(struct work_struct *work);
void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc);
+/* channel switch handling */
+void ieee80211_csa_finalize_work(struct work_struct *work);
+
/* interface handling */
int ieee80211_iface_init(void);
void ieee80211_iface_exit(void);
@@ -1362,6 +1400,8 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local);
bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata);
void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata);
+int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_beacon_data *params);
static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata)
{
@@ -1465,7 +1505,8 @@ extern void *mac80211_wiphy_privid; /* for wiphy privid */
u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
enum nl80211_iftype type);
int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
- int rate, int erp, int short_preamble);
+ int rate, int erp, int short_preamble,
+ int shift);
void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
struct ieee80211_hdr *hdr, const u8 *tsc,
gfp_t gfp);
@@ -1569,7 +1610,7 @@ void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
size_t buffer_len, const u8 *ie, size_t ie_len,
enum ieee80211_band band, u32 rate_mask,
- u8 channel);
+ struct cfg80211_chan_def *chandef);
struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
u8 *dst, u32 ratemask,
struct ieee80211_channel *chan,
@@ -1582,10 +1623,7 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
u32 ratemask, bool directed, u32 tx_flags,
struct ieee80211_channel *channel, bool scan);
-void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
- const size_t supp_rates_len,
- const u8 *supp_rates);
-u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
+u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems *elems,
enum ieee80211_band band, u32 *basic_rates);
int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
@@ -1602,6 +1640,9 @@ u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
u16 prot_mode);
u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
u32 cap);
+int ieee80211_parse_bitrates(struct cfg80211_chan_def *chandef,
+ const struct ieee80211_supported_band *sband,
+ const u8 *srates, int srates_len, u32 *rates);
int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, bool need_basic,
enum ieee80211_band band);
@@ -1622,6 +1663,11 @@ int __must_check
ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
const struct cfg80211_chan_def *chandef,
u32 *changed);
+/* NOTE: only use ieee80211_vif_change_channel() for channel switch */
+int __must_check
+ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
+ const struct cfg80211_chan_def *chandef,
+ u32 *changed);
void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata);
void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata);
void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index cc117591f678..fcecd633514e 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -54,7 +54,7 @@ bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
return false;
}
- power = chanctx_conf->def.chan->max_power;
+ power = ieee80211_chandef_max_power(&chanctx_conf->def);
rcu_read_unlock();
if (sdata->user_power_level != IEEE80211_UNSET_POWER_LEVEL)
@@ -274,6 +274,12 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
if (iftype == NL80211_IFTYPE_ADHOC &&
nsdata->vif.type == NL80211_IFTYPE_ADHOC)
return -EBUSY;
+ /*
+ * do not add another interface while any channel
+ * switch is active.
+ */
+ if (nsdata->vif.csa_active)
+ return -EBUSY;
/*
* The remaining checks are only performed for interfaces
@@ -302,12 +308,13 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
return 0;
}
-static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata)
+static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata,
+ enum nl80211_iftype iftype)
{
int n_queues = sdata->local->hw.queues;
int i;
- if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE) {
+ if (iftype != NL80211_IFTYPE_P2P_DEVICE) {
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
if (WARN_ON_ONCE(sdata->vif.hw_queue[i] ==
IEEE80211_INVAL_HW_QUEUE))
@@ -318,8 +325,9 @@ static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata)
}
}
- if ((sdata->vif.type != NL80211_IFTYPE_AP &&
- sdata->vif.type != NL80211_IFTYPE_MESH_POINT) ||
+ if ((iftype != NL80211_IFTYPE_AP &&
+ iftype != NL80211_IFTYPE_P2P_GO &&
+ iftype != NL80211_IFTYPE_MESH_POINT) ||
!(sdata->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)) {
sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE;
return 0;
@@ -402,7 +410,7 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
return ret;
}
- ret = ieee80211_check_queues(sdata);
+ ret = ieee80211_check_queues(sdata, NL80211_IFTYPE_MONITOR);
if (ret) {
kfree(sdata);
return ret;
@@ -586,7 +594,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
res = drv_add_interface(local, sdata);
if (res)
goto err_stop;
- res = ieee80211_check_queues(sdata);
+ res = ieee80211_check_queues(sdata,
+ ieee80211_vif_type_p2p(&sdata->vif));
if (res)
goto err_del_interface;
}
@@ -804,6 +813,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
cancel_work_sync(&local->dynamic_ps_enable_work);
cancel_work_sync(&sdata->recalc_smps);
+ sdata->vif.csa_active = false;
+ cancel_work_sync(&sdata->csa_finalize_work);
cancel_delayed_work_sync(&sdata->dfs_cac_timer_work);
@@ -1267,6 +1278,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
skb_queue_head_init(&sdata->skb_queue);
INIT_WORK(&sdata->work, ieee80211_iface_work);
INIT_WORK(&sdata->recalc_smps, ieee80211_recalc_smps_work);
+ INIT_WORK(&sdata->csa_finalize_work, ieee80211_csa_finalize_work);
switch (type) {
case NL80211_IFTYPE_P2P_GO:
@@ -1380,14 +1392,14 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
ret = drv_change_interface(local, sdata, internal_type, p2p);
if (ret)
- type = sdata->vif.type;
+ type = ieee80211_vif_type_p2p(&sdata->vif);
/*
* Ignore return value here, there's not much we can do since
* the driver changed the interface type internally already.
* The warnings will hopefully make driver authors fix it :-)
*/
- ieee80211_check_queues(sdata);
+ ieee80211_check_queues(sdata, type);
ieee80211_setup_sdata(sdata, type);
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index e39cc91d0cf1..620677e897bd 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -93,6 +93,9 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
might_sleep();
+ if (key->flags & KEY_FLAG_TAINTED)
+ return -EINVAL;
+
if (!key->local->ops->set_key)
goto out_unsupported;
@@ -455,6 +458,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
struct ieee80211_sub_if_data *sdata,
struct sta_info *sta)
{
+ struct ieee80211_local *local = sdata->local;
struct ieee80211_key *old_key;
int idx, ret;
bool pairwise;
@@ -484,10 +488,13 @@ int ieee80211_key_link(struct ieee80211_key *key,
ieee80211_debugfs_key_add(key);
- ret = ieee80211_key_enable_hw_accel(key);
-
- if (ret)
- ieee80211_key_free(key, true);
+ if (!local->wowlan) {
+ ret = ieee80211_key_enable_hw_accel(key);
+ if (ret)
+ ieee80211_key_free(key, true);
+ } else {
+ ret = 0;
+ }
mutex_unlock(&sdata->local->key_mtx);
@@ -540,7 +547,7 @@ void ieee80211_iter_keys(struct ieee80211_hw *hw,
void *iter_data)
{
struct ieee80211_local *local = hw_to_local(hw);
- struct ieee80211_key *key;
+ struct ieee80211_key *key, *tmp;
struct ieee80211_sub_if_data *sdata;
ASSERT_RTNL();
@@ -548,13 +555,14 @@ void ieee80211_iter_keys(struct ieee80211_hw *hw,
mutex_lock(&local->key_mtx);
if (vif) {
sdata = vif_to_sdata(vif);
- list_for_each_entry(key, &sdata->key_list, list)
+ list_for_each_entry_safe(key, tmp, &sdata->key_list, list)
iter(hw, &sdata->vif,
key->sta ? &key->sta->sta : NULL,
&key->conf, iter_data);
} else {
list_for_each_entry(sdata, &local->interfaces, list)
- list_for_each_entry(key, &sdata->key_list, list)
+ list_for_each_entry_safe(key, tmp,
+ &sdata->key_list, list)
iter(hw, &sdata->vif,
key->sta ? &key->sta->sta : NULL,
&key->conf, iter_data);
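
Switching ieee80211_iter_keys() to list_for_each_entry_safe() lets the iterator callback delete the key it is visiting (as the new ieee80211_remove_key() export below permits): the safe variant caches the next pointer before running the body. A standalone analogue of why that matters:

#include <stdio.h>
#include <stdlib.h>

struct node { int id; struct node *next; };

int main(void)
{
	struct node *head = NULL, *n, *tmp;

	for (int i = 0; i < 3; i++) {
		n = malloc(sizeof(*n));
		n->id = i;
		n->next = head;
		head = n;
	}

	/* analogue of list_for_each_entry_safe(): grab ->next first */
	for (n = head, tmp = n ? n->next : NULL; n;
	     n = tmp, tmp = n ? n->next : NULL) {
		printf("visiting key %d\n", n->id);
		free(n); /* the iterator body may delete the entry */
	}
	return 0;
}
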
@@ -751,3 +759,135 @@ void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf,
}
}
EXPORT_SYMBOL(ieee80211_get_key_rx_seq);
+
+void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf,
+ struct ieee80211_key_seq *seq)
+{
+ struct ieee80211_key *key;
+ u64 pn64;
+
+ key = container_of(keyconf, struct ieee80211_key, conf);
+
+ switch (key->conf.cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ key->u.tkip.tx.iv32 = seq->tkip.iv32;
+ key->u.tkip.tx.iv16 = seq->tkip.iv16;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ pn64 = (u64)seq->ccmp.pn[5] |
+ ((u64)seq->ccmp.pn[4] << 8) |
+ ((u64)seq->ccmp.pn[3] << 16) |
+ ((u64)seq->ccmp.pn[2] << 24) |
+ ((u64)seq->ccmp.pn[1] << 32) |
+ ((u64)seq->ccmp.pn[0] << 40);
+ atomic64_set(&key->u.ccmp.tx_pn, pn64);
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ pn64 = (u64)seq->aes_cmac.pn[5] |
+ ((u64)seq->aes_cmac.pn[4] << 8) |
+ ((u64)seq->aes_cmac.pn[3] << 16) |
+ ((u64)seq->aes_cmac.pn[2] << 24) |
+ ((u64)seq->aes_cmac.pn[1] << 32) |
+ ((u64)seq->aes_cmac.pn[0] << 40);
+ atomic64_set(&key->u.aes_cmac.tx_pn, pn64);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(ieee80211_set_key_tx_seq);
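
ieee80211_set_key_tx_seq() is the mirror image of the existing getter: the CCMP/CMAC PN travels as six big-endian bytes (pn[0] most significant) but is kept internally as an atomic 64-bit counter. A standalone round trip of that packing:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t pn[6] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 };
	uint64_t pn64 = (uint64_t)pn[5] |
			((uint64_t)pn[4] << 8) |
			((uint64_t)pn[3] << 16) |
			((uint64_t)pn[2] << 24) |
			((uint64_t)pn[1] << 32) |
			((uint64_t)pn[0] << 40);
	uint8_t out[6];

	printf("pn64 = 0x%012llx\n", (unsigned long long)pn64);

	for (int i = 5; i >= 0; i--) { /* out[0] is the most significant */
		out[i] = pn64 & 0xff;
		pn64 >>= 8;
	}
	printf("round trip: %02x%02x%02x%02x%02x%02x\n",
	       out[0], out[1], out[2], out[3], out[4], out[5]);
	return 0;
}
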
+
+void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf,
+ int tid, struct ieee80211_key_seq *seq)
+{
+ struct ieee80211_key *key;
+ u8 *pn;
+
+ key = container_of(keyconf, struct ieee80211_key, conf);
+
+ switch (key->conf.cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ if (WARN_ON(tid < 0 || tid >= IEEE80211_NUM_TIDS))
+ return;
+ key->u.tkip.rx[tid].iv32 = seq->tkip.iv32;
+ key->u.tkip.rx[tid].iv16 = seq->tkip.iv16;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS))
+ return;
+ if (tid < 0)
+ pn = key->u.ccmp.rx_pn[IEEE80211_NUM_TIDS];
+ else
+ pn = key->u.ccmp.rx_pn[tid];
+ memcpy(pn, seq->ccmp.pn, IEEE80211_CCMP_PN_LEN);
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ if (WARN_ON(tid != 0))
+ return;
+ pn = key->u.aes_cmac.rx_pn;
+ memcpy(pn, seq->aes_cmac.pn, IEEE80211_CMAC_PN_LEN);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(ieee80211_set_key_rx_seq);
+
+void ieee80211_remove_key(struct ieee80211_key_conf *keyconf)
+{
+ struct ieee80211_key *key;
+
+ key = container_of(keyconf, struct ieee80211_key, conf);
+
+ assert_key_lock(key->local);
+
+ /*
+ * if the key was uploaded, we assume the driver has removed it
+ * (or will), so adjust bookkeeping accordingly
+ */
+ if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
+ key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
+
+ if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+ (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+ (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
+ increment_tailroom_need_count(key->sdata);
+ }
+
+ ieee80211_key_free(key, false);
+}
+EXPORT_SYMBOL_GPL(ieee80211_remove_key);
+
+struct ieee80211_key_conf *
+ieee80211_gtk_rekey_add(struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_key *key;
+ int err;
+
+ if (WARN_ON(!local->wowlan))
+ return ERR_PTR(-EINVAL);
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
+ return ERR_PTR(-EINVAL);
+
+ key = ieee80211_key_alloc(keyconf->cipher, keyconf->keyidx,
+ keyconf->keylen, keyconf->key,
+ 0, NULL);
+ if (IS_ERR(key))
+ return ERR_PTR(PTR_ERR(key));
+
+ if (sdata->u.mgd.mfp != IEEE80211_MFP_DISABLED)
+ key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
+
+ err = ieee80211_key_link(key, sdata, NULL);
+ if (err)
+ return ERR_PTR(err);
+
+ return &key->conf;
+}
+EXPORT_SYMBOL_GPL(ieee80211_gtk_rekey_add);
diff --git a/net/mac80211/led.c b/net/mac80211/led.c
index bcffa6903129..e2b836446af3 100644
--- a/net/mac80211/led.c
+++ b/net/mac80211/led.c
@@ -12,27 +12,22 @@
#include <linux/export.h>
#include "led.h"
+#define MAC80211_BLINK_DELAY 50 /* ms */
+
void ieee80211_led_rx(struct ieee80211_local *local)
{
+ unsigned long led_delay = MAC80211_BLINK_DELAY;
if (unlikely(!local->rx_led))
return;
- if (local->rx_led_counter++ % 2 == 0)
- led_trigger_event(local->rx_led, LED_OFF);
- else
- led_trigger_event(local->rx_led, LED_FULL);
+ led_trigger_blink_oneshot(local->rx_led, &led_delay, &led_delay, 0);
}
-/* q is 1 if a packet was enqueued, 0 if it has been transmitted */
-void ieee80211_led_tx(struct ieee80211_local *local, int q)
+void ieee80211_led_tx(struct ieee80211_local *local)
{
+ unsigned long led_delay = MAC80211_BLINK_DELAY;
if (unlikely(!local->tx_led))
return;
- /* not sure how this is supposed to work ... */
- local->tx_led_counter += 2*q-1;
- if (local->tx_led_counter % 2 == 0)
- led_trigger_event(local->tx_led, LED_OFF);
- else
- led_trigger_event(local->tx_led, LED_FULL);
+ led_trigger_blink_oneshot(local->tx_led, &led_delay, &led_delay, 0);
}
void ieee80211_led_assoc(struct ieee80211_local *local, bool associated)
diff --git a/net/mac80211/led.h b/net/mac80211/led.h
index e0275d9befa8..89f4344f13b9 100644
--- a/net/mac80211/led.h
+++ b/net/mac80211/led.h
@@ -13,7 +13,7 @@
#ifdef CONFIG_MAC80211_LEDS
void ieee80211_led_rx(struct ieee80211_local *local);
-void ieee80211_led_tx(struct ieee80211_local *local, int q);
+void ieee80211_led_tx(struct ieee80211_local *local);
void ieee80211_led_assoc(struct ieee80211_local *local,
bool associated);
void ieee80211_led_radio(struct ieee80211_local *local,
@@ -27,7 +27,7 @@ void ieee80211_mod_tpt_led_trig(struct ieee80211_local *local,
static inline void ieee80211_led_rx(struct ieee80211_local *local)
{
}
-static inline void ieee80211_led_tx(struct ieee80211_local *local, int q)
+static inline void ieee80211_led_tx(struct ieee80211_local *local)
{
}
static inline void ieee80211_led_assoc(struct ieee80211_local *local,
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 091088ac7890..21d5d44444d0 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -102,17 +102,8 @@ static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local)
offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
- if (local->scan_channel) {
- chandef.chan = local->scan_channel;
- /* If scanning on oper channel, use whatever channel-type
- * is currently in use.
- */
- if (chandef.chan == local->_oper_chandef.chan) {
- chandef = local->_oper_chandef;
- } else {
- chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
- chandef.center_freq1 = chandef.chan->center_freq;
- }
+ if (local->scan_chandef.chan) {
+ chandef = local->scan_chandef;
} else if (local->tmp_channel) {
chandef.chan = local->tmp_channel;
chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
@@ -151,7 +142,7 @@ static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local)
changed |= IEEE80211_CONF_CHANGE_SMPS;
}
- power = chandef.chan->max_power;
+ power = ieee80211_chandef_max_power(&chandef);
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
@@ -901,9 +892,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (!local->ops->remain_on_channel)
local->hw.wiphy->max_remain_on_channel_duration = 5000;
- if (local->ops->sched_scan_start)
- local->hw.wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
-
/* mac80211 based drivers don't support internal TDLS setup */
if (local->hw.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS)
local->hw.wiphy->flags |= WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 447f41bbe744..707ac61d63e5 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -62,7 +62,6 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems *ie)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- struct ieee80211_local *local = sdata->local;
u32 basic_rates = 0;
struct cfg80211_chan_def sta_chan_def;
@@ -85,7 +84,7 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
(ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth)))
return false;
- ieee80211_sta_get_rates(local, ie, ieee80211_get_sdata_band(sdata),
+ ieee80211_sta_get_rates(sdata, ie, ieee80211_get_sdata_band(sdata),
&basic_rates);
if (sdata->vif.bss_conf.basic_rates != basic_rates)
@@ -274,7 +273,9 @@ int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata,
neighbors = min_t(int, neighbors, IEEE80211_MAX_MESH_PEERINGS);
*pos++ = neighbors << 1;
/* Mesh capability */
- *pos = IEEE80211_MESHCONF_CAPAB_FORWARDING;
+ *pos = 0x00;
+ *pos |= ifmsh->mshcfg.dot11MeshForwarding ?
+ IEEE80211_MESHCONF_CAPAB_FORWARDING : 0x00;
*pos |= ifmsh->accepting_plinks ?
IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
/* Mesh PS mode. See IEEE802.11-2012 8.4.2.100.8 */
@@ -831,6 +832,9 @@ ieee80211_mesh_rx_probe_req(struct ieee80211_sub_if_data *sdata,
ieee802_11_parse_elems(pos, len - baselen, false, &elems);
+ if (!elems.mesh_id)
+ return;
+
/* 802.11-2012 10.1.4.3.2 */
if ((!ether_addr_equal(mgmt->da, sdata->vif.addr) &&
!is_broadcast_ether_addr(mgmt->da)) ||
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 02c05fa15c20..6b65d5055f5b 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -379,7 +379,7 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
u32 rates, basic_rates = 0, changed = 0;
sband = local->hw.wiphy->bands[band];
- rates = ieee80211_sta_get_rates(local, elems, band, &basic_rates);
+ rates = ieee80211_sta_get_rates(sdata, elems, band, &basic_rates);
spin_lock_bh(&sta->lock);
sta->last_rx = jiffies;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index cc9e02d79b55..86e4ad56b573 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -489,27 +489,6 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
/* frame sending functions */
-static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len,
- struct ieee80211_supported_band *sband,
- u32 *rates)
-{
- int i, j, count;
- *rates = 0;
- count = 0;
- for (i = 0; i < supp_rates_len; i++) {
- int rate = (supp_rates[i] & 0x7F) * 5;
-
- for (j = 0; j < sband->n_bitrates; j++)
- if (sband->bitrates[j].bitrate == rate) {
- *rates |= BIT(j);
- count++;
- break;
- }
- }
-
- return count;
-}
-
static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u8 ap_ht_param,
struct ieee80211_supported_band *sband,
@@ -628,12 +607,12 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
struct ieee80211_mgmt *mgmt;
u8 *pos, qos_info;
size_t offset = 0, noffset;
- int i, count, rates_len, supp_rates_len;
+ int i, count, rates_len, supp_rates_len, shift;
u16 capab;
struct ieee80211_supported_band *sband;
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_channel *chan;
- u32 rates = 0;
+ u32 rate_flags, rates = 0;
sdata_assert_lock(sdata);
@@ -644,8 +623,10 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
return;
}
chan = chanctx_conf->def.chan;
+ rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
rcu_read_unlock();
sband = local->hw.wiphy->bands[chan->band];
+ shift = ieee80211_vif_get_shift(&sdata->vif);
if (assoc_data->supp_rates_len) {
/*
@@ -654,17 +635,24 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
* in the association request (e.g. D-Link DAP 1353 in
* b-only mode)...
*/
- rates_len = ieee80211_compatible_rates(assoc_data->supp_rates,
- assoc_data->supp_rates_len,
- sband, &rates);
+ rates_len = ieee80211_parse_bitrates(&chanctx_conf->def, sband,
+ assoc_data->supp_rates,
+ assoc_data->supp_rates_len,
+ &rates);
} else {
/*
* In case the AP does not provide any supported rates
* information before association, we send information
* element(s) with all rates that we support.
*/
- rates = ~0;
- rates_len = sband->n_bitrates;
+ rates_len = 0;
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if ((rate_flags & sband->bitrates[i].flags)
+ != rate_flags)
+ continue;
+ rates |= BIT(i);
+ rates_len++;
+ }
}
skb = alloc_skb(local->hw.extra_tx_headroom +
@@ -741,8 +729,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
count = 0;
for (i = 0; i < sband->n_bitrates; i++) {
if (BIT(i) & rates) {
- int rate = sband->bitrates[i].bitrate;
- *pos++ = (u8) (rate / 5);
+ int rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
+ 5 * (1 << shift));
+ *pos++ = (u8) rate;
if (++count == 8)
break;
}
@@ -755,8 +744,10 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
for (i++; i < sband->n_bitrates; i++) {
if (BIT(i) & rates) {
- int rate = sband->bitrates[i].bitrate;
- *pos++ = (u8) (rate / 5);
+ int rate;
+ rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
+ 5 * (1 << shift));
+ *pos++ = (u8) rate;
}
}
}
@@ -767,7 +758,8 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
*pos++ = WLAN_EID_PWR_CAPABILITY;
*pos++ = 2;
*pos++ = 0; /* min tx power */
- *pos++ = chan->max_power; /* max tx power */
+ /* max tx power */
+ *pos++ = ieee80211_chandef_max_power(&chanctx_conf->def);
/* 2. supported channels */
/* TODO: get this in reg domain format */
@@ -1121,6 +1113,15 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
case -1:
cfg80211_chandef_create(&new_chandef, new_chan,
NL80211_CHAN_NO_HT);
+ /* keep width for 5/10 MHz channels */
+ switch (sdata->vif.bss_conf.chandef.width) {
+ case NL80211_CHAN_WIDTH_5:
+ case NL80211_CHAN_WIDTH_10:
+ new_chandef.width = sdata->vif.bss_conf.chandef.width;
+ break;
+ default:
+ break;
+ }
break;
}
@@ -2443,15 +2444,16 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
u8 *supp_rates, unsigned int supp_rates_len,
u32 *rates, u32 *basic_rates,
bool *have_higher_than_11mbit,
- int *min_rate, int *min_rate_index)
+ int *min_rate, int *min_rate_index,
+ int shift, u32 rate_flags)
{
int i, j;
for (i = 0; i < supp_rates_len; i++) {
- int rate = (supp_rates[i] & 0x7f) * 5;
+ int rate = supp_rates[i] & 0x7f;
bool is_basic = !!(supp_rates[i] & 0x80);
- if (rate > 110)
+ if ((rate * 5 * (1 << shift)) > 110)
*have_higher_than_11mbit = true;
/*
@@ -2467,12 +2469,20 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
continue;
for (j = 0; j < sband->n_bitrates; j++) {
- if (sband->bitrates[j].bitrate == rate) {
+ struct ieee80211_rate *br;
+ int brate;
+
+ br = &sband->bitrates[j];
+ if ((rate_flags & br->flags) != rate_flags)
+ continue;
+
+ brate = DIV_ROUND_UP(br->bitrate, (1 << shift) * 5);
+ if (brate == rate) {
*rates |= BIT(j);
if (is_basic)
*basic_rates |= BIT(j);
- if (rate < *min_rate) {
- *min_rate = rate;
+ if ((rate * 5) < *min_rate) {
+ *min_rate = rate * 5;
*min_rate_index = j;
}
break;
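
With scaled rates, a received Supported Rates octet can no longer be compared against bitrate * 5 directly; instead each band rate is scaled down by the peer's shift before matching, as in the hunk above. A standalone example for a 10 MHz peer:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* a received Supported Rates entry: basic bit + rate units */
	unsigned char supp = 0x80 | 12;	/* 12 units = 6.0 Mbit/s on air */
	int rate = supp & 0x7f;
	int shift = 1;			/* peer is on a 10 MHz channel */
	const int bitrates[] = { 60, 120, 240, 540 };	/* 100 kbit/s units */

	for (int j = 0; j < 4; j++) {
		int brate = DIV_ROUND_UP(bitrates[j], 5 * (1 << shift));
		if (brate == rate) {
			printf("matched band rate %d (%d.%d Mbit/s nominal)\n",
			       j, bitrates[j] / 10, bitrates[j] % 10);
			break;
		}
	}
	return 0;
}
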
@@ -2851,14 +2861,6 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
ieee80211_rx_bss_put(local, bss);
sdata->vif.bss_conf.beacon_rate = bss->beacon_rate;
}
-
- if (!sdata->u.mgd.associated ||
- !ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid))
- return;
-
- ieee80211_sta_process_chanswitch(sdata, rx_status->mactime,
- elems, true);
-
}
@@ -3147,6 +3149,9 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems);
+ ieee80211_sta_process_chanswitch(sdata, rx_status->mactime,
+ &elems, true);
+
if (ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
elems.wmm_param_len))
changed |= BSS_CHANGED_QOS;
@@ -3902,27 +3907,40 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
if (!new_sta)
return -ENOMEM;
}
-
if (new_sta) {
u32 rates = 0, basic_rates = 0;
bool have_higher_than_11mbit;
int min_rate = INT_MAX, min_rate_index = -1;
+ struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_supported_band *sband;
const struct cfg80211_bss_ies *ies;
+ int shift;
+ u32 rate_flags;
sband = local->hw.wiphy->bands[cbss->channel->band];
err = ieee80211_prep_channel(sdata, cbss);
if (err) {
sta_info_free(local, new_sta);
- return err;
+ return -EINVAL;
+ }
+ shift = ieee80211_vif_get_shift(&sdata->vif);
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ if (WARN_ON(!chanctx_conf)) {
+ rcu_read_unlock();
+ return -EINVAL;
}
+ rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
+ rcu_read_unlock();
ieee80211_get_rates(sband, bss->supp_rates,
bss->supp_rates_len,
&rates, &basic_rates,
&have_higher_than_11mbit,
- &min_rate, &min_rate_index);
+ &min_rate, &min_rate_index,
+ shift, rate_flags);
/*
* This used to be a workaround for basic rates missing
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 30d58d2d13e2..e126605cec66 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -210,7 +210,7 @@ static bool rc_no_data_or_no_ack_use_min(struct ieee80211_tx_rate_control *txrc)
!ieee80211_is_data(fc);
}
-static void rc_send_low_broadcast(s8 *idx, u32 basic_rates,
+static void rc_send_low_basicrate(s8 *idx, u32 basic_rates,
struct ieee80211_supported_band *sband)
{
u8 i;
@@ -232,37 +232,28 @@ static void rc_send_low_broadcast(s8 *idx, u32 basic_rates,
/* could not find a basic rate; use original selection */
}
-static inline s8
-rate_lowest_non_cck_index(struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta)
+static void __rate_control_send_low(struct ieee80211_hw *hw,
+ struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta,
+ struct ieee80211_tx_info *info)
{
int i;
+ u32 rate_flags =
+ ieee80211_chandef_rate_flags(&hw->conf.chandef);
+
+ if ((sband->band == IEEE80211_BAND_2GHZ) &&
+ (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE))
+ rate_flags |= IEEE80211_RATE_ERP_G;
+ info->control.rates[0].idx = 0;
for (i = 0; i < sband->n_bitrates; i++) {
- struct ieee80211_rate *srate = &sband->bitrates[i];
- if ((srate->bitrate == 10) || (srate->bitrate == 20) ||
- (srate->bitrate == 55) || (srate->bitrate == 110))
+ if (!rate_supported(sta, sband->band, i))
continue;
- if (rate_supported(sta, sband->band, i))
- return i;
+ info->control.rates[0].idx = i;
+ break;
}
-
- /* No matching rate found */
- return 0;
-}
-
-static void __rate_control_send_low(struct ieee80211_hw *hw,
- struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta,
- struct ieee80211_tx_info *info)
-{
- if ((sband->band != IEEE80211_BAND_2GHZ) ||
- !(info->flags & IEEE80211_TX_CTL_NO_CCK_RATE))
- info->control.rates[0].idx = rate_lowest_index(sband, sta);
- else
- info->control.rates[0].idx =
- rate_lowest_non_cck_index(sband, sta);
+ WARN_ON_ONCE(i == sband->n_bitrates);
info->control.rates[0].count =
(info->flags & IEEE80211_TX_CTL_NO_ACK) ?
@@ -272,28 +263,37 @@ static void __rate_control_send_low(struct ieee80211_hw *hw,
}
-bool rate_control_send_low(struct ieee80211_sta *sta,
+bool rate_control_send_low(struct ieee80211_sta *pubsta,
void *priv_sta,
struct ieee80211_tx_rate_control *txrc)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
struct ieee80211_supported_band *sband = txrc->sband;
+ struct sta_info *sta;
int mcast_rate;
+ bool use_basicrate = false;
- if (!sta || !priv_sta || rc_no_data_or_no_ack_use_min(txrc)) {
- __rate_control_send_low(txrc->hw, sband, sta, info);
+ if (!pubsta || !priv_sta || rc_no_data_or_no_ack_use_min(txrc)) {
+ __rate_control_send_low(txrc->hw, sband, pubsta, info);
- if (!sta && txrc->bss) {
+ if (!pubsta && txrc->bss) {
mcast_rate = txrc->bss_conf->mcast_rate[sband->band];
if (mcast_rate > 0) {
info->control.rates[0].idx = mcast_rate - 1;
return true;
}
+ use_basicrate = true;
+ } else if (pubsta) {
+ sta = container_of(pubsta, struct sta_info, sta);
+ if (ieee80211_vif_is_mesh(&sta->sdata->vif))
+ use_basicrate = true;
+ }
- rc_send_low_broadcast(&info->control.rates[0].idx,
+ if (use_basicrate)
+ rc_send_low_basicrate(&info->control.rates[0].idx,
txrc->bss_conf->basic_rates,
sband);
- }
+
return true;
}
return false;
@@ -585,6 +585,7 @@ static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata,
u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN];
bool has_mcs_mask;
u32 mask;
+ u32 rate_flags;
int i;
/*
@@ -594,6 +595,12 @@ static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata,
*/
mask = sdata->rc_rateidx_mask[info->band];
has_mcs_mask = sdata->rc_has_mcs_mask[info->band];
+ rate_flags =
+ ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
+ for (i = 0; i < sband->n_bitrates; i++)
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ mask &= ~BIT(i);
+
if (mask == (1 << sband->n_bitrates) - 1 && !has_mcs_mask)
return;
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index d35a5dd3fb13..5dedc56c94db 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -66,11 +66,12 @@ static inline void rate_control_rate_init(struct sta_info *sta)
}
sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band];
- rcu_read_unlock();
ieee80211_sta_set_rx_nss(sta);
- ref->ops->rate_init(ref->priv, sband, ista, priv_sta);
+ ref->ops->rate_init(ref->priv, sband, &chanctx_conf->def, ista,
+ priv_sta);
+ rcu_read_unlock();
set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
}
@@ -81,10 +82,21 @@ static inline void rate_control_rate_update(struct ieee80211_local *local,
struct rate_control_ref *ref = local->rate_ctrl;
struct ieee80211_sta *ista = &sta->sta;
void *priv_sta = sta->rate_ctrl_priv;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+
+ if (ref && ref->ops->rate_update) {
+ rcu_read_lock();
- if (ref && ref->ops->rate_update)
- ref->ops->rate_update(ref->priv, sband, ista,
- priv_sta, changed);
+ chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
+ if (WARN_ON(!chanctx_conf)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ ref->ops->rate_update(ref->priv, sband, &chanctx_conf->def,
+ ista, priv_sta, changed);
+ rcu_read_unlock();
+ }
drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
}
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index e6512e2ffd20..8b5f7ef7c0c9 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -383,14 +383,18 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
static void
calc_rate_durations(enum ieee80211_band band,
struct minstrel_rate *d,
- struct ieee80211_rate *rate)
+ struct ieee80211_rate *rate,
+ struct cfg80211_chan_def *chandef)
{
int erp = !!(rate->flags & IEEE80211_RATE_ERP_G);
+ int shift = ieee80211_chandef_get_shift(chandef);
d->perfect_tx_time = ieee80211_frame_duration(band, 1200,
- rate->bitrate, erp, 1);
+ DIV_ROUND_UP(rate->bitrate, 1 << shift), erp, 1,
+ shift);
d->ack_time = ieee80211_frame_duration(band, 10,
- rate->bitrate, erp, 1);
+ DIV_ROUND_UP(rate->bitrate, 1 << shift), erp, 1,
+ shift);
}
static void
@@ -418,21 +422,25 @@ init_sample_table(struct minstrel_sta_info *mi)
static void
minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta)
+ struct cfg80211_chan_def *chandef,
+ struct ieee80211_sta *sta, void *priv_sta)
{
struct minstrel_sta_info *mi = priv_sta;
struct minstrel_priv *mp = priv;
struct ieee80211_rate *ctl_rate;
unsigned int i, n = 0;
unsigned int t_slot = 9; /* FIXME: get real slot time */
+ u32 rate_flags;
mi->sta = sta;
mi->lowest_rix = rate_lowest_index(sband, sta);
ctl_rate = &sband->bitrates[mi->lowest_rix];
mi->sp_ack_dur = ieee80211_frame_duration(sband->band, 10,
ctl_rate->bitrate,
- !!(ctl_rate->flags & IEEE80211_RATE_ERP_G), 1);
+ !!(ctl_rate->flags & IEEE80211_RATE_ERP_G), 1,
+ ieee80211_chandef_get_shift(chandef));
+ rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef);
memset(mi->max_tp_rate, 0, sizeof(mi->max_tp_rate));
mi->max_prob_rate = 0;
@@ -441,15 +449,22 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
unsigned int tx_time = 0, tx_time_cts = 0, tx_time_rtscts = 0;
unsigned int tx_time_single;
unsigned int cw = mp->cw_min;
+ int shift;
if (!rate_supported(sta, sband->band, i))
continue;
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ continue;
+
n++;
memset(mr, 0, sizeof(*mr));
mr->rix = i;
- mr->bitrate = sband->bitrates[i].bitrate / 5;
- calc_rate_durations(sband->band, mr, &sband->bitrates[i]);
+ shift = ieee80211_chandef_get_shift(chandef);
+ mr->bitrate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
+ (1 << shift) * 5);
+ calc_rate_durations(sband->band, mr, &sband->bitrates[i],
+ chandef);
/* calculate maximum number of retransmissions before
* fallback (based on maximum segment size) */
@@ -547,6 +562,7 @@ minstrel_init_cck_rates(struct minstrel_priv *mp)
{
static const int bitrates[4] = { 10, 20, 55, 110 };
struct ieee80211_supported_band *sband;
+ u32 rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef);
int i, j;
sband = mp->hw->wiphy->bands[IEEE80211_BAND_2GHZ];
@@ -559,6 +575,9 @@ minstrel_init_cck_rates(struct minstrel_priv *mp)
if (rate->flags & IEEE80211_RATE_ERP_G)
continue;
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ continue;
+
for (j = 0; j < ARRAY_SIZE(bitrates); j++) {
if (rate->bitrate != bitrates[j])
continue;
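/*
 * Editor's note -- sketch of the rate-flag filter recurring throughout this
 * series. A bitrate qualifies only if it carries every flag demanded by the
 * channel definition (e.g., assuming ieee80211_chandef_rate_flags() yields
 * IEEE80211_RATE_SUPPORTS_5MHZ on a 5 MHz chandef, rates lacking that flag
 * are skipped). The helper name below is hypothetical:
 */
static bool example_rate_usable(const struct ieee80211_supported_band *sband,
				int i, u32 rate_flags)
{
	/* every required capability flag must be present on the rate */
	return (rate_flags & sband->bitrates[i].flags) == rate_flags;
}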
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index f3bbea1eb9e7..7c323f27ba23 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -776,7 +776,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
/* Don't use EAPOL frames for sampling on non-mrr hw */
if (mp->hw->max_rates == 1 &&
- txrc->skb->protocol == cpu_to_be16(ETH_P_PAE))
+ (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
sample_idx = -1;
else
sample_idx = minstrel_get_sample_rate(mp, mi);
@@ -847,6 +847,7 @@ minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
static void
minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *priv_sta)
{
struct minstrel_priv *mp = priv;
@@ -872,8 +873,9 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
mi->sta = sta;
mi->stats_update = jiffies;
- ack_dur = ieee80211_frame_duration(sband->band, 10, 60, 1, 1);
- mi->overhead = ieee80211_frame_duration(sband->band, 0, 60, 1, 1) + ack_dur;
+ ack_dur = ieee80211_frame_duration(sband->band, 10, 60, 1, 1, 0);
+ mi->overhead = ieee80211_frame_duration(sband->band, 0, 60, 1, 1, 0);
+ mi->overhead += ack_dur;
mi->overhead_rtscts = mi->overhead + 2 * ack_dur;
mi->avg_ampdu_len = MINSTREL_FRAC(1, 1);
@@ -942,22 +944,25 @@ use_legacy:
memset(&msp->legacy, 0, sizeof(msp->legacy));
msp->legacy.r = msp->ratelist;
msp->legacy.sample_table = msp->sample_table;
- return mac80211_minstrel.rate_init(priv, sband, sta, &msp->legacy);
+ return mac80211_minstrel.rate_init(priv, sband, chandef, sta,
+ &msp->legacy);
}
static void
minstrel_ht_rate_init(void *priv, struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *priv_sta)
{
- minstrel_ht_update_caps(priv, sband, sta, priv_sta);
+ minstrel_ht_update_caps(priv, sband, chandef, sta, priv_sta);
}
static void
minstrel_ht_rate_update(void *priv, struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *priv_sta,
u32 changed)
{
- minstrel_ht_update_caps(priv, sband, sta, priv_sta);
+ minstrel_ht_update_caps(priv, sband, chandef, sta, priv_sta);
}
static void *
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
index 502d3ecc4a79..958fad07b54c 100644
--- a/net/mac80211/rc80211_pid_algo.c
+++ b/net/mac80211/rc80211_pid_algo.c
@@ -293,6 +293,7 @@ rate_control_pid_get_rate(void *priv, struct ieee80211_sta *sta,
static void
rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *priv_sta)
{
struct rc_pid_sta_info *spinfo = priv_sta;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 2c5a79bd3777..54395d7583ba 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -87,11 +87,13 @@ ieee80211_rx_radiotap_space(struct ieee80211_local *local,
int len;
/* always present fields */
- len = sizeof(struct ieee80211_radiotap_header) + 9;
+ len = sizeof(struct ieee80211_radiotap_header) + 8;
- /* allocate extra bitmap */
+ /* allocate extra bitmaps */
if (status->vendor_radiotap_len)
len += 4;
+ if (status->chains)
+ len += 4 * hweight8(status->chains);
if (ieee80211_have_rx_timestamp(status)) {
len = ALIGN(len, 8);
@@ -100,6 +102,10 @@ ieee80211_rx_radiotap_space(struct ieee80211_local *local,
if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
len += 1;
+ /* antenna field, if we don't have per-chain info */
+ if (!status->chains)
+ len += 1;
+
/* padding for RX_FLAGS if necessary */
len = ALIGN(len, 2);
@@ -116,6 +122,11 @@ ieee80211_rx_radiotap_space(struct ieee80211_local *local,
len += 12;
}
+ if (status->chains) {
+ /* antenna and antenna signal fields */
+ len += 2 * hweight8(status->chains);
+ }
+
if (status->vendor_radiotap_len) {
if (WARN_ON_ONCE(status->vendor_radiotap_align == 0))
status->vendor_radiotap_align = 1;
@@ -145,8 +156,12 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_radiotap_header *rthdr;
unsigned char *pos;
+ __le32 *it_present;
+ u32 it_present_val;
u16 rx_flags = 0;
- int mpdulen;
+ u16 channel_flags = 0;
+ int mpdulen, chain;
+ unsigned long chains = status->chains;
mpdulen = skb->len;
if (!(has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)))
@@ -154,25 +169,39 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
memset(rthdr, 0, rtap_len);
+ it_present = &rthdr->it_present;
/* radiotap header, set always present flags */
- rthdr->it_present =
- cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
- (1 << IEEE80211_RADIOTAP_CHANNEL) |
- (1 << IEEE80211_RADIOTAP_ANTENNA) |
- (1 << IEEE80211_RADIOTAP_RX_FLAGS));
rthdr->it_len = cpu_to_le16(rtap_len + status->vendor_radiotap_len);
+ it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) |
+ BIT(IEEE80211_RADIOTAP_CHANNEL) |
+ BIT(IEEE80211_RADIOTAP_RX_FLAGS);
+
+ if (!status->chains)
+ it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA);
- pos = (unsigned char *)(rthdr + 1);
+ for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
+ it_present_val |=
+ BIT(IEEE80211_RADIOTAP_EXT) |
+ BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE);
+ put_unaligned_le32(it_present_val, it_present);
+ it_present++;
+ it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) |
+ BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
+ }
if (status->vendor_radiotap_len) {
- rthdr->it_present |=
- cpu_to_le32(BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE)) |
- cpu_to_le32(BIT(IEEE80211_RADIOTAP_EXT));
- put_unaligned_le32(status->vendor_radiotap_bitmap, pos);
- pos += 4;
+ it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) |
+ BIT(IEEE80211_RADIOTAP_EXT);
+ put_unaligned_le32(it_present_val, it_present);
+ it_present++;
+ it_present_val = status->vendor_radiotap_bitmap;
}
+ put_unaligned_le32(it_present_val, it_present);
+
+ pos = (void *)(it_present + 1);
+
/* the order of the following fields is important */
/* IEEE80211_RADIOTAP_TSFT */
@@ -207,28 +236,35 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
*/
*pos = 0;
} else {
+ int shift = 0;
rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
- *pos = rate->bitrate / 5;
+ if (status->flag & RX_FLAG_10MHZ)
+ shift = 1;
+ else if (status->flag & RX_FLAG_5MHZ)
+ shift = 2;
+ *pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift));
}
pos++;
/* IEEE80211_RADIOTAP_CHANNEL */
put_unaligned_le16(status->freq, pos);
pos += 2;
+ if (status->flag & RX_FLAG_10MHZ)
+ channel_flags |= IEEE80211_CHAN_HALF;
+ else if (status->flag & RX_FLAG_5MHZ)
+ channel_flags |= IEEE80211_CHAN_QUARTER;
+
if (status->band == IEEE80211_BAND_5GHZ)
- put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
- pos);
+ channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
else if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT))
- put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ,
- pos);
+ channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
- put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
- pos);
+ channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
else if (rate)
- put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
- pos);
+ channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
else
- put_unaligned_le16(IEEE80211_CHAN_2GHZ, pos);
+ channel_flags |= IEEE80211_CHAN_2GHZ;
+ put_unaligned_le16(channel_flags, pos);
pos += 2;
/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
@@ -242,9 +278,11 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
/* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
- /* IEEE80211_RADIOTAP_ANTENNA */
- *pos = status->antenna;
- pos++;
+ if (!status->chains) {
+ /* IEEE80211_RADIOTAP_ANTENNA */
+ *pos = status->antenna;
+ pos++;
+ }
/* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
@@ -341,6 +379,11 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
pos += 2;
}
+ for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
+ *pos++ = status->chain_signal[chain];
+ *pos++ = chain;
+ }
+
if (status->vendor_radiotap_len) {
/* ensure 2 byte alignment for the vendor field as required */
if ((pos - (u8 *)rthdr) & 1)
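/*
 * Editor's note -- rough layout sketch of the per-chain radiotap encoding
 * built above. Each reported chain contributes one extra it_present word,
 * chained with the EXT bit, plus one (signal, antenna) byte pair at the
 * tail of the fixed part. For chains 0 and 2, roughly:
 *
 *   it_present[0]: FLAGS | CHANNEL | RX_FLAGS | RADIOTAP_NAMESPACE | EXT
 *   it_present[1]: DBM_ANTSIGNAL | ANTENNA | RADIOTAP_NAMESPACE | EXT
 *   it_present[2]: DBM_ANTSIGNAL | ANTENNA          (last word, no EXT)
 *   ... fixed fields ...
 *   s8 chain_signal[0], u8 antenna = 0, s8 chain_signal[2], u8 antenna = 2
 */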
@@ -1012,207 +1055,6 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
static ieee80211_rx_result debug_noinline
-ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
-{
- struct sk_buff *skb = rx->skb;
- struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- int keyidx;
- int hdrlen;
- ieee80211_rx_result result = RX_DROP_UNUSABLE;
- struct ieee80211_key *sta_ptk = NULL;
- int mmie_keyidx = -1;
- __le16 fc;
-
- /*
- * Key selection 101
- *
- * There are four types of keys:
- * - GTK (group keys)
- * - IGTK (group keys for management frames)
- * - PTK (pairwise keys)
- * - STK (station-to-station pairwise keys)
- *
- * When selecting a key, we have to distinguish between multicast
- * (including broadcast) and unicast frames, the latter can only
- * use PTKs and STKs while the former always use GTKs and IGTKs.
- * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
- * unicast frames can also use key indices like GTKs. Hence, if we
- * don't have a PTK/STK we check the key index for a WEP key.
- *
- * Note that in a regular BSS, multicast frames are sent by the
- * AP only, associated stations unicast the frame to the AP first
- * which then multicasts it on their behalf.
- *
- * There is also a slight problem in IBSS mode: GTKs are negotiated
- * with each station, that is something we don't currently handle.
- * The spec seems to expect that one negotiates the same key with
- * every station but there's no such requirement; VLANs could be
- * possible.
- */
-
- /*
- * No point in finding a key and decrypting if the frame is neither
- * addressed to us nor a multicast frame.
- */
- if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
- return RX_CONTINUE;
-
- /* start without a key */
- rx->key = NULL;
-
- if (rx->sta)
- sta_ptk = rcu_dereference(rx->sta->ptk);
-
- fc = hdr->frame_control;
-
- if (!ieee80211_has_protected(fc))
- mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
-
- if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
- rx->key = sta_ptk;
- if ((status->flag & RX_FLAG_DECRYPTED) &&
- (status->flag & RX_FLAG_IV_STRIPPED))
- return RX_CONTINUE;
- /* Skip decryption if the frame is not protected. */
- if (!ieee80211_has_protected(fc))
- return RX_CONTINUE;
- } else if (mmie_keyidx >= 0) {
- /* Broadcast/multicast robust management frame / BIP */
- if ((status->flag & RX_FLAG_DECRYPTED) &&
- (status->flag & RX_FLAG_IV_STRIPPED))
- return RX_CONTINUE;
-
- if (mmie_keyidx < NUM_DEFAULT_KEYS ||
- mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
- return RX_DROP_MONITOR; /* unexpected BIP keyidx */
- if (rx->sta)
- rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
- if (!rx->key)
- rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
- } else if (!ieee80211_has_protected(fc)) {
- /*
- * The frame was not protected, so skip decryption. However, we
- * need to set rx->key if there is a key that could have been
- * used so that the frame may be dropped if encryption would
- * have been expected.
- */
- struct ieee80211_key *key = NULL;
- struct ieee80211_sub_if_data *sdata = rx->sdata;
- int i;
-
- if (ieee80211_is_mgmt(fc) &&
- is_multicast_ether_addr(hdr->addr1) &&
- (key = rcu_dereference(rx->sdata->default_mgmt_key)))
- rx->key = key;
- else {
- if (rx->sta) {
- for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
- key = rcu_dereference(rx->sta->gtk[i]);
- if (key)
- break;
- }
- }
- if (!key) {
- for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
- key = rcu_dereference(sdata->keys[i]);
- if (key)
- break;
- }
- }
- if (key)
- rx->key = key;
- }
- return RX_CONTINUE;
- } else {
- u8 keyid;
- /*
- * The device doesn't give us the IV so we won't be
- * able to look up the key. That's ok though, we
- * don't need to decrypt the frame, we just won't
- * be able to keep statistics accurate.
- * Except for key threshold notifications, should
- * we somehow allow the driver to tell us which key
- * the hardware used if this flag is set?
- */
- if ((status->flag & RX_FLAG_DECRYPTED) &&
- (status->flag & RX_FLAG_IV_STRIPPED))
- return RX_CONTINUE;
-
- hdrlen = ieee80211_hdrlen(fc);
-
- if (rx->skb->len < 8 + hdrlen)
- return RX_DROP_UNUSABLE; /* TODO: count this? */
-
- /*
- * no need to call ieee80211_wep_get_keyidx,
- * it verifies a bunch of things we've done already
- */
- skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
- keyidx = keyid >> 6;
-
- /* check per-station GTK first, if multicast packet */
- if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
- rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
-
- /* if not found, try default key */
- if (!rx->key) {
- rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
-
- /*
- * RSNA-protected unicast frames should always be
- * sent with pairwise or station-to-station keys,
- * but for WEP we allow using a key index as well.
- */
- if (rx->key &&
- rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
- rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
- !is_multicast_ether_addr(hdr->addr1))
- rx->key = NULL;
- }
- }
-
- if (rx->key) {
- if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
- return RX_DROP_MONITOR;
-
- rx->key->tx_rx_count++;
- /* TODO: add threshold stuff again */
- } else {
- return RX_DROP_MONITOR;
- }
-
- switch (rx->key->conf.cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- result = ieee80211_crypto_wep_decrypt(rx);
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- result = ieee80211_crypto_tkip_decrypt(rx);
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- result = ieee80211_crypto_ccmp_decrypt(rx);
- break;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- result = ieee80211_crypto_aes_cmac_decrypt(rx);
- break;
- default:
- /*
- * We can reach here only with HW-only algorithms
- * but why didn't it decrypt the frame?!
- */
- return RX_DROP_UNUSABLE;
- }
-
- /* the hdr variable is invalid after the decrypt handlers */
-
- /* either the frame has been decrypted or will be dropped */
- status->flag |= RX_FLAG_DECRYPTED;
-
- return result;
-}
-
-static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
{
struct ieee80211_local *local;
@@ -1513,6 +1355,207 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
return RX_CONTINUE;
} /* ieee80211_rx_h_sta_process */
+static ieee80211_rx_result debug_noinline
+ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
+{
+ struct sk_buff *skb = rx->skb;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int keyidx;
+ int hdrlen;
+ ieee80211_rx_result result = RX_DROP_UNUSABLE;
+ struct ieee80211_key *sta_ptk = NULL;
+ int mmie_keyidx = -1;
+ __le16 fc;
+
+ /*
+ * Key selection 101
+ *
+ * There are four types of keys:
+ * - GTK (group keys)
+ * - IGTK (group keys for management frames)
+ * - PTK (pairwise keys)
+ * - STK (station-to-station pairwise keys)
+ *
+ * When selecting a key, we have to distinguish between multicast
+ * (including broadcast) and unicast frames, the latter can only
+ * use PTKs and STKs while the former always use GTKs and IGTKs.
+ * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
+ * unicast frames can also use key indices like GTKs. Hence, if we
+ * don't have a PTK/STK we check the key index for a WEP key.
+ *
+ * Note that in a regular BSS, multicast frames are sent by the
+ * AP only, associated stations unicast the frame to the AP first
+ * which then multicasts it on their behalf.
+ *
+ * There is also a slight problem in IBSS mode: GTKs are negotiated
+ * with each station, that is something we don't currently handle.
+ * The spec seems to expect that one negotiates the same key with
+ * every station but there's no such requirement; VLANs could be
+ * possible.
+ */
+
+ /*
+ * No point in finding a key and decrypting if the frame is neither
+ * addressed to us nor a multicast frame.
+ */
+ if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
+ return RX_CONTINUE;
+
+ /* start without a key */
+ rx->key = NULL;
+
+ if (rx->sta)
+ sta_ptk = rcu_dereference(rx->sta->ptk);
+
+ fc = hdr->frame_control;
+
+ if (!ieee80211_has_protected(fc))
+ mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
+
+ if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
+ rx->key = sta_ptk;
+ if ((status->flag & RX_FLAG_DECRYPTED) &&
+ (status->flag & RX_FLAG_IV_STRIPPED))
+ return RX_CONTINUE;
+ /* Skip decryption if the frame is not protected. */
+ if (!ieee80211_has_protected(fc))
+ return RX_CONTINUE;
+ } else if (mmie_keyidx >= 0) {
+ /* Broadcast/multicast robust management frame / BIP */
+ if ((status->flag & RX_FLAG_DECRYPTED) &&
+ (status->flag & RX_FLAG_IV_STRIPPED))
+ return RX_CONTINUE;
+
+ if (mmie_keyidx < NUM_DEFAULT_KEYS ||
+ mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
+ return RX_DROP_MONITOR; /* unexpected BIP keyidx */
+ if (rx->sta)
+ rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
+ if (!rx->key)
+ rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
+ } else if (!ieee80211_has_protected(fc)) {
+ /*
+ * The frame was not protected, so skip decryption. However, we
+ * need to set rx->key if there is a key that could have been
+ * used so that the frame may be dropped if encryption would
+ * have been expected.
+ */
+ struct ieee80211_key *key = NULL;
+ struct ieee80211_sub_if_data *sdata = rx->sdata;
+ int i;
+
+ if (ieee80211_is_mgmt(fc) &&
+ is_multicast_ether_addr(hdr->addr1) &&
+ (key = rcu_dereference(rx->sdata->default_mgmt_key)))
+ rx->key = key;
+ else {
+ if (rx->sta) {
+ for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
+ key = rcu_dereference(rx->sta->gtk[i]);
+ if (key)
+ break;
+ }
+ }
+ if (!key) {
+ for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
+ key = rcu_dereference(sdata->keys[i]);
+ if (key)
+ break;
+ }
+ }
+ if (key)
+ rx->key = key;
+ }
+ return RX_CONTINUE;
+ } else {
+ u8 keyid;
+ /*
+ * The device doesn't give us the IV so we won't be
+ * able to look up the key. That's ok though, we
+ * don't need to decrypt the frame, we just won't
+ * be able to keep statistics accurate.
+ * Except for key threshold notifications, should
+ * we somehow allow the driver to tell us which key
+ * the hardware used if this flag is set?
+ */
+ if ((status->flag & RX_FLAG_DECRYPTED) &&
+ (status->flag & RX_FLAG_IV_STRIPPED))
+ return RX_CONTINUE;
+
+ hdrlen = ieee80211_hdrlen(fc);
+
+ if (rx->skb->len < 8 + hdrlen)
+ return RX_DROP_UNUSABLE; /* TODO: count this? */
+
+ /*
+ * no need to call ieee80211_wep_get_keyidx,
+ * it verifies a bunch of things we've done already
+ */
+ skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
+ keyidx = keyid >> 6;
+
+ /* check per-station GTK first, if multicast packet */
+ if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
+ rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
+
+ /* if not found, try default key */
+ if (!rx->key) {
+ rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
+
+ /*
+ * RSNA-protected unicast frames should always be
+ * sent with pairwise or station-to-station keys,
+ * but for WEP we allow using a key index as well.
+ */
+ if (rx->key &&
+ rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
+ rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
+ !is_multicast_ether_addr(hdr->addr1))
+ rx->key = NULL;
+ }
+ }
+
+ if (rx->key) {
+ if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
+ return RX_DROP_MONITOR;
+
+ rx->key->tx_rx_count++;
+ /* TODO: add threshold stuff again */
+ } else {
+ return RX_DROP_MONITOR;
+ }
+
+ switch (rx->key->conf.cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ result = ieee80211_crypto_wep_decrypt(rx);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ result = ieee80211_crypto_tkip_decrypt(rx);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ result = ieee80211_crypto_ccmp_decrypt(rx);
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ result = ieee80211_crypto_aes_cmac_decrypt(rx);
+ break;
+ default:
+ /*
+ * We can reach here only with HW-only algorithms
+ * but why didn't it decrypt the frame?!
+ */
+ return RX_DROP_UNUSABLE;
+ }
+
+ /* the hdr variable is invalid after the decrypt handlers */
+
+ /* either the frame has been decrypted or will be dropped */
+ status->flag |= RX_FLAG_DECRYPTED;
+
+ return result;
+}
+
static inline struct ieee80211_fragment_entry *
ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
unsigned int frag, unsigned int seq, int rx_queue,
@@ -2641,8 +2684,7 @@ ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
sig = status->signal;
if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig,
- rx->skb->data, rx->skb->len,
- GFP_ATOMIC)) {
+ rx->skb->data, rx->skb->len, 0, GFP_ATOMIC)) {
if (rx->sta)
rx->sta->rx_packets++;
dev_kfree_skb(rx->skb);
@@ -2896,10 +2938,10 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
*/
rx->skb = skb;
- CALL_RXH(ieee80211_rx_h_decrypt)
CALL_RXH(ieee80211_rx_h_check_more_data)
CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll)
CALL_RXH(ieee80211_rx_h_sta_process)
+ CALL_RXH(ieee80211_rx_h_decrypt)
CALL_RXH(ieee80211_rx_h_defragment)
CALL_RXH(ieee80211_rx_h_michael_mic_verify)
/* must be after MMIC verify so header is counted in MPDU mic */
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 1b122a79b0d8..08afe74b98f4 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -66,6 +66,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
struct cfg80211_bss *cbss;
struct ieee80211_bss *bss;
int clen, srlen;
+ enum nl80211_bss_scan_width scan_width;
s32 signal = 0;
if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
@@ -73,8 +74,15 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
signal = (rx_status->signal * 100) / local->hw.max_signal;
- cbss = cfg80211_inform_bss_frame(local->hw.wiphy, channel,
- mgmt, len, signal, GFP_ATOMIC);
+ scan_width = NL80211_BSS_CHAN_WIDTH_20;
+ if (rx_status->flag & RX_FLAG_5MHZ)
+ scan_width = NL80211_BSS_CHAN_WIDTH_5;
+ if (rx_status->flag & RX_FLAG_10MHZ)
+ scan_width = NL80211_BSS_CHAN_WIDTH_10;
+
+ cbss = cfg80211_inform_bss_width_frame(local->hw.wiphy, channel,
+ scan_width, mgmt, len, signal,
+ GFP_ATOMIC);
if (!cbss)
return NULL;
@@ -204,10 +212,29 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
ieee80211_rx_bss_put(local, bss);
}
+static void
+ieee80211_prepare_scan_chandef(struct cfg80211_chan_def *chandef,
+ enum nl80211_bss_scan_width scan_width)
+{
+ memset(chandef, 0, sizeof(*chandef));
+ switch (scan_width) {
+ case NL80211_BSS_CHAN_WIDTH_5:
+ chandef->width = NL80211_CHAN_WIDTH_5;
+ break;
+ case NL80211_BSS_CHAN_WIDTH_10:
+ chandef->width = NL80211_CHAN_WIDTH_10;
+ break;
+ default:
+ chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
+ break;
+ }
+}
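/*
 * Editor's note -- usage sketch (illustrative only): callers hand the helper
 * a stack chandef and the requested scan width; the channel pointer itself
 * is filled in, or deliberately left NULL, by the caller afterwards.
 */
static void example_prepare(struct cfg80211_scan_request *req)
{
	struct cfg80211_chan_def chandef;

	ieee80211_prepare_scan_chandef(&chandef, req->scan_width);
	/* chandef.width is now NL80211_CHAN_WIDTH_{5,10,20_NOHT} */
}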
+
/* return false if no more work */
static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
{
struct cfg80211_scan_request *req = local->scan_req;
+ struct cfg80211_chan_def chandef;
enum ieee80211_band band;
int i, ielen, n_chans;
@@ -229,11 +256,12 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
} while (!n_chans);
local->hw_scan_req->n_channels = n_chans;
+ ieee80211_prepare_scan_chandef(&chandef, req->scan_width);
ielen = ieee80211_build_preq_ies(local, (u8 *)local->hw_scan_req->ie,
local->hw_scan_ies_bufsize,
req->ie, req->ie_len, band,
- req->rates[band], 0);
+ req->rates[band], &chandef);
local->hw_scan_req->ie_len = ielen;
local->hw_scan_req->no_cck = req->no_cck;
@@ -280,7 +308,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
rcu_assign_pointer(local->scan_sdata, NULL);
local->scanning = 0;
- local->scan_channel = NULL;
+ local->scan_chandef.chan = NULL;
/* Set power back to normal operating levels. */
ieee80211_hw_config(local, 0);
@@ -615,11 +643,34 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
{
int skip;
struct ieee80211_channel *chan;
+ enum nl80211_bss_scan_width oper_scan_width;
skip = 0;
chan = local->scan_req->channels[local->scan_channel_idx];
- local->scan_channel = chan;
+ local->scan_chandef.chan = chan;
+ local->scan_chandef.center_freq1 = chan->center_freq;
+ local->scan_chandef.center_freq2 = 0;
+ switch (local->scan_req->scan_width) {
+ case NL80211_BSS_CHAN_WIDTH_5:
+ local->scan_chandef.width = NL80211_CHAN_WIDTH_5;
+ break;
+ case NL80211_BSS_CHAN_WIDTH_10:
+ local->scan_chandef.width = NL80211_CHAN_WIDTH_10;
+ break;
+ case NL80211_BSS_CHAN_WIDTH_20:
+ /* If scanning on oper channel, use whatever channel-type
+ * is currently in use.
+ */
+ oper_scan_width = cfg80211_chandef_to_scan_width(
+ &local->_oper_chandef);
+ if (chan == local->_oper_chandef.chan &&
+ oper_scan_width == local->scan_req->scan_width)
+ local->scan_chandef = local->_oper_chandef;
+ else
+ local->scan_chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
+ break;
+ }
if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
skip = 1;
@@ -659,7 +710,7 @@ static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
unsigned long *next_delay)
{
/* switch back to the operating channel */
- local->scan_channel = NULL;
+ local->scan_chandef.chan = NULL;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
/* disable PS */
@@ -801,7 +852,8 @@ int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
const u8 *ssid, u8 ssid_len,
- struct ieee80211_channel *chan)
+ struct ieee80211_channel *chan,
+ enum nl80211_bss_scan_width scan_width)
{
struct ieee80211_local *local = sdata->local;
int ret = -EBUSY;
@@ -851,6 +903,7 @@ int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
local->int_scan_req->ssids = &local->scan_ssid;
local->int_scan_req->n_ssids = 1;
+ local->int_scan_req->scan_width = scan_width;
memcpy(local->int_scan_req->ssids[0].ssid, ssid, IEEE80211_MAX_SSID_LEN);
local->int_scan_req->ssids[0].ssid_len = ssid_len;
@@ -912,6 +965,7 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_sched_scan_ies sched_scan_ies = {};
+ struct cfg80211_chan_def chandef;
int ret, i, iebufsz;
iebufsz = 2 + IEEE80211_MAX_SSID_LEN +
@@ -939,10 +993,12 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
goto out_free;
}
+ ieee80211_prepare_scan_chandef(&chandef, req->scan_width);
+
sched_scan_ies.len[i] =
ieee80211_build_preq_ies(local, sched_scan_ies.ie[i],
iebufsz, req->ie, req->ie_len,
- i, (u32) -1, 0);
+ i, (u32) -1, &chandef);
}
ret = drv_sched_scan_start(local, sdata, req, &sched_scan_ies);
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 43439203f4e4..368837fe3b80 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -235,7 +235,8 @@ static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info)
/* IEEE80211_RADIOTAP_RATE rate */
if (info->status.rates[0].idx >= 0 &&
- !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS))
+ !(info->status.rates[0].flags & (IEEE80211_TX_RC_MCS |
+ IEEE80211_TX_RC_VHT_MCS)))
len += 2;
/* IEEE80211_RADIOTAP_TX_FLAGS */
@@ -244,17 +245,23 @@ static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info)
/* IEEE80211_RADIOTAP_DATA_RETRIES */
len += 1;
- /* IEEE80211_TX_RC_MCS */
- if (info->status.rates[0].idx >= 0 &&
- info->status.rates[0].flags & IEEE80211_TX_RC_MCS)
- len += 3;
+ /* IEEE80211_RADIOTAP_MCS
+ * IEEE80211_RADIOTAP_VHT */
+ if (info->status.rates[0].idx >= 0) {
+ if (info->status.rates[0].flags & IEEE80211_TX_RC_MCS)
+ len += 3;
+ else if (info->status.rates[0].flags & IEEE80211_TX_RC_VHT_MCS)
+ len = ALIGN(len, 2) + 12;
+ }
return len;
}
-static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
- *sband, struct sk_buff *skb,
- int retry_count, int rtap_len)
+static void
+ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
+ struct ieee80211_supported_band *sband,
+ struct sk_buff *skb, int retry_count,
+ int rtap_len, int shift)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -279,9 +286,13 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
/* IEEE80211_RADIOTAP_RATE */
if (info->status.rates[0].idx >= 0 &&
- !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS)) {
+ !(info->status.rates[0].flags & (IEEE80211_TX_RC_MCS |
+ IEEE80211_TX_RC_VHT_MCS))) {
+ u16 rate;
+
rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
- *pos = sband->bitrates[info->status.rates[0].idx].bitrate / 5;
+ rate = sband->bitrates[info->status.rates[0].idx].bitrate;
+ *pos = DIV_ROUND_UP(rate, 5 * (1 << shift));
/* padding for tx flags */
pos += 2;
}
@@ -306,9 +317,12 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
*pos = retry_count;
pos++;
- /* IEEE80211_TX_RC_MCS */
- if (info->status.rates[0].idx >= 0 &&
- info->status.rates[0].flags & IEEE80211_TX_RC_MCS) {
+ if (info->status.rates[0].idx < 0)
+ return;
+
+ /* IEEE80211_RADIOTAP_MCS
+ * IEEE80211_RADIOTAP_VHT */
+ if (info->status.rates[0].flags & IEEE80211_TX_RC_MCS) {
rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
pos[0] = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
IEEE80211_RADIOTAP_MCS_HAVE_GI |
@@ -321,8 +335,48 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
pos[1] |= IEEE80211_RADIOTAP_MCS_FMT_GF;
pos[2] = info->status.rates[0].idx;
pos += 3;
- }
+ } else if (info->status.rates[0].flags & IEEE80211_TX_RC_VHT_MCS) {
+ u16 known = local->hw.radiotap_vht_details &
+ (IEEE80211_RADIOTAP_VHT_KNOWN_GI |
+ IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH);
+
+ rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
+
+ /* required alignment from rthdr */
+ pos = (u8 *)rthdr + ALIGN(pos - (u8 *)rthdr, 2);
+ /* u16 known - IEEE80211_RADIOTAP_VHT_KNOWN_* */
+ put_unaligned_le16(known, pos);
+ pos += 2;
+
+ /* u8 flags - IEEE80211_RADIOTAP_VHT_FLAG_* */
+ if (info->status.rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
+ *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
+ pos++;
+
+ /* u8 bandwidth */
+ if (info->status.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ *pos = 1;
+ else if (info->status.rates[0].flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ *pos = 4;
+ else if (info->status.rates[0].flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
+ *pos = 11;
+ else /* IEEE80211_TX_RC_{20_MHZ_WIDTH,FIXME:DUP_DATA} */
+ *pos = 0;
+ pos++;
+
+ /* u8 mcs_nss[4] */
+ *pos = (ieee80211_rate_get_vht_mcs(&info->status.rates[0]) << 4) |
+ ieee80211_rate_get_vht_nss(&info->status.rates[0]);
+ pos += 4;
+
+ /* u8 coding */
+ pos++;
+ /* u8 group_id */
+ pos++;
+ /* u16 partial_aid */
+ pos += 2;
+ }
}
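/*
 * Editor's note -- reference for the VHT byte values written above, per the
 * radiotap VHT field definition: bandwidth code 0 -> 20 MHz, 1 -> 40 MHz,
 * 4 -> 80 MHz, 11 -> 160 MHz; mcs_nss[0] packs (MCS << 4) | NSS for the
 * first (and here only) user, and the remaining three mcs_nss bytes stay 0.
 */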
static void ieee80211_report_used_skb(struct ieee80211_local *local,
@@ -424,6 +478,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
bool acked;
struct ieee80211_bar *bar;
int rtap_len;
+ int shift = 0;
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
@@ -458,6 +513,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
if (!ether_addr_equal(hdr->addr2, sta->sdata->vif.addr))
continue;
+ shift = ieee80211_vif_get_shift(&sta->sdata->vif);
+
if (info->flags & IEEE80211_TX_STATUS_EOSP)
clear_sta_flag(sta, WLAN_STA_SP);
@@ -557,7 +614,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
rcu_read_unlock();
- ieee80211_led_tx(local, 0);
+ ieee80211_led_tx(local);
/* SNMP counters
* Fragments are passed to low-level drivers as separate skbs, so these
@@ -624,7 +681,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
dev_kfree_skb(skb);
return;
}
- ieee80211_add_tx_radiotap_header(sband, skb, retry_count, rtap_len);
+ ieee80211_add_tx_radiotap_header(local, sband, skb, retry_count,
+ rtap_len, shift);
/* XXX: is this sufficient for BPF? */
skb_set_mac_header(skb, 0);
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index c215fafd7a2f..1aba645882bd 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -1906,6 +1906,32 @@ TRACE_EVENT(api_radar_detected,
)
);
+TRACE_EVENT(drv_channel_switch_beacon,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_chan_def *chandef),
+
+ TP_ARGS(local, sdata, chandef),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ CHANDEF_ENTRY
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ CHANDEF_ASSIGN(chandef);
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT " channel switch to " CHANDEF_PR_FMT,
+ LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG
+ )
+);
+
+
#ifdef CONFIG_MAC80211_MESSAGE_TRACING
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mac80211_msg
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 4105d0ca963e..3456c0486b48 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -40,12 +40,22 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
struct sk_buff *skb, int group_addr,
int next_frag_len)
{
- int rate, mrate, erp, dur, i;
+ int rate, mrate, erp, dur, i, shift = 0;
struct ieee80211_rate *txrate;
struct ieee80211_local *local = tx->local;
struct ieee80211_supported_band *sband;
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ u32 rate_flags = 0;
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(tx->sdata->vif.chanctx_conf);
+ if (chanctx_conf) {
+ shift = ieee80211_chandef_get_shift(&chanctx_conf->def);
+ rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
+ }
+ rcu_read_unlock();
/* assume HW handles this */
if (tx->rate.flags & IEEE80211_TX_RC_MCS)
@@ -122,8 +132,11 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
if (r->bitrate > txrate->bitrate)
break;
+ if ((rate_flags & r->flags) != rate_flags)
+ continue;
+
if (tx->sdata->vif.bss_conf.basic_rates & BIT(i))
- rate = r->bitrate;
+ rate = DIV_ROUND_UP(r->bitrate, 1 << shift);
switch (sband->band) {
case IEEE80211_BAND_2GHZ: {
@@ -150,7 +163,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
if (rate == -1) {
/* No matching basic rate found; use highest suitable mandatory
* PHY rate */
- rate = mrate;
+ rate = DIV_ROUND_UP(mrate, 1 << shift);
}
/* Don't calculate ACKs for QoS Frames with NoAck Policy set */
@@ -162,7 +175,8 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
* (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
* to closest integer */
dur = ieee80211_frame_duration(sband->band, 10, rate, erp,
- tx->sdata->vif.bss_conf.use_short_preamble);
+ tx->sdata->vif.bss_conf.use_short_preamble,
+ shift);
if (next_frag_len) {
/* Frame is fragmented: duration increases with time needed to
@@ -171,7 +185,8 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
/* next fragment */
dur += ieee80211_frame_duration(sband->band, next_frag_len,
txrate->bitrate, erp,
- tx->sdata->vif.bss_conf.use_short_preamble);
+ tx->sdata->vif.bss_conf.use_short_preamble,
+ shift);
}
return cpu_to_le16(dur);
@@ -524,9 +539,11 @@ ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
- if (unlikely(tx->sdata->control_port_protocol == tx->skb->protocol &&
- tx->sdata->control_port_no_encrypt))
- info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ if (unlikely(tx->sdata->control_port_protocol == tx->skb->protocol)) {
+ if (tx->sdata->control_port_no_encrypt)
+ info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
+ }
return TX_CONTINUE;
}
@@ -764,9 +781,11 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
/*
* Anything but QoS data that has a sequence number field
* (is long enough) gets a sequence number from the global
- * counter.
+ * counter. QoS data frames with a multicast destination
+ * also use the global counter (802.11-2012 9.3.2.10).
*/
- if (!ieee80211_is_data_qos(hdr->frame_control)) {
+ if (!ieee80211_is_data_qos(hdr->frame_control) ||
+ is_multicast_ether_addr(hdr->addr1)) {
/* driver should assign sequence number */
info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
/* for pure STA mode without beacons, we can do it */
@@ -1257,6 +1276,10 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
switch (sdata->vif.type) {
case NL80211_IFTYPE_MONITOR:
+ if (sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE) {
+ vif = &sdata->vif;
+ break;
+ }
sdata = rcu_dereference(local->monitor_sdata);
if (sdata) {
vif = &sdata->vif;
@@ -1281,7 +1304,6 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
txpending);
ieee80211_tpt_led_trig_tx(local, fc, led_len);
- ieee80211_led_tx(local, 1);
WARN_ON_ONCE(!skb_queue_empty(skbs));
@@ -2320,6 +2342,81 @@ static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
return 0;
}
+void ieee80211_csa_finish(struct ieee80211_vif *vif)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+ ieee80211_queue_work(&sdata->local->hw,
+ &sdata->csa_finalize_work);
+}
+EXPORT_SYMBOL(ieee80211_csa_finish);
+
+static void ieee80211_update_csa(struct ieee80211_sub_if_data *sdata,
+ struct beacon_data *beacon)
+{
+ struct probe_resp *resp;
+ int counter_offset_beacon = sdata->csa_counter_offset_beacon;
+ int counter_offset_presp = sdata->csa_counter_offset_presp;
+
+ /* warn if the driver did not check for/react to csa completeness */
+ if (WARN_ON(((u8 *)beacon->tail)[counter_offset_beacon] == 0))
+ return;
+
+ ((u8 *)beacon->tail)[counter_offset_beacon]--;
+
+ if (sdata->vif.type == NL80211_IFTYPE_AP &&
+ counter_offset_presp) {
+ rcu_read_lock();
+ resp = rcu_dereference(sdata->u.ap.probe_resp);
+
+ /* if nl80211 accepted the offset, this should not happen. */
+ if (WARN_ON(!resp)) {
+ rcu_read_unlock();
+ return;
+ }
+ resp->data[counter_offset_presp]--;
+ rcu_read_unlock();
+ }
+}
+
+bool ieee80211_csa_is_complete(struct ieee80211_vif *vif)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct beacon_data *beacon = NULL;
+ u8 *beacon_data;
+ size_t beacon_data_len;
+ int counter_beacon = sdata->csa_counter_offset_beacon;
+ int ret = false;
+
+ if (!ieee80211_sdata_running(sdata))
+ return false;
+
+ rcu_read_lock();
+ if (vif->type == NL80211_IFTYPE_AP) {
+ struct ieee80211_if_ap *ap = &sdata->u.ap;
+
+ beacon = rcu_dereference(ap->beacon);
+ if (WARN_ON(!beacon || !beacon->tail))
+ goto out;
+ beacon_data = beacon->tail;
+ beacon_data_len = beacon->tail_len;
+ } else {
+ WARN_ON(1);
+ goto out;
+ }
+
+ if (WARN_ON(counter_beacon > beacon_data_len))
+ goto out;
+
+ if (beacon_data[counter_beacon] == 0)
+ ret = true;
+ out:
+ rcu_read_unlock();
+
+ return ret;
+}
+EXPORT_SYMBOL(ieee80211_csa_is_complete);
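/*
 * Editor's note -- hypothetical driver sketch showing how the two exports
 * above are meant to be paired: poll ieee80211_csa_is_complete() from the
 * beacon path and, once the countdown reaches zero, let mac80211 finalize
 * the switch. The function name and work context are illustrative only.
 */
static void example_drv_beacon_work(struct ieee80211_vif *vif)
{
	if (vif->csa_active && ieee80211_csa_is_complete(vif))
		ieee80211_csa_finish(vif);
	/* otherwise keep beaconing; mac80211 decrements the CSA counter
	 * each time ieee80211_beacon_get_tim() hands out a beacon */
}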
+
struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u16 *tim_offset, u16 *tim_length)
@@ -2350,6 +2447,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
struct beacon_data *beacon = rcu_dereference(ap->beacon);
if (beacon) {
+ if (sdata->vif.csa_active)
+ ieee80211_update_csa(sdata, beacon);
+
/*
* headroom, head length,
* tail length and maximum TIM length
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 22654452a561..e1b34a18b243 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -107,7 +107,8 @@ void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx)
}
int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
- int rate, int erp, int short_preamble)
+ int rate, int erp, int short_preamble,
+ int shift)
{
int dur;
@@ -118,6 +119,9 @@ int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
*
* rate is in 100 kbps, so the dividend is multiplied by 10 in the
* DIV_ROUND_UP() operations.
+ *
+ * shift may be 2 for 5 MHz channels or 1 for 10 MHz channels, and
+ * is assumed to be 0 otherwise.
*/
if (band == IEEE80211_BAND_5GHZ || erp) {
@@ -130,13 +134,23 @@ int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
* TXTIME = T_PREAMBLE + T_SIGNAL + T_SYM x N_SYM + Signal Ext
*
* T_SYM = 4 usec
- * 802.11a - 17.5.2: aSIFSTime = 16 usec
+ * 802.11a - 18.5.2: aSIFSTime = 16 usec
* 802.11g - 19.8.4: aSIFSTime = 10 usec +
* signal ext = 6 usec
*/
dur = 16; /* SIFS + signal ext */
- dur += 16; /* 17.3.2.3: T_PREAMBLE = 16 usec */
- dur += 4; /* 17.3.2.3: T_SIGNAL = 4 usec */
+ dur += 16; /* IEEE 802.11-2012 18.3.2.4: T_PREAMBLE = 16 usec */
+ dur += 4; /* IEEE 802.11-2012 18.3.2.4: T_SIGNAL = 4 usec */
+
+ /* IEEE 802.11-2012 18.3.2.4: all values above are:
+ * * times 4 for 5 MHz
+ * * times 2 for 10 MHz
+ */
+ dur *= 1 << shift;
+
+ /* rates should already consider the channel bandwidth,
+ * don't apply divisor again.
+ */
dur += 4 * DIV_ROUND_UP((16 + 8 * (len + 4) + 6) * 10,
4 * rate); /* T_SYM x N_SYM */
} else {
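/*
 * Editor's note -- worked example for the OFDM branch above: a 10-byte ACK
 * at 24 Mbps (rate == 240 in 100 kbps units) on a 20 MHz channel (shift 0):
 *
 *   fixed part: (16 SIFS+ext + 16 preamble + 4 signal) * (1 << 0) = 36 us
 *   symbols:    4 * DIV_ROUND_UP((16 + 8 * (10 + 4) + 6) * 10, 4 * 240)
 *             = 4 * DIV_ROUND_UP(1340, 960) = 4 * 2              =  8 us
 *   total                                                        = 44 us
 *
 * On a 5 MHz channel (shift 2) the fixed part quadruples to 144 us, while
 * the rate passed in is already divided by 4 by the callers.
 */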
@@ -168,7 +182,7 @@ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
{
struct ieee80211_sub_if_data *sdata;
u16 dur;
- int erp;
+ int erp, shift = 0;
bool short_preamble = false;
erp = 0;
@@ -177,10 +191,11 @@ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
short_preamble = sdata->vif.bss_conf.use_short_preamble;
if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
erp = rate->flags & IEEE80211_RATE_ERP_G;
+ shift = ieee80211_vif_get_shift(vif);
}
dur = ieee80211_frame_duration(band, frame_len, rate->bitrate, erp,
- short_preamble);
+ short_preamble, shift);
return cpu_to_le16(dur);
}
@@ -194,7 +209,7 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
struct ieee80211_rate *rate;
struct ieee80211_sub_if_data *sdata;
bool short_preamble;
- int erp;
+ int erp, shift = 0, bitrate;
u16 dur;
struct ieee80211_supported_band *sband;
@@ -210,17 +225,20 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
short_preamble = sdata->vif.bss_conf.use_short_preamble;
if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
erp = rate->flags & IEEE80211_RATE_ERP_G;
+ shift = ieee80211_vif_get_shift(vif);
}
+ bitrate = DIV_ROUND_UP(rate->bitrate, 1 << shift);
+
/* CTS duration */
- dur = ieee80211_frame_duration(sband->band, 10, rate->bitrate,
- erp, short_preamble);
+ dur = ieee80211_frame_duration(sband->band, 10, bitrate,
+ erp, short_preamble, shift);
/* Data frame duration */
- dur += ieee80211_frame_duration(sband->band, frame_len, rate->bitrate,
- erp, short_preamble);
+ dur += ieee80211_frame_duration(sband->band, frame_len, bitrate,
+ erp, short_preamble, shift);
/* ACK duration */
- dur += ieee80211_frame_duration(sband->band, 10, rate->bitrate,
- erp, short_preamble);
+ dur += ieee80211_frame_duration(sband->band, 10, bitrate,
+ erp, short_preamble, shift);
return cpu_to_le16(dur);
}
@@ -235,7 +253,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
struct ieee80211_rate *rate;
struct ieee80211_sub_if_data *sdata;
bool short_preamble;
- int erp;
+ int erp, shift = 0, bitrate;
u16 dur;
struct ieee80211_supported_band *sband;
@@ -250,15 +268,18 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
short_preamble = sdata->vif.bss_conf.use_short_preamble;
if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
erp = rate->flags & IEEE80211_RATE_ERP_G;
+ shift = ieee80211_vif_get_shift(vif);
}
+ bitrate = DIV_ROUND_UP(rate->bitrate, 1 << shift);
+
/* Data frame duration */
- dur = ieee80211_frame_duration(sband->band, frame_len, rate->bitrate,
- erp, short_preamble);
+ dur = ieee80211_frame_duration(sband->band, frame_len, bitrate,
+ erp, short_preamble, shift);
if (!(frame_txctl->flags & IEEE80211_TX_CTL_NO_ACK)) {
/* ACK duration */
- dur += ieee80211_frame_duration(sband->band, 10, rate->bitrate,
- erp, short_preamble);
+ dur += ieee80211_frame_duration(sband->band, 10, bitrate,
+ erp, short_preamble, shift);
}
return cpu_to_le16(dur);
@@ -1052,32 +1073,6 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
}
}
-void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
- const size_t supp_rates_len,
- const u8 *supp_rates)
-{
- struct ieee80211_chanctx_conf *chanctx_conf;
- int i, have_higher_than_11mbit = 0;
-
- /* cf. IEEE 802.11 9.2.12 */
- for (i = 0; i < supp_rates_len; i++)
- if ((supp_rates[i] & 0x7f) * 5 > 110)
- have_higher_than_11mbit = 1;
-
- rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
-
- if (chanctx_conf &&
- chanctx_conf->def.chan->band == IEEE80211_BAND_2GHZ &&
- have_higher_than_11mbit)
- sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
- else
- sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
- rcu_read_unlock();
-
- ieee80211_set_wmm_default(sdata, true);
-}
-
void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
u16 transaction, u16 auth_alg, u16 status,
const u8 *extra, size_t extra_len, const u8 *da,
@@ -1162,7 +1157,7 @@ void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
size_t buffer_len, const u8 *ie, size_t ie_len,
enum ieee80211_band band, u32 rate_mask,
- u8 channel)
+ struct cfg80211_chan_def *chandef)
{
struct ieee80211_supported_band *sband;
u8 *pos = buffer, *end = buffer + buffer_len;
@@ -1171,16 +1166,26 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
u8 rates[32];
int num_rates;
int ext_rates_len;
+ int shift;
+ u32 rate_flags;
sband = local->hw.wiphy->bands[band];
if (WARN_ON_ONCE(!sband))
return 0;
+ rate_flags = ieee80211_chandef_rate_flags(chandef);
+ shift = ieee80211_chandef_get_shift(chandef);
+
num_rates = 0;
for (i = 0; i < sband->n_bitrates; i++) {
if ((BIT(i) & rate_mask) == 0)
continue; /* skip rate */
- rates[num_rates++] = (u8) (sband->bitrates[i].bitrate / 5);
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ continue;
+
+ rates[num_rates++] =
+ (u8) DIV_ROUND_UP(sband->bitrates[i].bitrate,
+ (1 << shift) * 5);
}
supp_rates_len = min_t(int, num_rates, 8);
@@ -1220,12 +1225,13 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
pos += ext_rates_len;
}
- if (channel && sband->band == IEEE80211_BAND_2GHZ) {
+ if (chandef->chan && sband->band == IEEE80211_BAND_2GHZ) {
if (end - pos < 3)
goto out_err;
*pos++ = WLAN_EID_DS_PARAMS;
*pos++ = 1;
- *pos++ = channel;
+ *pos++ = ieee80211_frequency_to_channel(
+ chandef->chan->center_freq);
}
/* insert custom IEs that go before HT */
@@ -1290,9 +1296,9 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
bool directed)
{
struct ieee80211_local *local = sdata->local;
+ struct cfg80211_chan_def chandef;
struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
- u8 chan_no;
int ies_len;
/*
@@ -1300,10 +1306,11 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
* in order to maximize the chance that we get a response. Some
* badly-behaved APs don't respond when this parameter is included.
*/
+ chandef.width = sdata->vif.bss_conf.chandef.width;
if (directed)
- chan_no = 0;
+ chandef.chan = NULL;
else
- chan_no = ieee80211_frequency_to_channel(chan->center_freq);
+ chandef.chan = chan;
skb = ieee80211_probereq_get(&local->hw, &sdata->vif,
ssid, ssid_len, 100 + ie_len);
@@ -1313,7 +1320,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
ies_len = ieee80211_build_preq_ies(local, skb_tail_pointer(skb),
skb_tailroom(skb),
ie, ie_len, chan->band,
- ratemask, chan_no);
+ ratemask, &chandef);
skb_put(skb, ies_len);
if (dst) {
@@ -1347,16 +1354,19 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
}
}
-u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
+u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems *elems,
enum ieee80211_band band, u32 *basic_rates)
{
struct ieee80211_supported_band *sband;
struct ieee80211_rate *bitrates;
size_t num_rates;
- u32 supp_rates;
- int i, j;
- sband = local->hw.wiphy->bands[band];
+ u32 supp_rates, rate_flags;
+ int i, j, shift;
+ sband = sdata->local->hw.wiphy->bands[band];
+
+ rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
+ shift = ieee80211_vif_get_shift(&sdata->vif);
if (WARN_ON(!sband))
return 1;
@@ -1381,7 +1391,15 @@ u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
continue;
for (j = 0; j < num_rates; j++) {
- if (bitrates[j].bitrate == own_rate) {
+ int brate;
+ if ((rate_flags & sband->bitrates[j].flags)
+ != rate_flags)
+ continue;
+
+ brate = DIV_ROUND_UP(sband->bitrates[j].bitrate,
+ 1 << shift);
+
+ if (brate == own_rate) {
supp_rates |= BIT(j);
if (basic_rates && is_basic)
*basic_rates |= BIT(j);
@@ -1435,8 +1453,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
local->resuming = true;
if (local->wowlan) {
- local->wowlan = false;
res = drv_resume(local);
+ local->wowlan = false;
if (res < 0) {
local->resuming = false;
return res;
@@ -2004,18 +2022,56 @@ void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
cfg80211_chandef_create(chandef, control_chan, channel_type);
}
+int ieee80211_parse_bitrates(struct cfg80211_chan_def *chandef,
+ const struct ieee80211_supported_band *sband,
+ const u8 *srates, int srates_len, u32 *rates)
+{
+ u32 rate_flags = ieee80211_chandef_rate_flags(chandef);
+ int shift = ieee80211_chandef_get_shift(chandef);
+ struct ieee80211_rate *br;
+ int brate, rate, i, j, count = 0;
+
+ *rates = 0;
+
+ for (i = 0; i < srates_len; i++) {
+ rate = srates[i] & 0x7f;
+
+ for (j = 0; j < sband->n_bitrates; j++) {
+ br = &sband->bitrates[j];
+ if ((rate_flags & br->flags) != rate_flags)
+ continue;
+
+ brate = DIV_ROUND_UP(br->bitrate, (1 << shift) * 5);
+ if (brate == rate) {
+ *rates |= BIT(j);
+ count++;
+ break;
+ }
+ }
+ }
+ return count;
+}
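/*
 * Editor's note -- usage sketch (values hypothetical): the new helper maps a
 * raw supported-rates IE (units of 500 kbps, basic bit stripped via & 0x7f)
 * to a bitmap of sband indices, honouring the chandef's rate flags and shift.
 */
static int example_parse(struct cfg80211_chan_def *chandef,
			 const struct ieee80211_supported_band *sband)
{
	static const u8 ie[] = { 0x82, 0x04 };	/* 1 Mbps (basic), 2 Mbps */
	u32 rates;

	/* returns the number of matches; BIT(j) set per matching index */
	return ieee80211_parse_bitrates(chandef, sband, ie, sizeof(ie),
					&rates);
}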
+
int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, bool need_basic,
enum ieee80211_band band)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
- int rate;
+ int rate, shift;
u8 i, rates, *pos;
u32 basic_rates = sdata->vif.bss_conf.basic_rates;
+ u32 rate_flags;
+ shift = ieee80211_vif_get_shift(&sdata->vif);
+ rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
sband = local->hw.wiphy->bands[band];
- rates = sband->n_bitrates;
+ rates = 0;
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ continue;
+ rates++;
+ }
if (rates > 8)
rates = 8;
@@ -2027,10 +2083,15 @@ int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
*pos++ = rates;
for (i = 0; i < rates; i++) {
u8 basic = 0;
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ continue;
+
if (need_basic && basic_rates & BIT(i))
basic = 0x80;
rate = sband->bitrates[i].bitrate;
- *pos++ = basic | (u8) (rate / 5);
+ rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
+ 5 * (1 << shift));
+ *pos++ = basic | (u8) rate;
}
return 0;
@@ -2042,12 +2103,22 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
- int rate;
+ int rate, skip, shift;
u8 i, exrates, *pos;
u32 basic_rates = sdata->vif.bss_conf.basic_rates;
+ u32 rate_flags;
+
+ rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
+ shift = ieee80211_vif_get_shift(&sdata->vif);
sband = local->hw.wiphy->bands[band];
- exrates = sband->n_bitrates;
+ exrates = 0;
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ continue;
+ exrates++;
+ }
+
if (exrates > 8)
exrates -= 8;
else
@@ -2060,12 +2131,19 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
pos = skb_put(skb, exrates + 2);
*pos++ = WLAN_EID_EXT_SUPP_RATES;
*pos++ = exrates;
+ skip = 0;
- for (i = 8; i < sband->n_bitrates; i++) {
+ for (i = 0; i < sband->n_bitrates; i++) {
u8 basic = 0;
+ if ((rate_flags & sband->bitrates[i].flags)
+ != rate_flags)
+ continue;
+ if (skip++ < 8)
+ continue;
if (need_basic && basic_rates & BIT(i))
basic = 0x80;
- rate = sband->bitrates[i].bitrate;
- *pos++ = basic | (u8) (rate / 5);
+ rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
+ 5 * (1 << shift));
+ *pos++ = basic | (u8) rate;
}
}
return 0;
@@ -2149,9 +2227,17 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
ri.flags |= RATE_INFO_FLAGS_SHORT_GI;
} else {
struct ieee80211_supported_band *sband;
+ int shift = 0;
+ int bitrate;
+
+ if (status->flag & RX_FLAG_10MHZ)
+ shift = 1;
+ if (status->flag & RX_FLAG_5MHZ)
+ shift = 2;
sband = local->hw.wiphy->bands[status->band];
- ri.legacy = sband->bitrates[status->rate_idx].bitrate;
+ bitrate = sband->bitrates[status->rate_idx].bitrate;
+ ri.legacy = DIV_ROUND_UP(bitrate, (1 << shift));
}
rate = cfg80211_calculate_bitrate(&ri);
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 56d22cae5906..6e839b6dff2b 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -408,21 +408,10 @@ config NF_NAT_TFTP
depends on NF_CONNTRACK && NF_NAT
default NF_NAT && NF_CONNTRACK_TFTP
-endif # NF_CONNTRACK
-
-# transparent proxy support
-config NETFILTER_TPROXY
- tristate "Transparent proxying support"
- depends on IP_NF_MANGLE
- depends on NETFILTER_ADVANCED
- help
- This option enables transparent proxying support, that is,
- support for handling non-locally bound IPv4 TCP and UDP sockets.
- For it to work you will have to configure certain iptables rules
- and use policy routing. For more information on how to set it up
- see Documentation/networking/tproxy.txt.
+config NETFILTER_SYNPROXY
+ tristate
- To compile it as a module, choose M here. If unsure, say N.
+endif # NF_CONNTRACK
config NETFILTER_XTABLES
tristate "Netfilter Xtables support (required for ip_tables)"
@@ -720,10 +709,10 @@ config NETFILTER_XT_TARGET_TEE
this clone be rerouted to another nexthop.
config NETFILTER_XT_TARGET_TPROXY
- tristate '"TPROXY" target support'
- depends on NETFILTER_TPROXY
+ tristate '"TPROXY" target transparent proxying support'
depends on NETFILTER_XTABLES
depends on NETFILTER_ADVANCED
+ depends on IP_NF_MANGLE
select NF_DEFRAG_IPV4
select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
help
@@ -731,6 +720,9 @@ config NETFILTER_XT_TARGET_TPROXY
REDIRECT. It can only be used in the mangle table and is useful
to redirect traffic to a transparent proxy. It does _not_ depend
on Netfilter connection tracking and NAT, unlike REDIRECT.
+ For it to work you will have to configure certain iptables rules
+ and use policy routing. For more information on how to set it up
+ see Documentation/networking/tproxy.txt.
To compile it as a module, choose M here. If unsure, say N.
@@ -1180,10 +1172,10 @@ config NETFILTER_XT_MATCH_SCTP
config NETFILTER_XT_MATCH_SOCKET
tristate '"socket" match support'
- depends on NETFILTER_TPROXY
depends on NETFILTER_XTABLES
depends on NETFILTER_ADVANCED
depends on !NF_CONNTRACK || NF_CONNTRACK
+ depends on (IPV6 || IPV6=n)
select NF_DEFRAG_IPV4
select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
help
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index a1abf87d43bf..c3a0a12907f6 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -1,6 +1,6 @@
netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o
-nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o nf_conntrack_acct.o
+nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o nf_conntrack_acct.o nf_conntrack_seqadj.o
nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMEOUT) += nf_conntrack_timeout.o
nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMESTAMP) += nf_conntrack_timestamp.o
nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o
@@ -61,8 +61,8 @@ obj-$(CONFIG_NF_NAT_IRC) += nf_nat_irc.o
obj-$(CONFIG_NF_NAT_SIP) += nf_nat_sip.o
obj-$(CONFIG_NF_NAT_TFTP) += nf_nat_tftp.o
-# transparent proxy support
-obj-$(CONFIG_NETFILTER_TPROXY) += nf_tproxy_core.o
+# SYNPROXY
+obj-$(CONFIG_NETFILTER_SYNPROXY) += nf_synproxy_core.o
# generic X tables
obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 2217363ab422..593b16ea45e0 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -234,12 +234,13 @@ EXPORT_SYMBOL(skb_make_writable);
/* This does not belong here, but locally generated errors need it if connection
 tracking is in use: without this, the connection may not be in the hash table, and
 hence manufactured ICMP or RST packets will not be associated with it. */
-void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *) __rcu __read_mostly;
+void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *)
+ __rcu __read_mostly;
EXPORT_SYMBOL(ip_ct_attach);
-void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb)
+void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
{
- void (*attach)(struct sk_buff *, struct sk_buff *);
+ void (*attach)(struct sk_buff *, const struct sk_buff *);
if (skb->nfct) {
rcu_read_lock();
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 3cd85b2fc67c..5199448697f6 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -414,7 +414,7 @@ static void ip_vs_lblcr_flush(struct ip_vs_service *svc)
spin_lock_bh(&svc->sched_lock);
tbl->dead = 1;
- for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
+ for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
ip_vs_lblcr_free(en);
}
@@ -440,7 +440,7 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
struct ip_vs_lblcr_entry *en;
struct hlist_node *next;
- for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
+ for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
spin_lock(&svc->sched_lock);
@@ -495,7 +495,7 @@ static void ip_vs_lblcr_check_expire(unsigned long data)
if (goal > tbl->max_size/2)
goal = tbl->max_size/2;
- for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
+ for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
spin_lock(&svc->sched_lock);
@@ -536,7 +536,7 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
/*
* Initialize the hash buckets
*/
- for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
+ for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
INIT_HLIST_HEAD(&tbl->bucket[i]);
}
tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index 3c0da8728036..23e596e438b3 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -66,15 +66,7 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
static void sctp_nat_csum(struct sk_buff *skb, sctp_sctphdr_t *sctph,
unsigned int sctphoff)
{
- __u32 crc32;
- struct sk_buff *iter;
-
- crc32 = sctp_start_cksum((__u8 *)sctph, skb_headlen(skb) - sctphoff);
- skb_walk_frags(skb, iter)
- crc32 = sctp_update_cksum((u8 *) iter->data,
- skb_headlen(iter), crc32);
- sctph->checksum = sctp_end_cksum(crc32);
-
+ sctph->checksum = sctp_compute_cksum(skb, sctphoff);
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
@@ -151,10 +143,7 @@ sctp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
{
unsigned int sctphoff;
struct sctphdr *sh, _sctph;
- struct sk_buff *iter;
- __le32 cmp;
- __le32 val;
- __u32 tmp;
+ __le32 cmp, val;
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
@@ -168,13 +157,7 @@ sctp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
return 0;
cmp = sh->checksum;
-
- tmp = sctp_start_cksum((__u8 *) sh, skb_headlen(skb));
- skb_walk_frags(skb, iter)
- tmp = sctp_update_cksum((__u8 *) iter->data,
- skb_headlen(iter), tmp);
-
- val = sctp_end_cksum(tmp);
+ val = sctp_compute_cksum(skb, sctphoff);
if (val != cmp) {
/* CRC failure, dump it. */
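Both IPVS call sites above previously open-coded the CRC32c walk over the
linear head and the frag list; they now defer to sctp_compute_cksum(). A
minimal sketch of what that computation amounts to, built from the same
primitives the removed code used (kernel context assumed, helper name
hypothetical):

    #include <linux/skbuff.h>
    #include <net/sctp/checksum.h>

    /* Sketch: the CRC32c walk the removed code performed by hand.
     * Covers the linear area from 'off' onward, then every fragment
     * hanging off the head skb. */
    static __le32 sctp_cksum_sketch(struct sk_buff *skb, unsigned int off)
    {
            struct sk_buff *iter;
            __u32 crc;

            crc = sctp_start_cksum((__u8 *)skb->data + off,
                                   skb_headlen(skb) - off);
            skb_walk_frags(skb, iter)
                    crc = sctp_update_cksum((__u8 *)iter->data,
                                            skb_headlen(iter), crc);
            return sctp_end_cksum(crc);
    }

Folding this into one shared helper removes two copies of the walk here and
a third one in nf_nat_proto_sctp.c further down.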
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index f16c027df15b..3588faebe529 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -269,14 +269,20 @@ ip_vs_sh_get_port(const struct sk_buff *skb, struct ip_vs_iphdr *iph)
switch (iph->protocol) {
case IPPROTO_TCP:
th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
+ if (unlikely(th == NULL))
+ return 0;
port = th->source;
break;
case IPPROTO_UDP:
uh = skb_header_pointer(skb, iph->len, sizeof(_udph), &_udph);
+ if (unlikely(uh == NULL))
+ return 0;
port = uh->source;
break;
case IPPROTO_SCTP:
sh = skb_header_pointer(skb, iph->len, sizeof(_sctph), &_sctph);
+ if (unlikely(sh == NULL))
+ return 0;
port = sh->source;
break;
default:
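The ip_vs_sh hunk above adds the missing NULL checks: skb_header_pointer()
returns NULL whenever the requested range does not fit inside the packet
(otherwise it returns a pointer into the skb, or into the caller's stack
buffer for non-linear data). The pattern for one protocol, sketched (function
name hypothetical):

    #include <linux/skbuff.h>
    #include <linux/tcp.h>

    /* Sketch: fetch the TCP source port, or 0 for a truncated header,
     * mirroring the "return 0" fallback added above. */
    static __be16 sh_tcp_source_port(const struct sk_buff *skb,
                                     unsigned int thoff)
    {
            struct tcphdr _tcph;
            const struct tcphdr *th;

            th = skb_header_pointer(skb, thoff, sizeof(_tcph), &_tcph);
            if (unlikely(th == NULL))
                    return 0;       /* caller treats port 0 as "no port" */
            return th->source;
    }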
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 0283baedcdfb..5d892febd64c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -39,6 +39,7 @@
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
@@ -47,6 +48,7 @@
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_labels.h>
+#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
@@ -238,7 +240,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
nf_conntrack_free(ct);
}
-void nf_ct_delete_from_lists(struct nf_conn *ct)
+static void nf_ct_delete_from_lists(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
@@ -253,7 +255,6 @@ void nf_ct_delete_from_lists(struct nf_conn *ct)
&net->ct.dying);
spin_unlock_bh(&nf_conntrack_lock);
}
-EXPORT_SYMBOL_GPL(nf_ct_delete_from_lists);
static void death_by_event(unsigned long ul_conntrack)
{
@@ -275,7 +276,7 @@ static void death_by_event(unsigned long ul_conntrack)
nf_ct_put(ct);
}
-void nf_ct_dying_timeout(struct nf_conn *ct)
+static void nf_ct_dying_timeout(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
@@ -288,27 +289,33 @@ void nf_ct_dying_timeout(struct nf_conn *ct)
(prandom_u32() % net->ct.sysctl_events_retry_timeout);
add_timer(&ecache->timeout);
}
-EXPORT_SYMBOL_GPL(nf_ct_dying_timeout);
-static void death_by_timeout(unsigned long ul_conntrack)
+bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
{
- struct nf_conn *ct = (void *)ul_conntrack;
struct nf_conn_tstamp *tstamp;
tstamp = nf_conn_tstamp_find(ct);
if (tstamp && tstamp->stop == 0)
tstamp->stop = ktime_to_ns(ktime_get_real());
- if (!test_bit(IPS_DYING_BIT, &ct->status) &&
- unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) {
+ if (!nf_ct_is_dying(ct) &&
+ unlikely(nf_conntrack_event_report(IPCT_DESTROY, ct,
+ portid, report) < 0)) {
/* destroy event was not delivered */
nf_ct_delete_from_lists(ct);
nf_ct_dying_timeout(ct);
- return;
+ return false;
}
set_bit(IPS_DYING_BIT, &ct->status);
nf_ct_delete_from_lists(ct);
nf_ct_put(ct);
+ return true;
+}
+EXPORT_SYMBOL_GPL(nf_ct_delete);
+
+static void death_by_timeout(unsigned long ul_conntrack)
+{
+ nf_ct_delete((struct nf_conn *)ul_conntrack, 0, 0);
}
/*
@@ -643,10 +650,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
return dropped;
if (del_timer(&ct->timeout)) {
- death_by_timeout((unsigned long)ct);
- /* Check if we indeed killed this entry. Reliable event
- delivery may have inserted it into the dying list. */
- if (test_bit(IPS_DYING_BIT, &ct->status)) {
+ if (nf_ct_delete(ct, 0, 0)) {
dropped = 1;
NF_CT_STAT_INC_ATOMIC(net, early_drop);
}
@@ -796,6 +800,11 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
if (IS_ERR(ct))
return (struct nf_conntrack_tuple_hash *)ct;
+ if (tmpl && nfct_synproxy(tmpl)) {
+ nfct_seqadj_ext_add(ct);
+ nfct_synproxy_ext_add(ct);
+ }
+
timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
if (timeout_ext)
timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext);
@@ -1192,7 +1201,7 @@ EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
#endif
/* Used by ipt_REJECT and ip6t_REJECT. */
-static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
+static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
{
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
@@ -1244,7 +1253,7 @@ found:
void nf_ct_iterate_cleanup(struct net *net,
int (*iter)(struct nf_conn *i, void *data),
- void *data)
+ void *data, u32 portid, int report)
{
struct nf_conn *ct;
unsigned int bucket = 0;
@@ -1252,7 +1261,8 @@ void nf_ct_iterate_cleanup(struct net *net,
while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
/* Time to push up daisies... */
if (del_timer(&ct->timeout))
- death_by_timeout((unsigned long)ct);
+ nf_ct_delete(ct, portid, report);
+
/* ... else the timer will get him soon. */
nf_ct_put(ct);
@@ -1260,30 +1270,6 @@ void nf_ct_iterate_cleanup(struct net *net,
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
-struct __nf_ct_flush_report {
- u32 portid;
- int report;
-};
-
-static int kill_report(struct nf_conn *i, void *data)
-{
- struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;
- struct nf_conn_tstamp *tstamp;
-
- tstamp = nf_conn_tstamp_find(i);
- if (tstamp && tstamp->stop == 0)
- tstamp->stop = ktime_to_ns(ktime_get_real());
-
- /* If we fail to deliver the event, death_by_timeout() will retry */
- if (nf_conntrack_event_report(IPCT_DESTROY, i,
- fr->portid, fr->report) < 0)
- return 1;
-
- /* Avoid the delivery of the destroy event in death_by_timeout(). */
- set_bit(IPS_DYING_BIT, &i->status);
- return 1;
-}
-
static int kill_all(struct nf_conn *i, void *data)
{
return 1;
@@ -1301,11 +1287,7 @@ EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
void nf_conntrack_flush_report(struct net *net, u32 portid, int report)
{
- struct __nf_ct_flush_report fr = {
- .portid = portid,
- .report = report,
- };
- nf_ct_iterate_cleanup(net, kill_report, &fr);
+ nf_ct_iterate_cleanup(net, kill_all, NULL, portid, report);
}
EXPORT_SYMBOL_GPL(nf_conntrack_flush_report);
@@ -1351,6 +1333,7 @@ void nf_conntrack_cleanup_end(void)
nf_ct_extend_unregister(&nf_ct_zone_extend);
#endif
nf_conntrack_proto_fini();
+ nf_conntrack_seqadj_fini();
nf_conntrack_labels_fini();
nf_conntrack_helper_fini();
nf_conntrack_timeout_fini();
@@ -1386,7 +1369,7 @@ void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
i_see_dead_people:
busy = 0;
list_for_each_entry(net, net_exit_list, exit_list) {
- nf_ct_iterate_cleanup(net, kill_all, NULL);
+ nf_ct_iterate_cleanup(net, kill_all, NULL, 0, 0);
nf_ct_release_dying_list(net);
if (atomic_read(&net->ct.count) != 0)
busy = 1;
@@ -1556,6 +1539,10 @@ int nf_conntrack_init_start(void)
if (ret < 0)
goto err_labels;
+ ret = nf_conntrack_seqadj_init();
+ if (ret < 0)
+ goto err_seqadj;
+
#ifdef CONFIG_NF_CONNTRACK_ZONES
ret = nf_ct_extend_register(&nf_ct_zone_extend);
if (ret < 0)
@@ -1580,6 +1567,8 @@ err_proto:
nf_ct_extend_unregister(&nf_ct_zone_extend);
err_extend:
#endif
+ nf_conntrack_seqadj_fini();
+err_seqadj:
nf_conntrack_labels_fini();
err_labels:
nf_conntrack_helper_fini();
@@ -1602,9 +1591,6 @@ void nf_conntrack_init_end(void)
/* For use by REJECT target */
RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack);
-
- /* Howto get NAT offsets */
- RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
}
/*
@@ -1691,8 +1677,3 @@ err_slabname:
err_stat:
return ret;
}
-
-s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
- enum ip_conntrack_dir dir,
- u32 seq);
-EXPORT_SYMBOL_GPL(nf_ct_nat_offset);
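The thread running through the nf_conntrack_core.c changes above:
death_by_timeout(), the ctnetlink delete path and the old kill_report()
flush all collapse into a single helper, nf_ct_delete(ct, portid, report).
It returns true when the entry was actually killed and false when the
destroy event could not be delivered, in which case the entry is parked on
the dying list for redelivery. The caller pattern, sketched after the new
early_drop() (function name hypothetical):

    /* Sketch, kernel context assumed: evict one conntrack entry.
     * Winning del_timer() gives us ownership of the timeout; then
     * nf_ct_delete() either kills the entry or re-queues it until
     * its IPCT_DESTROY event can be delivered. */
    static bool evict_one(struct nf_conn *ct)
    {
            if (!del_timer(&ct->timeout))
                    return false;   /* timer already fired, it will die soon */

            /* portid/report are 0: no netlink requester to credit. */
            return nf_ct_delete(ct, 0, 0);
    }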
diff --git a/net/netfilter/nf_conntrack_labels.c b/net/netfilter/nf_conntrack_labels.c
index 355d2ef08094..bb53f120e79c 100644
--- a/net/netfilter/nf_conntrack_labels.c
+++ b/net/netfilter/nf_conntrack_labels.c
@@ -8,12 +8,8 @@
* published by the Free Software Foundation.
*/
-#include <linux/ctype.h>
#include <linux/export.h>
-#include <linux/jhash.h>
-#include <linux/spinlock.h>
#include <linux/types.h>
-#include <linux/slab.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_labels.h>
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index edc410e778f7..eea936b70d15 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -37,6 +37,7 @@
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>
@@ -381,9 +382,8 @@ nla_put_failure:
return -1;
}
-#ifdef CONFIG_NF_NAT_NEEDED
static int
-dump_nat_seq_adj(struct sk_buff *skb, const struct nf_nat_seq *natseq, int type)
+dump_ct_seq_adj(struct sk_buff *skb, const struct nf_ct_seqadj *seq, int type)
{
struct nlattr *nest_parms;
@@ -391,12 +391,12 @@ dump_nat_seq_adj(struct sk_buff *skb, const struct nf_nat_seq *natseq, int type)
if (!nest_parms)
goto nla_put_failure;
- if (nla_put_be32(skb, CTA_NAT_SEQ_CORRECTION_POS,
- htonl(natseq->correction_pos)) ||
- nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_BEFORE,
- htonl(natseq->offset_before)) ||
- nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_AFTER,
- htonl(natseq->offset_after)))
+ if (nla_put_be32(skb, CTA_SEQADJ_CORRECTION_POS,
+ htonl(seq->correction_pos)) ||
+ nla_put_be32(skb, CTA_SEQADJ_OFFSET_BEFORE,
+ htonl(seq->offset_before)) ||
+ nla_put_be32(skb, CTA_SEQADJ_OFFSET_AFTER,
+ htonl(seq->offset_after)))
goto nla_put_failure;
nla_nest_end(skb, nest_parms);
@@ -408,27 +408,24 @@ nla_put_failure:
}
static inline int
-ctnetlink_dump_nat_seq_adj(struct sk_buff *skb, const struct nf_conn *ct)
+ctnetlink_dump_ct_seq_adj(struct sk_buff *skb, const struct nf_conn *ct)
{
- struct nf_nat_seq *natseq;
- struct nf_conn_nat *nat = nfct_nat(ct);
+ struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
+ struct nf_ct_seqadj *seq;
- if (!(ct->status & IPS_SEQ_ADJUST) || !nat)
+ if (!(ct->status & IPS_SEQ_ADJUST) || !seqadj)
return 0;
- natseq = &nat->seq[IP_CT_DIR_ORIGINAL];
- if (dump_nat_seq_adj(skb, natseq, CTA_NAT_SEQ_ADJ_ORIG) == -1)
+ seq = &seqadj->seq[IP_CT_DIR_ORIGINAL];
+ if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_ORIG) == -1)
return -1;
- natseq = &nat->seq[IP_CT_DIR_REPLY];
- if (dump_nat_seq_adj(skb, natseq, CTA_NAT_SEQ_ADJ_REPLY) == -1)
+ seq = &seqadj->seq[IP_CT_DIR_REPLY];
+ if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_REPLY) == -1)
return -1;
return 0;
}
-#else
-#define ctnetlink_dump_nat_seq_adj(a, b) (0)
-#endif
static inline int
ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
@@ -502,7 +499,7 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
ctnetlink_dump_id(skb, ct) < 0 ||
ctnetlink_dump_use(skb, ct) < 0 ||
ctnetlink_dump_master(skb, ct) < 0 ||
- ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
+ ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
goto nla_put_failure;
nlmsg_end(skb, nlh);
@@ -707,8 +704,8 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
ctnetlink_dump_master(skb, ct) < 0)
goto nla_put_failure;
- if (events & (1 << IPCT_NATSEQADJ) &&
- ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
+ if (events & (1 << IPCT_SEQADJ) &&
+ ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
goto nla_put_failure;
}
@@ -1038,21 +1035,9 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
}
}
- if (del_timer(&ct->timeout)) {
- if (nf_conntrack_event_report(IPCT_DESTROY, ct,
- NETLINK_CB(skb).portid,
- nlmsg_report(nlh)) < 0) {
- nf_ct_delete_from_lists(ct);
- /* we failed to report the event, try later */
- nf_ct_dying_timeout(ct);
- nf_ct_put(ct);
- return 0;
- }
- /* death_by_timeout would report the event again */
- set_bit(IPS_DYING_BIT, &ct->status);
- nf_ct_delete_from_lists(ct);
- nf_ct_put(ct);
- }
+ if (del_timer(&ct->timeout))
+ nf_ct_delete(ct, NETLINK_CB(skb).portid, nlmsg_report(nlh));
+
nf_ct_put(ct);
return 0;
@@ -1451,66 +1436,65 @@ ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[]
return err;
}
-#ifdef CONFIG_NF_NAT_NEEDED
-static const struct nla_policy nat_seq_policy[CTA_NAT_SEQ_MAX+1] = {
- [CTA_NAT_SEQ_CORRECTION_POS] = { .type = NLA_U32 },
- [CTA_NAT_SEQ_OFFSET_BEFORE] = { .type = NLA_U32 },
- [CTA_NAT_SEQ_OFFSET_AFTER] = { .type = NLA_U32 },
+static const struct nla_policy seqadj_policy[CTA_SEQADJ_MAX+1] = {
+ [CTA_SEQADJ_CORRECTION_POS] = { .type = NLA_U32 },
+ [CTA_SEQADJ_OFFSET_BEFORE] = { .type = NLA_U32 },
+ [CTA_SEQADJ_OFFSET_AFTER] = { .type = NLA_U32 },
};
static inline int
-change_nat_seq_adj(struct nf_nat_seq *natseq, const struct nlattr * const attr)
+change_seq_adj(struct nf_ct_seqadj *seq, const struct nlattr * const attr)
{
int err;
- struct nlattr *cda[CTA_NAT_SEQ_MAX+1];
+ struct nlattr *cda[CTA_SEQADJ_MAX+1];
- err = nla_parse_nested(cda, CTA_NAT_SEQ_MAX, attr, nat_seq_policy);
+ err = nla_parse_nested(cda, CTA_SEQADJ_MAX, attr, seqadj_policy);
if (err < 0)
return err;
- if (!cda[CTA_NAT_SEQ_CORRECTION_POS])
+ if (!cda[CTA_SEQADJ_CORRECTION_POS])
return -EINVAL;
- natseq->correction_pos =
- ntohl(nla_get_be32(cda[CTA_NAT_SEQ_CORRECTION_POS]));
+ seq->correction_pos =
+ ntohl(nla_get_be32(cda[CTA_SEQADJ_CORRECTION_POS]));
- if (!cda[CTA_NAT_SEQ_OFFSET_BEFORE])
+ if (!cda[CTA_SEQADJ_OFFSET_BEFORE])
return -EINVAL;
- natseq->offset_before =
- ntohl(nla_get_be32(cda[CTA_NAT_SEQ_OFFSET_BEFORE]));
+ seq->offset_before =
+ ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_BEFORE]));
- if (!cda[CTA_NAT_SEQ_OFFSET_AFTER])
+ if (!cda[CTA_SEQADJ_OFFSET_AFTER])
return -EINVAL;
- natseq->offset_after =
- ntohl(nla_get_be32(cda[CTA_NAT_SEQ_OFFSET_AFTER]));
+ seq->offset_after =
+ ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_AFTER]));
return 0;
}
static int
-ctnetlink_change_nat_seq_adj(struct nf_conn *ct,
- const struct nlattr * const cda[])
+ctnetlink_change_seq_adj(struct nf_conn *ct,
+ const struct nlattr * const cda[])
{
+ struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
int ret = 0;
- struct nf_conn_nat *nat = nfct_nat(ct);
- if (!nat)
+ if (!seqadj)
return 0;
- if (cda[CTA_NAT_SEQ_ADJ_ORIG]) {
- ret = change_nat_seq_adj(&nat->seq[IP_CT_DIR_ORIGINAL],
- cda[CTA_NAT_SEQ_ADJ_ORIG]);
+ if (cda[CTA_SEQ_ADJ_ORIG]) {
+ ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_ORIGINAL],
+ cda[CTA_SEQ_ADJ_ORIG]);
if (ret < 0)
return ret;
ct->status |= IPS_SEQ_ADJUST;
}
- if (cda[CTA_NAT_SEQ_ADJ_REPLY]) {
- ret = change_nat_seq_adj(&nat->seq[IP_CT_DIR_REPLY],
- cda[CTA_NAT_SEQ_ADJ_REPLY]);
+ if (cda[CTA_SEQ_ADJ_REPLY]) {
+ ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_REPLY],
+ cda[CTA_SEQ_ADJ_REPLY]);
if (ret < 0)
return ret;
@@ -1519,7 +1503,6 @@ ctnetlink_change_nat_seq_adj(struct nf_conn *ct,
return 0;
}
-#endif
static int
ctnetlink_attach_labels(struct nf_conn *ct, const struct nlattr * const cda[])
@@ -1585,13 +1568,12 @@ ctnetlink_change_conntrack(struct nf_conn *ct,
ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
#endif
-#ifdef CONFIG_NF_NAT_NEEDED
- if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
- err = ctnetlink_change_nat_seq_adj(ct, cda);
+ if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) {
+ err = ctnetlink_change_seq_adj(ct, cda);
if (err < 0)
return err;
}
-#endif
+
if (cda[CTA_LABELS]) {
err = ctnetlink_attach_labels(ct, cda);
if (err < 0)
@@ -1696,13 +1678,11 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
goto err2;
}
-#ifdef CONFIG_NF_NAT_NEEDED
- if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
- err = ctnetlink_change_nat_seq_adj(ct, cda);
+ if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) {
+ err = ctnetlink_change_seq_adj(ct, cda);
if (err < 0)
goto err2;
}
-#endif
memset(&ct->proto, 0, sizeof(ct->proto));
if (cda[CTA_PROTOINFO]) {
@@ -1816,7 +1796,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
(1 << IPCT_ASSURED) |
(1 << IPCT_HELPER) |
(1 << IPCT_PROTOINFO) |
- (1 << IPCT_NATSEQADJ) |
+ (1 << IPCT_SEQADJ) |
(1 << IPCT_MARK) | events,
ct, NETLINK_CB(skb).portid,
nlmsg_report(nlh));
@@ -1839,7 +1819,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
(1 << IPCT_HELPER) |
(1 << IPCT_LABEL) |
(1 << IPCT_PROTOINFO) |
- (1 << IPCT_NATSEQADJ) |
+ (1 << IPCT_SEQADJ) |
(1 << IPCT_MARK),
ct, NETLINK_CB(skb).portid,
nlmsg_report(nlh));
@@ -1999,6 +1979,27 @@ out:
return err == -EAGAIN ? -ENOBUFS : err;
}
+static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
+ [CTA_EXPECT_MASTER] = { .type = NLA_NESTED },
+ [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED },
+ [CTA_EXPECT_MASK] = { .type = NLA_NESTED },
+ [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 },
+ [CTA_EXPECT_ID] = { .type = NLA_U32 },
+ [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING,
+ .len = NF_CT_HELPER_NAME_LEN - 1 },
+ [CTA_EXPECT_ZONE] = { .type = NLA_U16 },
+ [CTA_EXPECT_FLAGS] = { .type = NLA_U32 },
+ [CTA_EXPECT_CLASS] = { .type = NLA_U32 },
+ [CTA_EXPECT_NAT] = { .type = NLA_NESTED },
+ [CTA_EXPECT_FN] = { .type = NLA_NUL_STRING },
+};
+
+static struct nf_conntrack_expect *
+ctnetlink_alloc_expect(const struct nlattr *const cda[], struct nf_conn *ct,
+ struct nf_conntrack_helper *helper,
+ struct nf_conntrack_tuple *tuple,
+ struct nf_conntrack_tuple *mask);
+
#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
static size_t
ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
@@ -2073,7 +2074,7 @@ ctnetlink_nfqueue_build(struct sk_buff *skb, struct nf_conn *ct)
goto nla_put_failure;
if ((ct->status & IPS_SEQ_ADJUST) &&
- ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
+ ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
goto nla_put_failure;
#ifdef CONFIG_NF_CONNTRACK_MARK
@@ -2139,10 +2140,70 @@ ctnetlink_nfqueue_parse(const struct nlattr *attr, struct nf_conn *ct)
return ret;
}
+static int ctnetlink_nfqueue_exp_parse(const struct nlattr * const *cda,
+ const struct nf_conn *ct,
+ struct nf_conntrack_tuple *tuple,
+ struct nf_conntrack_tuple *mask)
+{
+ int err;
+
+ err = ctnetlink_parse_tuple(cda, tuple, CTA_EXPECT_TUPLE,
+ nf_ct_l3num(ct));
+ if (err < 0)
+ return err;
+
+ return ctnetlink_parse_tuple(cda, mask, CTA_EXPECT_MASK,
+ nf_ct_l3num(ct));
+}
+
+static int
+ctnetlink_nfqueue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
+ u32 portid, u32 report)
+{
+ struct nlattr *cda[CTA_EXPECT_MAX+1];
+ struct nf_conntrack_tuple tuple, mask;
+ struct nf_conntrack_helper *helper = NULL;
+ struct nf_conntrack_expect *exp;
+ int err;
+
+ err = nla_parse_nested(cda, CTA_EXPECT_MAX, attr, exp_nla_policy);
+ if (err < 0)
+ return err;
+
+ err = ctnetlink_nfqueue_exp_parse((const struct nlattr * const *)cda,
+ ct, &tuple, &mask);
+ if (err < 0)
+ return err;
+
+ if (cda[CTA_EXPECT_HELP_NAME]) {
+ const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
+
+ helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
+ nf_ct_protonum(ct));
+ if (helper == NULL)
+ return -EOPNOTSUPP;
+ }
+
+ exp = ctnetlink_alloc_expect((const struct nlattr * const *)cda, ct,
+ helper, &tuple, &mask);
+ if (IS_ERR(exp))
+ return PTR_ERR(exp);
+
+ err = nf_ct_expect_related_report(exp, portid, report);
+ if (err < 0) {
+ nf_ct_expect_put(exp);
+ return err;
+ }
+
+ return 0;
+}
+
static struct nfq_ct_hook ctnetlink_nfqueue_hook = {
.build_size = ctnetlink_nfqueue_build_size,
.build = ctnetlink_nfqueue_build,
.parse = ctnetlink_nfqueue_parse,
+ .attach_expect = ctnetlink_nfqueue_attach_expect,
+ .seq_adjust = nf_ct_tcp_seqadj_set,
};
#endif /* CONFIG_NETFILTER_NETLINK_QUEUE_CT */
@@ -2510,21 +2571,6 @@ static int ctnetlink_dump_exp_ct(struct sock *ctnl, struct sk_buff *skb,
return err;
}
-static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
- [CTA_EXPECT_MASTER] = { .type = NLA_NESTED },
- [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED },
- [CTA_EXPECT_MASK] = { .type = NLA_NESTED },
- [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 },
- [CTA_EXPECT_ID] = { .type = NLA_U32 },
- [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING,
- .len = NF_CT_HELPER_NAME_LEN - 1 },
- [CTA_EXPECT_ZONE] = { .type = NLA_U16 },
- [CTA_EXPECT_FLAGS] = { .type = NLA_U32 },
- [CTA_EXPECT_CLASS] = { .type = NLA_U32 },
- [CTA_EXPECT_NAT] = { .type = NLA_NESTED },
- [CTA_EXPECT_FN] = { .type = NLA_NUL_STRING },
-};
-
static int
ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
@@ -2747,76 +2793,26 @@ ctnetlink_parse_expect_nat(const struct nlattr *attr,
#endif
}
-static int
-ctnetlink_create_expect(struct net *net, u16 zone,
- const struct nlattr * const cda[],
- u_int8_t u3,
- u32 portid, int report)
+static struct nf_conntrack_expect *
+ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct,
+ struct nf_conntrack_helper *helper,
+ struct nf_conntrack_tuple *tuple,
+ struct nf_conntrack_tuple *mask)
{
- struct nf_conntrack_tuple tuple, mask, master_tuple;
- struct nf_conntrack_tuple_hash *h = NULL;
+ u_int32_t class = 0;
struct nf_conntrack_expect *exp;
- struct nf_conn *ct;
struct nf_conn_help *help;
- struct nf_conntrack_helper *helper = NULL;
- u_int32_t class = 0;
- int err = 0;
-
- /* caller guarantees that those three CTA_EXPECT_* exist */
- err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
- if (err < 0)
- return err;
- err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3);
- if (err < 0)
- return err;
- err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3);
- if (err < 0)
- return err;
-
- /* Look for master conntrack of this expectation */
- h = nf_conntrack_find_get(net, zone, &master_tuple);
- if (!h)
- return -ENOENT;
- ct = nf_ct_tuplehash_to_ctrack(h);
-
- /* Look for helper of this expectation */
- if (cda[CTA_EXPECT_HELP_NAME]) {
- const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
-
- helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
- nf_ct_protonum(ct));
- if (helper == NULL) {
-#ifdef CONFIG_MODULES
- if (request_module("nfct-helper-%s", helpname) < 0) {
- err = -EOPNOTSUPP;
- goto out;
- }
-
- helper = __nf_conntrack_helper_find(helpname,
- nf_ct_l3num(ct),
- nf_ct_protonum(ct));
- if (helper) {
- err = -EAGAIN;
- goto out;
- }
-#endif
- err = -EOPNOTSUPP;
- goto out;
- }
- }
+ int err;
if (cda[CTA_EXPECT_CLASS] && helper) {
class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS]));
- if (class > helper->expect_class_max) {
- err = -EINVAL;
- goto out;
- }
+ if (class > helper->expect_class_max)
+ return ERR_PTR(-EINVAL);
}
exp = nf_ct_expect_alloc(ct);
- if (!exp) {
- err = -ENOMEM;
- goto out;
- }
+ if (!exp)
+ return ERR_PTR(-ENOMEM);
+
help = nfct_help(ct);
if (!help) {
if (!cda[CTA_EXPECT_TIMEOUT]) {
@@ -2854,21 +2850,89 @@ ctnetlink_create_expect(struct net *net, u16 zone,
exp->class = class;
exp->master = ct;
exp->helper = helper;
- memcpy(&exp->tuple, &tuple, sizeof(struct nf_conntrack_tuple));
- memcpy(&exp->mask.src.u3, &mask.src.u3, sizeof(exp->mask.src.u3));
- exp->mask.src.u.all = mask.src.u.all;
+ exp->tuple = *tuple;
+ exp->mask.src.u3 = mask->src.u3;
+ exp->mask.src.u.all = mask->src.u.all;
if (cda[CTA_EXPECT_NAT]) {
err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT],
- exp, u3);
+ exp, nf_ct_l3num(ct));
if (err < 0)
goto err_out;
}
- err = nf_ct_expect_related_report(exp, portid, report);
+ return exp;
err_out:
nf_ct_expect_put(exp);
-out:
- nf_ct_put(nf_ct_tuplehash_to_ctrack(h));
+ return ERR_PTR(err);
+}
+
+static int
+ctnetlink_create_expect(struct net *net, u16 zone,
+ const struct nlattr * const cda[],
+ u_int8_t u3, u32 portid, int report)
+{
+ struct nf_conntrack_tuple tuple, mask, master_tuple;
+ struct nf_conntrack_tuple_hash *h = NULL;
+ struct nf_conntrack_helper *helper = NULL;
+ struct nf_conntrack_expect *exp;
+ struct nf_conn *ct;
+ int err;
+
+ /* caller guarantees that those three CTA_EXPECT_* exist */
+ err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+ if (err < 0)
+ return err;
+ err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3);
+ if (err < 0)
+ return err;
+ err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3);
+ if (err < 0)
+ return err;
+
+ /* Look for master conntrack of this expectation */
+ h = nf_conntrack_find_get(net, zone, &master_tuple);
+ if (!h)
+ return -ENOENT;
+ ct = nf_ct_tuplehash_to_ctrack(h);
+
+ if (cda[CTA_EXPECT_HELP_NAME]) {
+ const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
+
+ helper = __nf_conntrack_helper_find(helpname, u3,
+ nf_ct_protonum(ct));
+ if (helper == NULL) {
+#ifdef CONFIG_MODULES
+ if (request_module("nfct-helper-%s", helpname) < 0) {
+ err = -EOPNOTSUPP;
+ goto err_ct;
+ }
+ helper = __nf_conntrack_helper_find(helpname, u3,
+ nf_ct_protonum(ct));
+ if (helper) {
+ err = -EAGAIN;
+ goto err_ct;
+ }
+#endif
+ err = -EOPNOTSUPP;
+ goto err_ct;
+ }
+ }
+
+ exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask);
+ if (IS_ERR(exp)) {
+ err = PTR_ERR(exp);
+ goto err_ct;
+ }
+
+ err = nf_ct_expect_related_report(exp, portid, report);
+ if (err < 0)
+ goto err_exp;
+
+ return 0;
+err_exp:
+ nf_ct_expect_put(exp);
+err_ct:
+ nf_ct_put(ct);
return err;
}
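Most of the ctnetlink churn above is a mechanical split: building an
expectation moves into ctnetlink_alloc_expect(), so the classic
ctnetlink_create_expect() path and the new nfqueue attach_expect hook share
one constructor. The resulting two-step calling convention, sketched (kernel
context; tuple parsing and helper lookup elided, function name hypothetical):

    /* Sketch: shared expectation-creation flow after the split. */
    static int create_expectation(const struct nlattr * const cda[],
                                  struct nf_conn *master,
                                  struct nf_conntrack_helper *helper,
                                  struct nf_conntrack_tuple *tuple,
                                  struct nf_conntrack_tuple *mask,
                                  u32 portid, int report)
    {
            struct nf_conntrack_expect *exp;
            int err;

            /* Step 1: allocate and fill in; returns ERR_PTR() on failure. */
            exp = ctnetlink_alloc_expect(cda, master, helper, tuple, mask);
            if (IS_ERR(exp))
                    return PTR_ERR(exp);

            /* Step 2: insert it and send the netlink notification. */
            err = nf_ct_expect_related_report(exp, portid, report);
            if (err < 0)
                    nf_ct_expect_put(exp);
            return err;
    }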
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 0ab9636ac57e..ce3004156eeb 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -281,7 +281,7 @@ void nf_ct_l3proto_pernet_unregister(struct net *net,
nf_ct_l3proto_unregister_sysctl(net, proto);
/* Remove all conntrack entries for this protocol */
- nf_ct_iterate_cleanup(net, kill_l3proto, proto);
+ nf_ct_iterate_cleanup(net, kill_l3proto, proto, 0, 0);
}
EXPORT_SYMBOL_GPL(nf_ct_l3proto_pernet_unregister);
@@ -476,7 +476,7 @@ void nf_ct_l4proto_pernet_unregister(struct net *net,
nf_ct_l4proto_unregister_sysctl(net, pn, l4proto);
/* Remove all conntrack entries for this protocol */
- nf_ct_iterate_cleanup(net, kill_l4proto, l4proto);
+ nf_ct_iterate_cleanup(net, kill_l4proto, l4proto, 0, 0);
}
EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_unregister);
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 2f8010707d01..44d1ea32570a 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -27,6 +27,8 @@
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
+#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_log.h>
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
@@ -495,21 +497,6 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
}
}
-#ifdef CONFIG_NF_NAT_NEEDED
-static inline s16 nat_offset(const struct nf_conn *ct,
- enum ip_conntrack_dir dir,
- u32 seq)
-{
- typeof(nf_ct_nat_offset) get_offset = rcu_dereference(nf_ct_nat_offset);
-
- return get_offset != NULL ? get_offset(ct, dir, seq) : 0;
-}
-#define NAT_OFFSET(ct, dir, seq) \
- (nat_offset(ct, dir, seq))
-#else
-#define NAT_OFFSET(ct, dir, seq) 0
-#endif
-
static bool tcp_in_window(const struct nf_conn *ct,
struct ip_ct_tcp *state,
enum ip_conntrack_dir dir,
@@ -525,7 +512,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
struct ip_ct_tcp_state *receiver = &state->seen[!dir];
const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
__u32 seq, ack, sack, end, win, swin;
- s16 receiver_offset;
+ s32 receiver_offset;
bool res, in_recv_win;
/*
@@ -540,7 +527,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
tcp_sack(skb, dataoff, tcph, &sack);
/* Take into account NAT sequence number mangling */
- receiver_offset = NAT_OFFSET(ct, !dir, ack - 1);
+ receiver_offset = nf_ct_seq_offset(ct, !dir, ack - 1);
ack -= receiver_offset;
sack -= receiver_offset;
@@ -960,6 +947,21 @@ static int tcp_packet(struct nf_conn *ct,
"state %s ", tcp_conntrack_names[old_state]);
return NF_ACCEPT;
case TCP_CONNTRACK_MAX:
+ /* Special case for SYN proxy: when the SYN to the server or
+ * the SYN/ACK from the server is lost, the client may transmit
+ * a keep-alive packet while in SYN_SENT state. This needs to
+ * be associated with the original conntrack entry in order to
+ * generate a new SYN with the correct sequence number.
+ */
+ if (nfct_synproxy(ct) && old_state == TCP_CONNTRACK_SYN_SENT &&
+ index == TCP_ACK_SET && dir == IP_CT_DIR_ORIGINAL &&
+ ct->proto.tcp.last_dir == IP_CT_DIR_ORIGINAL &&
+ ct->proto.tcp.seen[dir].td_end - 1 == ntohl(th->seq)) {
+ pr_debug("nf_ct_tcp: SYN proxy client keep alive\n");
+ spin_unlock_bh(&ct->lock);
+ return NF_ACCEPT;
+ }
+
/* Invalid packet */
pr_debug("nf_ct_tcp: Invalid dir=%i index=%u ostate=%u\n",
dir, get_conntrack_index(th), old_state);
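Two details in the TCP tracker changes above are easy to miss. First,
receiver_offset widens from s16 to s32: with SYNPROXY the per-direction
offset is the gap between the cookie ISN and the server's real ISN, which
can be any 32-bit distance, not just a small helper-induced size delta.
Second, a client keep-alive probe sent while the tracker still sees SYN_SENT
is now accepted rather than logged as invalid; the probe is recognizable
because it carries no data and sits one sequence number below the next
expected byte. The test, sketched (function name hypothetical):

    /* Sketch: does this segment look like a TCP keep-alive probe?
     * A probe's sequence number is td_end - 1, i.e. one byte below
     * the highest sequence seen from this direction. */
    static bool is_keepalive_probe(const struct nf_conn *ct,
                                   enum ip_conntrack_dir dir,
                                   const struct tcphdr *th)
    {
            return ct->proto.tcp.seen[dir].td_end - 1 == ntohl(th->seq);
    }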
diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c
new file mode 100644
index 000000000000..5f9bfd060dea
--- /dev/null
+++ b/net/netfilter/nf_conntrack_seqadj.c
@@ -0,0 +1,238 @@
+#include <linux/types.h>
+#include <linux/netfilter.h>
+#include <net/tcp.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
+
+int nf_ct_seqadj_init(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ s32 off)
+{
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+ struct nf_conn_seqadj *seqadj;
+ struct nf_ct_seqadj *this_way;
+
+ if (off == 0)
+ return 0;
+
+ set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
+
+ seqadj = nfct_seqadj(ct);
+ this_way = &seqadj->seq[dir];
+ this_way->offset_before = off;
+ this_way->offset_after = off;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nf_ct_seqadj_init);
+
+int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ __be32 seq, s32 off)
+{
+ struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+ struct nf_ct_seqadj *this_way;
+
+ if (off == 0)
+ return 0;
+
+ set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
+
+ spin_lock_bh(&ct->lock);
+ this_way = &seqadj->seq[dir];
+ if (this_way->offset_before == this_way->offset_after ||
+ before(this_way->correction_pos, seq)) {
+ this_way->correction_pos = seq;
+ this_way->offset_before = this_way->offset_after;
+ this_way->offset_after += off;
+ }
+ spin_unlock_bh(&ct->lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nf_ct_seqadj_set);
+
+void nf_ct_tcp_seqadj_set(struct sk_buff *skb,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ s32 off)
+{
+ const struct tcphdr *th;
+
+ if (nf_ct_protonum(ct) != IPPROTO_TCP)
+ return;
+
+ th = (struct tcphdr *)(skb_network_header(skb) + ip_hdrlen(skb));
+ nf_ct_seqadj_set(ct, ctinfo, th->seq, off);
+}
+EXPORT_SYMBOL_GPL(nf_ct_tcp_seqadj_set);
+
+/* Adjust one found SACK option including checksum correction */
+static void nf_ct_sack_block_adjust(struct sk_buff *skb,
+ struct tcphdr *tcph,
+ unsigned int sackoff,
+ unsigned int sackend,
+ struct nf_ct_seqadj *seq)
+{
+ while (sackoff < sackend) {
+ struct tcp_sack_block_wire *sack;
+ __be32 new_start_seq, new_end_seq;
+
+ sack = (void *)skb->data + sackoff;
+ if (after(ntohl(sack->start_seq) - seq->offset_before,
+ seq->correction_pos))
+ new_start_seq = htonl(ntohl(sack->start_seq) -
+ seq->offset_after);
+ else
+ new_start_seq = htonl(ntohl(sack->start_seq) -
+ seq->offset_before);
+
+ if (after(ntohl(sack->end_seq) - seq->offset_before,
+ seq->correction_pos))
+ new_end_seq = htonl(ntohl(sack->end_seq) -
+ seq->offset_after);
+ else
+ new_end_seq = htonl(ntohl(sack->end_seq) -
+ seq->offset_before);
+
+ pr_debug("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n",
+ ntohl(sack->start_seq), new_start_seq,
+ ntohl(sack->end_seq), new_end_seq);
+
+ inet_proto_csum_replace4(&tcph->check, skb,
+ sack->start_seq, new_start_seq, 0);
+ inet_proto_csum_replace4(&tcph->check, skb,
+ sack->end_seq, new_end_seq, 0);
+ sack->start_seq = new_start_seq;
+ sack->end_seq = new_end_seq;
+ sackoff += sizeof(*sack);
+ }
+}
+
+/* TCP SACK sequence number adjustment */
+static unsigned int nf_ct_sack_adjust(struct sk_buff *skb,
+ unsigned int protoff,
+ struct tcphdr *tcph,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo)
+{
+ unsigned int dir, optoff, optend;
+ struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
+
+ optoff = protoff + sizeof(struct tcphdr);
+ optend = protoff + tcph->doff * 4;
+
+ if (!skb_make_writable(skb, optend))
+ return 0;
+
+ dir = CTINFO2DIR(ctinfo);
+
+ while (optoff < optend) {
+ /* Usually: option, length. */
+ unsigned char *op = skb->data + optoff;
+
+ switch (op[0]) {
+ case TCPOPT_EOL:
+ return 1;
+ case TCPOPT_NOP:
+ optoff++;
+ continue;
+ default:
+ /* no partial options */
+ if (optoff + 1 == optend ||
+ optoff + op[1] > optend ||
+ op[1] < 2)
+ return 0;
+ if (op[0] == TCPOPT_SACK &&
+ op[1] >= 2+TCPOLEN_SACK_PERBLOCK &&
+ ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0)
+ nf_ct_sack_block_adjust(skb, tcph, optoff + 2,
+ optoff+op[1],
+ &seqadj->seq[!dir]);
+ optoff += op[1];
+ }
+ }
+ return 1;
+}
+
+/* TCP sequence number adjustment. Returns 1 on success, 0 on failure */
+int nf_ct_seq_adjust(struct sk_buff *skb,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ unsigned int protoff)
+{
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+ struct tcphdr *tcph;
+ __be32 newseq, newack;
+ s32 seqoff, ackoff;
+ struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
+ struct nf_ct_seqadj *this_way, *other_way;
+ int res;
+
+ this_way = &seqadj->seq[dir];
+ other_way = &seqadj->seq[!dir];
+
+ if (!skb_make_writable(skb, protoff + sizeof(*tcph)))
+ return 0;
+
+ tcph = (void *)skb->data + protoff;
+ spin_lock_bh(&ct->lock);
+ if (after(ntohl(tcph->seq), this_way->correction_pos))
+ seqoff = this_way->offset_after;
+ else
+ seqoff = this_way->offset_before;
+
+ if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
+ other_way->correction_pos))
+ ackoff = other_way->offset_after;
+ else
+ ackoff = other_way->offset_before;
+
+ newseq = htonl(ntohl(tcph->seq) + seqoff);
+ newack = htonl(ntohl(tcph->ack_seq) - ackoff);
+
+ inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
+ inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);
+
+ pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n",
+ ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
+ ntohl(newack));
+
+ tcph->seq = newseq;
+ tcph->ack_seq = newack;
+
+ res = nf_ct_sack_adjust(skb, protoff, tcph, ct, ctinfo);
+ spin_unlock_bh(&ct->lock);
+
+ return res;
+}
+EXPORT_SYMBOL_GPL(nf_ct_seq_adjust);
+
+s32 nf_ct_seq_offset(const struct nf_conn *ct,
+ enum ip_conntrack_dir dir,
+ u32 seq)
+{
+ struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
+ struct nf_ct_seqadj *this_way;
+
+ if (!seqadj)
+ return 0;
+
+ this_way = &seqadj->seq[dir];
+ return after(seq, this_way->correction_pos) ?
+ this_way->offset_after : this_way->offset_before;
+}
+EXPORT_SYMBOL_GPL(nf_ct_seq_offset);
+
+static struct nf_ct_ext_type nf_ct_seqadj_extend __read_mostly = {
+ .len = sizeof(struct nf_conn_seqadj),
+ .align = __alignof__(struct nf_conn_seqadj),
+ .id = NF_CT_EXT_SEQADJ,
+};
+
+int nf_conntrack_seqadj_init(void)
+{
+ return nf_ct_extend_register(&nf_ct_seqadj_extend);
+}
+
+void nf_conntrack_seqadj_fini(void)
+{
+ nf_ct_extend_unregister(&nf_ct_seqadj_extend);
+}
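The new file above is the old NAT sequence-adjustment machinery (deleted
from nf_nat_helper.c further down) rehomed as a generic conntrack extension,
so SYNPROXY can shift sequence space without depending on NAT. The producer
side records a delta after changing payload length; nf_ct_seq_adjust() then
rewrites seq/ack (and SACK blocks) on every later packet. The producer side,
sketched after what __nf_nat_mangle_tcp_packet() does once this series is in
(function name hypothetical):

    /* Sketch, kernel context assumed: record the size change made at
     * tcph->seq. Offsets accumulate per direction; replies have their
     * ack_seq shifted back by the same amount in nf_ct_seq_adjust(). */
    static void record_payload_delta(struct nf_conn *ct,
                                     enum ip_conntrack_info ctinfo,
                                     const struct tcphdr *tcph,
                                     unsigned int match_len,
                                     unsigned int rep_len)
    {
            if (rep_len != match_len)
                    nf_ct_seqadj_set(ct, ctinfo, tcph->seq,
                                     (int)rep_len - (int)match_len);
    }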
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 038eee5c8f85..6f0f4f7f68a5 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -25,6 +25,7 @@
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_nat.h>
@@ -402,6 +403,9 @@ nf_nat_setup_info(struct nf_conn *ct,
ct->status |= IPS_SRC_NAT;
else
ct->status |= IPS_DST_NAT;
+
+ if (nfct_help(ct))
+ nfct_seqadj_ext_add(ct);
}
if (maniptype == NF_NAT_MANIP_SRC) {
@@ -497,7 +501,7 @@ static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
rtnl_lock();
for_each_net(net)
- nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean);
+ nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
rtnl_unlock();
}
@@ -511,7 +515,7 @@ static void nf_nat_l3proto_clean(u8 l3proto)
rtnl_lock();
for_each_net(net)
- nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean);
+ nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
rtnl_unlock();
}
@@ -749,7 +753,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
{
struct nf_nat_proto_clean clean = {};
- nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean);
+ nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean, 0, 0);
synchronize_rcu();
nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
}
@@ -764,10 +768,6 @@ static struct nf_ct_helper_expectfn follow_master_nat = {
.expectfn = nf_nat_follow_master,
};
-static struct nfq_ct_nat_hook nfq_ct_nat = {
- .seq_adjust = nf_nat_tcp_seq_adjust,
-};
-
static int __init nf_nat_init(void)
{
int ret;
@@ -787,14 +787,9 @@ static int __init nf_nat_init(void)
/* Initialize fake conntrack so that NAT will skip it */
nf_ct_untracked_status_or(IPS_NAT_DONE_MASK);
- BUG_ON(nf_nat_seq_adjust_hook != NULL);
- RCU_INIT_POINTER(nf_nat_seq_adjust_hook, nf_nat_seq_adjust);
BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook,
nfnetlink_parse_nat_setup);
- BUG_ON(nf_ct_nat_offset != NULL);
- RCU_INIT_POINTER(nf_ct_nat_offset, nf_nat_get_offset);
- RCU_INIT_POINTER(nfq_ct_nat_hook, &nfq_ct_nat);
#ifdef CONFIG_XFRM
BUG_ON(nf_nat_decode_session_hook != NULL);
RCU_INIT_POINTER(nf_nat_decode_session_hook, __nf_nat_decode_session);
@@ -813,10 +808,7 @@ static void __exit nf_nat_cleanup(void)
unregister_pernet_subsys(&nf_nat_net_ops);
nf_ct_extend_unregister(&nat_extend);
nf_ct_helper_expectfn_unregister(&follow_master_nat);
- RCU_INIT_POINTER(nf_nat_seq_adjust_hook, NULL);
RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
- RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
- RCU_INIT_POINTER(nfq_ct_nat_hook, NULL);
#ifdef CONFIG_XFRM
RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
#endif
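One behavioral change hides in nf_nat_setup_info() above: the seqadj
extension is attached only when the conntrack has a helper. Plain
address/port NAT never changes payload length, so only helper-assisted
connections (FTP, SIP and friends) need sequence bookkeeping; that is also
why the nf_nat_seq_adjust and nf_ct_nat_offset hook pointers can be deleted
outright. Sketched (function name hypothetical):

    /* Sketch: extension wiring on the first NAT setup for a flow. */
    static void nat_maybe_add_seqadj(struct nf_conn *ct)
    {
            /* Only helpers rewrite payload and thus shift TCP sequence
             * space; bare SNAT/DNAT leaves segment sizes untouched. */
            if (nfct_help(ct))
                    nfct_seqadj_ext_add(ct);
    }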
diff --git a/net/netfilter/nf_nat_helper.c b/net/netfilter/nf_nat_helper.c
index 85e20a919081..2840abb5bb99 100644
--- a/net/netfilter/nf_nat_helper.c
+++ b/net/netfilter/nf_nat_helper.c
@@ -20,74 +20,13 @@
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_l3proto.h>
#include <net/netfilter/nf_nat_l4proto.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
-#define DUMP_OFFSET(x) \
- pr_debug("offset_before=%d, offset_after=%d, correction_pos=%u\n", \
- x->offset_before, x->offset_after, x->correction_pos);
-
-static DEFINE_SPINLOCK(nf_nat_seqofs_lock);
-
-/* Setup TCP sequence correction given this change at this sequence */
-static inline void
-adjust_tcp_sequence(u32 seq,
- int sizediff,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo)
-{
- enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
- struct nf_conn_nat *nat = nfct_nat(ct);
- struct nf_nat_seq *this_way = &nat->seq[dir];
-
- pr_debug("adjust_tcp_sequence: seq = %u, sizediff = %d\n",
- seq, sizediff);
-
- pr_debug("adjust_tcp_sequence: Seq_offset before: ");
- DUMP_OFFSET(this_way);
-
- spin_lock_bh(&nf_nat_seqofs_lock);
-
- /* SYN adjust. If it's uninitialized, or this is after last
- * correction, record it: we don't handle more than one
- * adjustment in the window, but do deal with common case of a
- * retransmit */
- if (this_way->offset_before == this_way->offset_after ||
- before(this_way->correction_pos, seq)) {
- this_way->correction_pos = seq;
- this_way->offset_before = this_way->offset_after;
- this_way->offset_after += sizediff;
- }
- spin_unlock_bh(&nf_nat_seqofs_lock);
-
- pr_debug("adjust_tcp_sequence: Seq_offset after: ");
- DUMP_OFFSET(this_way);
-}
-
-/* Get the offset value, for conntrack */
-s16 nf_nat_get_offset(const struct nf_conn *ct,
- enum ip_conntrack_dir dir,
- u32 seq)
-{
- struct nf_conn_nat *nat = nfct_nat(ct);
- struct nf_nat_seq *this_way;
- s16 offset;
-
- if (!nat)
- return 0;
-
- this_way = &nat->seq[dir];
- spin_lock_bh(&nf_nat_seqofs_lock);
- offset = after(seq, this_way->correction_pos)
- ? this_way->offset_after : this_way->offset_before;
- spin_unlock_bh(&nf_nat_seqofs_lock);
-
- return offset;
-}
-
/* Frobs data inside this packet, which is linear. */
static void mangle_contents(struct sk_buff *skb,
unsigned int dataoff,
@@ -142,30 +81,6 @@ static int enlarge_skb(struct sk_buff *skb, unsigned int extra)
return 1;
}
-void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
- __be32 seq, s16 off)
-{
- if (!off)
- return;
- set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
- adjust_tcp_sequence(ntohl(seq), off, ct, ctinfo);
- nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
-}
-EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust);
-
-void nf_nat_tcp_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
- u32 ctinfo, int off)
-{
- const struct tcphdr *th;
-
- if (nf_ct_protonum(ct) != IPPROTO_TCP)
- return;
-
- th = (struct tcphdr *)(skb_network_header(skb)+ ip_hdrlen(skb));
- nf_nat_set_seq_adjust(ct, ctinfo, th->seq, off);
-}
-EXPORT_SYMBOL_GPL(nf_nat_tcp_seq_adjust);
-
/* Generic function for mangling variable-length address changes inside
* NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
* command in FTP).
@@ -210,8 +125,8 @@ int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
datalen, oldlen);
if (adjust && rep_len != match_len)
- nf_nat_set_seq_adjust(ct, ctinfo, tcph->seq,
- (int)rep_len - (int)match_len);
+ nf_ct_seqadj_set(ct, ctinfo, tcph->seq,
+ (int)rep_len - (int)match_len);
return 1;
}
@@ -271,145 +186,6 @@ nf_nat_mangle_udp_packet(struct sk_buff *skb,
}
EXPORT_SYMBOL(nf_nat_mangle_udp_packet);
-/* Adjust one found SACK option including checksum correction */
-static void
-sack_adjust(struct sk_buff *skb,
- struct tcphdr *tcph,
- unsigned int sackoff,
- unsigned int sackend,
- struct nf_nat_seq *natseq)
-{
- while (sackoff < sackend) {
- struct tcp_sack_block_wire *sack;
- __be32 new_start_seq, new_end_seq;
-
- sack = (void *)skb->data + sackoff;
- if (after(ntohl(sack->start_seq) - natseq->offset_before,
- natseq->correction_pos))
- new_start_seq = htonl(ntohl(sack->start_seq)
- - natseq->offset_after);
- else
- new_start_seq = htonl(ntohl(sack->start_seq)
- - natseq->offset_before);
-
- if (after(ntohl(sack->end_seq) - natseq->offset_before,
- natseq->correction_pos))
- new_end_seq = htonl(ntohl(sack->end_seq)
- - natseq->offset_after);
- else
- new_end_seq = htonl(ntohl(sack->end_seq)
- - natseq->offset_before);
-
- pr_debug("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n",
- ntohl(sack->start_seq), new_start_seq,
- ntohl(sack->end_seq), new_end_seq);
-
- inet_proto_csum_replace4(&tcph->check, skb,
- sack->start_seq, new_start_seq, 0);
- inet_proto_csum_replace4(&tcph->check, skb,
- sack->end_seq, new_end_seq, 0);
- sack->start_seq = new_start_seq;
- sack->end_seq = new_end_seq;
- sackoff += sizeof(*sack);
- }
-}
-
-/* TCP SACK sequence number adjustment */
-static inline unsigned int
-nf_nat_sack_adjust(struct sk_buff *skb,
- unsigned int protoff,
- struct tcphdr *tcph,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo)
-{
- unsigned int dir, optoff, optend;
- struct nf_conn_nat *nat = nfct_nat(ct);
-
- optoff = protoff + sizeof(struct tcphdr);
- optend = protoff + tcph->doff * 4;
-
- if (!skb_make_writable(skb, optend))
- return 0;
-
- dir = CTINFO2DIR(ctinfo);
-
- while (optoff < optend) {
- /* Usually: option, length. */
- unsigned char *op = skb->data + optoff;
-
- switch (op[0]) {
- case TCPOPT_EOL:
- return 1;
- case TCPOPT_NOP:
- optoff++;
- continue;
- default:
- /* no partial options */
- if (optoff + 1 == optend ||
- optoff + op[1] > optend ||
- op[1] < 2)
- return 0;
- if (op[0] == TCPOPT_SACK &&
- op[1] >= 2+TCPOLEN_SACK_PERBLOCK &&
- ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0)
- sack_adjust(skb, tcph, optoff+2,
- optoff+op[1], &nat->seq[!dir]);
- optoff += op[1];
- }
- }
- return 1;
-}
-
-/* TCP sequence number adjustment. Returns 1 on success, 0 on failure */
-int
-nf_nat_seq_adjust(struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff)
-{
- struct tcphdr *tcph;
- int dir;
- __be32 newseq, newack;
- s16 seqoff, ackoff;
- struct nf_conn_nat *nat = nfct_nat(ct);
- struct nf_nat_seq *this_way, *other_way;
-
- dir = CTINFO2DIR(ctinfo);
-
- this_way = &nat->seq[dir];
- other_way = &nat->seq[!dir];
-
- if (!skb_make_writable(skb, protoff + sizeof(*tcph)))
- return 0;
-
- tcph = (void *)skb->data + protoff;
- if (after(ntohl(tcph->seq), this_way->correction_pos))
- seqoff = this_way->offset_after;
- else
- seqoff = this_way->offset_before;
-
- if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
- other_way->correction_pos))
- ackoff = other_way->offset_after;
- else
- ackoff = other_way->offset_before;
-
- newseq = htonl(ntohl(tcph->seq) + seqoff);
- newack = htonl(ntohl(tcph->ack_seq) - ackoff);
-
- inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
- inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);
-
- pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n",
- ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
- ntohl(newack));
-
- tcph->seq = newseq;
- tcph->ack_seq = newack;
-
- return nf_nat_sack_adjust(skb, protoff, tcph, ct, ctinfo);
-}
-
/* Setup NAT on this expected conntrack so it follows master. */
/* If we fail to get a free NAT slot, we'll get dropped on confirm */
void nf_nat_follow_master(struct nf_conn *ct,
diff --git a/net/netfilter/nf_nat_proto_sctp.c b/net/netfilter/nf_nat_proto_sctp.c
index 396e55d46f90..754536f2c674 100644
--- a/net/netfilter/nf_nat_proto_sctp.c
+++ b/net/netfilter/nf_nat_proto_sctp.c
@@ -34,9 +34,7 @@ sctp_manip_pkt(struct sk_buff *skb,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
{
- struct sk_buff *frag;
sctp_sctphdr_t *hdr;
- __u32 crc32;
if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
return false;
@@ -51,11 +49,7 @@ sctp_manip_pkt(struct sk_buff *skb,
hdr->dest = tuple->dst.u.sctp.port;
}
- crc32 = sctp_start_cksum((u8 *)hdr, skb_headlen(skb) - hdroff);
- skb_walk_frags(skb, frag)
- crc32 = sctp_update_cksum((u8 *)frag->data, skb_headlen(frag),
- crc32);
- hdr->checksum = sctp_end_cksum(crc32);
+ hdr->checksum = sctp_compute_cksum(skb, hdroff);
return true;
}
diff --git a/net/netfilter/nf_nat_sip.c b/net/netfilter/nf_nat_sip.c
index dac11f73868e..f9790405b7ff 100644
--- a/net/netfilter/nf_nat_sip.c
+++ b/net/netfilter/nf_nat_sip.c
@@ -20,6 +20,7 @@
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
#include <linux/netfilter/nf_conntrack_sip.h>
MODULE_LICENSE("GPL");
@@ -308,7 +309,7 @@ static void nf_nat_sip_seq_adjust(struct sk_buff *skb, unsigned int protoff,
return;
th = (struct tcphdr *)(skb->data + protoff);
- nf_nat_set_seq_adjust(ct, ctinfo, th->seq, off);
+ nf_ct_seqadj_set(ct, ctinfo, th->seq, off);
}
/* Handles expected signalling connections and media streams */
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
new file mode 100644
index 000000000000..6fd967c6278c
--- /dev/null
+++ b/net/netfilter/nf_synproxy_core.c
@@ -0,0 +1,432 @@
+/*
+ * Copyright (c) 2013 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <asm/unaligned.h>
+#include <net/tcp.h>
+#include <net/netns/generic.h>
+
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_tcpudp.h>
+#include <linux/netfilter/xt_SYNPROXY.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
+#include <net/netfilter/nf_conntrack_synproxy.h>
+
+int synproxy_net_id;
+EXPORT_SYMBOL_GPL(synproxy_net_id);
+
+void
+synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
+ const struct tcphdr *th, struct synproxy_options *opts)
+{
+ int length = (th->doff * 4) - sizeof(*th);
+ u8 buf[40], *ptr;
+
+ ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf);
+ BUG_ON(ptr == NULL);
+
+ opts->options = 0;
+ while (length > 0) {
+ int opcode = *ptr++;
+ int opsize;
+
+ switch (opcode) {
+ case TCPOPT_EOL:
+ return;
+ case TCPOPT_NOP:
+ length--;
+ continue;
+ default:
+ opsize = *ptr++;
+ if (opsize < 2)
+ return;
+ if (opsize > length)
+ return;
+
+ switch (opcode) {
+ case TCPOPT_MSS:
+ if (opsize == TCPOLEN_MSS) {
+ opts->mss = get_unaligned_be16(ptr);
+ opts->options |= XT_SYNPROXY_OPT_MSS;
+ }
+ break;
+ case TCPOPT_WINDOW:
+ if (opsize == TCPOLEN_WINDOW) {
+ opts->wscale = *ptr;
+ if (opts->wscale > 14)
+ opts->wscale = 14;
+ opts->options |= XT_SYNPROXY_OPT_WSCALE;
+ }
+ break;
+ case TCPOPT_TIMESTAMP:
+ if (opsize == TCPOLEN_TIMESTAMP) {
+ opts->tsval = get_unaligned_be32(ptr);
+ opts->tsecr = get_unaligned_be32(ptr + 4);
+ opts->options |= XT_SYNPROXY_OPT_TIMESTAMP;
+ }
+ break;
+ case TCPOPT_SACK_PERM:
+ if (opsize == TCPOLEN_SACK_PERM)
+ opts->options |= XT_SYNPROXY_OPT_SACK_PERM;
+ break;
+ }
+
+ ptr += opsize - 2;
+ length -= opsize;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(synproxy_parse_options);
+
+unsigned int synproxy_options_size(const struct synproxy_options *opts)
+{
+ unsigned int size = 0;
+
+ if (opts->options & XT_SYNPROXY_OPT_MSS)
+ size += TCPOLEN_MSS_ALIGNED;
+ if (opts->options & XT_SYNPROXY_OPT_TIMESTAMP)
+ size += TCPOLEN_TSTAMP_ALIGNED;
+ else if (opts->options & XT_SYNPROXY_OPT_SACK_PERM)
+ size += TCPOLEN_SACKPERM_ALIGNED;
+ if (opts->options & XT_SYNPROXY_OPT_WSCALE)
+ size += TCPOLEN_WSCALE_ALIGNED;
+
+ return size;
+}
+EXPORT_SYMBOL_GPL(synproxy_options_size);
+
+void
+synproxy_build_options(struct tcphdr *th, const struct synproxy_options *opts)
+{
+ __be32 *ptr = (__be32 *)(th + 1);
+ u8 options = opts->options;
+
+ if (options & XT_SYNPROXY_OPT_MSS)
+ *ptr++ = htonl((TCPOPT_MSS << 24) |
+ (TCPOLEN_MSS << 16) |
+ opts->mss);
+
+ if (options & XT_SYNPROXY_OPT_TIMESTAMP) {
+ if (options & XT_SYNPROXY_OPT_SACK_PERM)
+ *ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
+ (TCPOLEN_SACK_PERM << 16) |
+ (TCPOPT_TIMESTAMP << 8) |
+ TCPOLEN_TIMESTAMP);
+ else
+ *ptr++ = htonl((TCPOPT_NOP << 24) |
+ (TCPOPT_NOP << 16) |
+ (TCPOPT_TIMESTAMP << 8) |
+ TCPOLEN_TIMESTAMP);
+
+ *ptr++ = htonl(opts->tsval);
+ *ptr++ = htonl(opts->tsecr);
+ } else if (options & XT_SYNPROXY_OPT_SACK_PERM)
+ *ptr++ = htonl((TCPOPT_NOP << 24) |
+ (TCPOPT_NOP << 16) |
+ (TCPOPT_SACK_PERM << 8) |
+ TCPOLEN_SACK_PERM);
+
+ if (options & XT_SYNPROXY_OPT_WSCALE)
+ *ptr++ = htonl((TCPOPT_NOP << 24) |
+ (TCPOPT_WINDOW << 16) |
+ (TCPOLEN_WINDOW << 8) |
+ opts->wscale);
+}
+EXPORT_SYMBOL_GPL(synproxy_build_options);
+
+void synproxy_init_timestamp_cookie(const struct xt_synproxy_info *info,
+ struct synproxy_options *opts)
+{
+ opts->tsecr = opts->tsval;
+ opts->tsval = tcp_time_stamp & ~0x3f;
+
+ if (opts->options & XT_SYNPROXY_OPT_WSCALE)
+ opts->tsval |= info->wscale;
+ else
+ opts->tsval |= 0xf;
+
+ if (opts->options & XT_SYNPROXY_OPT_SACK_PERM)
+ opts->tsval |= 1 << 4;
+
+ if (opts->options & XT_SYNPROXY_OPT_ECN)
+ opts->tsval |= 1 << 5;
+}
+EXPORT_SYMBOL_GPL(synproxy_init_timestamp_cookie);
+
+void synproxy_check_timestamp_cookie(struct synproxy_options *opts)
+{
+ opts->wscale = opts->tsecr & 0xf;
+ if (opts->wscale != 0xf)
+ opts->options |= XT_SYNPROXY_OPT_WSCALE;
+
+ opts->options |= opts->tsecr & (1 << 4) ? XT_SYNPROXY_OPT_SACK_PERM : 0;
+
+ opts->options |= opts->tsecr & (1 << 5) ? XT_SYNPROXY_OPT_ECN : 0;
+}
+EXPORT_SYMBOL_GPL(synproxy_check_timestamp_cookie);
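The pair of cookie helpers above encode connection options inside the TCP
timestamp: tsval keeps its upper bits as a clock (the low six bits are
masked off, so it still advances in 64-tick steps) while bits 0-3 carry the
peer's window scale (0xf meaning "none", which is why wscale is clamped to
14 during option parsing), bit 4 carries SACK-permitted and bit 5 ECN. The
client echoes the value back in tsecr, where
synproxy_check_timestamp_cookie() unpacks it. A self-contained user-space
sketch of the same packing (all names hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define OPT_WSCALE      (1 << 0)  /* mirrors XT_SYNPROXY_OPT_WSCALE */
    #define OPT_SACK_PERM   (1 << 1)
    #define OPT_ECN         (1 << 2)

    /* Pack options into the low 6 bits of a timestamp value. */
    static uint32_t ts_cookie_pack(uint32_t now, unsigned wscale,
                                   unsigned opts)
    {
            uint32_t ts = now & ~0x3fU;     /* keep 64-tick granularity */

            ts |= (opts & OPT_WSCALE) ? (wscale & 0xf) : 0xf;
            if (opts & OPT_SACK_PERM)
                    ts |= 1 << 4;
            if (opts & OPT_ECN)
                    ts |= 1 << 5;
            return ts;
    }

    /* Recover the options from the echoed tsecr. */
    static unsigned ts_cookie_unpack(uint32_t tsecr, unsigned *wscale)
    {
            unsigned opts = 0;

            *wscale = tsecr & 0xf;
            if (*wscale != 0xf)             /* 0xf encodes "no wscale" */
                    opts |= OPT_WSCALE;
            if (tsecr & (1 << 4))
                    opts |= OPT_SACK_PERM;
            if (tsecr & (1 << 5))
                    opts |= OPT_ECN;
            return opts;
    }

    int main(void)
    {
            unsigned ws;
            uint32_t ts = ts_cookie_pack(0x12345678, 7,
                                         OPT_WSCALE | OPT_SACK_PERM);
            unsigned opts = ts_cookie_unpack(ts, &ws);

            printf("ts=%#x wscale=%u sack=%d\n",
                   ts, ws, !!(opts & OPT_SACK_PERM));
            return 0;
    }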
+
+unsigned int synproxy_tstamp_adjust(struct sk_buff *skb,
+ unsigned int protoff,
+ struct tcphdr *th,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_conn_synproxy *synproxy)
+{
+ unsigned int optoff, optend;
+ u32 *ptr, old;
+
+ if (synproxy->tsoff == 0)
+ return 1;
+
+ optoff = protoff + sizeof(struct tcphdr);
+ optend = protoff + th->doff * 4;
+
+ if (!skb_make_writable(skb, optend))
+ return 0;
+
+ while (optoff < optend) {
+ unsigned char *op = skb->data + optoff;
+
+ switch (op[0]) {
+ case TCPOPT_EOL:
+ return 1;
+ case TCPOPT_NOP:
+ optoff++;
+ continue;
+ default:
+ if (optoff + 1 == optend ||
+ optoff + op[1] > optend ||
+ op[1] < 2)
+ return 0;
+ if (op[0] == TCPOPT_TIMESTAMP &&
+ op[1] == TCPOLEN_TIMESTAMP) {
+ if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) {
+ ptr = (u32 *)&op[2];
+ old = *ptr;
+ *ptr = htonl(ntohl(*ptr) -
+ synproxy->tsoff);
+ } else {
+ ptr = (u32 *)&op[6];
+ old = *ptr;
+ *ptr = htonl(ntohl(*ptr) +
+ synproxy->tsoff);
+ }
+ inet_proto_csum_replace4(&th->check, skb,
+ old, *ptr, 0);
+ return 1;
+ }
+ optoff += op[1];
+ }
+ }
+ return 1;
+}
+EXPORT_SYMBOL_GPL(synproxy_tstamp_adjust);
+
+static struct nf_ct_ext_type nf_ct_synproxy_extend __read_mostly = {
+ .len = sizeof(struct nf_conn_synproxy),
+ .align = __alignof__(struct nf_conn_synproxy),
+ .id = NF_CT_EXT_SYNPROXY,
+};
+
+#ifdef CONFIG_PROC_FS
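+/* /proc/net/stat/synproxy walks the per-cpu statistics: *pos == 0 yields
+ * SEQ_START_TOKEN for the header line, and afterwards *pos is kept one
+ * past the last cpu visited so a restarted read resumes at the right cpu.
+ */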
+static void *synproxy_cpu_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ struct synproxy_net *snet = synproxy_pernet(seq_file_net(seq));
+ int cpu;
+
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
+
+ for (cpu = *pos - 1; cpu < nr_cpu_ids; cpu++) {
+ if (!cpu_possible(cpu))
+ continue;
+ *pos = cpu + 1;
+ return per_cpu_ptr(snet->stats, cpu);
+ }
+
+ return NULL;
+}
+
+static void *synproxy_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct synproxy_net *snet = synproxy_pernet(seq_file_net(seq));
+ int cpu;
+
+ for (cpu = *pos; cpu < nr_cpu_ids; cpu++) {
+ if (!cpu_possible(cpu))
+ continue;
+ *pos = cpu + 1;
+ return per_cpu_ptr(snet->stats, cpu);
+ }
+
+ return NULL;
+}
+
+static void synproxy_cpu_seq_stop(struct seq_file *seq, void *v)
+{
+ return;
+}
+
+static int synproxy_cpu_seq_show(struct seq_file *seq, void *v)
+{
+ struct synproxy_stats *stats = v;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_printf(seq, "entries\t\tsyn_received\t"
+ "cookie_invalid\tcookie_valid\t"
+ "cookie_retrans\tconn_reopened\n");
+ return 0;
+ }
+
+ seq_printf(seq, "%08x\t%08x\t%08x\t%08x\t%08x\t%08x\n", 0,
+ stats->syn_received,
+ stats->cookie_invalid,
+ stats->cookie_valid,
+ stats->cookie_retrans,
+ stats->conn_reopened);
+
+ return 0;
+}
+
+static const struct seq_operations synproxy_cpu_seq_ops = {
+ .start = synproxy_cpu_seq_start,
+ .next = synproxy_cpu_seq_next,
+ .stop = synproxy_cpu_seq_stop,
+ .show = synproxy_cpu_seq_show,
+};
+
+static int synproxy_cpu_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open_net(inode, file, &synproxy_cpu_seq_ops,
+ sizeof(struct seq_net_private));
+}
+
+static const struct file_operations synproxy_cpu_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = synproxy_cpu_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_net,
+};
+
+static int __net_init synproxy_proc_init(struct net *net)
+{
+ if (!proc_create("synproxy", S_IRUGO, net->proc_net_stat,
+ &synproxy_cpu_seq_fops))
+ return -ENOMEM;
+ return 0;
+}
+
+static void __net_exit synproxy_proc_exit(struct net *net)
+{
+ remove_proc_entry("synproxy", net->proc_net_stat);
+}
+#else
+static int __net_init synproxy_proc_init(struct net *net)
+{
+ return 0;
+}
+
+static void __net_exit synproxy_proc_exit(struct net *net)
+{
+ return;
+}
+#endif /* CONFIG_PROC_FS */
+
+static int __net_init synproxy_net_init(struct net *net)
+{
+ struct synproxy_net *snet = synproxy_pernet(net);
+ struct nf_conntrack_tuple t;
+ struct nf_conn *ct;
+ int err = -ENOMEM;
+
+ memset(&t, 0, sizeof(t));
+ ct = nf_conntrack_alloc(net, 0, &t, &t, GFP_KERNEL);
+ if (IS_ERR(ct)) {
+ err = PTR_ERR(ct);
+ goto err1;
+ }
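+	/* The zero-tuple conntrack allocated above serves as a per-netns
+	 * template: IPS_TEMPLATE marks it as such, so it is attached to
+	 * intercepted packets rather than confirmed into the hash table.
+	 */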
+
+ if (!nfct_seqadj_ext_add(ct))
+ goto err2;
+ if (!nfct_synproxy_ext_add(ct))
+ goto err2;
+ __set_bit(IPS_TEMPLATE_BIT, &ct->status);
+ __set_bit(IPS_CONFIRMED_BIT, &ct->status);
+
+ snet->tmpl = ct;
+
+ snet->stats = alloc_percpu(struct synproxy_stats);
+ if (snet->stats == NULL)
+ goto err2;
+
+ err = synproxy_proc_init(net);
+ if (err < 0)
+ goto err3;
+
+ return 0;
+
+err3:
+ free_percpu(snet->stats);
+err2:
+ nf_conntrack_free(ct);
+err1:
+ return err;
+}
+
+static void __net_exit synproxy_net_exit(struct net *net)
+{
+ struct synproxy_net *snet = synproxy_pernet(net);
+
+ nf_conntrack_free(snet->tmpl);
+ synproxy_proc_exit(net);
+ free_percpu(snet->stats);
+}
+
+static struct pernet_operations synproxy_net_ops = {
+ .init = synproxy_net_init,
+ .exit = synproxy_net_exit,
+ .id = &synproxy_net_id,
+ .size = sizeof(struct synproxy_net),
+};
+
+static int __init synproxy_core_init(void)
+{
+ int err;
+
+ err = nf_ct_extend_register(&nf_ct_synproxy_extend);
+ if (err < 0)
+ goto err1;
+
+ err = register_pernet_subsys(&synproxy_net_ops);
+ if (err < 0)
+ goto err2;
+
+ return 0;
+
+err2:
+ nf_ct_extend_unregister(&nf_ct_synproxy_extend);
+err1:
+ return err;
+}
+
+static void __exit synproxy_core_exit(void)
+{
+ unregister_pernet_subsys(&synproxy_net_ops);
+ nf_ct_extend_unregister(&nf_ct_synproxy_extend);
+}
+
+module_init(synproxy_core_init);
+module_exit(synproxy_core_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
diff --git a/net/netfilter/nf_tproxy_core.c b/net/netfilter/nf_tproxy_core.c
deleted file mode 100644
index 474d621cbc2e..000000000000
--- a/net/netfilter/nf_tproxy_core.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Transparent proxy support for Linux/iptables
- *
- * Copyright (c) 2006-2007 BalaBit IT Ltd.
- * Author: Balazs Scheidler, Krisztian Kovacs
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-
-#include <linux/net.h>
-#include <linux/if.h>
-#include <linux/netdevice.h>
-#include <net/udp.h>
-#include <net/netfilter/nf_tproxy_core.h>
-
-
-static void
-nf_tproxy_destructor(struct sk_buff *skb)
-{
- struct sock *sk = skb->sk;
-
- skb->sk = NULL;
- skb->destructor = NULL;
-
- if (sk)
- sock_put(sk);
-}
-
-/* consumes sk */
-void
-nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
-{
- /* assigning tw sockets complicates things; most
- * skb->sk->X checks would have to test sk->sk_state first */
- if (sk->sk_state == TCP_TIME_WAIT) {
- inet_twsk_put(inet_twsk(sk));
- return;
- }
-
- skb_orphan(skb);
- skb->sk = sk;
- skb->destructor = nf_tproxy_destructor;
-}
-EXPORT_SYMBOL_GPL(nf_tproxy_assign_sock);
-
-static int __init nf_tproxy_init(void)
-{
- pr_info("NF_TPROXY: Transparent proxy support initialized, version 4.1.0\n");
- pr_info("NF_TPROXY: Copyright (c) 2006-2007 BalaBit IT Ltd.\n");
- return 0;
-}
-
-module_init(nf_tproxy_init);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Krisztian Kovacs");
-MODULE_DESCRIPTION("Transparent proxy support core routines");
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 8a703c3dd318..95a98c8c1da6 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -862,6 +862,7 @@ static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
[NFQA_MARK] = { .type = NLA_U32 },
[NFQA_PAYLOAD] = { .type = NLA_UNSPEC },
[NFQA_CT] = { .type = NLA_UNSPEC },
+ [NFQA_EXP] = { .type = NLA_UNSPEC },
};
static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
@@ -990,9 +991,14 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
if (entry == NULL)
return -ENOENT;
- rcu_read_lock();
- if (nfqa[NFQA_CT] && (queue->flags & NFQA_CFG_F_CONNTRACK))
+ if (nfqa[NFQA_CT]) {
ct = nfqnl_ct_parse(entry->skb, nfqa[NFQA_CT], &ctinfo);
+ if (ct && nfqa[NFQA_EXP]) {
+ nfqnl_attach_expect(ct, nfqa[NFQA_EXP],
+ NETLINK_CB(skb).portid,
+ nlmsg_report(nlh));
+ }
+ }
if (nfqa[NFQA_PAYLOAD]) {
u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
@@ -1005,7 +1011,6 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
if (ct)
nfqnl_ct_seq_adjust(skb, ct, ctinfo, diff);
}
- rcu_read_unlock();
if (nfqa[NFQA_MARK])
entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
diff --git a/net/netfilter/nfnetlink_queue_ct.c b/net/netfilter/nfnetlink_queue_ct.c
index ab61d66bc0b9..96cac50e0d12 100644
--- a/net/netfilter/nfnetlink_queue_ct.c
+++ b/net/netfilter/nfnetlink_queue_ct.c
@@ -87,12 +87,27 @@ nla_put_failure:
void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo, int diff)
{
- struct nfq_ct_nat_hook *nfq_nat_ct;
+ struct nfq_ct_hook *nfq_ct;
- nfq_nat_ct = rcu_dereference(nfq_ct_nat_hook);
- if (nfq_nat_ct == NULL)
+ nfq_ct = rcu_dereference(nfq_ct_hook);
+ if (nfq_ct == NULL)
return;
if ((ct->status & IPS_NAT_MASK) && diff)
- nfq_nat_ct->seq_adjust(skb, ct, ctinfo, diff);
+ nfq_ct->seq_adjust(skb, ct, ctinfo, diff);
+}
+
+int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr,
+ u32 portid, u32 report)
+{
+ struct nfq_ct_hook *nfq_ct;
+
+ if (nf_ct_is_untracked(ct))
+ return 0;
+
+ nfq_ct = rcu_dereference(nfq_ct_hook);
+ if (nfq_ct == NULL)
+ return -EOPNOTSUPP;
+
+ return nfq_ct->attach_expect(attr, ct, portid, report);
}
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 6113cc7efffc..cd24290f3b2f 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -60,7 +60,7 @@ tcpmss_mangle_packet(struct sk_buff *skb,
/* This is a fragment, no TCP header is available */
if (par->fragoff != 0)
- return XT_CONTINUE;
+ return 0;
if (!skb_make_writable(skb, skb->len))
return -1;
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index d7f195388f66..5d8a3a3cd5a7 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -15,7 +15,9 @@
#include <linux/ip.h>
#include <net/checksum.h>
#include <net/udp.h>
+#include <net/tcp.h>
#include <net/inet_sock.h>
+#include <net/inet_hashtables.h>
#include <linux/inetdevice.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
@@ -26,13 +28,18 @@
#define XT_TPROXY_HAVE_IPV6 1
#include <net/if_inet6.h>
#include <net/addrconf.h>
+#include <net/inet6_hashtables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#endif
-#include <net/netfilter/nf_tproxy_core.h>
#include <linux/netfilter/xt_TPROXY.h>
+enum nf_tproxy_lookup_t {
+ NFT_LOOKUP_LISTENER,
+ NFT_LOOKUP_ESTABLISHED,
+};
+
static bool tproxy_sk_is_transparent(struct sock *sk)
{
if (sk->sk_state != TCP_TIME_WAIT) {
@@ -68,6 +75,157 @@ tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
return laddr ? laddr : daddr;
}
+/*
+ * This is used when the user wants to intercept a connection matching
+ * an explicit iptables rule. In this case the sockets are assumed
+ * matching in preference order:
+ *
+ * - match: if there's a fully established connection matching the
+ * _packet_ tuple, it is returned, assuming the redirection
+ * already took place and we process a packet belonging to an
+ * established connection
+ *
+ * - match: if there's a listening socket matching the redirection
+ * (e.g. on-port & on-ip of the connection), it is returned,
+ * regardless if it was bound to 0.0.0.0 or an explicit
+ * address. The reasoning is that if there's an explicit rule, it
+ * does not really matter if the listener is bound to an interface
+ * or to 0. The user already stated that he wants redirection
+ * (since he added the rule).
+ *
+ * Please note that there's an overlap between what a TPROXY target
+ * and a socket match will match. Normally if you have both rules the
+ * "socket" match will be the first one, effectively all packets
+ * belonging to established connections going through that one.
+ */
+static inline struct sock *
+nf_tproxy_get_sock_v4(struct net *net, const u8 protocol,
+ const __be32 saddr, const __be32 daddr,
+ const __be16 sport, const __be16 dport,
+ const struct net_device *in,
+ const enum nf_tproxy_lookup_t lookup_type)
+{
+ struct sock *sk;
+
+ switch (protocol) {
+ case IPPROTO_TCP:
+ switch (lookup_type) {
+ case NFT_LOOKUP_LISTENER:
+ sk = inet_lookup_listener(net, &tcp_hashinfo,
+ saddr, sport,
+ daddr, dport,
+ in->ifindex);
+
+			/* NOTE: we return listeners even if bound to
+			 * 0.0.0.0; those are filtered out in
+			 * xt_socket, since xt_TPROXY needs 0-bound
+			 * listeners too.
+			 */
+ break;
+ case NFT_LOOKUP_ESTABLISHED:
+ sk = inet_lookup_established(net, &tcp_hashinfo,
+ saddr, sport, daddr, dport,
+ in->ifindex);
+ break;
+ default:
+ BUG();
+ }
+ break;
+ case IPPROTO_UDP:
+ sk = udp4_lib_lookup(net, saddr, sport, daddr, dport,
+ in->ifindex);
+ if (sk) {
+ int connected = (sk->sk_state == TCP_ESTABLISHED);
+ int wildcard = (inet_sk(sk)->inet_rcv_saddr == 0);
+
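+			/* UDP has no separate listener/established hashes, so
+			 * emulate the split on the single lookup result: an
+			 * ESTABLISHED lookup wants a connected, non-wildcard
+			 * socket, a LISTENER lookup an unconnected one.
+			 */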
+			/* NOTE: we return listeners even if bound to
+			 * 0.0.0.0; those are filtered out in
+			 * xt_socket, since xt_TPROXY needs 0-bound
+			 * listeners too.
+			 */
+ if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) ||
+ (lookup_type == NFT_LOOKUP_LISTENER && connected)) {
+ sock_put(sk);
+ sk = NULL;
+ }
+ }
+ break;
+ default:
+ WARN_ON(1);
+ sk = NULL;
+ }
+
+ pr_debug("tproxy socket lookup: proto %u %08x:%u -> %08x:%u, lookup type: %d, sock %p\n",
+ protocol, ntohl(saddr), ntohs(sport), ntohl(daddr), ntohs(dport), lookup_type, sk);
+
+ return sk;
+}
+
+#ifdef XT_TPROXY_HAVE_IPV6
+static inline struct sock *
+nf_tproxy_get_sock_v6(struct net *net, const u8 protocol,
+ const struct in6_addr *saddr, const struct in6_addr *daddr,
+ const __be16 sport, const __be16 dport,
+ const struct net_device *in,
+ const enum nf_tproxy_lookup_t lookup_type)
+{
+ struct sock *sk;
+
+ switch (protocol) {
+ case IPPROTO_TCP:
+ switch (lookup_type) {
+ case NFT_LOOKUP_LISTENER:
+ sk = inet6_lookup_listener(net, &tcp_hashinfo,
+ saddr, sport,
+ daddr, ntohs(dport),
+ in->ifindex);
+
+			/* NOTE: we return listeners even if bound to
+			 * 0.0.0.0; those are filtered out in
+			 * xt_socket, since xt_TPROXY needs 0-bound
+			 * listeners too.
+			 */
+ break;
+ case NFT_LOOKUP_ESTABLISHED:
+ sk = __inet6_lookup_established(net, &tcp_hashinfo,
+ saddr, sport, daddr, ntohs(dport),
+ in->ifindex);
+ break;
+ default:
+ BUG();
+ }
+ break;
+ case IPPROTO_UDP:
+ sk = udp6_lib_lookup(net, saddr, sport, daddr, dport,
+ in->ifindex);
+ if (sk) {
+ int connected = (sk->sk_state == TCP_ESTABLISHED);
+ int wildcard = ipv6_addr_any(&inet6_sk(sk)->rcv_saddr);
+
+			/* NOTE: we return listeners even if bound to
+			 * 0.0.0.0; those are filtered out in
+			 * xt_socket, since xt_TPROXY needs 0-bound
+			 * listeners too.
+			 */
+ if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) ||
+ (lookup_type == NFT_LOOKUP_LISTENER && connected)) {
+ sock_put(sk);
+ sk = NULL;
+ }
+ }
+ break;
+ default:
+ WARN_ON(1);
+ sk = NULL;
+ }
+
+ pr_debug("tproxy socket lookup: proto %u %pI6:%u -> %pI6:%u, lookup type: %d, sock %p\n",
+ protocol, saddr, ntohs(sport), daddr, ntohs(dport), lookup_type, sk);
+
+ return sk;
+}
+#endif
+
/**
* tproxy_handle_time_wait4 - handle IPv4 TCP TIME_WAIT reopen redirections
* @skb: The skb being processed.
@@ -117,6 +275,15 @@ tproxy_handle_time_wait4(struct sk_buff *skb, __be32 laddr, __be16 lport,
return sk;
}
+/* assign a socket to the skb -- consumes sk */
+static void
+nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
+{
+ skb_orphan(skb);
+ skb->sk = sk;
+ skb->destructor = sock_edemux;
+}
+
static unsigned int
tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport,
u_int32_t mark_mask, u_int32_t mark_value)
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c
index 68ff29f60867..fab6eea1bf38 100644
--- a/net/netfilter/xt_addrtype.c
+++ b/net/netfilter/xt_addrtype.c
@@ -202,7 +202,7 @@ static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par)
return -EINVAL;
}
if ((info->source | info->dest) >= XT_ADDRTYPE_PROHIBIT) {
- pr_err("ipv6 PROHIBT (THROW, NAT ..) matching not supported\n");
+ pr_err("ipv6 PROHIBIT (THROW, NAT ..) matching not supported\n");
return -EINVAL;
}
if ((info->source | info->dest) & XT_ADDRTYPE_BROADCAST) {
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 20b15916f403..06df2b9110f5 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -19,12 +19,12 @@
#include <net/icmp.h>
#include <net/sock.h>
#include <net/inet_sock.h>
-#include <net/netfilter/nf_tproxy_core.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
#define XT_SOCKET_HAVE_IPV6 1
#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <net/inet6_hashtables.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#endif
@@ -101,6 +101,43 @@ extract_icmp4_fields(const struct sk_buff *skb,
return 0;
}
+/* "socket" match based redirection (no specific rule)
+ * ===================================================
+ *
+ * There are connections with dynamic endpoints (e.g. FTP data
+ * connections) that the user is unable to add explicit rules
+ * for. These are taken care of by a generic "socket" rule. It is
+ * assumed that the proxy application is trusted to open such
+ * connections without explicit iptables rule (except of course the
+ * generic 'socket' rule). In this case the following sockets are
+ * matched in preference order:
+ *
+ * - match: if there's a fully established connection matching the
+ * _packet_ tuple
+ *
+ * - match: if there's a non-zero bound listener (possibly with a
+ *   non-local address). We don't accept zero-bound listeners, since
+ * then local services could intercept traffic going through the
+ * box.
+ */
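+/* __inet_lookup() below searches the established table first and falls
+ * back to listeners, which implements exactly this preference order.
+ */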
+static struct sock *
+xt_socket_get_sock_v4(struct net *net, const u8 protocol,
+ const __be32 saddr, const __be32 daddr,
+ const __be16 sport, const __be16 dport,
+ const struct net_device *in)
+{
+ switch (protocol) {
+ case IPPROTO_TCP:
+ return __inet_lookup(net, &tcp_hashinfo,
+ saddr, sport, daddr, dport,
+ in->ifindex);
+ case IPPROTO_UDP:
+ return udp4_lib_lookup(net, saddr, sport, daddr, dport,
+ in->ifindex);
+ }
+ return NULL;
+}
+
static bool
socket_match(const struct sk_buff *skb, struct xt_action_param *par,
const struct xt_socket_mtinfo1 *info)
@@ -156,9 +193,9 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
#endif
if (!sk)
- sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol,
+ sk = xt_socket_get_sock_v4(dev_net(skb->dev), protocol,
saddr, daddr, sport, dport,
- par->in, NFT_LOOKUP_ANY);
+ par->in);
if (sk) {
bool wildcard;
bool transparent = true;
@@ -265,6 +302,25 @@ extract_icmp6_fields(const struct sk_buff *skb,
return 0;
}
+static struct sock *
+xt_socket_get_sock_v6(struct net *net, const u8 protocol,
+ const struct in6_addr *saddr, const struct in6_addr *daddr,
+ const __be16 sport, const __be16 dport,
+ const struct net_device *in)
+{
+ switch (protocol) {
+ case IPPROTO_TCP:
+ return inet6_lookup(net, &tcp_hashinfo,
+ saddr, sport, daddr, dport,
+ in->ifindex);
+ case IPPROTO_UDP:
+ return udp6_lib_lookup(net, saddr, sport, daddr, dport,
+ in->ifindex);
+ }
+
+ return NULL;
+}
+
static bool
socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
{
@@ -302,9 +358,9 @@ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
}
if (!sk)
- sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto,
+ sk = xt_socket_get_sock_v6(dev_net(skb->dev), tproto,
saddr, daddr, sport, dport,
- par->in, NFT_LOOKUP_ANY);
+ par->in);
if (sk) {
bool wildcard;
bool transparent = true;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 0c61b59175dc..a17dda1bbee0 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -294,14 +294,14 @@ static void **alloc_pg_vec(struct netlink_sock *nlk,
{
unsigned int block_nr = req->nm_block_nr;
unsigned int i;
- void **pg_vec, *ptr;
+ void **pg_vec;
pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
if (pg_vec == NULL)
return NULL;
for (i = 0; i < block_nr; i++) {
- pg_vec[i] = ptr = alloc_one_pg_vec_page(order);
+ pg_vec[i] = alloc_one_pg_vec_page(order);
if (pg_vec[i] == NULL)
goto err1;
}
@@ -595,7 +595,7 @@ static unsigned int netlink_poll(struct file *file, struct socket *sock,
* for dumps is performed here. A dump is allowed to continue
* if at least half the ring is unused.
*/
- while (nlk->cb != NULL && netlink_dump_space(nlk)) {
+ while (nlk->cb_running && netlink_dump_space(nlk)) {
err = netlink_dump(sk);
if (err < 0) {
sk->sk_err = err;
@@ -802,18 +802,6 @@ static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, siocb) 0
#endif /* CONFIG_NETLINK_MMAP */
-static void netlink_destroy_callback(struct netlink_callback *cb)
-{
- kfree_skb(cb->skb);
- kfree(cb);
-}
-
-static void netlink_consume_callback(struct netlink_callback *cb)
-{
- consume_skb(cb->skb);
- kfree(cb);
-}
-
static void netlink_skb_destructor(struct sk_buff *skb)
{
#ifdef CONFIG_NETLINK_MMAP
@@ -872,12 +860,12 @@ static void netlink_sock_destruct(struct sock *sk)
{
struct netlink_sock *nlk = nlk_sk(sk);
- if (nlk->cb) {
- if (nlk->cb->done)
- nlk->cb->done(nlk->cb);
+ if (nlk->cb_running) {
+ if (nlk->cb.done)
+ nlk->cb.done(&nlk->cb);
- module_put(nlk->cb->module);
- netlink_destroy_callback(nlk->cb);
+ module_put(nlk->cb.module);
+ kfree_skb(nlk->cb.skb);
}
skb_queue_purge(&sk->sk_receive_queue);
@@ -2350,7 +2338,8 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
skb_free_datagram(sk, skb);
- if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
+ if (nlk->cb_running &&
+ atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
ret = netlink_dump(sk);
if (ret) {
sk->sk_err = ret;
@@ -2566,13 +2555,12 @@ static int netlink_dump(struct sock *sk)
int alloc_size;
mutex_lock(nlk->cb_mutex);
-
- cb = nlk->cb;
- if (cb == NULL) {
+ if (!nlk->cb_running) {
err = -EINVAL;
goto errout_skb;
}
+ cb = &nlk->cb;
alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
if (!netlink_rx_is_mmaped(sk) &&
@@ -2610,11 +2598,11 @@ static int netlink_dump(struct sock *sk)
if (cb->done)
cb->done(cb);
- nlk->cb = NULL;
- mutex_unlock(nlk->cb_mutex);
+ nlk->cb_running = false;
+ mutex_unlock(nlk->cb_mutex);
module_put(cb->module);
- netlink_consume_callback(cb);
+ consume_skb(cb->skb);
return 0;
errout_skb:
@@ -2632,59 +2620,51 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
struct netlink_sock *nlk;
int ret;
- cb = kzalloc(sizeof(*cb), GFP_KERNEL);
- if (cb == NULL)
- return -ENOBUFS;
-
/* Memory mapped dump requests need to be copied to avoid looping
* on the pending state in netlink_mmap_sendmsg() while the CB hold
* a reference to the skb.
*/
if (netlink_skb_is_mmaped(skb)) {
skb = skb_copy(skb, GFP_KERNEL);
- if (skb == NULL) {
- kfree(cb);
+ if (skb == NULL)
return -ENOBUFS;
- }
} else
atomic_inc(&skb->users);
- cb->dump = control->dump;
- cb->done = control->done;
- cb->nlh = nlh;
- cb->data = control->data;
- cb->module = control->module;
- cb->min_dump_alloc = control->min_dump_alloc;
- cb->skb = skb;
-
sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
if (sk == NULL) {
- netlink_destroy_callback(cb);
- return -ECONNREFUSED;
+ ret = -ECONNREFUSED;
+ goto error_free;
}
- nlk = nlk_sk(sk);
+ nlk = nlk_sk(sk);
mutex_lock(nlk->cb_mutex);
/* A dump is in progress... */
- if (nlk->cb) {
- mutex_unlock(nlk->cb_mutex);
- netlink_destroy_callback(cb);
+ if (nlk->cb_running) {
ret = -EBUSY;
- goto out;
+ goto error_unlock;
}
/* add reference of module which cb->dump belongs to */
- if (!try_module_get(cb->module)) {
- mutex_unlock(nlk->cb_mutex);
- netlink_destroy_callback(cb);
+ if (!try_module_get(control->module)) {
ret = -EPROTONOSUPPORT;
- goto out;
+ goto error_unlock;
}
- nlk->cb = cb;
+ cb = &nlk->cb;
+ memset(cb, 0, sizeof(*cb));
+ cb->dump = control->dump;
+ cb->done = control->done;
+ cb->nlh = nlh;
+ cb->data = control->data;
+ cb->module = control->module;
+ cb->min_dump_alloc = control->min_dump_alloc;
+ cb->skb = skb;
+
+ nlk->cb_running = true;
+
mutex_unlock(nlk->cb_mutex);
ret = netlink_dump(sk);
-out:
sock_put(sk);
if (ret)
@@ -2694,6 +2674,13 @@ out:
* signal not to send ACK even if it was requested.
*/
return -EINTR;
+
+error_unlock:
+ sock_put(sk);
+ mutex_unlock(nlk->cb_mutex);
+error_free:
+ kfree_skb(skb);
+ return ret;
}
EXPORT_SYMBOL(__netlink_dump_start);
@@ -2916,14 +2903,14 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
struct sock *s = v;
struct netlink_sock *nlk = nlk_sk(s);
- seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %pK %-8d %-8d %-8lu\n",
+ seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
s,
s->sk_protocol,
nlk->portid,
nlk->groups ? (u32)nlk->groups[0] : 0,
sk_rmem_alloc_get(s),
sk_wmem_alloc_get(s),
- nlk->cb,
+ nlk->cb_running,
atomic_read(&s->sk_refcnt),
atomic_read(&s->sk_drops),
sock_i_ino(s)
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index eaa88d187cdc..acbd774eeb7c 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -32,7 +32,8 @@ struct netlink_sock {
unsigned long *groups;
unsigned long state;
wait_queue_head_t wait;
- struct netlink_callback *cb;
+ bool cb_running;
+ struct netlink_callback cb;
struct mutex *cb_mutex;
struct mutex cb_def_mutex;
void (*netlink_rcv)(struct sk_buff *skb);
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 1d074dd1650f..e92923cf3e03 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -77,11 +77,19 @@ error:
return rc;
}
-int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name)
+/**
+ * nfc_fw_download_done - inform that a firmware download was completed
+ *
+ * @dev: The nfc device to which firmware was downloaded
+ * @firmware_name: The firmware filename
+ * @result: The firmware download status, as a positive errno value
+ */
+int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name,
+ u32 result)
{
dev->fw_download_in_progress = false;
- return nfc_genl_fw_download_done(dev, firmware_name);
+ return nfc_genl_fw_download_done(dev, firmware_name, result);
}
EXPORT_SYMBOL(nfc_fw_download_done);
@@ -129,7 +137,7 @@ int nfc_dev_up(struct nfc_dev *dev)
/* We have to enable the device before discovering SEs */
if (dev->ops->discover_se) {
rc = dev->ops->discover_se(dev);
- if (!rc)
+ if (rc)
pr_warn("SE discovery failed\n");
}
@@ -575,12 +583,14 @@ int nfc_enable_se(struct nfc_dev *dev, u32 se_idx)
goto error;
}
- if (se->type == NFC_SE_ENABLED) {
+ if (se->state == NFC_SE_ENABLED) {
rc = -EALREADY;
goto error;
}
rc = dev->ops->enable_se(dev, se_idx);
+ if (rc >= 0)
+ se->state = NFC_SE_ENABLED;
error:
device_unlock(&dev->dev);
@@ -618,12 +628,14 @@ int nfc_disable_se(struct nfc_dev *dev, u32 se_idx)
goto error;
}
- if (se->type == NFC_SE_DISABLED) {
+ if (se->state == NFC_SE_DISABLED) {
rc = -EALREADY;
goto error;
}
rc = dev->ops->disable_se(dev, se_idx);
+ if (rc >= 0)
+ se->state = NFC_SE_DISABLED;
error:
device_unlock(&dev->dev);
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index fe66908401f5..d07ca4c5cf8c 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -717,7 +717,7 @@ static int hci_disable_se(struct nfc_dev *nfc_dev, u32 se_idx)
struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
if (hdev->ops->disable_se)
- return hdev->ops->enable_se(hdev, se_idx);
+ return hdev->ops->disable_se(hdev, se_idx);
return 0;
}
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index f16fd59d4160..68063b2025da 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -1114,7 +1114,8 @@ static int nfc_genl_fw_download(struct sk_buff *skb, struct genl_info *info)
return rc;
}
-int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name)
+int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name,
+ u32 result)
{
struct sk_buff *msg;
void *hdr;
@@ -1129,6 +1130,7 @@ int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name)
goto free_msg;
if (nla_put_string(msg, NFC_ATTR_FIRMWARE_NAME, firmware_name) ||
+ nla_put_u32(msg, NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS, result) ||
nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
goto nla_put_failure;
@@ -1191,6 +1193,91 @@ static int nfc_genl_disable_se(struct sk_buff *skb, struct genl_info *info)
return rc;
}
+static int nfc_genl_send_se(struct sk_buff *msg, struct nfc_dev *dev,
+ u32 portid, u32 seq,
+ struct netlink_callback *cb,
+ int flags)
+{
+ void *hdr;
+ struct nfc_se *se, *n;
+
+ list_for_each_entry_safe(se, n, &dev->secure_elements, list) {
+ hdr = genlmsg_put(msg, portid, seq, &nfc_genl_family, flags,
+ NFC_CMD_GET_SE);
+ if (!hdr)
+ goto nla_put_failure;
+
+ if (cb)
+ genl_dump_check_consistent(cb, hdr, &nfc_genl_family);
+
+ if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) ||
+ nla_put_u32(msg, NFC_ATTR_SE_INDEX, se->idx) ||
+ nla_put_u8(msg, NFC_ATTR_SE_TYPE, se->type))
+ goto nla_put_failure;
+
+ if (genlmsg_end(msg, hdr) < 0)
+ goto nla_put_failure;
+ }
+
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
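+/* Dump every secure element of every device.  The class_dev_iter and the
+ * device being walked persist across dump calls in cb->args[0] and
+ * cb->args[1]; nfc_genl_dump_ses_done() below frees the iterator.
+ */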
+static int nfc_genl_dump_ses(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
+ struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
+ bool first_call = false;
+
+ if (!iter) {
+ first_call = true;
+ iter = kmalloc(sizeof(struct class_dev_iter), GFP_KERNEL);
+ if (!iter)
+ return -ENOMEM;
+ cb->args[0] = (long) iter;
+ }
+
+ mutex_lock(&nfc_devlist_mutex);
+
+ cb->seq = nfc_devlist_generation;
+
+ if (first_call) {
+ nfc_device_iter_init(iter);
+ dev = nfc_device_iter_next(iter);
+ }
+
+ while (dev) {
+ int rc;
+
+ rc = nfc_genl_send_se(skb, dev, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, cb, NLM_F_MULTI);
+ if (rc < 0)
+ break;
+
+ dev = nfc_device_iter_next(iter);
+ }
+
+ mutex_unlock(&nfc_devlist_mutex);
+
+ cb->args[1] = (long) dev;
+
+ return skb->len;
+}
+
+static int nfc_genl_dump_ses_done(struct netlink_callback *cb)
+{
+ struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
+
+ nfc_device_iter_exit(iter);
+ kfree(iter);
+
+ return 0;
+}
+
static struct genl_ops nfc_genl_ops[] = {
{
.cmd = NFC_CMD_GET_DEVICE,
@@ -1265,6 +1352,12 @@ static struct genl_ops nfc_genl_ops[] = {
.doit = nfc_genl_disable_se,
.policy = nfc_genl_policy,
},
+ {
+ .cmd = NFC_CMD_GET_SE,
+ .dumpit = nfc_genl_dump_ses,
+ .done = nfc_genl_dump_ses_done,
+ .policy = nfc_genl_policy,
+ },
};
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index 820a7850c36a..aaf606fc1faa 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -124,9 +124,8 @@ static inline void nfc_device_iter_exit(struct class_dev_iter *iter)
}
int nfc_fw_download(struct nfc_dev *dev, const char *firmware_name);
-int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name);
-
-int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name);
+int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name,
+ u32 result);
int nfc_dev_up(struct nfc_dev *dev);
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
index 27ee56b688a3..6ecf491ad509 100644
--- a/net/openvswitch/Kconfig
+++ b/net/openvswitch/Kconfig
@@ -4,6 +4,7 @@
config OPENVSWITCH
tristate "Open vSwitch"
+ select LIBCRC32C
---help---
Open vSwitch is a multilayer Ethernet switch targeted at virtualized
environments. In addition to supporting a variety of features
@@ -40,3 +41,16 @@ config OPENVSWITCH_GRE
Say N to exclude this support and reduce the binary size.
If unsure, say Y.
+
+config OPENVSWITCH_VXLAN
+ bool "Open vSwitch VXLAN tunneling support"
+ depends on INET
+ depends on OPENVSWITCH
+ depends on VXLAN && !(OPENVSWITCH=y && VXLAN=m)
+ default y
+ ---help---
+	  If you say Y here, then Open vSwitch will be able to create vxlan vports.
+
+ Say N to exclude this support and reduce the binary size.
+
+ If unsure, say Y.
diff --git a/net/openvswitch/Makefile b/net/openvswitch/Makefile
index 01bddb2991e3..ea36e99089af 100644
--- a/net/openvswitch/Makefile
+++ b/net/openvswitch/Makefile
@@ -10,6 +10,13 @@ openvswitch-y := \
dp_notify.o \
flow.o \
vport.o \
- vport-gre.o \
vport-internal_dev.o \
vport-netdev.o
+
+ifneq ($(CONFIG_OPENVSWITCH_VXLAN),)
+openvswitch-y += vport-vxlan.o
+endif
+
+ifneq ($(CONFIG_OPENVSWITCH_GRE),)
+openvswitch-y += vport-gre.o
+endif
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index ab101f715447..65cfaa816075 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2012 Nicira, Inc.
+ * Copyright (c) 2007-2013 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -22,6 +22,7 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
+#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
@@ -31,6 +32,7 @@
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/dsfield.h>
+#include <net/sctp/checksum.h>
#include "datapath.h"
#include "vport.h"
@@ -352,6 +354,39 @@ static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key)
return 0;
}
+static int set_sctp(struct sk_buff *skb,
+ const struct ovs_key_sctp *sctp_port_key)
+{
+ struct sctphdr *sh;
+ int err;
+ unsigned int sctphoff = skb_transport_offset(skb);
+
+ err = make_writable(skb, sctphoff + sizeof(struct sctphdr));
+ if (unlikely(err))
+ return err;
+
+ sh = sctp_hdr(skb);
+ if (sctp_port_key->sctp_src != sh->source ||
+ sctp_port_key->sctp_dst != sh->dest) {
+ __le32 old_correct_csum, new_csum, old_csum;
+
+ old_csum = sh->checksum;
+ old_correct_csum = sctp_compute_cksum(skb, sctphoff);
+
+ sh->source = sctp_port_key->sctp_src;
+ sh->dest = sctp_port_key->sctp_dst;
+
+ new_csum = sctp_compute_cksum(skb, sctphoff);
+
+ /* Carry any checksum errors through. */
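+		/* If the packet arrived with a broken checksum, old_csum
+		 * differs from old_correct_csum and the XOR below layers
+		 * that same error onto the recomputed sum, so corrupt
+		 * packets stay detectably corrupt after the rewrite.
+		 */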
+ sh->checksum = old_csum ^ old_correct_csum ^ new_csum;
+
+ skb->rxhash = 0;
+ }
+
+ return 0;
+}
+
static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
struct vport *vport;
@@ -376,8 +411,10 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
const struct nlattr *a;
int rem;
+ BUG_ON(!OVS_CB(skb)->pkt_key);
+
upcall.cmd = OVS_PACKET_CMD_ACTION;
- upcall.key = &OVS_CB(skb)->flow->key;
+ upcall.key = OVS_CB(skb)->pkt_key;
upcall.userdata = NULL;
upcall.portid = 0;
@@ -459,6 +496,10 @@ static int execute_set_action(struct sk_buff *skb,
case OVS_KEY_ATTR_UDP:
err = set_udp(skb, nla_data(nested_attr));
break;
+
+ case OVS_KEY_ATTR_SCTP:
+ err = set_sctp(skb, nla_data(nested_attr));
+ break;
}
return err;
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index f2ed7600084e..2aa13bd7f2b2 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2012 Nicira, Inc.
+ * Copyright (c) 2007-2013 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -165,7 +165,7 @@ static void destroy_dp_rcu(struct rcu_head *rcu)
{
struct datapath *dp = container_of(rcu, struct datapath, rcu);
- ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
+ ovs_flow_tbl_destroy((__force struct flow_table *)dp->table, false);
free_percpu(dp->stats_percpu);
release_net(ovs_dp_get_net(dp));
kfree(dp->ports);
@@ -226,19 +226,18 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
struct sw_flow_key key;
u64 *stats_counter;
int error;
- int key_len;
stats = this_cpu_ptr(dp->stats_percpu);
/* Extract flow from 'skb' into 'key'. */
- error = ovs_flow_extract(skb, p->port_no, &key, &key_len);
+ error = ovs_flow_extract(skb, p->port_no, &key);
if (unlikely(error)) {
kfree_skb(skb);
return;
}
/* Look up flow. */
- flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len);
+ flow = ovs_flow_lookup(rcu_dereference(dp->table), &key);
if (unlikely(!flow)) {
struct dp_upcall_info upcall;
@@ -253,6 +252,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
}
OVS_CB(skb)->flow = flow;
+ OVS_CB(skb)->pkt_key = &key;
stats_counter = &stats->n_hit;
ovs_flow_used(OVS_CB(skb)->flow, skb);
@@ -435,7 +435,7 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex,
upcall->dp_ifindex = dp_ifindex;
nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
- ovs_flow_to_nlattrs(upcall_info->key, user_skb);
+ ovs_flow_to_nlattrs(upcall_info->key, upcall_info->key, user_skb);
nla_nest_end(user_skb, nla);
if (upcall_info->userdata)
@@ -468,7 +468,7 @@ static int flush_flows(struct datapath *dp)
rcu_assign_pointer(dp->table, new_table);
- ovs_flow_tbl_deferred_destroy(old_table);
+ ovs_flow_tbl_destroy(old_table, true);
return 0;
}
@@ -611,10 +611,12 @@ static int validate_tp_port(const struct sw_flow_key *flow_key)
static int validate_and_copy_set_tun(const struct nlattr *attr,
struct sw_flow_actions **sfa)
{
- struct ovs_key_ipv4_tunnel tun_key;
+ struct sw_flow_match match;
+ struct sw_flow_key key;
int err, start;
- err = ovs_ipv4_tun_from_nlattr(nla_data(attr), &tun_key);
+ ovs_match_init(&match, &key, NULL);
+ err = ovs_ipv4_tun_from_nlattr(nla_data(attr), &match, false);
if (err)
return err;
@@ -622,7 +624,8 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
if (start < 0)
return start;
- err = add_action(sfa, OVS_KEY_ATTR_IPV4_TUNNEL, &tun_key, sizeof(tun_key));
+ err = add_action(sfa, OVS_KEY_ATTR_IPV4_TUNNEL, &match.key->tun_key,
+ sizeof(match.key->tun_key));
add_nested_action_end(*sfa, start);
return err;
@@ -709,6 +712,12 @@ static int validate_set(const struct nlattr *a,
return validate_tp_port(flow_key);
+ case OVS_KEY_ATTR_SCTP:
+ if (flow_key->ip.proto != IPPROTO_SCTP)
+ return -EINVAL;
+
+ return validate_tp_port(flow_key);
+
default:
return -EINVAL;
}
@@ -857,7 +866,6 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
struct ethhdr *eth;
int len;
int err;
- int key_len;
err = -EINVAL;
if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
@@ -890,11 +898,11 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
if (IS_ERR(flow))
goto err_kfree_skb;
- err = ovs_flow_extract(packet, -1, &flow->key, &key_len);
+ err = ovs_flow_extract(packet, -1, &flow->key);
if (err)
goto err_flow_free;
- err = ovs_flow_metadata_from_nlattrs(flow, key_len, a[OVS_PACKET_ATTR_KEY]);
+ err = ovs_flow_metadata_from_nlattrs(flow, a[OVS_PACKET_ATTR_KEY]);
if (err)
goto err_flow_free;
acts = ovs_flow_actions_alloc(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
@@ -908,6 +916,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
goto err_flow_free;
OVS_CB(packet)->flow = flow;
+ OVS_CB(packet)->pkt_key = &flow->key;
packet->priority = flow->key.phy.priority;
packet->mark = flow->key.phy.skb_mark;
@@ -922,13 +931,13 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
local_bh_enable();
rcu_read_unlock();
- ovs_flow_free(flow);
+ ovs_flow_free(flow, false);
return err;
err_unlock:
rcu_read_unlock();
err_flow_free:
- ovs_flow_free(flow);
+ ovs_flow_free(flow, false);
err_kfree_skb:
kfree_skb(packet);
err:
@@ -951,9 +960,10 @@ static struct genl_ops dp_packet_genl_ops[] = {
static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
{
+ struct flow_table *table;
int i;
- struct flow_table *table = ovsl_dereference(dp->table);
+ table = rcu_dereference_check(dp->table, lockdep_ovsl_is_held());
stats->n_flows = ovs_flow_tbl_count(table);
stats->n_hit = stats->n_missed = stats->n_lost = 0;
@@ -1044,7 +1054,8 @@ static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
if (!start)
return -EMSGSIZE;
- err = ovs_ipv4_tun_to_nlattr(skb, nla_data(ovs_key));
+ err = ovs_ipv4_tun_to_nlattr(skb, nla_data(ovs_key),
+ nla_data(ovs_key));
if (err)
return err;
nla_nest_end(skb, start);
@@ -1092,6 +1103,7 @@ static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
{
return NLMSG_ALIGN(sizeof(struct ovs_header))
+ nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_KEY */
+ + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_MASK */
+ nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
+ nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
+ nla_total_size(8) /* OVS_FLOW_ATTR_USED */
@@ -1104,7 +1116,6 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
u32 seq, u32 flags, u8 cmd)
{
const int skb_orig_len = skb->len;
- const struct sw_flow_actions *sf_acts;
struct nlattr *start;
struct ovs_flow_stats stats;
struct ovs_header *ovs_header;
@@ -1113,20 +1124,31 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
u8 tcp_flags;
int err;
- sf_acts = ovsl_dereference(flow->sf_acts);
-
ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
if (!ovs_header)
return -EMSGSIZE;
ovs_header->dp_ifindex = get_dpifindex(dp);
+ /* Fill flow key. */
nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
if (!nla)
goto nla_put_failure;
- err = ovs_flow_to_nlattrs(&flow->key, skb);
+
+ err = ovs_flow_to_nlattrs(&flow->unmasked_key,
+ &flow->unmasked_key, skb);
+ if (err)
+ goto error;
+ nla_nest_end(skb, nla);
+
+ nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK);
+ if (!nla)
+ goto nla_put_failure;
+
+ err = ovs_flow_to_nlattrs(&flow->key, &flow->mask->key, skb);
if (err)
goto error;
+
nla_nest_end(skb, nla);
spin_lock_bh(&flow->lock);
@@ -1161,6 +1183,11 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
*/
start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
if (start) {
+ const struct sw_flow_actions *sf_acts;
+
+ sf_acts = rcu_dereference_check(flow->sf_acts,
+ lockdep_ovsl_is_held());
+
err = actions_to_attr(sf_acts->actions, sf_acts->actions_len, skb);
if (!err)
nla_nest_end(skb, start);
@@ -1211,20 +1238,24 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr **a = info->attrs;
struct ovs_header *ovs_header = info->userhdr;
- struct sw_flow_key key;
- struct sw_flow *flow;
+ struct sw_flow_key key, masked_key;
+ struct sw_flow *flow = NULL;
+ struct sw_flow_mask mask;
struct sk_buff *reply;
struct datapath *dp;
struct flow_table *table;
struct sw_flow_actions *acts = NULL;
+ struct sw_flow_match match;
int error;
- int key_len;
/* Extract key. */
error = -EINVAL;
if (!a[OVS_FLOW_ATTR_KEY])
goto error;
- error = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
+
+ ovs_match_init(&match, &key, &mask);
+ error = ovs_match_from_nlattrs(&match,
+ a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
if (error)
goto error;
@@ -1235,9 +1266,13 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
if (IS_ERR(acts))
goto error;
- error = validate_and_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, 0, &acts);
- if (error)
+ ovs_flow_key_mask(&masked_key, &key, &mask);
+ error = validate_and_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
+ &masked_key, 0, &acts);
+ if (error) {
+ OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
goto err_kfree;
+ }
} else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
error = -EINVAL;
goto error;
@@ -1250,8 +1285,11 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
goto err_unlock_ovs;
table = ovsl_dereference(dp->table);
- flow = ovs_flow_tbl_lookup(table, &key, key_len);
+
+ /* Check if this is a duplicate flow */
+ flow = ovs_flow_lookup(table, &key);
if (!flow) {
+ struct sw_flow_mask *mask_p;
/* Bail out if we're not allowed to create a new flow. */
error = -ENOENT;
if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
@@ -1264,7 +1302,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
new_table = ovs_flow_tbl_expand(table);
if (!IS_ERR(new_table)) {
rcu_assign_pointer(dp->table, new_table);
- ovs_flow_tbl_deferred_destroy(table);
+ ovs_flow_tbl_destroy(table, true);
table = ovsl_dereference(dp->table);
}
}
@@ -1277,14 +1315,30 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
}
clear_stats(flow);
+ flow->key = masked_key;
+ flow->unmasked_key = key;
+
+ /* Make sure mask is unique in the system */
+ mask_p = ovs_sw_flow_mask_find(table, &mask);
+ if (!mask_p) {
+			/* Allocate a new mask if none exists. */
+ mask_p = ovs_sw_flow_mask_alloc();
+ if (!mask_p)
+ goto err_flow_free;
+ mask_p->key = mask.key;
+ mask_p->range = mask.range;
+ ovs_sw_flow_mask_insert(table, mask_p);
+ }
+
+ ovs_sw_flow_mask_add_ref(mask_p);
+ flow->mask = mask_p;
rcu_assign_pointer(flow->sf_acts, acts);
/* Put flow in bucket. */
- ovs_flow_tbl_insert(table, flow, &key, key_len);
+ ovs_flow_insert(table, flow);
reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
- info->snd_seq,
- OVS_FLOW_CMD_NEW);
+ info->snd_seq, OVS_FLOW_CMD_NEW);
} else {
/* We found a matching flow. */
struct sw_flow_actions *old_acts;
@@ -1300,6 +1354,13 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
goto err_unlock_ovs;
+ /* The unmasked key has to be the same for flow updates. */
+ error = -EINVAL;
+ if (!ovs_flow_cmp_unmasked_key(flow, &key, match.range.end)) {
+ OVS_NLERR("Flow modification message rejected, unmasked key does not match.\n");
+ goto err_unlock_ovs;
+ }
+
/* Update actions. */
old_acts = ovsl_dereference(flow->sf_acts);
rcu_assign_pointer(flow->sf_acts, acts);
@@ -1324,6 +1385,8 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
return 0;
+err_flow_free:
+ ovs_flow_free(flow, false);
err_unlock_ovs:
ovs_unlock();
err_kfree:
@@ -1341,12 +1404,16 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
struct sw_flow *flow;
struct datapath *dp;
struct flow_table *table;
+ struct sw_flow_match match;
int err;
- int key_len;
- if (!a[OVS_FLOW_ATTR_KEY])
+ if (!a[OVS_FLOW_ATTR_KEY]) {
+ OVS_NLERR("Flow get message rejected, Key attribute missing.\n");
return -EINVAL;
- err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
+ }
+
+ ovs_match_init(&match, &key, NULL);
+ err = ovs_match_from_nlattrs(&match, a[OVS_FLOW_ATTR_KEY], NULL);
if (err)
return err;
@@ -1358,7 +1425,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
}
table = ovsl_dereference(dp->table);
- flow = ovs_flow_tbl_lookup(table, &key, key_len);
+ flow = ovs_flow_lookup_unmasked_key(table, &match);
if (!flow) {
err = -ENOENT;
goto unlock;
@@ -1387,8 +1454,8 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
struct sw_flow *flow;
struct datapath *dp;
struct flow_table *table;
+ struct sw_flow_match match;
int err;
- int key_len;
ovs_lock();
dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
@@ -1401,12 +1468,14 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
err = flush_flows(dp);
goto unlock;
}
- err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
+
+ ovs_match_init(&match, &key, NULL);
+ err = ovs_match_from_nlattrs(&match, a[OVS_FLOW_ATTR_KEY], NULL);
if (err)
goto unlock;
table = ovsl_dereference(dp->table);
- flow = ovs_flow_tbl_lookup(table, &key, key_len);
+ flow = ovs_flow_lookup_unmasked_key(table, &match);
if (!flow) {
err = -ENOENT;
goto unlock;
@@ -1418,13 +1487,13 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
goto unlock;
}
- ovs_flow_tbl_remove(table, flow);
+ ovs_flow_remove(table, flow);
err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid,
info->snd_seq, 0, OVS_FLOW_CMD_DEL);
BUG_ON(err < 0);
- ovs_flow_deferred_free(flow);
+ ovs_flow_free(flow, true);
ovs_unlock();
ovs_notify(reply, info, &ovs_dp_flow_multicast_group);
@@ -1440,22 +1509,21 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
struct datapath *dp;
struct flow_table *table;
- ovs_lock();
+ rcu_read_lock();
dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
if (!dp) {
- ovs_unlock();
+ rcu_read_unlock();
return -ENODEV;
}
- table = ovsl_dereference(dp->table);
-
+ table = rcu_dereference(dp->table);
for (;;) {
struct sw_flow *flow;
u32 bucket, obj;
bucket = cb->args[0];
obj = cb->args[1];
- flow = ovs_flow_tbl_next(table, &bucket, &obj);
+ flow = ovs_flow_dump_next(table, &bucket, &obj);
if (!flow)
break;
@@ -1468,7 +1536,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
cb->args[0] = bucket;
cb->args[1] = obj;
}
- ovs_unlock();
+ rcu_read_unlock();
return skb->len;
}
@@ -1664,7 +1732,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
goto err_destroy_local_port;
ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
- list_add_tail(&dp->list_node, &ovs_net->dps);
+ list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
ovs_unlock();
@@ -1678,7 +1746,7 @@ err_destroy_ports_array:
err_destroy_percpu:
free_percpu(dp->stats_percpu);
err_destroy_table:
- ovs_flow_tbl_destroy(ovsl_dereference(dp->table));
+ ovs_flow_tbl_destroy(ovsl_dereference(dp->table), false);
err_free_dp:
release_net(ovs_dp_get_net(dp));
kfree(dp);
@@ -1702,7 +1770,7 @@ static void __dp_destroy(struct datapath *dp)
ovs_dp_detach_port(vport);
}
- list_del(&dp->list_node);
+ list_del_rcu(&dp->list_node);
/* OVSP_LOCAL is datapath internal port. We need to make sure that
* all port in datapath are destroyed first before freeing datapath.
@@ -1807,8 +1875,8 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
int skip = cb->args[0];
int i = 0;
- ovs_lock();
- list_for_each_entry(dp, &ovs_net->dps, list_node) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(dp, &ovs_net->dps, list_node) {
if (i >= skip &&
ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
@@ -1816,7 +1884,7 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
break;
i++;
}
- ovs_unlock();
+ rcu_read_unlock();
cb->args[0] = i;
@@ -2285,7 +2353,7 @@ static void rehash_flow_table(struct work_struct *work)
new_table = ovs_flow_tbl_rehash(old_table);
if (!IS_ERR(new_table)) {
rcu_assign_pointer(dp->table, new_table);
- ovs_flow_tbl_deferred_destroy(old_table);
+ ovs_flow_tbl_destroy(old_table, true);
}
}
}
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index a91486484916..4d109c176ef3 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -88,11 +88,13 @@ struct datapath {
/**
* struct ovs_skb_cb - OVS data in skb CB
* @flow: The flow associated with this packet. May be %NULL if no flow.
+ * @pkt_key: The flow information extracted from the packet. Must not be %NULL.
* @tun_key: Key for the tunnel that encapsulated this packet. NULL if the
* packet is not being tunneled.
*/
struct ovs_skb_cb {
struct sw_flow *flow;
+ struct sw_flow_key *pkt_key;
struct ovs_key_ipv4_tunnel *tun_key;
};
#define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
@@ -183,4 +185,8 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq,
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb);
void ovs_dp_notify_wq(struct work_struct *work);
+
+#define OVS_NLERR(fmt, ...) \
+ pr_info_once("netlink: " fmt, ##__VA_ARGS__)
+
#endif /* datapath.h */
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 1aa84dc58777..fb36f8565161 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2011 Nicira, Inc.
+ * Copyright (c) 2007-2013 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -34,6 +34,7 @@
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
@@ -46,6 +47,202 @@
static struct kmem_cache *flow_cache;
+static void ovs_sw_flow_mask_set(struct sw_flow_mask *mask,
+ struct sw_flow_key_range *range, u8 val);
+
+static void update_range__(struct sw_flow_match *match,
+ size_t offset, size_t size, bool is_mask)
+{
+ struct sw_flow_key_range *range = NULL;
+ size_t start = rounddown(offset, sizeof(long));
+ size_t end = roundup(offset + size, sizeof(long));
+
+ if (!is_mask)
+ range = &match->range;
+ else if (match->mask)
+ range = &match->mask->range;
+
+ if (!range)
+ return;
+
+ if (range->start == range->end) {
+ range->start = start;
+ range->end = end;
+ return;
+ }
+
+ if (range->start > start)
+ range->start = start;
+
+ if (range->end < end)
+ range->end = end;
+}
+
+#define SW_FLOW_KEY_PUT(match, field, value, is_mask) \
+ do { \
+ update_range__(match, offsetof(struct sw_flow_key, field), \
+ sizeof((match)->key->field), is_mask); \
+ if (is_mask) { \
+ if ((match)->mask) \
+ (match)->mask->key.field = value; \
+ } else { \
+ (match)->key->field = value; \
+ } \
+ } while (0)
+
+#define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask) \
+ do { \
+ update_range__(match, offsetof(struct sw_flow_key, field), \
+ len, is_mask); \
+ if (is_mask) { \
+ if ((match)->mask) \
+ memcpy(&(match)->mask->key.field, value_p, len);\
+ } else { \
+ memcpy(&(match)->key->field, value_p, len); \
+ } \
+ } while (0)
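+/* Both helpers route every write through update_range__() so the key's
+ * (or mask's) valid byte range grows, rounded out to long boundaries, to
+ * cover each field that was set; masked copies and compares then only
+ * touch bytes in [range.start, range.end).
+ */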
+
+static u16 range_n_bytes(const struct sw_flow_key_range *range)
+{
+ return range->end - range->start;
+}
+
+void ovs_match_init(struct sw_flow_match *match,
+ struct sw_flow_key *key,
+ struct sw_flow_mask *mask)
+{
+ memset(match, 0, sizeof(*match));
+ match->key = key;
+ match->mask = mask;
+
+ memset(key, 0, sizeof(*key));
+
+ if (mask) {
+ memset(&mask->key, 0, sizeof(mask->key));
+ mask->range.start = mask->range.end = 0;
+ }
+}
+
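+/* Validate the parsed attributes: key_expected accumulates attribute bits
+ * that must be present given the EtherType and IP protocol actually
+ * matched, while mask_allowed starts from the supplied key attributes and
+ * only re-admits an L3/L4 mask bit once the field selecting it is an
+ * exact match.
+ */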
+static bool ovs_match_validate(const struct sw_flow_match *match,
+ u64 key_attrs, u64 mask_attrs)
+{
+ u64 key_expected = 1 << OVS_KEY_ATTR_ETHERNET;
+ u64 mask_allowed = key_attrs; /* At most allow all key attributes */
+
+	/* The following mask attributes are allowed only if they
+	 * pass the validation tests. */
+ mask_allowed &= ~((1 << OVS_KEY_ATTR_IPV4)
+ | (1 << OVS_KEY_ATTR_IPV6)
+ | (1 << OVS_KEY_ATTR_TCP)
+ | (1 << OVS_KEY_ATTR_UDP)
+ | (1 << OVS_KEY_ATTR_SCTP)
+ | (1 << OVS_KEY_ATTR_ICMP)
+ | (1 << OVS_KEY_ATTR_ICMPV6)
+ | (1 << OVS_KEY_ATTR_ARP)
+ | (1 << OVS_KEY_ATTR_ND));
+
+ /* Always allowed mask fields. */
+ mask_allowed |= ((1 << OVS_KEY_ATTR_TUNNEL)
+ | (1 << OVS_KEY_ATTR_IN_PORT)
+ | (1 << OVS_KEY_ATTR_ETHERTYPE));
+
+ /* Check key attributes. */
+ if (match->key->eth.type == htons(ETH_P_ARP)
+ || match->key->eth.type == htons(ETH_P_RARP)) {
+ key_expected |= 1 << OVS_KEY_ATTR_ARP;
+ if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
+ mask_allowed |= 1 << OVS_KEY_ATTR_ARP;
+ }
+
+ if (match->key->eth.type == htons(ETH_P_IP)) {
+ key_expected |= 1 << OVS_KEY_ATTR_IPV4;
+ if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
+ mask_allowed |= 1 << OVS_KEY_ATTR_IPV4;
+
+ if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
+ if (match->key->ip.proto == IPPROTO_UDP) {
+ key_expected |= 1 << OVS_KEY_ATTR_UDP;
+ if (match->mask && (match->mask->key.ip.proto == 0xff))
+ mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
+ }
+
+ if (match->key->ip.proto == IPPROTO_SCTP) {
+ key_expected |= 1 << OVS_KEY_ATTR_SCTP;
+ if (match->mask && (match->mask->key.ip.proto == 0xff))
+ mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
+ }
+
+ if (match->key->ip.proto == IPPROTO_TCP) {
+ key_expected |= 1 << OVS_KEY_ATTR_TCP;
+ if (match->mask && (match->mask->key.ip.proto == 0xff))
+ mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
+ }
+
+ if (match->key->ip.proto == IPPROTO_ICMP) {
+ key_expected |= 1 << OVS_KEY_ATTR_ICMP;
+ if (match->mask && (match->mask->key.ip.proto == 0xff))
+ mask_allowed |= 1 << OVS_KEY_ATTR_ICMP;
+ }
+ }
+ }
+
+ if (match->key->eth.type == htons(ETH_P_IPV6)) {
+ key_expected |= 1 << OVS_KEY_ATTR_IPV6;
+ if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
+ mask_allowed |= 1 << OVS_KEY_ATTR_IPV6;
+
+ if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
+ if (match->key->ip.proto == IPPROTO_UDP) {
+ key_expected |= 1 << OVS_KEY_ATTR_UDP;
+ if (match->mask && (match->mask->key.ip.proto == 0xff))
+ mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
+ }
+
+ if (match->key->ip.proto == IPPROTO_SCTP) {
+ key_expected |= 1 << OVS_KEY_ATTR_SCTP;
+ if (match->mask && (match->mask->key.ip.proto == 0xff))
+ mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
+ }
+
+ if (match->key->ip.proto == IPPROTO_TCP) {
+ key_expected |= 1 << OVS_KEY_ATTR_TCP;
+ if (match->mask && (match->mask->key.ip.proto == 0xff))
+ mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
+ }
+
+ if (match->key->ip.proto == IPPROTO_ICMPV6) {
+ key_expected |= 1 << OVS_KEY_ATTR_ICMPV6;
+ if (match->mask && (match->mask->key.ip.proto == 0xff))
+ mask_allowed |= 1 << OVS_KEY_ATTR_ICMPV6;
+
+ if (match->key->ipv6.tp.src ==
+ htons(NDISC_NEIGHBOUR_SOLICITATION) ||
+ match->key->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
+ key_expected |= 1 << OVS_KEY_ATTR_ND;
+ if (match->mask && (match->mask->key.ipv6.tp.src == htons(0xffff)))
+ mask_allowed |= 1 << OVS_KEY_ATTR_ND;
+ }
+ }
+ }
+ }
+
+ if ((key_attrs & key_expected) != key_expected) {
+ /* Key attributes check failed. */
+ OVS_NLERR("Missing expected key attributes (key_attrs=%llx, expected=%llx).\n",
+ key_attrs, key_expected);
+ return false;
+ }
+
+ if ((mask_attrs & mask_allowed) != mask_attrs) {
+ /* Mask attributes check failed. */
+		OVS_NLERR("Mask contains more fields than allowed (mask_attrs=%llx, mask_allowed=%llx).\n",
+ mask_attrs, mask_allowed);
+ return false;
+ }
+
+ return true;
+}
+
static int check_header(struct sk_buff *skb, int len)
{
if (unlikely(skb->len < len))
@@ -102,6 +299,12 @@ static bool udphdr_ok(struct sk_buff *skb)
sizeof(struct udphdr));
}
+static bool sctphdr_ok(struct sk_buff *skb)
+{
+ return pskb_may_pull(skb, skb_transport_offset(skb) +
+ sizeof(struct sctphdr));
+}
+
static bool icmphdr_ok(struct sk_buff *skb)
{
return pskb_may_pull(skb, skb_transport_offset(skb) +
@@ -121,12 +324,7 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
return cur_ms - idle_ms;
}
-#define SW_FLOW_KEY_OFFSET(field) \
- (offsetof(struct sw_flow_key, field) + \
- FIELD_SIZEOF(struct sw_flow_key, field))
-
-static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key,
- int *key_lenp)
+static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
{
unsigned int nh_ofs = skb_network_offset(skb);
unsigned int nh_len;
@@ -136,8 +334,6 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key,
__be16 frag_off;
int err;
- *key_lenp = SW_FLOW_KEY_OFFSET(ipv6.label);
-
err = check_header(skb, nh_ofs + sizeof(*nh));
if (unlikely(err))
return err;
@@ -176,6 +372,22 @@ static bool icmp6hdr_ok(struct sk_buff *skb)
sizeof(struct icmp6hdr));
}
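+
+/* Copy 'src' into 'dst', ANDed with 'mask', but only over the byte range
+ * that the mask declares relevant. ovs_masked_flow_lookup() builds the
+ * masked key it hashes and compares with this helper, so bytes outside
+ * 'mask->range' never influence a lookup.
+ */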
+void ovs_flow_key_mask(struct sw_flow_key *dst, const struct sw_flow_key *src,
+ const struct sw_flow_mask *mask)
+{
+ const long *m = (long *)((u8 *)&mask->key + mask->range.start);
+ const long *s = (long *)((u8 *)src + mask->range.start);
+ long *d = (long *)((u8 *)dst + mask->range.start);
+ int i;
+
+	/* The memory outside of 'mask->range' is not set, since
+	 * further operations on 'dst' use only the contents within
+	 * 'mask->range'.
+	 */
+ for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
+ *d++ = *s++ & *m++;
+}
+
#define TCP_FLAGS_OFFSET 13
#define TCP_FLAG_MASK 0x3f
@@ -224,6 +436,7 @@ struct sw_flow *ovs_flow_alloc(void)
spin_lock_init(&flow->lock);
flow->sf_acts = NULL;
+ flow->mask = NULL;
return flow;
}
@@ -263,7 +476,7 @@ static void free_buckets(struct flex_array *buckets)
flex_array_free(buckets);
}
-struct flow_table *ovs_flow_tbl_alloc(int new_size)
+static struct flow_table *__flow_tbl_alloc(int new_size)
{
struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);
@@ -281,17 +494,15 @@ struct flow_table *ovs_flow_tbl_alloc(int new_size)
table->node_ver = 0;
table->keep_flows = false;
get_random_bytes(&table->hash_seed, sizeof(u32));
+ table->mask_list = NULL;
return table;
}
-void ovs_flow_tbl_destroy(struct flow_table *table)
+static void __flow_tbl_destroy(struct flow_table *table)
{
int i;
- if (!table)
- return;
-
if (table->keep_flows)
goto skip_flows;
@@ -302,32 +513,56 @@ void ovs_flow_tbl_destroy(struct flow_table *table)
int ver = table->node_ver;
hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
- hlist_del_rcu(&flow->hash_node[ver]);
- ovs_flow_free(flow);
+ hlist_del(&flow->hash_node[ver]);
+ ovs_flow_free(flow, false);
}
}
+ BUG_ON(!list_empty(table->mask_list));
+ kfree(table->mask_list);
+
skip_flows:
free_buckets(table->buckets);
kfree(table);
}
+struct flow_table *ovs_flow_tbl_alloc(int new_size)
+{
+ struct flow_table *table = __flow_tbl_alloc(new_size);
+
+ if (!table)
+ return NULL;
+
+ table->mask_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
+ if (!table->mask_list) {
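+		/* No flows exist yet, so keep_flows makes the destroy
+		 * path skip the flow walk and free only the buckets.
+		 */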
+ table->keep_flows = true;
+ __flow_tbl_destroy(table);
+ return NULL;
+ }
+ INIT_LIST_HEAD(table->mask_list);
+
+ return table;
+}
+
static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
struct flow_table *table = container_of(rcu, struct flow_table, rcu);
- ovs_flow_tbl_destroy(table);
+ __flow_tbl_destroy(table);
}
-void ovs_flow_tbl_deferred_destroy(struct flow_table *table)
+void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
{
if (!table)
return;
- call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
+ if (deferred)
+ call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
+ else
+ __flow_tbl_destroy(table);
}
-struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last)
+struct sw_flow *ovs_flow_dump_next(struct flow_table *table, u32 *bucket, u32 *last)
{
struct sw_flow *flow;
struct hlist_head *head;
@@ -353,11 +588,13 @@ struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *la
return NULL;
}
-static void __flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
+static void __tbl_insert(struct flow_table *table, struct sw_flow *flow)
{
struct hlist_head *head;
+
head = find_bucket(table, flow->hash);
hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);
+
table->count++;
}
@@ -377,8 +614,10 @@ static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new
head = flex_array_get(old->buckets, i);
hlist_for_each_entry(flow, head, hash_node[old_ver])
- __flow_tbl_insert(new, flow);
+ __tbl_insert(new, flow);
}
+
+ new->mask_list = old->mask_list;
old->keep_flows = true;
}
@@ -386,7 +625,7 @@ static struct flow_table *__flow_tbl_rehash(struct flow_table *table, int n_buck
{
struct flow_table *new_table;
- new_table = ovs_flow_tbl_alloc(n_buckets);
+ new_table = __flow_tbl_alloc(n_buckets);
if (!new_table)
return ERR_PTR(-ENOMEM);
@@ -405,28 +644,30 @@ struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
return __flow_tbl_rehash(table, table->n_buckets * 2);
}
-void ovs_flow_free(struct sw_flow *flow)
+static void __flow_free(struct sw_flow *flow)
{
- if (unlikely(!flow))
- return;
-
kfree((struct sf_flow_acts __force *)flow->sf_acts);
kmem_cache_free(flow_cache, flow);
}
-/* RCU callback used by ovs_flow_deferred_free. */
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
- ovs_flow_free(flow);
+ __flow_free(flow);
}
-/* Schedules 'flow' to be freed after the next RCU grace period.
- * The caller must hold rcu_read_lock for this to be sensible. */
-void ovs_flow_deferred_free(struct sw_flow *flow)
+void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
- call_rcu(&flow->rcu, rcu_free_flow_callback);
+ if (!flow)
+ return;
+
+ ovs_sw_flow_mask_del_ref(flow->mask, deferred);
+
+ if (deferred)
+ call_rcu(&flow->rcu, rcu_free_flow_callback);
+ else
+ __flow_free(flow);
}
/* Schedules 'sf_acts' to be freed after the next RCU grace period.
@@ -497,18 +738,15 @@ static __be16 parse_ethertype(struct sk_buff *skb)
}
static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
- int *key_lenp, int nh_len)
+ int nh_len)
{
struct icmp6hdr *icmp = icmp6_hdr(skb);
- int error = 0;
- int key_len;
/* The ICMPv6 type and code fields use the 16-bit transport port
* fields, so we need to store them in 16-bit network byte order.
*/
key->ipv6.tp.src = htons(icmp->icmp6_type);
key->ipv6.tp.dst = htons(icmp->icmp6_code);
- key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
if (icmp->icmp6_code == 0 &&
(icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
@@ -517,21 +755,17 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
struct nd_msg *nd;
int offset;
- key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
-
/* In order to process neighbor discovery options, we need the
* entire packet.
*/
if (unlikely(icmp_len < sizeof(*nd)))
- goto out;
- if (unlikely(skb_linearize(skb))) {
- error = -ENOMEM;
- goto out;
- }
+ return 0;
+
+ if (unlikely(skb_linearize(skb)))
+ return -ENOMEM;
nd = (struct nd_msg *)skb_transport_header(skb);
key->ipv6.nd.target = nd->target;
- key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
icmp_len -= sizeof(*nd);
offset = 0;
@@ -541,7 +775,7 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
int opt_len = nd_opt->nd_opt_len * 8;
if (unlikely(!opt_len || opt_len > icmp_len))
- goto invalid;
+ return 0;
/* Store the link layer address if the appropriate
* option is provided. It is considered an error if
@@ -566,16 +800,14 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
}
}
- goto out;
+ return 0;
invalid:
memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));
-out:
- *key_lenp = key_len;
- return error;
+ return 0;
}
/**
@@ -584,7 +816,6 @@ out:
* Ethernet header
* @in_port: port number on which @skb was received.
* @key: output flow key
- * @key_lenp: length of output flow key
*
* The caller must ensure that skb->len >= ETH_HLEN.
*
@@ -602,11 +833,9 @@ out:
* of a correct length, otherwise the same as skb->network_header.
* For other key->eth.type values it is left untouched.
*/
-int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
- int *key_lenp)
+int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
{
- int error = 0;
- int key_len = SW_FLOW_KEY_OFFSET(eth);
+ int error;
struct ethhdr *eth;
memset(key, 0, sizeof(*key));
@@ -649,15 +878,13 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
struct iphdr *nh;
__be16 offset;
- key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
-
error = check_iphdr(skb);
if (unlikely(error)) {
if (error == -EINVAL) {
skb->transport_header = skb->network_header;
error = 0;
}
- goto out;
+ return error;
}
nh = ip_hdr(skb);
@@ -671,7 +898,7 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
offset = nh->frag_off & htons(IP_OFFSET);
if (offset) {
key->ip.frag = OVS_FRAG_TYPE_LATER;
- goto out;
+ return 0;
}
if (nh->frag_off & htons(IP_MF) ||
skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
@@ -679,21 +906,24 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
/* Transport layer. */
if (key->ip.proto == IPPROTO_TCP) {
- key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
if (tcphdr_ok(skb)) {
struct tcphdr *tcp = tcp_hdr(skb);
key->ipv4.tp.src = tcp->source;
key->ipv4.tp.dst = tcp->dest;
}
} else if (key->ip.proto == IPPROTO_UDP) {
- key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
if (udphdr_ok(skb)) {
struct udphdr *udp = udp_hdr(skb);
key->ipv4.tp.src = udp->source;
key->ipv4.tp.dst = udp->dest;
}
+ } else if (key->ip.proto == IPPROTO_SCTP) {
+ if (sctphdr_ok(skb)) {
+ struct sctphdr *sctp = sctp_hdr(skb);
+ key->ipv4.tp.src = sctp->source;
+ key->ipv4.tp.dst = sctp->dest;
+ }
} else if (key->ip.proto == IPPROTO_ICMP) {
- key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
if (icmphdr_ok(skb)) {
struct icmphdr *icmp = icmp_hdr(skb);
/* The ICMP type and code fields use the 16-bit
@@ -722,102 +952,175 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
- key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
}
} else if (key->eth.type == htons(ETH_P_IPV6)) {
int nh_len; /* IPv6 Header + Extensions */
- nh_len = parse_ipv6hdr(skb, key, &key_len);
+ nh_len = parse_ipv6hdr(skb, key);
if (unlikely(nh_len < 0)) {
- if (nh_len == -EINVAL)
+ if (nh_len == -EINVAL) {
skb->transport_header = skb->network_header;
- else
+ error = 0;
+ } else {
error = nh_len;
- goto out;
+ }
+ return error;
}
if (key->ip.frag == OVS_FRAG_TYPE_LATER)
- goto out;
+ return 0;
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
key->ip.frag = OVS_FRAG_TYPE_FIRST;
/* Transport layer. */
if (key->ip.proto == NEXTHDR_TCP) {
- key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
if (tcphdr_ok(skb)) {
struct tcphdr *tcp = tcp_hdr(skb);
key->ipv6.tp.src = tcp->source;
key->ipv6.tp.dst = tcp->dest;
}
} else if (key->ip.proto == NEXTHDR_UDP) {
- key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
if (udphdr_ok(skb)) {
struct udphdr *udp = udp_hdr(skb);
key->ipv6.tp.src = udp->source;
key->ipv6.tp.dst = udp->dest;
}
+ } else if (key->ip.proto == NEXTHDR_SCTP) {
+ if (sctphdr_ok(skb)) {
+ struct sctphdr *sctp = sctp_hdr(skb);
+ key->ipv6.tp.src = sctp->source;
+ key->ipv6.tp.dst = sctp->dest;
+ }
} else if (key->ip.proto == NEXTHDR_ICMP) {
- key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
if (icmp6hdr_ok(skb)) {
- error = parse_icmpv6(skb, key, &key_len, nh_len);
- if (error < 0)
- goto out;
+ error = parse_icmpv6(skb, key, nh_len);
+ if (error)
+ return error;
}
}
}
-out:
- *key_lenp = key_len;
- return error;
+ return 0;
}
-static u32 ovs_flow_hash(const struct sw_flow_key *key, int key_start, int key_len)
+static u32 ovs_flow_hash(const struct sw_flow_key *key, int key_start,
+ int key_end)
{
- return jhash2((u32 *)((u8 *)key + key_start),
- DIV_ROUND_UP(key_len - key_start, sizeof(u32)), 0);
+ u32 *hash_key = (u32 *)((u8 *)key + key_start);
+ int hash_u32s = (key_end - key_start) >> 2;
+
+ /* Make sure number of hash bytes are multiple of u32. */
+ BUILD_BUG_ON(sizeof(long) % sizeof(u32));
+
+ return jhash2(hash_key, hash_u32s, 0);
}
-static int flow_key_start(struct sw_flow_key *key)
+static int flow_key_start(const struct sw_flow_key *key)
{
if (key->tun_key.ipv4_dst)
return 0;
else
- return offsetof(struct sw_flow_key, phy);
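+		/* Round down so the long-wise hash and compare helpers
+		 * always start on a long-aligned boundary.
+		 */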
+ return rounddown(offsetof(struct sw_flow_key, phy),
+ sizeof(long));
+}
+
+static bool __cmp_key(const struct sw_flow_key *key1,
+ const struct sw_flow_key *key2, int key_start, int key_end)
+{
+ const long *cp1 = (long *)((u8 *)key1 + key_start);
+ const long *cp2 = (long *)((u8 *)key2 + key_start);
+ long diffs = 0;
+ int i;
+
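+	/* OR together the XOR of each long-sized chunk; the keys are
+	 * equal iff no bit ever differs.
+	 */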
+ for (i = key_start; i < key_end; i += sizeof(long))
+ diffs |= *cp1++ ^ *cp2++;
+
+ return diffs == 0;
}
-struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
- struct sw_flow_key *key, int key_len)
+static bool __flow_cmp_masked_key(const struct sw_flow *flow,
+ const struct sw_flow_key *key, int key_start, int key_end)
+{
+ return __cmp_key(&flow->key, key, key_start, key_end);
+}
+
+static bool __flow_cmp_unmasked_key(const struct sw_flow *flow,
+ const struct sw_flow_key *key, int key_start, int key_end)
+{
+ return __cmp_key(&flow->unmasked_key, key, key_start, key_end);
+}
+
+bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
+ const struct sw_flow_key *key, int key_end)
+{
+	int key_start = flow_key_start(key);
+
+	return __flow_cmp_unmasked_key(flow, key, key_start, key_end);
+}
+
+struct sw_flow *ovs_flow_lookup_unmasked_key(struct flow_table *table,
+ struct sw_flow_match *match)
+{
+ struct sw_flow_key *unmasked = match->key;
+ int key_end = match->range.end;
+ struct sw_flow *flow;
+
+ flow = ovs_flow_lookup(table, unmasked);
+ if (flow && (!ovs_flow_cmp_unmasked_key(flow, unmasked, key_end)))
+ flow = NULL;
+
+ return flow;
+}
+
+static struct sw_flow *ovs_masked_flow_lookup(struct flow_table *table,
+ const struct sw_flow_key *unmasked,
+ struct sw_flow_mask *mask)
{
struct sw_flow *flow;
struct hlist_head *head;
- u8 *_key;
- int key_start;
+ int key_start = mask->range.start;
+ int key_end = mask->range.end;
u32 hash;
+ struct sw_flow_key masked_key;
- key_start = flow_key_start(key);
- hash = ovs_flow_hash(key, key_start, key_len);
-
- _key = (u8 *) key + key_start;
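+	/* Hash and compare only the masked bytes inside the mask's range. */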
+ ovs_flow_key_mask(&masked_key, unmasked, mask);
+ hash = ovs_flow_hash(&masked_key, key_start, key_end);
head = find_bucket(table, hash);
hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) {
-
- if (flow->hash == hash &&
- !memcmp((u8 *)&flow->key + key_start, _key, key_len - key_start)) {
+ if (flow->mask == mask &&
+ __flow_cmp_masked_key(flow, &masked_key,
+ key_start, key_end))
return flow;
- }
}
return NULL;
}
-void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
- struct sw_flow_key *key, int key_len)
+struct sw_flow *ovs_flow_lookup(struct flow_table *tbl,
+ const struct sw_flow_key *key)
+{
+ struct sw_flow *flow = NULL;
+ struct sw_flow_mask *mask;
+
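+	/* Probe the table once per mask; the first masked hit wins. */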
+ list_for_each_entry_rcu(mask, tbl->mask_list, list) {
+ flow = ovs_masked_flow_lookup(tbl, key, mask);
+ if (flow) /* Found */
+ break;
+ }
+
+ return flow;
+}
+
+
+void ovs_flow_insert(struct flow_table *table, struct sw_flow *flow)
{
- flow->hash = ovs_flow_hash(key, flow_key_start(key), key_len);
- memcpy(&flow->key, key, sizeof(flow->key));
- __flow_tbl_insert(table, flow);
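+	/* Hash over the same masked range that lookups will use. */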
+ flow->hash = ovs_flow_hash(&flow->key, flow->mask->range.start,
+ flow->mask->range.end);
+ __tbl_insert(table, flow);
}
-void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
+void ovs_flow_remove(struct flow_table *table, struct sw_flow *flow)
{
BUG_ON(table->count == 0);
hlist_del_rcu(&flow->hash_node[table->node_ver]);
@@ -837,6 +1140,7 @@ const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
[OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
[OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
[OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
+ [OVS_KEY_ATTR_SCTP] = sizeof(struct ovs_key_sctp),
[OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
[OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
[OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
@@ -844,149 +1148,84 @@ const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
[OVS_KEY_ATTR_TUNNEL] = -1,
};
-static int ipv4_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
- const struct nlattr *a[], u32 *attrs)
+static bool is_all_zero(const u8 *fp, size_t size)
{
- const struct ovs_key_icmp *icmp_key;
- const struct ovs_key_tcp *tcp_key;
- const struct ovs_key_udp *udp_key;
-
- switch (swkey->ip.proto) {
- case IPPROTO_TCP:
- if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
- return -EINVAL;
- *attrs &= ~(1 << OVS_KEY_ATTR_TCP);
-
- *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
- tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
- swkey->ipv4.tp.src = tcp_key->tcp_src;
- swkey->ipv4.tp.dst = tcp_key->tcp_dst;
- break;
-
- case IPPROTO_UDP:
- if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
- return -EINVAL;
- *attrs &= ~(1 << OVS_KEY_ATTR_UDP);
-
- *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
- udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
- swkey->ipv4.tp.src = udp_key->udp_src;
- swkey->ipv4.tp.dst = udp_key->udp_dst;
- break;
-
- case IPPROTO_ICMP:
- if (!(*attrs & (1 << OVS_KEY_ATTR_ICMP)))
- return -EINVAL;
- *attrs &= ~(1 << OVS_KEY_ATTR_ICMP);
-
- *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
- icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
- swkey->ipv4.tp.src = htons(icmp_key->icmp_type);
- swkey->ipv4.tp.dst = htons(icmp_key->icmp_code);
- break;
- }
-
- return 0;
-}
-
-static int ipv6_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
- const struct nlattr *a[], u32 *attrs)
-{
- const struct ovs_key_icmpv6 *icmpv6_key;
- const struct ovs_key_tcp *tcp_key;
- const struct ovs_key_udp *udp_key;
-
- switch (swkey->ip.proto) {
- case IPPROTO_TCP:
- if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
- return -EINVAL;
- *attrs &= ~(1 << OVS_KEY_ATTR_TCP);
-
- *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
- tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
- swkey->ipv6.tp.src = tcp_key->tcp_src;
- swkey->ipv6.tp.dst = tcp_key->tcp_dst;
- break;
-
- case IPPROTO_UDP:
- if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
- return -EINVAL;
- *attrs &= ~(1 << OVS_KEY_ATTR_UDP);
-
- *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
- udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
- swkey->ipv6.tp.src = udp_key->udp_src;
- swkey->ipv6.tp.dst = udp_key->udp_dst;
- break;
-
- case IPPROTO_ICMPV6:
- if (!(*attrs & (1 << OVS_KEY_ATTR_ICMPV6)))
- return -EINVAL;
- *attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);
-
- *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
- icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
- swkey->ipv6.tp.src = htons(icmpv6_key->icmpv6_type);
- swkey->ipv6.tp.dst = htons(icmpv6_key->icmpv6_code);
+ int i;
- if (swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) ||
- swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
- const struct ovs_key_nd *nd_key;
+ if (!fp)
+ return false;
- if (!(*attrs & (1 << OVS_KEY_ATTR_ND)))
- return -EINVAL;
- *attrs &= ~(1 << OVS_KEY_ATTR_ND);
-
- *key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
- nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
- memcpy(&swkey->ipv6.nd.target, nd_key->nd_target,
- sizeof(swkey->ipv6.nd.target));
- memcpy(swkey->ipv6.nd.sll, nd_key->nd_sll, ETH_ALEN);
- memcpy(swkey->ipv6.nd.tll, nd_key->nd_tll, ETH_ALEN);
- }
- break;
- }
+ for (i = 0; i < size; i++)
+ if (fp[i])
+ return false;
- return 0;
+ return true;
}
-static int parse_flow_nlattrs(const struct nlattr *attr,
- const struct nlattr *a[], u32 *attrsp)
+static int __parse_flow_nlattrs(const struct nlattr *attr,
+ const struct nlattr *a[],
+ u64 *attrsp, bool nz)
{
const struct nlattr *nla;
u32 attrs;
int rem;
- attrs = 0;
+ attrs = *attrsp;
nla_for_each_nested(nla, attr, rem) {
u16 type = nla_type(nla);
int expected_len;
- if (type > OVS_KEY_ATTR_MAX || attrs & (1 << type))
+		if (type > OVS_KEY_ATTR_MAX) {
+			OVS_NLERR("Unknown key attribute (type=%d, max=%d).\n",
+				type, OVS_KEY_ATTR_MAX);
+			return -EINVAL;
+		}
+
+ if (attrs & (1 << type)) {
+ OVS_NLERR("Duplicate key attribute (type %d).\n", type);
return -EINVAL;
+ }
expected_len = ovs_key_lens[type];
- if (nla_len(nla) != expected_len && expected_len != -1)
+ if (nla_len(nla) != expected_len && expected_len != -1) {
+ OVS_NLERR("Key attribute has unexpected length (type=%d"
+ ", length=%d, expected=%d).\n", type,
+ nla_len(nla), expected_len);
return -EINVAL;
+ }
- attrs |= 1 << type;
- a[type] = nla;
+ if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
+ attrs |= 1 << type;
+ a[type] = nla;
+ }
}
- if (rem)
+ if (rem) {
+ OVS_NLERR("Message has %d unknown bytes.\n", rem);
return -EINVAL;
+ }
*attrsp = attrs;
return 0;
}
+static int parse_flow_mask_nlattrs(const struct nlattr *attr,
+ const struct nlattr *a[], u64 *attrsp)
+{
+ return __parse_flow_nlattrs(attr, a, attrsp, true);
+}
+
+static int parse_flow_nlattrs(const struct nlattr *attr,
+ const struct nlattr *a[], u64 *attrsp)
+{
+ return __parse_flow_nlattrs(attr, a, attrsp, false);
+}
+
int ovs_ipv4_tun_from_nlattr(const struct nlattr *attr,
- struct ovs_key_ipv4_tunnel *tun_key)
+ struct sw_flow_match *match, bool is_mask)
{
struct nlattr *a;
int rem;
bool ttl = false;
-
- memset(tun_key, 0, sizeof(*tun_key));
+ __be16 tun_flags = 0;
nla_for_each_nested(a, attr, rem) {
int type = nla_type(a);
@@ -1000,53 +1239,78 @@ int ovs_ipv4_tun_from_nlattr(const struct nlattr *attr,
[OVS_TUNNEL_KEY_ATTR_CSUM] = 0,
};
- if (type > OVS_TUNNEL_KEY_ATTR_MAX ||
- ovs_tunnel_key_lens[type] != nla_len(a))
+ if (type > OVS_TUNNEL_KEY_ATTR_MAX) {
+ OVS_NLERR("Unknown IPv4 tunnel attribute (type=%d, max=%d).\n",
+ type, OVS_TUNNEL_KEY_ATTR_MAX);
return -EINVAL;
+ }
+
+ if (ovs_tunnel_key_lens[type] != nla_len(a)) {
+ OVS_NLERR("IPv4 tunnel attribute type has unexpected "
+ " length (type=%d, length=%d, expected=%d).\n",
+ type, nla_len(a), ovs_tunnel_key_lens[type]);
+ return -EINVAL;
+ }
switch (type) {
case OVS_TUNNEL_KEY_ATTR_ID:
- tun_key->tun_id = nla_get_be64(a);
- tun_key->tun_flags |= TUNNEL_KEY;
+ SW_FLOW_KEY_PUT(match, tun_key.tun_id,
+ nla_get_be64(a), is_mask);
+ tun_flags |= TUNNEL_KEY;
break;
case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
- tun_key->ipv4_src = nla_get_be32(a);
+ SW_FLOW_KEY_PUT(match, tun_key.ipv4_src,
+ nla_get_be32(a), is_mask);
break;
case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
- tun_key->ipv4_dst = nla_get_be32(a);
+ SW_FLOW_KEY_PUT(match, tun_key.ipv4_dst,
+ nla_get_be32(a), is_mask);
break;
case OVS_TUNNEL_KEY_ATTR_TOS:
- tun_key->ipv4_tos = nla_get_u8(a);
+ SW_FLOW_KEY_PUT(match, tun_key.ipv4_tos,
+ nla_get_u8(a), is_mask);
break;
case OVS_TUNNEL_KEY_ATTR_TTL:
- tun_key->ipv4_ttl = nla_get_u8(a);
+ SW_FLOW_KEY_PUT(match, tun_key.ipv4_ttl,
+ nla_get_u8(a), is_mask);
ttl = true;
break;
case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
- tun_key->tun_flags |= TUNNEL_DONT_FRAGMENT;
+ tun_flags |= TUNNEL_DONT_FRAGMENT;
break;
case OVS_TUNNEL_KEY_ATTR_CSUM:
- tun_key->tun_flags |= TUNNEL_CSUM;
+ tun_flags |= TUNNEL_CSUM;
break;
default:
return -EINVAL;
-
}
}
- if (rem > 0)
- return -EINVAL;
- if (!tun_key->ipv4_dst)
- return -EINVAL;
+ SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask);
- if (!ttl)
+ if (rem > 0) {
+ OVS_NLERR("IPv4 tunnel attribute has %d unknown bytes.\n", rem);
return -EINVAL;
+ }
+
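+	/* Only the flow key itself must carry a destination address and
+	 * a TTL; a mask is free to omit them.
+	 */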
+ if (!is_mask) {
+ if (!match->key->tun_key.ipv4_dst) {
+ OVS_NLERR("IPv4 tunnel destination address is zero.\n");
+ return -EINVAL;
+ }
+
+ if (!ttl) {
+ OVS_NLERR("IPv4 tunnel TTL not specified.\n");
+ return -EINVAL;
+ }
+ }
return 0;
}
int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb,
- const struct ovs_key_ipv4_tunnel *tun_key)
+ const struct ovs_key_ipv4_tunnel *tun_key,
+ const struct ovs_key_ipv4_tunnel *output)
{
struct nlattr *nla;
@@ -1054,23 +1318,24 @@ int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb,
if (!nla)
return -EMSGSIZE;
- if (tun_key->tun_flags & TUNNEL_KEY &&
- nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, tun_key->tun_id))
+ if (output->tun_flags & TUNNEL_KEY &&
+ nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
return -EMSGSIZE;
- if (tun_key->ipv4_src &&
- nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, tun_key->ipv4_src))
+ if (output->ipv4_src &&
+ nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, output->ipv4_src))
return -EMSGSIZE;
- if (nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, tun_key->ipv4_dst))
+ if (output->ipv4_dst &&
+ nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, output->ipv4_dst))
return -EMSGSIZE;
- if (tun_key->ipv4_tos &&
- nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, tun_key->ipv4_tos))
+ if (output->ipv4_tos &&
+ nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos))
return -EMSGSIZE;
- if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, tun_key->ipv4_ttl))
+ if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ipv4_ttl))
return -EMSGSIZE;
- if ((tun_key->tun_flags & TUNNEL_DONT_FRAGMENT) &&
+ if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
return -EMSGSIZE;
- if ((tun_key->tun_flags & TUNNEL_CSUM) &&
+ if ((output->tun_flags & TUNNEL_CSUM) &&
nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
return -EMSGSIZE;
@@ -1078,176 +1343,390 @@ int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb,
return 0;
}
-/**
- * ovs_flow_from_nlattrs - parses Netlink attributes into a flow key.
- * @swkey: receives the extracted flow key.
- * @key_lenp: number of bytes used in @swkey.
- * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
- * sequence.
- */
-int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
- const struct nlattr *attr)
+static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs,
+ const struct nlattr **a, bool is_mask)
{
- const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
- const struct ovs_key_ethernet *eth_key;
- int key_len;
- u32 attrs;
- int err;
+ if (*attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
+ SW_FLOW_KEY_PUT(match, phy.priority,
+ nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask);
+ *attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
+ }
- memset(swkey, 0, sizeof(struct sw_flow_key));
- key_len = SW_FLOW_KEY_OFFSET(eth);
+ if (*attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
+ u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
- err = parse_flow_nlattrs(attr, a, &attrs);
- if (err)
- return err;
+ if (is_mask)
+ in_port = 0xffffffff; /* Always exact match in_port. */
+ else if (in_port >= DP_MAX_PORTS)
+ return -EINVAL;
- /* Metadata attributes. */
- if (attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
- swkey->phy.priority = nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]);
- attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
+ SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask);
+ *attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
+ } else if (!is_mask) {
+ SW_FLOW_KEY_PUT(match, phy.in_port, DP_MAX_PORTS, is_mask);
}
- if (attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
- u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
- if (in_port >= DP_MAX_PORTS)
- return -EINVAL;
- swkey->phy.in_port = in_port;
- attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
- } else {
- swkey->phy.in_port = DP_MAX_PORTS;
+
+ if (*attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) {
+ uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);
+
+ SW_FLOW_KEY_PUT(match, phy.skb_mark, mark, is_mask);
+ *attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK);
}
- if (attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) {
- swkey->phy.skb_mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);
- attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK);
+ if (*attrs & (1 << OVS_KEY_ATTR_TUNNEL)) {
+ if (ovs_ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
+ is_mask))
+ return -EINVAL;
+ *attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
}
+ return 0;
+}
- if (attrs & (1 << OVS_KEY_ATTR_TUNNEL)) {
- err = ovs_ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], &swkey->tun_key);
- if (err)
- return err;
+static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
+ const struct nlattr **a, bool is_mask)
+{
+ int err;
+ u64 orig_attrs = attrs;
- attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
- }
+ err = metadata_from_nlattrs(match, &attrs, a, is_mask);
+ if (err)
+ return err;
- /* Data attributes. */
- if (!(attrs & (1 << OVS_KEY_ATTR_ETHERNET)))
- return -EINVAL;
- attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);
+ if (attrs & (1 << OVS_KEY_ATTR_ETHERNET)) {
+ const struct ovs_key_ethernet *eth_key;
- eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
- memcpy(swkey->eth.src, eth_key->eth_src, ETH_ALEN);
- memcpy(swkey->eth.dst, eth_key->eth_dst, ETH_ALEN);
+ eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
+ SW_FLOW_KEY_MEMCPY(match, eth.src,
+ eth_key->eth_src, ETH_ALEN, is_mask);
+ SW_FLOW_KEY_MEMCPY(match, eth.dst,
+ eth_key->eth_dst, ETH_ALEN, is_mask);
+ attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);
+ }
- if (attrs & (1u << OVS_KEY_ATTR_ETHERTYPE) &&
- nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q)) {
- const struct nlattr *encap;
+ if (attrs & (1 << OVS_KEY_ATTR_VLAN)) {
__be16 tci;
- if (attrs != ((1 << OVS_KEY_ATTR_VLAN) |
- (1 << OVS_KEY_ATTR_ETHERTYPE) |
- (1 << OVS_KEY_ATTR_ENCAP)))
- return -EINVAL;
-
- encap = a[OVS_KEY_ATTR_ENCAP];
tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
- if (tci & htons(VLAN_TAG_PRESENT)) {
- swkey->eth.tci = tci;
-
- err = parse_flow_nlattrs(encap, a, &attrs);
- if (err)
- return err;
- } else if (!tci) {
- /* Corner case for truncated 802.1Q header. */
- if (nla_len(encap))
- return -EINVAL;
+ if (!(tci & htons(VLAN_TAG_PRESENT))) {
+ if (is_mask)
+ OVS_NLERR("VLAN TCI mask does not have exact match for VLAN_TAG_PRESENT bit.\n");
+ else
+ OVS_NLERR("VLAN TCI does not have VLAN_TAG_PRESENT bit set.\n");
- swkey->eth.type = htons(ETH_P_8021Q);
- *key_lenp = key_len;
- return 0;
- } else {
return -EINVAL;
}
- }
+
+ SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask);
+ attrs &= ~(1 << OVS_KEY_ATTR_VLAN);
+ } else if (!is_mask)
+ SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true);
if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
- swkey->eth.type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
- if (ntohs(swkey->eth.type) < ETH_P_802_3_MIN)
+ __be16 eth_type;
+
+ eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
+ if (is_mask) {
+ /* Always exact match EtherType. */
+ eth_type = htons(0xffff);
+ } else if (ntohs(eth_type) < ETH_P_802_3_MIN) {
+ OVS_NLERR("EtherType is less than minimum (type=%x, min=%x).\n",
+ ntohs(eth_type), ETH_P_802_3_MIN);
return -EINVAL;
+ }
+
+ SW_FLOW_KEY_PUT(match, eth.type, eth_type, is_mask);
attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
- } else {
- swkey->eth.type = htons(ETH_P_802_2);
+ } else if (!is_mask) {
+ SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
}
- if (swkey->eth.type == htons(ETH_P_IP)) {
+ if (attrs & (1 << OVS_KEY_ATTR_IPV4)) {
const struct ovs_key_ipv4 *ipv4_key;
- if (!(attrs & (1 << OVS_KEY_ATTR_IPV4)))
- return -EINVAL;
- attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
-
- key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
- if (ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX)
+ if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) {
+ OVS_NLERR("Unknown IPv4 fragment type (value=%d, max=%d).\n",
+ ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX);
return -EINVAL;
- swkey->ip.proto = ipv4_key->ipv4_proto;
- swkey->ip.tos = ipv4_key->ipv4_tos;
- swkey->ip.ttl = ipv4_key->ipv4_ttl;
- swkey->ip.frag = ipv4_key->ipv4_frag;
- swkey->ipv4.addr.src = ipv4_key->ipv4_src;
- swkey->ipv4.addr.dst = ipv4_key->ipv4_dst;
-
- if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
- err = ipv4_flow_from_nlattrs(swkey, &key_len, a, &attrs);
- if (err)
- return err;
}
- } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
- const struct ovs_key_ipv6 *ipv6_key;
+ SW_FLOW_KEY_PUT(match, ip.proto,
+ ipv4_key->ipv4_proto, is_mask);
+ SW_FLOW_KEY_PUT(match, ip.tos,
+ ipv4_key->ipv4_tos, is_mask);
+ SW_FLOW_KEY_PUT(match, ip.ttl,
+ ipv4_key->ipv4_ttl, is_mask);
+ SW_FLOW_KEY_PUT(match, ip.frag,
+ ipv4_key->ipv4_frag, is_mask);
+ SW_FLOW_KEY_PUT(match, ipv4.addr.src,
+ ipv4_key->ipv4_src, is_mask);
+ SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
+ ipv4_key->ipv4_dst, is_mask);
+ attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
+ }
- if (!(attrs & (1 << OVS_KEY_ATTR_IPV6)))
- return -EINVAL;
- attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
+ if (attrs & (1 << OVS_KEY_ATTR_IPV6)) {
+ const struct ovs_key_ipv6 *ipv6_key;
- key_len = SW_FLOW_KEY_OFFSET(ipv6.label);
ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
- if (ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX)
+ if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) {
+ OVS_NLERR("Unknown IPv6 fragment type (value=%d, max=%d).\n",
+ ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX);
return -EINVAL;
- swkey->ipv6.label = ipv6_key->ipv6_label;
- swkey->ip.proto = ipv6_key->ipv6_proto;
- swkey->ip.tos = ipv6_key->ipv6_tclass;
- swkey->ip.ttl = ipv6_key->ipv6_hlimit;
- swkey->ip.frag = ipv6_key->ipv6_frag;
- memcpy(&swkey->ipv6.addr.src, ipv6_key->ipv6_src,
- sizeof(swkey->ipv6.addr.src));
- memcpy(&swkey->ipv6.addr.dst, ipv6_key->ipv6_dst,
- sizeof(swkey->ipv6.addr.dst));
-
- if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
- err = ipv6_flow_from_nlattrs(swkey, &key_len, a, &attrs);
- if (err)
- return err;
}
- } else if (swkey->eth.type == htons(ETH_P_ARP) ||
- swkey->eth.type == htons(ETH_P_RARP)) {
+ SW_FLOW_KEY_PUT(match, ipv6.label,
+ ipv6_key->ipv6_label, is_mask);
+ SW_FLOW_KEY_PUT(match, ip.proto,
+ ipv6_key->ipv6_proto, is_mask);
+ SW_FLOW_KEY_PUT(match, ip.tos,
+ ipv6_key->ipv6_tclass, is_mask);
+ SW_FLOW_KEY_PUT(match, ip.ttl,
+ ipv6_key->ipv6_hlimit, is_mask);
+ SW_FLOW_KEY_PUT(match, ip.frag,
+ ipv6_key->ipv6_frag, is_mask);
+ SW_FLOW_KEY_MEMCPY(match, ipv6.addr.src,
+ ipv6_key->ipv6_src,
+ sizeof(match->key->ipv6.addr.src),
+ is_mask);
+ SW_FLOW_KEY_MEMCPY(match, ipv6.addr.dst,
+ ipv6_key->ipv6_dst,
+ sizeof(match->key->ipv6.addr.dst),
+ is_mask);
+
+ attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
+ }
+
+ if (attrs & (1 << OVS_KEY_ATTR_ARP)) {
const struct ovs_key_arp *arp_key;
- if (!(attrs & (1 << OVS_KEY_ATTR_ARP)))
+ arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
+ if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
+ OVS_NLERR("Unknown ARP opcode (opcode=%d).\n",
+ arp_key->arp_op);
return -EINVAL;
+ }
+
+ SW_FLOW_KEY_PUT(match, ipv4.addr.src,
+ arp_key->arp_sip, is_mask);
+ SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
+ arp_key->arp_tip, is_mask);
+ SW_FLOW_KEY_PUT(match, ip.proto,
+ ntohs(arp_key->arp_op), is_mask);
+ SW_FLOW_KEY_MEMCPY(match, ipv4.arp.sha,
+ arp_key->arp_sha, ETH_ALEN, is_mask);
+ SW_FLOW_KEY_MEMCPY(match, ipv4.arp.tha,
+ arp_key->arp_tha, ETH_ALEN, is_mask);
+
attrs &= ~(1 << OVS_KEY_ATTR_ARP);
+ }
- key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
- arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
- swkey->ipv4.addr.src = arp_key->arp_sip;
- swkey->ipv4.addr.dst = arp_key->arp_tip;
- if (arp_key->arp_op & htons(0xff00))
+ if (attrs & (1 << OVS_KEY_ATTR_TCP)) {
+ const struct ovs_key_tcp *tcp_key;
+
+ tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
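+		/* orig_attrs keeps the IPv4/IPv6 bit that was consumed
+		 * above; it selects which union member the ports fill.
+		 */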
+ if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
+ SW_FLOW_KEY_PUT(match, ipv4.tp.src,
+ tcp_key->tcp_src, is_mask);
+ SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
+ tcp_key->tcp_dst, is_mask);
+ } else {
+ SW_FLOW_KEY_PUT(match, ipv6.tp.src,
+ tcp_key->tcp_src, is_mask);
+ SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
+ tcp_key->tcp_dst, is_mask);
+ }
+ attrs &= ~(1 << OVS_KEY_ATTR_TCP);
+ }
+
+ if (attrs & (1 << OVS_KEY_ATTR_UDP)) {
+ const struct ovs_key_udp *udp_key;
+
+ udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
+ if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
+ SW_FLOW_KEY_PUT(match, ipv4.tp.src,
+ udp_key->udp_src, is_mask);
+ SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
+ udp_key->udp_dst, is_mask);
+ } else {
+ SW_FLOW_KEY_PUT(match, ipv6.tp.src,
+ udp_key->udp_src, is_mask);
+ SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
+ udp_key->udp_dst, is_mask);
+ }
+ attrs &= ~(1 << OVS_KEY_ATTR_UDP);
+ }
+
+ if (attrs & (1 << OVS_KEY_ATTR_SCTP)) {
+ const struct ovs_key_sctp *sctp_key;
+
+ sctp_key = nla_data(a[OVS_KEY_ATTR_SCTP]);
+ if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
+ SW_FLOW_KEY_PUT(match, ipv4.tp.src,
+ sctp_key->sctp_src, is_mask);
+ SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
+ sctp_key->sctp_dst, is_mask);
+ } else {
+ SW_FLOW_KEY_PUT(match, ipv6.tp.src,
+ sctp_key->sctp_src, is_mask);
+ SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
+ sctp_key->sctp_dst, is_mask);
+ }
+ attrs &= ~(1 << OVS_KEY_ATTR_SCTP);
+ }
+
+ if (attrs & (1 << OVS_KEY_ATTR_ICMP)) {
+ const struct ovs_key_icmp *icmp_key;
+
+ icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
+ SW_FLOW_KEY_PUT(match, ipv4.tp.src,
+ htons(icmp_key->icmp_type), is_mask);
+ SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
+ htons(icmp_key->icmp_code), is_mask);
+ attrs &= ~(1 << OVS_KEY_ATTR_ICMP);
+ }
+
+ if (attrs & (1 << OVS_KEY_ATTR_ICMPV6)) {
+ const struct ovs_key_icmpv6 *icmpv6_key;
+
+ icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
+ SW_FLOW_KEY_PUT(match, ipv6.tp.src,
+ htons(icmpv6_key->icmpv6_type), is_mask);
+ SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
+ htons(icmpv6_key->icmpv6_code), is_mask);
+ attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);
+ }
+
+ if (attrs & (1 << OVS_KEY_ATTR_ND)) {
+ const struct ovs_key_nd *nd_key;
+
+ nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
+ SW_FLOW_KEY_MEMCPY(match, ipv6.nd.target,
+ nd_key->nd_target,
+ sizeof(match->key->ipv6.nd.target),
+ is_mask);
+ SW_FLOW_KEY_MEMCPY(match, ipv6.nd.sll,
+ nd_key->nd_sll, ETH_ALEN, is_mask);
+ SW_FLOW_KEY_MEMCPY(match, ipv6.nd.tll,
+ nd_key->nd_tll, ETH_ALEN, is_mask);
+ attrs &= ~(1 << OVS_KEY_ATTR_ND);
+ }
+
+ if (attrs != 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * ovs_match_from_nlattrs - parses Netlink attributes into a flow key and
+ * mask. In case the 'mask' is NULL, the flow is treated as an exact-match
+ * flow. Otherwise, it is treated as a wildcarded flow, except when the
+ * mask does not include any don't-care bits.
+ * @match: receives the extracted flow match information.
+ * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
+ * sequence. The fields should match those of the packet that triggered
+ * the creation of this flow.
+ * @mask: Optional. Netlink attribute holding a nested %OVS_KEY_ATTR_*
+ * Netlink attribute sequence that specifies the mask of the wildcarded
+ * flow.
+ */
+int ovs_match_from_nlattrs(struct sw_flow_match *match,
+ const struct nlattr *key,
+ const struct nlattr *mask)
+{
+ const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
+ const struct nlattr *encap;
+ u64 key_attrs = 0;
+ u64 mask_attrs = 0;
+ bool encap_valid = false;
+ int err;
+
+ err = parse_flow_nlattrs(key, a, &key_attrs);
+ if (err)
+ return err;
+
+ if ((key_attrs & (1 << OVS_KEY_ATTR_ETHERNET)) &&
+ (key_attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) &&
+ (nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q))) {
+ __be16 tci;
+
+ if (!((key_attrs & (1 << OVS_KEY_ATTR_VLAN)) &&
+ (key_attrs & (1 << OVS_KEY_ATTR_ENCAP)))) {
+ OVS_NLERR("Invalid Vlan frame.\n");
return -EINVAL;
- swkey->ip.proto = ntohs(arp_key->arp_op);
- memcpy(swkey->ipv4.arp.sha, arp_key->arp_sha, ETH_ALEN);
- memcpy(swkey->ipv4.arp.tha, arp_key->arp_tha, ETH_ALEN);
+ }
+
+ key_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
+ tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
+ encap = a[OVS_KEY_ATTR_ENCAP];
+ key_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
+ encap_valid = true;
+
+ if (tci & htons(VLAN_TAG_PRESENT)) {
+ err = parse_flow_nlattrs(encap, a, &key_attrs);
+ if (err)
+ return err;
+ } else if (!tci) {
+ /* Corner case for truncated 802.1Q header. */
+ if (nla_len(encap)) {
+ OVS_NLERR("Truncated 802.1Q header has non-zero encap attribute.\n");
+ return -EINVAL;
+ }
+ } else {
+ OVS_NLERR("Encap attribute is set for a non-VLAN frame.\n");
+ return -EINVAL;
+ }
+ }
+
+ err = ovs_key_from_nlattrs(match, key_attrs, a, false);
+ if (err)
+ return err;
+
+ if (mask) {
+ err = parse_flow_mask_nlattrs(mask, a, &mask_attrs);
+ if (err)
+ return err;
+
+		if (mask_attrs & (1ULL << OVS_KEY_ATTR_ENCAP)) {
+ __be16 eth_type = 0;
+ __be16 tci = 0;
+
+ if (!encap_valid) {
+ OVS_NLERR("Encap mask attribute is set for non-VLAN frame.\n");
+ return -EINVAL;
+ }
+
+ mask_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
+ if (a[OVS_KEY_ATTR_ETHERTYPE])
+ eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
+
+ if (eth_type == htons(0xffff)) {
+ mask_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
+ encap = a[OVS_KEY_ATTR_ENCAP];
+				err = parse_flow_mask_nlattrs(encap, a, &mask_attrs);
+				if (err)
+					return err;
+ } else {
+ OVS_NLERR("VLAN frames must have an exact match on the TPID (mask=%x).\n",
+ ntohs(eth_type));
+ return -EINVAL;
+ }
+
+ if (a[OVS_KEY_ATTR_VLAN])
+ tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
+
+ if (!(tci & htons(VLAN_TAG_PRESENT))) {
+ OVS_NLERR("VLAN tag present bit must have an exact match (tci_mask=%x).\n", ntohs(tci));
+ return -EINVAL;
+ }
+ }
+
+ err = ovs_key_from_nlattrs(match, mask_attrs, a, true);
+ if (err)
+ return err;
+ } else {
+ /* Populate exact match flow's key mask. */
+ if (match->mask)
+ ovs_sw_flow_mask_set(match->mask, &match->range, 0xff);
}
- if (attrs)
+ if (!ovs_match_validate(match, key_attrs, mask_attrs))
return -EINVAL;
- *key_lenp = key_len;
return 0;
}
@@ -1255,7 +1734,6 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
/**
* ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key.
* @flow: Receives extracted in_port, priority, tun_key and skb_mark.
- * @key_len: Length of key in @flow. Used for calculating flow hash.
* @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
* sequence.
*
@@ -1264,102 +1742,100 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
* get the metadata, that is, the parts of the flow key that cannot be
* extracted from the packet itself.
*/
-int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow, int key_len,
- const struct nlattr *attr)
+
+int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow,
+ const struct nlattr *attr)
{
struct ovs_key_ipv4_tunnel *tun_key = &flow->key.tun_key;
- const struct nlattr *nla;
- int rem;
+ const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
+ u64 attrs = 0;
+ int err;
+ struct sw_flow_match match;
flow->key.phy.in_port = DP_MAX_PORTS;
flow->key.phy.priority = 0;
flow->key.phy.skb_mark = 0;
memset(tun_key, 0, sizeof(flow->key.tun_key));
- nla_for_each_nested(nla, attr, rem) {
- int type = nla_type(nla);
-
- if (type <= OVS_KEY_ATTR_MAX && ovs_key_lens[type] > 0) {
- int err;
-
- if (nla_len(nla) != ovs_key_lens[type])
- return -EINVAL;
-
- switch (type) {
- case OVS_KEY_ATTR_PRIORITY:
- flow->key.phy.priority = nla_get_u32(nla);
- break;
-
- case OVS_KEY_ATTR_TUNNEL:
- err = ovs_ipv4_tun_from_nlattr(nla, tun_key);
- if (err)
- return err;
- break;
-
- case OVS_KEY_ATTR_IN_PORT:
- if (nla_get_u32(nla) >= DP_MAX_PORTS)
- return -EINVAL;
- flow->key.phy.in_port = nla_get_u32(nla);
- break;
-
- case OVS_KEY_ATTR_SKB_MARK:
- flow->key.phy.skb_mark = nla_get_u32(nla);
- break;
- }
- }
- }
- if (rem)
+ err = parse_flow_nlattrs(attr, a, &attrs);
+ if (err)
return -EINVAL;
- flow->hash = ovs_flow_hash(&flow->key,
- flow_key_start(&flow->key), key_len);
+ memset(&match, 0, sizeof(match));
+ match.key = &flow->key;
+
+ err = metadata_from_nlattrs(&match, &attrs, a, false);
+ if (err)
+ return err;
return 0;
}
-int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
+int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey,
+ const struct sw_flow_key *output, struct sk_buff *skb)
{
struct ovs_key_ethernet *eth_key;
struct nlattr *nla, *encap;
+ bool is_mask = (swkey != output);
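+	/* A mask is being dumped whenever 'output' is not the key itself. */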
- if (swkey->phy.priority &&
- nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority))
+ if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
goto nla_put_failure;
- if (swkey->tun_key.ipv4_dst &&
- ovs_ipv4_tun_to_nlattr(skb, &swkey->tun_key))
+ if ((swkey->tun_key.ipv4_dst || is_mask) &&
+ ovs_ipv4_tun_to_nlattr(skb, &swkey->tun_key, &output->tun_key))
goto nla_put_failure;
- if (swkey->phy.in_port != DP_MAX_PORTS &&
- nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port))
- goto nla_put_failure;
+ if (swkey->phy.in_port == DP_MAX_PORTS) {
+ if (is_mask && (output->phy.in_port == 0xffff))
+ if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, 0xffffffff))
+ goto nla_put_failure;
+ } else {
+ u16 upper_u16;
+ upper_u16 = !is_mask ? 0 : 0xffff;
- if (swkey->phy.skb_mark &&
- nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, swkey->phy.skb_mark))
+ if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT,
+ (upper_u16 << 16) | output->phy.in_port))
+ goto nla_put_failure;
+ }
+
+ if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark))
goto nla_put_failure;
nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
if (!nla)
goto nla_put_failure;
+
eth_key = nla_data(nla);
- memcpy(eth_key->eth_src, swkey->eth.src, ETH_ALEN);
- memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN);
+ memcpy(eth_key->eth_src, output->eth.src, ETH_ALEN);
+ memcpy(eth_key->eth_dst, output->eth.dst, ETH_ALEN);
if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
- if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q)) ||
- nla_put_be16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci))
+ __be16 eth_type;
+ eth_type = !is_mask ? htons(ETH_P_8021Q) : htons(0xffff);
+ if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) ||
+ nla_put_be16(skb, OVS_KEY_ATTR_VLAN, output->eth.tci))
goto nla_put_failure;
encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
if (!swkey->eth.tci)
goto unencap;
- } else {
+ } else
encap = NULL;
- }
- if (swkey->eth.type == htons(ETH_P_802_2))
+ if (swkey->eth.type == htons(ETH_P_802_2)) {
+ /*
+ * Ethertype 802.2 is represented in the netlink with omitted
+ * OVS_KEY_ATTR_ETHERTYPE in the flow key attribute, and
+ * 0xffff in the mask attribute. Ethertype can also
+ * be wildcarded.
+ */
+ if (is_mask && output->eth.type)
+ if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE,
+ output->eth.type))
+ goto nla_put_failure;
goto unencap;
+ }
- if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type))
+ if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type))
goto nla_put_failure;
if (swkey->eth.type == htons(ETH_P_IP)) {
@@ -1369,12 +1845,12 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
if (!nla)
goto nla_put_failure;
ipv4_key = nla_data(nla);
- ipv4_key->ipv4_src = swkey->ipv4.addr.src;
- ipv4_key->ipv4_dst = swkey->ipv4.addr.dst;
- ipv4_key->ipv4_proto = swkey->ip.proto;
- ipv4_key->ipv4_tos = swkey->ip.tos;
- ipv4_key->ipv4_ttl = swkey->ip.ttl;
- ipv4_key->ipv4_frag = swkey->ip.frag;
+ ipv4_key->ipv4_src = output->ipv4.addr.src;
+ ipv4_key->ipv4_dst = output->ipv4.addr.dst;
+ ipv4_key->ipv4_proto = output->ip.proto;
+ ipv4_key->ipv4_tos = output->ip.tos;
+ ipv4_key->ipv4_ttl = output->ip.ttl;
+ ipv4_key->ipv4_frag = output->ip.frag;
} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
struct ovs_key_ipv6 *ipv6_key;
@@ -1382,15 +1858,15 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
if (!nla)
goto nla_put_failure;
ipv6_key = nla_data(nla);
- memcpy(ipv6_key->ipv6_src, &swkey->ipv6.addr.src,
+ memcpy(ipv6_key->ipv6_src, &output->ipv6.addr.src,
sizeof(ipv6_key->ipv6_src));
- memcpy(ipv6_key->ipv6_dst, &swkey->ipv6.addr.dst,
+ memcpy(ipv6_key->ipv6_dst, &output->ipv6.addr.dst,
sizeof(ipv6_key->ipv6_dst));
- ipv6_key->ipv6_label = swkey->ipv6.label;
- ipv6_key->ipv6_proto = swkey->ip.proto;
- ipv6_key->ipv6_tclass = swkey->ip.tos;
- ipv6_key->ipv6_hlimit = swkey->ip.ttl;
- ipv6_key->ipv6_frag = swkey->ip.frag;
+ ipv6_key->ipv6_label = output->ipv6.label;
+ ipv6_key->ipv6_proto = output->ip.proto;
+ ipv6_key->ipv6_tclass = output->ip.tos;
+ ipv6_key->ipv6_hlimit = output->ip.ttl;
+ ipv6_key->ipv6_frag = output->ip.frag;
} else if (swkey->eth.type == htons(ETH_P_ARP) ||
swkey->eth.type == htons(ETH_P_RARP)) {
struct ovs_key_arp *arp_key;
@@ -1400,11 +1876,11 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
goto nla_put_failure;
arp_key = nla_data(nla);
memset(arp_key, 0, sizeof(struct ovs_key_arp));
- arp_key->arp_sip = swkey->ipv4.addr.src;
- arp_key->arp_tip = swkey->ipv4.addr.dst;
- arp_key->arp_op = htons(swkey->ip.proto);
- memcpy(arp_key->arp_sha, swkey->ipv4.arp.sha, ETH_ALEN);
- memcpy(arp_key->arp_tha, swkey->ipv4.arp.tha, ETH_ALEN);
+ arp_key->arp_sip = output->ipv4.addr.src;
+ arp_key->arp_tip = output->ipv4.addr.dst;
+ arp_key->arp_op = htons(output->ip.proto);
+ memcpy(arp_key->arp_sha, output->ipv4.arp.sha, ETH_ALEN);
+ memcpy(arp_key->arp_tha, output->ipv4.arp.tha, ETH_ALEN);
}
if ((swkey->eth.type == htons(ETH_P_IP) ||
@@ -1419,11 +1895,11 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
goto nla_put_failure;
tcp_key = nla_data(nla);
if (swkey->eth.type == htons(ETH_P_IP)) {
- tcp_key->tcp_src = swkey->ipv4.tp.src;
- tcp_key->tcp_dst = swkey->ipv4.tp.dst;
+ tcp_key->tcp_src = output->ipv4.tp.src;
+ tcp_key->tcp_dst = output->ipv4.tp.dst;
} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
- tcp_key->tcp_src = swkey->ipv6.tp.src;
- tcp_key->tcp_dst = swkey->ipv6.tp.dst;
+ tcp_key->tcp_src = output->ipv6.tp.src;
+ tcp_key->tcp_dst = output->ipv6.tp.dst;
}
} else if (swkey->ip.proto == IPPROTO_UDP) {
struct ovs_key_udp *udp_key;
@@ -1433,11 +1909,25 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
goto nla_put_failure;
udp_key = nla_data(nla);
if (swkey->eth.type == htons(ETH_P_IP)) {
- udp_key->udp_src = swkey->ipv4.tp.src;
- udp_key->udp_dst = swkey->ipv4.tp.dst;
+ udp_key->udp_src = output->ipv4.tp.src;
+ udp_key->udp_dst = output->ipv4.tp.dst;
+ } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+ udp_key->udp_src = output->ipv6.tp.src;
+ udp_key->udp_dst = output->ipv6.tp.dst;
+ }
+ } else if (swkey->ip.proto == IPPROTO_SCTP) {
+ struct ovs_key_sctp *sctp_key;
+
+ nla = nla_reserve(skb, OVS_KEY_ATTR_SCTP, sizeof(*sctp_key));
+ if (!nla)
+ goto nla_put_failure;
+ sctp_key = nla_data(nla);
+ if (swkey->eth.type == htons(ETH_P_IP)) {
+			sctp_key->sctp_src = output->ipv4.tp.src;
+			sctp_key->sctp_dst = output->ipv4.tp.dst;
} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
- udp_key->udp_src = swkey->ipv6.tp.src;
- udp_key->udp_dst = swkey->ipv6.tp.dst;
+			sctp_key->sctp_src = output->ipv6.tp.src;
+			sctp_key->sctp_dst = output->ipv6.tp.dst;
}
} else if (swkey->eth.type == htons(ETH_P_IP) &&
swkey->ip.proto == IPPROTO_ICMP) {
@@ -1447,8 +1937,8 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
if (!nla)
goto nla_put_failure;
icmp_key = nla_data(nla);
- icmp_key->icmp_type = ntohs(swkey->ipv4.tp.src);
- icmp_key->icmp_code = ntohs(swkey->ipv4.tp.dst);
+ icmp_key->icmp_type = ntohs(output->ipv4.tp.src);
+ icmp_key->icmp_code = ntohs(output->ipv4.tp.dst);
} else if (swkey->eth.type == htons(ETH_P_IPV6) &&
swkey->ip.proto == IPPROTO_ICMPV6) {
struct ovs_key_icmpv6 *icmpv6_key;
@@ -1458,8 +1948,8 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
if (!nla)
goto nla_put_failure;
icmpv6_key = nla_data(nla);
- icmpv6_key->icmpv6_type = ntohs(swkey->ipv6.tp.src);
- icmpv6_key->icmpv6_code = ntohs(swkey->ipv6.tp.dst);
+ icmpv6_key->icmpv6_type = ntohs(output->ipv6.tp.src);
+ icmpv6_key->icmpv6_code = ntohs(output->ipv6.tp.dst);
if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
@@ -1469,10 +1959,10 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
if (!nla)
goto nla_put_failure;
nd_key = nla_data(nla);
- memcpy(nd_key->nd_target, &swkey->ipv6.nd.target,
+ memcpy(nd_key->nd_target, &output->ipv6.nd.target,
sizeof(nd_key->nd_target));
- memcpy(nd_key->nd_sll, swkey->ipv6.nd.sll, ETH_ALEN);
- memcpy(nd_key->nd_tll, swkey->ipv6.nd.tll, ETH_ALEN);
+ memcpy(nd_key->nd_sll, output->ipv6.nd.sll, ETH_ALEN);
+ memcpy(nd_key->nd_tll, output->ipv6.nd.tll, ETH_ALEN);
}
}
}
@@ -1491,6 +1981,9 @@ nla_put_failure:
* Returns zero if successful or a negative error code. */
int ovs_flow_init(void)
{
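+	/* The long-wise mask, hash and compare helpers rely on the key
+	 * being long-aligned and a whole number of longs.
+	 */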
+ BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
+ BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
+
flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
0, NULL);
if (flow_cache == NULL)
@@ -1504,3 +1997,84 @@ void ovs_flow_exit(void)
{
kmem_cache_destroy(flow_cache);
}
+
+struct sw_flow_mask *ovs_sw_flow_mask_alloc(void)
+{
+ struct sw_flow_mask *mask;
+
+ mask = kmalloc(sizeof(*mask), GFP_KERNEL);
+ if (mask)
+ mask->ref_count = 0;
+
+ return mask;
+}
+
+void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *mask)
+{
+ mask->ref_count++;
+}
+
+void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
+{
+ if (!mask)
+ return;
+
+ BUG_ON(!mask->ref_count);
+ mask->ref_count--;
+
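+	/* On the last reference, unlink the mask and free it, after a
+	 * grace period if the caller asked for deferred freeing.
+	 */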
+ if (!mask->ref_count) {
+ list_del_rcu(&mask->list);
+ if (deferred)
+ kfree_rcu(mask, rcu);
+ else
+ kfree(mask);
+ }
+}
+
+static bool ovs_sw_flow_mask_equal(const struct sw_flow_mask *a,
+ const struct sw_flow_mask *b)
+{
+ u8 *a_ = (u8 *)&a->key + a->range.start;
+ u8 *b_ = (u8 *)&b->key + b->range.start;
+
+ return (a->range.end == b->range.end)
+ && (a->range.start == b->range.start)
+ && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
+}
+
+struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl,
+ const struct sw_flow_mask *mask)
+{
+ struct list_head *ml;
+
+ list_for_each(ml, tbl->mask_list) {
+ struct sw_flow_mask *m;
+ m = container_of(ml, struct sw_flow_mask, list);
+ if (ovs_sw_flow_mask_equal(mask, m))
+ return m;
+ }
+
+ return NULL;
+}
+
+/**
+ * ovs_sw_flow_mask_insert - add a new mask into the mask list.
+ * The caller needs to make sure that 'mask' is not the same
+ * as any masks that are already on the list.
+ */
+void ovs_sw_flow_mask_insert(struct flow_table *tbl, struct sw_flow_mask *mask)
+{
+ list_add_rcu(&mask->list, tbl->mask_list);
+}
+
+/**
+ * ovs_sw_flow_mask_set - set the bytes within 'range' of the mask's key
+ * to 'val' and record 'range' in the mask.
+ */
+static void ovs_sw_flow_mask_set(struct sw_flow_mask *mask,
+ struct sw_flow_key_range *range, u8 val)
+{
+ u8 *m = (u8 *)&mask->key + range->start;
+
+ mask->range = *range;
+ memset(m, val, range_n_bytes(range));
+}
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index 66ef7220293e..212fbf7510c4 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2011 Nicira, Inc.
+ * Copyright (c) 2007-2013 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -33,6 +33,8 @@
#include <net/inet_ecn.h>
struct sk_buff;
+struct sw_flow_mask;
+struct flow_table;
struct sw_flow_actions {
struct rcu_head rcu;
@@ -97,8 +99,8 @@ struct sw_flow_key {
} addr;
union {
struct {
- __be16 src; /* TCP/UDP source port. */
- __be16 dst; /* TCP/UDP destination port. */
+ __be16 src; /* TCP/UDP/SCTP source port. */
+ __be16 dst; /* TCP/UDP/SCTP destination port. */
} tp;
struct {
u8 sha[ETH_ALEN]; /* ARP source hardware address. */
@@ -113,8 +115,8 @@ struct sw_flow_key {
} addr;
__be32 label; /* IPv6 flow label. */
struct {
- __be16 src; /* TCP/UDP source port. */
- __be16 dst; /* TCP/UDP destination port. */
+ __be16 src; /* TCP/UDP/SCTP source port. */
+ __be16 dst; /* TCP/UDP/SCTP destination port. */
} tp;
struct {
struct in6_addr target; /* ND target address. */
@@ -123,7 +125,7 @@ struct sw_flow_key {
} nd;
} ipv6;
};
-};
+} __aligned(BITS_PER_LONG/8); /* Ensure that we can do comparisons as longs. */
struct sw_flow {
struct rcu_head rcu;
@@ -131,6 +133,8 @@ struct sw_flow {
u32 hash;
struct sw_flow_key key;
+ struct sw_flow_key unmasked_key;
+ struct sw_flow_mask *mask;
struct sw_flow_actions __rcu *sf_acts;
spinlock_t lock; /* Lock for values below. */
@@ -140,6 +144,20 @@ struct sw_flow {
u8 tcp_flags; /* Union of seen TCP flags. */
};
+struct sw_flow_key_range {
+ size_t start;
+ size_t end;
+};
+
+struct sw_flow_match {
+ struct sw_flow_key *key;
+ struct sw_flow_key_range range;
+ struct sw_flow_mask *mask;
+};
+
+void ovs_match_init(struct sw_flow_match *match,
+ struct sw_flow_key *key, struct sw_flow_mask *mask);
+
struct arp_eth_header {
__be16 ar_hrd; /* format of hardware address */
__be16 ar_pro; /* format of protocol address */
@@ -159,21 +177,21 @@ void ovs_flow_exit(void);
struct sw_flow *ovs_flow_alloc(void);
void ovs_flow_deferred_free(struct sw_flow *);
-void ovs_flow_free(struct sw_flow *flow);
+void ovs_flow_free(struct sw_flow *, bool deferred);
struct sw_flow_actions *ovs_flow_actions_alloc(int actions_len);
void ovs_flow_deferred_free_acts(struct sw_flow_actions *);
-int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *,
- int *key_lenp);
+int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *);
void ovs_flow_used(struct sw_flow *, struct sk_buff *);
u64 ovs_flow_used_time(unsigned long flow_jiffies);
-
-int ovs_flow_to_nlattrs(const struct sw_flow_key *, struct sk_buff *);
-int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
+int ovs_flow_to_nlattrs(const struct sw_flow_key *,
+ const struct sw_flow_key *, struct sk_buff *);
+int ovs_match_from_nlattrs(struct sw_flow_match *match,
+ const struct nlattr *,
const struct nlattr *);
-int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow, int key_len,
- const struct nlattr *attr);
+int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow,
+ const struct nlattr *attr);
#define MAX_ACTIONS_BUFSIZE (32 * 1024)
#define TBL_MIN_BUCKETS 1024
@@ -182,6 +200,7 @@ struct flow_table {
struct flex_array *buckets;
unsigned int count, n_buckets;
struct rcu_head rcu;
+ struct list_head *mask_list;
int node_ver;
u32 hash_seed;
bool keep_flows;
@@ -197,22 +216,44 @@ static inline int ovs_flow_tbl_need_to_expand(struct flow_table *table)
return (table->count > table->n_buckets);
}
-struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
- struct sw_flow_key *key, int len);
-void ovs_flow_tbl_destroy(struct flow_table *table);
-void ovs_flow_tbl_deferred_destroy(struct flow_table *table);
+struct sw_flow *ovs_flow_lookup(struct flow_table *,
+ const struct sw_flow_key *);
+struct sw_flow *ovs_flow_lookup_unmasked_key(struct flow_table *table,
+ struct sw_flow_match *match);
+
+void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred);
struct flow_table *ovs_flow_tbl_alloc(int new_size);
struct flow_table *ovs_flow_tbl_expand(struct flow_table *table);
struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table);
-void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
- struct sw_flow_key *key, int key_len);
-void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
-struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *idx);
+void ovs_flow_insert(struct flow_table *table, struct sw_flow *flow);
+void ovs_flow_remove(struct flow_table *table, struct sw_flow *flow);
+
+struct sw_flow *ovs_flow_dump_next(struct flow_table *table, u32 *bucket, u32 *idx);
extern const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1];
int ovs_ipv4_tun_from_nlattr(const struct nlattr *attr,
- struct ovs_key_ipv4_tunnel *tun_key);
+ struct sw_flow_match *match, bool is_mask);
int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb,
- const struct ovs_key_ipv4_tunnel *tun_key);
+ const struct ovs_key_ipv4_tunnel *tun_key,
+ const struct ovs_key_ipv4_tunnel *output);
+
+bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
+ const struct sw_flow_key *key, int key_end);
+
+struct sw_flow_mask {
+ int ref_count;
+ struct rcu_head rcu;
+ struct list_head list;
+ struct sw_flow_key_range range;
+ struct sw_flow_key key;
+};
+struct sw_flow_mask *ovs_sw_flow_mask_alloc(void);
+void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *);
+void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *, bool deferred);
+void ovs_sw_flow_mask_insert(struct flow_table *, struct sw_flow_mask *);
+struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *,
+ const struct sw_flow_mask *);
+void ovs_flow_key_mask(struct sw_flow_key *dst, const struct sw_flow_key *src,
+ const struct sw_flow_mask *mask);
#endif /* flow.h */
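For reference, the ovs_flow_key_mask() declared above is, semantically, a byte-wise AND over the mask's range, with everything outside the range zeroed. A sketch consistent with these declarations (the in-tree version may well operate on longs instead, given the __aligned(BITS_PER_LONG/8) annotation on struct sw_flow_key):

	void ovs_flow_key_mask(struct sw_flow_key *dst, const struct sw_flow_key *src,
			       const struct sw_flow_mask *mask)
	{
		const u8 *m = (const u8 *)&mask->key + mask->range.start;
		const u8 *s = (const u8 *)src + mask->range.start;
		u8 *d = (u8 *)dst + mask->range.start;
		size_t i;

		memset(dst, 0, sizeof(*dst));
		for (i = 0; i < range_n_bytes(&mask->range); i++)
			d[i] = s[i] & m[i];
	}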
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index 493e9775dcda..c99dea543d64 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -16,7 +16,6 @@
* 02110-1301, USA
*/
-#ifdef CONFIG_OPENVSWITCH_GRE
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/if.h>
@@ -177,10 +176,10 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
skb->local_df = 1;
- return iptunnel_xmit(net, rt, skb, fl.saddr,
+ return iptunnel_xmit(rt, skb, fl.saddr,
OVS_CB(skb)->tun_key->ipv4_dst, IPPROTO_GRE,
OVS_CB(skb)->tun_key->ipv4_tos,
- OVS_CB(skb)->tun_key->ipv4_ttl, df);
+ OVS_CB(skb)->tun_key->ipv4_ttl, df, false);
err_free_rt:
ip_rt_put(rt);
error:
@@ -271,5 +270,3 @@ const struct vport_ops ovs_gre_vport_ops = {
.get_name = gre_get_name,
.send = gre_tnl_send,
};
-
-#endif /* OPENVSWITCH_GRE */
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index 5982f3f62835..09d93c13cfd6 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -25,6 +25,7 @@
#include <linux/llc.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
+#include <linux/openvswitch.h>
#include <net/llc.h>
@@ -74,6 +75,15 @@ static rx_handler_result_t netdev_frame_hook(struct sk_buff **pskb)
return RX_HANDLER_CONSUMED;
}
+static struct net_device *get_dpdev(struct datapath *dp)
+{
+ struct vport *local;
+
+ local = ovs_vport_ovsl(dp, OVSP_LOCAL);
+ BUG_ON(!local);
+ return netdev_vport_priv(local)->dev;
+}
+
static struct vport *netdev_create(const struct vport_parms *parms)
{
struct vport *vport;
@@ -103,10 +113,15 @@ static struct vport *netdev_create(const struct vport_parms *parms)
}
rtnl_lock();
+ err = netdev_master_upper_dev_link(netdev_vport->dev,
+ get_dpdev(vport->dp));
+ if (err)
+ goto error_unlock;
+
err = netdev_rx_handler_register(netdev_vport->dev, netdev_frame_hook,
vport);
if (err)
- goto error_unlock;
+ goto error_master_upper_dev_unlink;
dev_set_promiscuity(netdev_vport->dev, 1);
netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH;
@@ -114,6 +129,8 @@ static struct vport *netdev_create(const struct vport_parms *parms)
return vport;
+error_master_upper_dev_unlink:
+ netdev_upper_dev_unlink(netdev_vport->dev, get_dpdev(vport->dp));
error_unlock:
rtnl_unlock();
error_put:
@@ -140,6 +157,7 @@ static void netdev_destroy(struct vport *vport)
rtnl_lock();
netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
netdev_rx_handler_unregister(netdev_vport->dev);
+ netdev_upper_dev_unlink(netdev_vport->dev, get_dpdev(vport->dp));
dev_set_promiscuity(netdev_vport->dev, -1);
rtnl_unlock();
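Note on the new upper-device linking: netdev_master_upper_dev_link() registers the port device as a slave of the datapath's local device, making the stacking visible to the networking core (and to tools such as `ip link`, which can then report the master); netdev_destroy() undoes it with netdev_upper_dev_unlink() so create and destroy stay symmetric.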
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
new file mode 100644
index 000000000000..a481c03e2861
--- /dev/null
+++ b/net/openvswitch/vport-vxlan.c
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2013 Nicira, Inc.
+ * Copyright (c) 2013 Cisco Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/net.h>
+#include <linux/rculist.h>
+#include <linux/udp.h>
+
+#include <net/icmp.h>
+#include <net/ip.h>
+#include <net/udp.h>
+#include <net/ip_tunnels.h>
+#include <net/udp.h>
+#include <net/rtnetlink.h>
+#include <net/route.h>
+#include <net/dsfield.h>
+#include <net/inet_ecn.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <net/vxlan.h>
+
+#include "datapath.h"
+#include "vport.h"
+
+/**
+ * struct vxlan_port - Keeps track of open UDP ports
+ * @vs: vxlan_sock created for the port.
+ * @name: vport name.
+ */
+struct vxlan_port {
+ struct vxlan_sock *vs;
+ char name[IFNAMSIZ];
+};
+
+static inline struct vxlan_port *vxlan_vport(const struct vport *vport)
+{
+ return vport_priv(vport);
+}
+
+/* Called with rcu_read_lock and BH disabled. */
+static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, __be32 vx_vni)
+{
+ struct ovs_key_ipv4_tunnel tun_key;
+ struct vport *vport = vs->data;
+ struct iphdr *iph;
+ __be64 key;
+
+ /* Save outer tunnel values */
+ iph = ip_hdr(skb);
+ key = cpu_to_be64(ntohl(vx_vni) >> 8);
+ ovs_flow_tun_key_init(&tun_key, iph, key, TUNNEL_KEY);
+
+ ovs_vport_receive(vport, skb, &tun_key);
+}
+
+static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)
+{
+ struct vxlan_port *vxlan_port = vxlan_vport(vport);
+ __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;
+
+ if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(dst_port)))
+ return -EMSGSIZE;
+ return 0;
+}
+
+static void vxlan_tnl_destroy(struct vport *vport)
+{
+ struct vxlan_port *vxlan_port = vxlan_vport(vport);
+
+ vxlan_sock_release(vxlan_port->vs);
+
+ ovs_vport_deferred_free(vport);
+}
+
+static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
+{
+ struct net *net = ovs_dp_get_net(parms->dp);
+ struct nlattr *options = parms->options;
+ struct vxlan_port *vxlan_port;
+ struct vxlan_sock *vs;
+ struct vport *vport;
+ struct nlattr *a;
+ u16 dst_port;
+ int err;
+
+ if (!options) {
+ err = -EINVAL;
+ goto error;
+ }
+ a = nla_find_nested(options, OVS_TUNNEL_ATTR_DST_PORT);
+ if (a && nla_len(a) == sizeof(u16)) {
+ dst_port = nla_get_u16(a);
+ } else {
+ /* Require destination port from userspace. */
+ err = -EINVAL;
+ goto error;
+ }
+
+ vport = ovs_vport_alloc(sizeof(struct vxlan_port),
+ &ovs_vxlan_vport_ops, parms);
+ if (IS_ERR(vport))
+ return vport;
+
+ vxlan_port = vxlan_vport(vport);
+ strncpy(vxlan_port->name, parms->name, IFNAMSIZ);
+
+ vs = vxlan_sock_add(net, htons(dst_port), vxlan_rcv, vport, true, false);
+ if (IS_ERR(vs)) {
+ ovs_vport_free(vport);
+ return (void *)vs;
+ }
+ vxlan_port->vs = vs;
+
+ return vport;
+
+error:
+ return ERR_PTR(err);
+}
+
+static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
+{
+ struct net *net = ovs_dp_get_net(vport->dp);
+ struct vxlan_port *vxlan_port = vxlan_vport(vport);
+ __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;
+ struct rtable *rt;
+ struct flowi4 fl;
+ __be16 src_port;
+ int port_min;
+ int port_max;
+ __be16 df;
+ int err;
+
+ if (unlikely(!OVS_CB(skb)->tun_key)) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ /* Route lookup */
+ memset(&fl, 0, sizeof(fl));
+ fl.daddr = OVS_CB(skb)->tun_key->ipv4_dst;
+ fl.saddr = OVS_CB(skb)->tun_key->ipv4_src;
+ fl.flowi4_tos = RT_TOS(OVS_CB(skb)->tun_key->ipv4_tos);
+ fl.flowi4_mark = skb->mark;
+ fl.flowi4_proto = IPPROTO_UDP;
+
+ rt = ip_route_output_key(net, &fl);
+ if (IS_ERR(rt)) {
+ err = PTR_ERR(rt);
+ goto error;
+ }
+
+ df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
+ htons(IP_DF) : 0;
+
+ skb->local_df = 1;
+
+ inet_get_local_port_range(&port_min, &port_max);
+ src_port = vxlan_src_port(port_min, port_max, skb);
+
+ err = vxlan_xmit_skb(vxlan_port->vs, rt, skb,
+ fl.saddr, OVS_CB(skb)->tun_key->ipv4_dst,
+ OVS_CB(skb)->tun_key->ipv4_tos,
+ OVS_CB(skb)->tun_key->ipv4_ttl, df,
+ src_port, dst_port,
+ htonl(be64_to_cpu(OVS_CB(skb)->tun_key->tun_id) << 8));
+ if (err < 0)
+ ip_rt_put(rt);
+error:
+ return err;
+}
+
+static const char *vxlan_get_name(const struct vport *vport)
+{
+ struct vxlan_port *vxlan_port = vxlan_vport(vport);
+ return vxlan_port->name;
+}
+
+const struct vport_ops ovs_vxlan_vport_ops = {
+ .type = OVS_VPORT_TYPE_VXLAN,
+ .create = vxlan_tnl_create,
+ .destroy = vxlan_tnl_destroy,
+ .get_name = vxlan_get_name,
+ .get_options = vxlan_get_options,
+ .send = vxlan_tnl_send,
+};
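The VNI handling above packs the 24-bit VXLAN Network Identifier, which travels in the upper three bytes of the 32-bit on-wire field, into OVS's 64-bit tunnel ID. A standalone round-trip sketch of the two conversions used in vxlan_rcv() and vxlan_tnl_send():

	static __be64 vni_to_tun_id(__be32 vx_vni)
	{
		/* on-wire field is VNI << 8; store the VNI in the low
		 * 24 bits of the 64-bit tunnel id
		 */
		return cpu_to_be64(ntohl(vx_vni) >> 8);
	}

	static __be32 tun_id_to_vni(__be64 tun_id)
	{
		/* inverse: truncate to 32 bits, shift the VNI back up */
		return htonl(be64_to_cpu(tun_id) << 8);
	}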
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index d4c7fa04ce08..6f65dbe13812 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -42,6 +42,9 @@ static const struct vport_ops *vport_ops_list[] = {
#ifdef CONFIG_OPENVSWITCH_GRE
&ovs_gre_vport_ops,
#endif
+#ifdef CONFIG_OPENVSWITCH_VXLAN
+ &ovs_vxlan_vport_ops,
+#endif
};
/* Protected by RCU read lock for reading, ovs_mutex for writing. */
@@ -200,7 +203,7 @@ out:
* ovs_vport_set_options - modify existing vport device (for kernel callers)
*
* @vport: vport to modify.
- * @port: New configuration.
+ * @options: New configuration.
*
* Modifies an existing device with the specified configuration (which is
* dependent on device type). ovs_mutex must be held.
@@ -325,6 +328,7 @@ int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
*
* @vport: vport that received the packet
* @skb: skb that was received
+ * @tun_key: tunnel (if any) that carried the packet
*
* Must be called with rcu_read_lock. The packet cannot be shared and
* skb->data should point to the Ethernet header.
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index 376045c42f8b..1a9fbcec6e1b 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -199,6 +199,7 @@ void ovs_vport_record_error(struct vport *, enum vport_err_type err_type);
extern const struct vport_ops ovs_netdev_vport_ops;
extern const struct vport_ops ovs_internal_vport_ops;
extern const struct vport_ops ovs_gre_vport_ops;
+extern const struct vport_ops ovs_vxlan_vport_ops;
static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
const void *start, unsigned int len)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 75c8bbf598c8..2e8286b47c28 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -88,7 +88,7 @@
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
-
+#include <linux/reciprocal_div.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
@@ -1135,7 +1135,7 @@ static unsigned int fanout_demux_hash(struct packet_fanout *f,
struct sk_buff *skb,
unsigned int num)
{
- return (((u64)skb->rxhash) * num) >> 32;
+ return reciprocal_divide(skb->rxhash, num);
}
static unsigned int fanout_demux_lb(struct packet_fanout *f,
@@ -1158,6 +1158,13 @@ static unsigned int fanout_demux_cpu(struct packet_fanout *f,
return smp_processor_id() % num;
}
+static unsigned int fanout_demux_rnd(struct packet_fanout *f,
+ struct sk_buff *skb,
+ unsigned int num)
+{
+ return reciprocal_divide(prandom_u32(), num);
+}
+
static unsigned int fanout_demux_rollover(struct packet_fanout *f,
struct sk_buff *skb,
unsigned int idx, unsigned int skip,
@@ -1215,6 +1222,9 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
case PACKET_FANOUT_CPU:
idx = fanout_demux_cpu(f, skb, num);
break;
+ case PACKET_FANOUT_RND:
+ idx = fanout_demux_rnd(f, skb, num);
+ break;
case PACKET_FANOUT_ROLLOVER:
idx = fanout_demux_rollover(f, skb, 0, (unsigned int) -1, num);
break;
@@ -1284,6 +1294,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
case PACKET_FANOUT_HASH:
case PACKET_FANOUT_LB:
case PACKET_FANOUT_CPU:
+ case PACKET_FANOUT_RND:
break;
default:
return -EINVAL;
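fanout_demux_rnd() picks a socket uniformly at random: reciprocal_divide(x, num) computes ((u64)x * num) >> 32, mapping a uniform 32-bit value into [0, num) without an integer division, which is also exactly what the old hash demux expression did by hand. A minimal userspace sketch of joining a fanout group in the new random mode (the group id 42 is arbitrary):

	#include <arpa/inet.h>
	#include <linux/if_ether.h>
	#include <linux/if_packet.h>
	#include <sys/socket.h>

	static int join_rnd_fanout(void)
	{
		int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
		/* low 16 bits: fanout group id; high 16 bits: demux mode */
		int arg = 42 | (PACKET_FANOUT_RND << 16);

		if (fd < 0)
			return -1;
		if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg)))
			return -1;
		return fd;
	}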
@@ -2181,7 +2192,7 @@ static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
linear = len;
skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
- err);
+ err, 0);
if (!skb)
return NULL;
@@ -2638,51 +2649,6 @@ out:
return err;
}
-static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
-{
- struct sock_exterr_skb *serr;
- struct sk_buff *skb, *skb2;
- int copied, err;
-
- err = -EAGAIN;
- skb = skb_dequeue(&sk->sk_error_queue);
- if (skb == NULL)
- goto out;
-
- copied = skb->len;
- if (copied > len) {
- msg->msg_flags |= MSG_TRUNC;
- copied = len;
- }
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
- if (err)
- goto out_free_skb;
-
- sock_recv_timestamp(msg, sk, skb);
-
- serr = SKB_EXT_ERR(skb);
- put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
- sizeof(serr->ee), &serr->ee);
-
- msg->msg_flags |= MSG_ERRQUEUE;
- err = copied;
-
- /* Reset and regenerate socket error */
- spin_lock_bh(&sk->sk_error_queue.lock);
- sk->sk_err = 0;
- if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
- sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
- spin_unlock_bh(&sk->sk_error_queue.lock);
- sk->sk_error_report(sk);
- } else
- spin_unlock_bh(&sk->sk_error_queue.lock);
-
-out_free_skb:
- kfree_skb(skb);
-out:
- return err;
-}
-
/*
* Pull a packet from our receive queue and hand it to the user.
* If necessary we block.
@@ -2708,7 +2674,8 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
#endif
if (flags & MSG_ERRQUEUE) {
- err = packet_recv_error(sk, msg, len);
+ err = sock_recv_errqueue(sk, msg, len,
+ SOL_PACKET, PACKET_TX_TIMESTAMP);
goto out;
}
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 1afd1381cdc7..77e38f733496 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -793,7 +793,7 @@ static int pn_res_seq_show(struct seq_file *seq, void *v)
struct sock **psk = v;
struct sock *sk = *psk;
- seq_printf(seq, "%02X %5d %lu%n",
+ seq_printf(seq, "%02X %5u %lu%n",
(int) (psk - pnres.sk),
from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
sock_i_ino(sk), &len);
diff --git a/net/rfkill/rfkill-regulator.c b/net/rfkill/rfkill-regulator.c
index d11ac79246e4..cf5b145902e5 100644
--- a/net/rfkill/rfkill-regulator.c
+++ b/net/rfkill/rfkill-regulator.c
@@ -30,6 +30,7 @@ struct rfkill_regulator_data {
static int rfkill_regulator_set_block(void *data, bool blocked)
{
struct rfkill_regulator_data *rfkill_data = data;
+ int ret = 0;
pr_debug("%s: blocked: %d\n", __func__, blocked);
@@ -40,15 +41,16 @@ static int rfkill_regulator_set_block(void *data, bool blocked)
}
} else {
if (!rfkill_data->reg_enabled) {
- regulator_enable(rfkill_data->vcc);
- rfkill_data->reg_enabled = true;
+ ret = regulator_enable(rfkill_data->vcc);
+ if (!ret)
+ rfkill_data->reg_enabled = true;
}
}
pr_debug("%s: regulator_is_enabled after set_block: %d\n", __func__,
regulator_is_enabled(rfkill_data->vcc));
- return 0;
+ return ret;
}
static struct rfkill_ops rfkill_regulator_ops = {
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 235e01acac51..c03a32a0418e 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -272,6 +272,20 @@ config NET_SCH_FQ_CODEL
If unsure, say N.
+config NET_SCH_FQ
+ tristate "Fair Queue"
+ help
+ Say Y here if you want to use the FQ packet scheduling algorithm.
+
+ FQ does flow separation, and is able to respect pacing requirements
+ set by the TCP stack into sk->sk_pacing_rate (for locally generated
+ traffic).
+
+ To compile this driver as a module, choose M here: the module
+ will be called sch_fq.
+
+ If unsure, say N.
+
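Once built, the new qdisc attaches with the usual tc syntax, e.g. `tc qdisc add dev eth0 root fq` (device name illustrative).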
config NET_SCH_INGRESS
tristate "Ingress Qdisc"
depends on NET_CLS_ACT
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 978cbf004e80..e5f9abe9a5db 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o
obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o
obj-$(CONFIG_NET_SCH_CODEL) += sch_codel.o
obj-$(CONFIG_NET_SCH_FQ_CODEL) += sch_fq_codel.o
+obj-$(CONFIG_NET_SCH_FQ) += sch_fq.o
obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 51b968d3febb..2adda7fa2d39 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -200,6 +200,58 @@ int unregister_qdisc(struct Qdisc_ops *qops)
}
EXPORT_SYMBOL(unregister_qdisc);
+/* Get default qdisc if not otherwise specified */
+void qdisc_get_default(char *name, size_t len)
+{
+ read_lock(&qdisc_mod_lock);
+ strlcpy(name, default_qdisc_ops->id, len);
+ read_unlock(&qdisc_mod_lock);
+}
+
+static struct Qdisc_ops *qdisc_lookup_default(const char *name)
+{
+ struct Qdisc_ops *q = NULL;
+
+ for (q = qdisc_base; q; q = q->next) {
+ if (!strcmp(name, q->id)) {
+ if (!try_module_get(q->owner))
+ q = NULL;
+ break;
+ }
+ }
+
+ return q;
+}
+
+/* Set new default qdisc to use */
+int qdisc_set_default(const char *name)
+{
+ const struct Qdisc_ops *ops;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ write_lock(&qdisc_mod_lock);
+ ops = qdisc_lookup_default(name);
+ if (!ops) {
+ /* Not found, drop lock and try to load module */
+ write_unlock(&qdisc_mod_lock);
+ request_module("sch_%s", name);
+ write_lock(&qdisc_mod_lock);
+
+ ops = qdisc_lookup_default(name);
+ }
+
+ if (ops) {
+ /* Set new default */
+ module_put(default_qdisc_ops->owner);
+ default_qdisc_ops = ops;
+ }
+ write_unlock(&qdisc_mod_lock);
+
+ return ops ? 0 : -ENOENT;
+}
+
/* We know handle. Find qdisc among all qdisc's attached to device
(root qdisc, all its children, children of children etc.)
*/
@@ -1854,6 +1906,7 @@ static int __init pktsched_init(void)
return err;
}
+ register_qdisc(&pfifo_fast_ops);
register_qdisc(&pfifo_qdisc_ops);
register_qdisc(&bfifo_qdisc_ops);
register_qdisc(&pfifo_head_drop_qdisc_ops);
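With pfifo_fast now registered like any other qdisc, default_qdisc_ops can be repointed at an alternative. qdisc_set_default() drops the write lock around request_module() so a modular scheduler (such as the new sch_fq) can be autoloaded, then retries the lookup under the lock. Elsewhere in this series the setter appears to be exposed through the net.core.default_qdisc sysctl.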
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
new file mode 100644
index 000000000000..32ad015ee8ce
--- /dev/null
+++ b/net/sched/sch_fq.c
@@ -0,0 +1,793 @@
+/*
+ * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
+ *
+ * Copyright (C) 2013 Eric Dumazet <edumazet@google.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Meant to be used mostly for locally generated traffic :
+ * Fast classification depends on skb->sk being set before reaching us.
+ * If not (router workload), we use rxhash as fallback, with a 32-bit wide hash.
+ * All packets belonging to a socket are considered as a 'flow'.
+ *
+ * Flows are dynamically allocated and stored in a hash table of RB trees.
+ * They are also part of one Round Robin list ('new' or 'old' flows).
+ *
+ * Burst avoidance (aka pacing) capability :
+ *
+ * Transport (e.g. TCP) can set a rate in sk->sk_pacing_rate, enqueue a
+ * bunch of packets, and this packet scheduler adds delay between
+ * packets to respect the rate limitation.
+ *
+ * enqueue() :
+ * - lookup one RB tree (out of 1024 or more) to find the flow.
+ *   If the flow does not exist, create it and add it to the tree.
+ *   Add the skb to the per-flow list of skbs (a FIFO).
+ * - Use a special FIFO for high priority packets.
+ *
+ * dequeue() : serves flows in Round Robin order.
+ * Note : when a flow becomes empty, we do not immediately remove it from
+ * the RB trees, for performance reasons (it's expected to send additional
+ * packets, or the SLAB cache will reuse the socket for another flow).
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/rbtree.h>
+#include <linux/hash.h>
+#include <linux/prefetch.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+#include <net/sock.h>
+#include <net/tcp_states.h>
+
+/*
+ * Per flow structure, dynamically allocated
+ */
+struct fq_flow {
+ struct sk_buff *head; /* list of skbs for this flow : first skb */
+ union {
+ struct sk_buff *tail; /* last skb in the list */
+ unsigned long age; /* jiffies when flow was emptied, for gc */
+ };
+ struct rb_node fq_node; /* anchor in fq_root[] trees */
+ struct sock *sk;
+ int qlen; /* number of packets in flow queue */
+ int credit;
+ u32 socket_hash; /* sk_hash */
+ struct fq_flow *next; /* next pointer in RR lists, or &detached */
+
+ struct rb_node rate_node; /* anchor in q->delayed tree */
+ u64 time_next_packet;
+};
+
+struct fq_flow_head {
+ struct fq_flow *first;
+ struct fq_flow *last;
+};
+
+struct fq_sched_data {
+ struct fq_flow_head new_flows;
+
+ struct fq_flow_head old_flows;
+
+ struct rb_root delayed; /* for rate limited flows */
+ u64 time_next_delayed_flow;
+
+ struct fq_flow internal; /* for non classified or high prio packets */
+ u32 quantum;
+ u32 initial_quantum;
+ u32 flow_default_rate;/* rate per flow : bytes per second */
+ u32 flow_max_rate; /* optional max rate per flow */
+ u32 flow_plimit; /* max packets per flow */
+ struct rb_root *fq_root;
+ u8 rate_enable;
+ u8 fq_trees_log;
+
+ u32 flows;
+ u32 inactive_flows;
+ u32 throttled_flows;
+
+ u64 stat_gc_flows;
+ u64 stat_internal_packets;
+ u64 stat_tcp_retrans;
+ u64 stat_throttled;
+ u64 stat_flows_plimit;
+ u64 stat_pkts_too_long;
+ u64 stat_allocation_errors;
+ struct qdisc_watchdog watchdog;
+};
+
+/* special values marking a detached flow (not on old/new lists) and a throttled one */
+static struct fq_flow detached, throttled;
+
+static void fq_flow_set_detached(struct fq_flow *f)
+{
+ f->next = &detached;
+}
+
+static bool fq_flow_is_detached(const struct fq_flow *f)
+{
+ return f->next == &detached;
+}
+
+static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
+{
+ struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
+
+ while (*p) {
+ struct fq_flow *aux;
+
+ parent = *p;
+ aux = container_of(parent, struct fq_flow, rate_node);
+ if (f->time_next_packet >= aux->time_next_packet)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+ rb_link_node(&f->rate_node, parent, p);
+ rb_insert_color(&f->rate_node, &q->delayed);
+ q->throttled_flows++;
+ q->stat_throttled++;
+
+ f->next = &throttled;
+ if (q->time_next_delayed_flow > f->time_next_packet)
+ q->time_next_delayed_flow = f->time_next_packet;
+}
+
+
+static struct kmem_cache *fq_flow_cachep __read_mostly;
+
+static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
+{
+ if (head->first)
+ head->last->next = flow;
+ else
+ head->first = flow;
+ head->last = flow;
+ flow->next = NULL;
+}
+
+/* limit number of collected flows per round */
+#define FQ_GC_MAX 8
+#define FQ_GC_AGE (3*HZ)
+
+static bool fq_gc_candidate(const struct fq_flow *f)
+{
+ return fq_flow_is_detached(f) &&
+ time_after(jiffies, f->age + FQ_GC_AGE);
+}
+
+static void fq_gc(struct fq_sched_data *q,
+ struct rb_root *root,
+ struct sock *sk)
+{
+ struct fq_flow *f, *tofree[FQ_GC_MAX];
+ struct rb_node **p, *parent;
+ int fcnt = 0;
+
+ p = &root->rb_node;
+ parent = NULL;
+ while (*p) {
+ parent = *p;
+
+ f = container_of(parent, struct fq_flow, fq_node);
+ if (f->sk == sk)
+ break;
+
+ if (fq_gc_candidate(f)) {
+ tofree[fcnt++] = f;
+ if (fcnt == FQ_GC_MAX)
+ break;
+ }
+
+ if (f->sk > sk)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+
+ q->flows -= fcnt;
+ q->inactive_flows -= fcnt;
+ q->stat_gc_flows += fcnt;
+ while (fcnt) {
+ struct fq_flow *f = tofree[--fcnt];
+
+ rb_erase(&f->fq_node, root);
+ kmem_cache_free(fq_flow_cachep, f);
+ }
+}
+
+static const u8 prio2band[TC_PRIO_MAX + 1] = {
+ 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1
+};
+
+static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
+{
+ struct rb_node **p, *parent;
+ struct sock *sk = skb->sk;
+ struct rb_root *root;
+ struct fq_flow *f;
+ int band;
+
+ /* warning: no starvation prevention... */
+ band = prio2band[skb->priority & TC_PRIO_MAX];
+ if (unlikely(band == 0))
+ return &q->internal;
+
+ if (unlikely(!sk)) {
+ /* By forcing the low order bit to 1, we make sure not to
+ * collide with a local flow (socket pointers are word aligned)
+ */
+ sk = (struct sock *)(skb_get_rxhash(skb) | 1L);
+ }
+
+ root = &q->fq_root[hash_32((u32)(long)sk, q->fq_trees_log)];
+
+ if (q->flows >= (2U << q->fq_trees_log) &&
+ q->inactive_flows > q->flows/2)
+ fq_gc(q, root, sk);
+
+ p = &root->rb_node;
+ parent = NULL;
+ while (*p) {
+ parent = *p;
+
+ f = container_of(parent, struct fq_flow, fq_node);
+ if (f->sk == sk) {
+ /* socket might have been reallocated, so check
+ * if its sk_hash is the same.
+ * If not, we need to refill the credit with the
+ * initial quantum
+ */
+ if (unlikely(skb->sk &&
+ f->socket_hash != sk->sk_hash)) {
+ f->credit = q->initial_quantum;
+ f->socket_hash = sk->sk_hash;
+ }
+ return f;
+ }
+ if (f->sk > sk)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+
+ f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!f)) {
+ q->stat_allocation_errors++;
+ return &q->internal;
+ }
+ fq_flow_set_detached(f);
+ f->sk = sk;
+ if (skb->sk)
+ f->socket_hash = sk->sk_hash;
+ f->credit = q->initial_quantum;
+
+ rb_link_node(&f->fq_node, parent, p);
+ rb_insert_color(&f->fq_node, root);
+
+ q->flows++;
+ q->inactive_flows++;
+ return f;
+}
+
+
+/* remove one skb from head of flow queue */
+static struct sk_buff *fq_dequeue_head(struct fq_flow *flow)
+{
+ struct sk_buff *skb = flow->head;
+
+ if (skb) {
+ flow->head = skb->next;
+ skb->next = NULL;
+ flow->qlen--;
+ }
+ return skb;
+}
+
+/* We might add detection of retransmits in the future.
+ * For the time being, just return false.
+ */
+static bool skb_is_retransmit(struct sk_buff *skb)
+{
+ return false;
+}
+
+/* Add skb to the flow queue.
+ * The flow queue is a linked list, kind of a FIFO, except that TCP
+ * retransmits are special cased and transmitted before other packets.
+ * We rely on the fact that TCP retransmits are unlikely, so we do not
+ * waste a separate queue or a pointer on them.
+ * head-> [retrans pkt 1]
+ * [retrans pkt 2]
+ * [ normal pkt 1]
+ * [ normal pkt 2]
+ * [ normal pkt 3]
+ * tail-> [ normal pkt 4]
+ */
+static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
+{
+ struct sk_buff *prev, *head = flow->head;
+
+ skb->next = NULL;
+ if (!head) {
+ flow->head = skb;
+ flow->tail = skb;
+ return;
+ }
+ if (likely(!skb_is_retransmit(skb))) {
+ flow->tail->next = skb;
+ flow->tail = skb;
+ return;
+ }
+
+ /* This skb is a tcp retransmit,
+ * find the last retrans packet in the queue
+ */
+ prev = NULL;
+ while (skb_is_retransmit(head)) {
+ prev = head;
+ head = head->next;
+ if (!head)
+ break;
+ }
+ if (!prev) { /* no rtx packet in queue, become the new head */
+ skb->next = flow->head;
+ flow->head = skb;
+ } else {
+ if (prev == flow->tail)
+ flow->tail = skb;
+ else
+ skb->next = prev->next;
+ prev->next = skb;
+ }
+}
+
+static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+ struct fq_sched_data *q = qdisc_priv(sch);
+ struct fq_flow *f;
+
+ if (unlikely(sch->q.qlen >= sch->limit))
+ return qdisc_drop(skb, sch);
+
+ f = fq_classify(skb, q);
+ if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
+ q->stat_flows_plimit++;
+ return qdisc_drop(skb, sch);
+ }
+
+ f->qlen++;
+ flow_queue_add(f, skb);
+ if (skb_is_retransmit(skb))
+ q->stat_tcp_retrans++;
+ sch->qstats.backlog += qdisc_pkt_len(skb);
+ if (fq_flow_is_detached(f)) {
+ fq_flow_add_tail(&q->new_flows, f);
+ if (q->quantum > f->credit)
+ f->credit = q->quantum;
+ q->inactive_flows--;
+ qdisc_unthrottled(sch);
+ }
+ if (unlikely(f == &q->internal)) {
+ q->stat_internal_packets++;
+ qdisc_unthrottled(sch);
+ }
+ sch->q.qlen++;
+
+ return NET_XMIT_SUCCESS;
+}
+
+static void fq_check_throttled(struct fq_sched_data *q, u64 now)
+{
+ struct rb_node *p;
+
+ if (q->time_next_delayed_flow > now)
+ return;
+
+ q->time_next_delayed_flow = ~0ULL;
+ while ((p = rb_first(&q->delayed)) != NULL) {
+ struct fq_flow *f = container_of(p, struct fq_flow, rate_node);
+
+ if (f->time_next_packet > now) {
+ q->time_next_delayed_flow = f->time_next_packet;
+ break;
+ }
+ rb_erase(p, &q->delayed);
+ q->throttled_flows--;
+ fq_flow_add_tail(&q->old_flows, f);
+ }
+}
+
+static struct sk_buff *fq_dequeue(struct Qdisc *sch)
+{
+ struct fq_sched_data *q = qdisc_priv(sch);
+ u64 now = ktime_to_ns(ktime_get());
+ struct fq_flow_head *head;
+ struct sk_buff *skb;
+ struct fq_flow *f;
+
+ skb = fq_dequeue_head(&q->internal);
+ if (skb)
+ goto out;
+ fq_check_throttled(q, now);
+begin:
+ head = &q->new_flows;
+ if (!head->first) {
+ head = &q->old_flows;
+ if (!head->first) {
+ if (q->time_next_delayed_flow != ~0ULL)
+ qdisc_watchdog_schedule_ns(&q->watchdog,
+ q->time_next_delayed_flow);
+ return NULL;
+ }
+ }
+ f = head->first;
+
+ if (f->credit <= 0) {
+ f->credit += q->quantum;
+ head->first = f->next;
+ fq_flow_add_tail(&q->old_flows, f);
+ goto begin;
+ }
+
+ if (unlikely(f->head && now < f->time_next_packet)) {
+ head->first = f->next;
+ fq_flow_set_throttled(q, f);
+ goto begin;
+ }
+
+ skb = fq_dequeue_head(f);
+ if (!skb) {
+ head->first = f->next;
+ /* force a pass through old_flows to prevent starvation */
+ if ((head == &q->new_flows) && q->old_flows.first) {
+ fq_flow_add_tail(&q->old_flows, f);
+ } else {
+ fq_flow_set_detached(f);
+ f->age = jiffies;
+ q->inactive_flows++;
+ }
+ goto begin;
+ }
+ prefetch(&skb->end);
+ f->time_next_packet = now;
+ f->credit -= qdisc_pkt_len(skb);
+
+ if (f->credit <= 0 &&
+ q->rate_enable &&
+ skb->sk && skb->sk->sk_state != TCP_TIME_WAIT) {
+ u32 rate = skb->sk->sk_pacing_rate ?: q->flow_default_rate;
+
+ rate = min(rate, q->flow_max_rate);
+ if (rate) {
+ u64 len = (u64)qdisc_pkt_len(skb) * NSEC_PER_SEC;
+
+ do_div(len, rate);
+ /* Since socket rate can change later,
+ * clamp the delay to 125 ms.
+ * TODO: maybe segment the too big skb, as in commit
+ * e43ac79a4bc ("sch_tbf: segment too big GSO packets")
+ */
+ if (unlikely(len > 125 * NSEC_PER_MSEC)) {
+ len = 125 * NSEC_PER_MSEC;
+ q->stat_pkts_too_long++;
+ }
+
+ f->time_next_packet = now + len;
+ }
+ }
+out:
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_bstats_update(sch, skb);
+ sch->q.qlen--;
+ qdisc_unthrottled(sch);
+ return skb;
+}
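+
+/* Worked example of the pacing math above (numbers chosen purely for
+ * illustration) : with sk_pacing_rate = 12500000 bytes/sec (100 Mbit/s)
+ * and a 1514 byte skb, len = 1514 * NSEC_PER_SEC / 12500000 = 121120 ns,
+ * so the flow is not served again for ~121 us. Delays above 125 ms are
+ * clamped, as noted in the TODO.
+ */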
+
+static void fq_reset(struct Qdisc *sch)
+{
+ struct sk_buff *skb;
+
+ while ((skb = fq_dequeue(sch)) != NULL)
+ kfree_skb(skb);
+}
+
+static void fq_rehash(struct fq_sched_data *q,
+ struct rb_root *old_array, u32 old_log,
+ struct rb_root *new_array, u32 new_log)
+{
+ struct rb_node *op, **np, *parent;
+ struct rb_root *oroot, *nroot;
+ struct fq_flow *of, *nf;
+ int fcnt = 0;
+ u32 idx;
+
+ for (idx = 0; idx < (1U << old_log); idx++) {
+ oroot = &old_array[idx];
+ while ((op = rb_first(oroot)) != NULL) {
+ rb_erase(op, oroot);
+ of = container_of(op, struct fq_flow, fq_node);
+ if (fq_gc_candidate(of)) {
+ fcnt++;
+ kmem_cache_free(fq_flow_cachep, of);
+ continue;
+ }
+ nroot = &new_array[hash_32((u32)(long)of->sk, new_log)];
+
+ np = &nroot->rb_node;
+ parent = NULL;
+ while (*np) {
+ parent = *np;
+
+ nf = container_of(parent, struct fq_flow, fq_node);
+ BUG_ON(nf->sk == of->sk);
+
+ if (nf->sk > of->sk)
+ np = &parent->rb_right;
+ else
+ np = &parent->rb_left;
+ }
+
+ rb_link_node(&of->fq_node, parent, np);
+ rb_insert_color(&of->fq_node, nroot);
+ }
+ }
+ q->flows -= fcnt;
+ q->inactive_flows -= fcnt;
+ q->stat_gc_flows += fcnt;
+}
+
+static int fq_resize(struct fq_sched_data *q, u32 log)
+{
+ struct rb_root *array;
+ u32 idx;
+
+ if (q->fq_root && log == q->fq_trees_log)
+ return 0;
+
+ array = kmalloc(sizeof(struct rb_root) << log, GFP_KERNEL);
+ if (!array)
+ return -ENOMEM;
+
+ for (idx = 0; idx < (1U << log); idx++)
+ array[idx] = RB_ROOT;
+
+ if (q->fq_root) {
+ fq_rehash(q, q->fq_root, q->fq_trees_log, array, log);
+ kfree(q->fq_root);
+ }
+ q->fq_root = array;
+ q->fq_trees_log = log;
+
+ return 0;
+}
+
+static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
+ [TCA_FQ_PLIMIT] = { .type = NLA_U32 },
+ [TCA_FQ_FLOW_PLIMIT] = { .type = NLA_U32 },
+ [TCA_FQ_QUANTUM] = { .type = NLA_U32 },
+ [TCA_FQ_INITIAL_QUANTUM] = { .type = NLA_U32 },
+ [TCA_FQ_RATE_ENABLE] = { .type = NLA_U32 },
+ [TCA_FQ_FLOW_DEFAULT_RATE] = { .type = NLA_U32 },
+ [TCA_FQ_FLOW_MAX_RATE] = { .type = NLA_U32 },
+ [TCA_FQ_BUCKETS_LOG] = { .type = NLA_U32 },
+};
+
+static int fq_change(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct fq_sched_data *q = qdisc_priv(sch);
+ struct nlattr *tb[TCA_FQ_MAX + 1];
+ int err, drop_count = 0;
+ u32 fq_log;
+
+ if (!opt)
+ return -EINVAL;
+
+ err = nla_parse_nested(tb, TCA_FQ_MAX, opt, fq_policy);
+ if (err < 0)
+ return err;
+
+ sch_tree_lock(sch);
+
+ fq_log = q->fq_trees_log;
+
+ if (tb[TCA_FQ_BUCKETS_LOG]) {
+ u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);
+
+ if (nval >= 1 && nval <= ilog2(256*1024))
+ fq_log = nval;
+ else
+ err = -EINVAL;
+ }
+ if (tb[TCA_FQ_PLIMIT])
+ sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);
+
+ if (tb[TCA_FQ_FLOW_PLIMIT])
+ q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);
+
+ if (tb[TCA_FQ_QUANTUM])
+ q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
+
+ if (tb[TCA_FQ_INITIAL_QUANTUM])
+ q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
+
+ if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
+ q->flow_default_rate = nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]);
+
+ if (tb[TCA_FQ_FLOW_MAX_RATE])
+ q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);
+
+ if (tb[TCA_FQ_RATE_ENABLE]) {
+ u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);
+
+ if (enable <= 1)
+ q->rate_enable = enable;
+ else
+ err = -EINVAL;
+ }
+
+ if (!err)
+ err = fq_resize(q, fq_log);
+
+ while (sch->q.qlen > sch->limit) {
+ struct sk_buff *skb = fq_dequeue(sch);
+
+ kfree_skb(skb);
+ drop_count++;
+ }
+ qdisc_tree_decrease_qlen(sch, drop_count);
+
+ sch_tree_unlock(sch);
+ return err;
+}
+
+static void fq_destroy(struct Qdisc *sch)
+{
+ struct fq_sched_data *q = qdisc_priv(sch);
+ struct rb_root *root;
+ struct rb_node *p;
+ unsigned int idx;
+
+ if (q->fq_root) {
+ for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
+ root = &q->fq_root[idx];
+ while ((p = rb_first(root)) != NULL) {
+ rb_erase(p, root);
+ kmem_cache_free(fq_flow_cachep,
+ container_of(p, struct fq_flow, fq_node));
+ }
+ }
+ kfree(q->fq_root);
+ }
+ qdisc_watchdog_cancel(&q->watchdog);
+}
+
+static int fq_init(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct fq_sched_data *q = qdisc_priv(sch);
+ int err;
+
+ sch->limit = 10000;
+ q->flow_plimit = 100;
+ q->quantum = 2 * psched_mtu(qdisc_dev(sch));
+ q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch));
+ q->flow_default_rate = 0;
+ q->flow_max_rate = ~0U;
+ q->rate_enable = 1;
+ q->new_flows.first = NULL;
+ q->old_flows.first = NULL;
+ q->delayed = RB_ROOT;
+ q->fq_root = NULL;
+ q->fq_trees_log = ilog2(1024);
+ qdisc_watchdog_init(&q->watchdog, sch);
+
+ if (opt)
+ err = fq_change(sch, opt);
+ else
+ err = fq_resize(q, q->fq_trees_log);
+
+ return err;
+}
+
+static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct fq_sched_data *q = qdisc_priv(sch);
+ struct nlattr *opts;
+
+ opts = nla_nest_start(skb, TCA_OPTIONS);
+ if (opts == NULL)
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
+ nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
+ nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
+ nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
+ nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
+ nla_put_u32(skb, TCA_FQ_FLOW_DEFAULT_RATE, q->flow_default_rate) ||
+ nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
+ nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, opts);
+ return skb->len;
+
+nla_put_failure:
+ return -1;
+}
+
+static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+ struct fq_sched_data *q = qdisc_priv(sch);
+ u64 now = ktime_to_ns(ktime_get());
+ struct tc_fq_qd_stats st = {
+ .gc_flows = q->stat_gc_flows,
+ .highprio_packets = q->stat_internal_packets,
+ .tcp_retrans = q->stat_tcp_retrans,
+ .throttled = q->stat_throttled,
+ .flows_plimit = q->stat_flows_plimit,
+ .pkts_too_long = q->stat_pkts_too_long,
+ .allocation_errors = q->stat_allocation_errors,
+ .flows = q->flows,
+ .inactive_flows = q->inactive_flows,
+ .throttled_flows = q->throttled_flows,
+ .time_next_delayed_flow = q->time_next_delayed_flow - now,
+ };
+
+ return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
+static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
+ .id = "fq",
+ .priv_size = sizeof(struct fq_sched_data),
+
+ .enqueue = fq_enqueue,
+ .dequeue = fq_dequeue,
+ .peek = qdisc_peek_dequeued,
+ .init = fq_init,
+ .reset = fq_reset,
+ .destroy = fq_destroy,
+ .change = fq_change,
+ .dump = fq_dump,
+ .dump_stats = fq_dump_stats,
+ .owner = THIS_MODULE,
+};
+
+static int __init fq_module_init(void)
+{
+ int ret;
+
+ fq_flow_cachep = kmem_cache_create("fq_flow_cache",
+ sizeof(struct fq_flow),
+ 0, 0, NULL);
+ if (!fq_flow_cachep)
+ return -ENOMEM;
+
+ ret = register_qdisc(&fq_qdisc_ops);
+ if (ret)
+ kmem_cache_destroy(fq_flow_cachep);
+ return ret;
+}
+
+static void __exit fq_module_exit(void)
+{
+ unregister_qdisc(&fq_qdisc_ops);
+ kmem_cache_destroy(fq_flow_cachep);
+}
+
+module_init(fq_module_init)
+module_exit(fq_module_exit)
+MODULE_AUTHOR("Eric Dumazet");
+MODULE_LICENSE("GPL");
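The counters filled in by fq_dump_stats() surface in userspace as the extended statistics of `tc -s qdisc show`, assuming a tc build that knows the fq qdisc.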
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 48be3d5c0d92..a74e278654aa 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -30,6 +30,10 @@
#include <net/pkt_sched.h>
#include <net/dst.h>
+/* Qdisc to use by default */
+const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
+EXPORT_SYMBOL(default_qdisc_ops);
+
/* Main transmission queue. */
/* Modifications to data participating in scheduling must be protected with
@@ -530,12 +534,11 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
.dump = pfifo_fast_dump,
.owner = THIS_MODULE,
};
-EXPORT_SYMBOL(pfifo_fast_ops);
static struct lock_class_key qdisc_tx_busylock;
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
- struct Qdisc_ops *ops)
+ const struct Qdisc_ops *ops)
{
void *p;
struct Qdisc *sch;
@@ -579,10 +582,14 @@ errout:
}
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
- struct Qdisc_ops *ops, unsigned int parentid)
+ const struct Qdisc_ops *ops,
+ unsigned int parentid)
{
struct Qdisc *sch;
+ if (!try_module_get(ops->owner))
+ goto errout;
+
sch = qdisc_alloc(dev_queue, ops);
if (IS_ERR(sch))
goto errout;
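Note the new try_module_get() on ops->owner: once the default qdisc can live in a module, every qdisc_create_dflt() caller must pin the owning module so it cannot be unloaded under a live qdisc; the reference is dropped later by qdisc_destroy().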
@@ -686,7 +693,7 @@ static void attach_one_default_qdisc(struct net_device *dev,
if (dev->tx_queue_len) {
qdisc = qdisc_create_dflt(dev_queue,
- &pfifo_fast_ops, TC_H_ROOT);
+ default_qdisc_ops, TC_H_ROOT);
if (!qdisc) {
netdev_info(dev, "activation failed\n");
return;
@@ -739,9 +746,8 @@ void dev_activate(struct net_device *dev)
int need_watchdog;
/* No queueing discipline is attached to device;
- create default one i.e. pfifo_fast for devices,
- which need queueing and noqueue_qdisc for
- virtual interfaces
+ * create a default one for devices which need queueing,
+ * and noqueue_qdisc for virtual interfaces
*/
if (dev->qdisc == &noop_qdisc)
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index 5da78a19ac9a..2e56185736d6 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -57,7 +57,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
dev_queue = netdev_get_tx_queue(dev, ntx);
- qdisc = qdisc_create_dflt(dev_queue, &pfifo_fast_ops,
+ qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops,
TC_H_MAKE(TC_H_MAJ(sch->handle),
TC_H_MIN(ntx + 1)));
if (qdisc == NULL)
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index accec33c454c..d44c868cb537 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -124,7 +124,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
for (i = 0; i < dev->num_tx_queues; i++) {
dev_queue = netdev_get_tx_queue(dev, i);
- qdisc = qdisc_create_dflt(dev_queue, &pfifo_fast_ops,
+ qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops,
TC_H_MAKE(TC_H_MAJ(sch->handle),
TC_H_MIN(i + 1)));
if (qdisc == NULL) {
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 82f6016d89ab..a6d788d45216 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -412,12 +412,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
/* If a delay is expected, orphan the skb. (orphaning usually takes
* place at TX completion time, so _before_ the link transit delay)
- * Ideally, this orphaning should be done after the rate limiting
- * module, because this breaks TCP Small Queue, and other mechanisms
- * based on socket sk_wmem_alloc.
*/
if (q->latency || q->jitter)
- skb_orphan(skb);
+ skb_orphan_partial(skb);
/*
* If we need to duplicate packet, then re-insert at top of the
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index ab67efc64b24..cef509985192 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -28,10 +28,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -43,9 +40,6 @@
* Daisy Chang <daisyc@us.ibm.com>
* Ryan Layer <rmlayer@us.ibm.com>
* Kevin Gao <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index ba1dfc3f8def..8c4fa5dec824 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -22,16 +22,10 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Vlad Yasevich <vladislav.yasevich@hp.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/slab.h>
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 64977ea0f9c5..077bb070052b 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -27,19 +27,13 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
* Jon Grimm <jgrimm@us.ibm.com>
* Daisy Chang <daisyc@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/types.h>
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 5780565f5b7d..7bd5ed4a8657 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -24,17 +24,11 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -201,9 +195,9 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
/* This is the biggest possible DATA chunk that can fit into
* the packet
*/
- max_data = asoc->pathmtu -
+ max_data = (asoc->pathmtu -
sctp_sk(asoc->base.sk)->pf->af->net_header_len -
- sizeof(struct sctphdr) - sizeof(struct sctp_data_chunk);
+ sizeof(struct sctphdr) - sizeof(struct sctp_data_chunk)) & ~3;
max = asoc->frag_point;
/* If the peer requested that we authenticate DATA chunks
diff --git a/net/sctp/command.c b/net/sctp/command.c
index c0044019db9e..3d9a9ff69c03 100644
--- a/net/sctp/command.c
+++ b/net/sctp/command.c
@@ -25,17 +25,11 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/types.h>
diff --git a/net/sctp/debug.c b/net/sctp/debug.c
index f4998780d6df..e89015d8935a 100644
--- a/net/sctp/debug.c
+++ b/net/sctp/debug.c
@@ -28,10 +28,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -40,9 +37,6 @@
* Jon Grimm <jgrimm@us.ibm.com>
* Daisy Chang <daisyc@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <net/sctp/sctp.h>
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 9e3d257de0e0..09b8daac87c8 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -29,10 +29,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -40,9 +37,6 @@
* Jon Grimm <jgrimm@austin.ibm.com>
* Daisy Chang <daisyc@us.ibm.com>
* Dajiang Zhang <dajiang.zhang@nokia.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/types.h>
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 3fa4d858c35a..5f2068679f83 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -29,10 +29,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -43,9 +40,6 @@
* Daisy Chang <daisyc@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/types.h>
@@ -87,15 +81,7 @@ static inline int sctp_rcv_checksum(struct net *net, struct sk_buff *skb)
{
struct sctphdr *sh = sctp_hdr(skb);
__le32 cmp = sh->checksum;
- struct sk_buff *list;
- __le32 val;
- __u32 tmp = sctp_start_cksum((__u8 *)sh, skb_headlen(skb));
-
- skb_walk_frags(skb, list)
- tmp = sctp_update_cksum((__u8 *)list->data, skb_headlen(list),
- tmp);
-
- val = sctp_end_cksum(tmp);
+ __le32 val = sctp_compute_cksum(skb, 0);
if (val != cmp) {
/* CRC failure, dump it. */
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index cb25f040fed0..5856932fdc38 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -30,17 +30,11 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 09ffcc912d23..da613ceae28c 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -27,10 +27,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Le Yanqun <yanqun.le@nokia.com>
@@ -42,9 +39,6 @@
*
* Based on:
* linux/net/ipv6/tcp_ipv6.c
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -351,7 +345,7 @@ out:
rt = (struct rt6_info *)dst;
t->dst = dst;
-
+ t->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
pr_debug("rt6_dst:%pI6 rt6_src:%pI6\n", &rt->rt6i_dst.addr,
&fl6->saddr);
} else {
diff --git a/net/sctp/objcnt.c b/net/sctp/objcnt.c
index fe012c44f8df..5ea573b37648 100644
--- a/net/sctp/objcnt.c
+++ b/net/sctp/objcnt.c
@@ -26,16 +26,10 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/net/sctp/output.c b/net/sctp/output.c
index a46d1eb41762..0ac3a65daccb 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -26,19 +26,13 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
* Jon Grimm <jgrimm@austin.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index ef9e2bbc0f2f..94df75877869 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -28,10 +28,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -41,9 +38,6 @@
* Hui Huang <hui.huang@nokia.com>
* Sridhar Samudrala <sri@us.ibm.com>
* Jon Grimm <jgrimm@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/net/sctp/primitive.c b/net/sctp/primitive.c
index 794bb14decde..ce1ffd811775 100644
--- a/net/sctp/primitive.c
+++ b/net/sctp/primitive.c
@@ -29,10 +29,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -40,9 +37,6 @@
* Karl Knutson <karl@athena.chicago.il.us>
* Ardelle Fan <ardelle.fan@intel.com>
* Kevin Gao <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/types.h>
diff --git a/net/sctp/probe.c b/net/sctp/probe.c
index e62c22535be4..53c452efb40b 100644
--- a/net/sctp/probe.c
+++ b/net/sctp/probe.c
@@ -46,6 +46,10 @@ static int port __read_mostly = 0;
MODULE_PARM_DESC(port, "Port to match (0=all)");
module_param(port, int, 0);
+static unsigned int fwmark __read_mostly = 0;
+MODULE_PARM_DESC(fwmark, "skb mark to match (0=no mark)");
+module_param(fwmark, uint, 0);
+
static int bufsize __read_mostly = 64 * 1024;
MODULE_PARM_DESC(bufsize, "Log buffer size (default 64k)");
module_param(bufsize, int, 0);
@@ -129,15 +133,19 @@ static sctp_disposition_t jsctp_sf_eat_sack(struct net *net,
void *arg,
sctp_cmd_seq_t *commands)
{
+ struct sctp_chunk *chunk = arg;
+ struct sk_buff *skb = chunk->skb;
struct sctp_transport *sp;
static __u32 lcwnd = 0;
struct timespec now;
sp = asoc->peer.primary_path;
- if ((full || sp->cwnd != lcwnd) &&
- (!port || asoc->peer.port == port ||
- ep->base.bind_addr.port == port)) {
+ if (((port == 0 && fwmark == 0) ||
+ asoc->peer.port == port ||
+ ep->base.bind_addr.port == port ||
+ (fwmark > 0 && skb->mark == fwmark)) &&
+ (full || sp->cwnd != lcwnd)) {
lcwnd = sp->cwnd;
getnstimeofday(&now);
@@ -155,13 +163,8 @@ static sctp_disposition_t jsctp_sf_eat_sack(struct net *net,
if (sp == asoc->peer.primary_path)
printl("*");
- if (sp->ipaddr.sa.sa_family == AF_INET)
- printl("%pI4 ", &sp->ipaddr.v4.sin_addr);
- else
- printl("%pI6 ", &sp->ipaddr.v6.sin6_addr);
-
- printl("%2u %8u %8u %8u %8u %8u ",
- sp->state, sp->cwnd, sp->ssthresh,
+ printl("%pISc %2u %8u %8u %8u %8u %8u ",
+ &sp->ipaddr, sp->state, sp->cwnd, sp->ssthresh,
sp->flight_size, sp->partial_bytes_acked,
sp->pathmtu);
}
@@ -203,8 +206,8 @@ static __init int sctpprobe_init(void)
if (ret)
goto remove_proc;
- pr_info("probe registered (port=%d)\n", port);
-
+ pr_info("probe registered (port=%d/fwmark=%u) bufsize=%u\n",
+ port, fwmark, bufsize);
return 0;
remove_proc:
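
The probe hunk above adds an skb->mark filter alongside the existing port filter and reorders the test so the cheap match checks run before the cwnd comparison. A minimal user-space restatement of the new accept predicate, with plain integers standing in for the kernel structures (an illustrative sketch, not the in-tree code):

#include <stdbool.h>

/* Mirrors the condition in jsctp_sf_eat_sack() after this patch. */
static bool probe_should_log(unsigned int port, unsigned int fwmark,
                             unsigned int peer_port, unsigned int bind_port,
                             unsigned int skb_mark, bool full,
                             unsigned int cwnd, unsigned int last_cwnd)
{
        bool matched = (port == 0 && fwmark == 0) ||       /* no filters set */
                       peer_port == port ||                /* remote port hit */
                       bind_port == port ||                /* local port hit */
                       (fwmark > 0 && skb_mark == fwmark); /* mark hit */

        /* Log only when a filter matched and either full logging is on
         * or the congestion window actually moved. */
        return matched && (full || cwnd != last_cwnd);
}
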
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 62526c477050..0c0642156842 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -22,16 +22,10 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/types.h>
@@ -232,7 +226,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
sk = epb->sk;
if (!net_eq(sock_net(sk), seq_file_net(seq)))
continue;
- seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
+ seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5u %5lu ", ep, sk,
sctp_sk(sk)->type, sk->sk_state, hash,
epb->bind_addr.port,
from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
@@ -342,7 +336,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
continue;
seq_printf(seq,
"%8pK %8pK %-3d %-3d %-2d %-4d "
- "%4d %8d %8d %7d %5lu %-5d %5d ",
+ "%4d %8d %8d %7u %5lu %-5d %5d ",
assoc, sk, sctp_sk(sk)->type, sk->sk_state,
assoc->state, hash,
assoc->assoc_id,
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 4a17494d736c..5e17092f4ada 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -29,10 +29,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -41,9 +38,6 @@
* Sridhar Samudrala <sri@us.ibm.com>
* Daisy Chang <daisyc@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -1547,7 +1541,7 @@ module_exit(sctp_exit);
*/
MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-132");
MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-132");
-MODULE_AUTHOR("Linux Kernel SCTP developers <lksctp-developers@lists.sourceforge.net>");
+MODULE_AUTHOR("Linux Kernel SCTP developers <linux-sctp@vger.kernel.org>");
MODULE_DESCRIPTION("Support for the SCTP protocol (RFC2960)");
module_param_named(no_checksums, sctp_checksum_disable, bool, 0644);
MODULE_PARM_DESC(no_checksums, "Disable checksums computing and verification");
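
Those MODULE_ALIAS() lines are what let the kernel autoload SCTP on first use: __stringify() expands the protocol-family constant to text at compile time, producing "net-pf-2-proto-132" and "net-pf-10-proto-132" (PF_INET is 2, PF_INET6 is 10, and 132 is IPPROTO_SCTP). A small user-space sketch of the same token-pasting trick:

#include <stdio.h>

#define __stringify_1(x) #x
#define __stringify(x)   __stringify_1(x)
#define PF_INET  2
#define PF_INET6 10

int main(void)
{
        puts("net-pf-" __stringify(PF_INET) "-proto-132");  /* net-pf-2-proto-132 */
        puts("net-pf-" __stringify(PF_INET6) "-proto-132"); /* net-pf-10-proto-132 */
        return 0;
}
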
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 362ae6e2fd93..d244a23ab8d3 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -29,10 +29,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -45,9 +42,6 @@
* Daisy Chang <daisyc@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
* Kevin Gao <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -68,8 +62,12 @@
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
-static struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc,
- __u8 type, __u8 flags, int paylen);
+static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc,
+ __u8 type, __u8 flags, int paylen);
+static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc,
+ __u8 flags, int paylen);
+static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
+ __u8 type, __u8 flags, int paylen);
static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const struct sctp_chunk *init_chunk,
@@ -82,6 +80,28 @@ static int sctp_process_param(struct sctp_association *asoc,
static void *sctp_addto_param(struct sctp_chunk *chunk, int len,
const void *data);
+/* Control chunk destructor */
+static void sctp_control_release_owner(struct sk_buff *skb)
+{
+ /* TODO: do memory release */
+}
+
+static void sctp_control_set_owner_w(struct sctp_chunk *chunk)
+{
+ struct sctp_association *asoc = chunk->asoc;
+ struct sk_buff *skb = chunk->skb;
+
+ /* TODO: properly account for control chunks.
+ * To do it right we'll need:
+ * 1) endpoint if association isn't known.
+ * 2) proper memory accounting.
+ *
+ * For now, don't do anything.
+ */
+ skb->sk = asoc ? asoc->base.sk : NULL;
+ skb->destructor = sctp_control_release_owner;
+}
+
/* What was the inbound interface for this chunk? */
int sctp_chunk_iif(const struct sctp_chunk *chunk)
{
@@ -296,7 +316,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
* PLEASE DO NOT FIXME [This version does not support Host Name.]
*/
- retval = sctp_make_chunk(asoc, SCTP_CID_INIT, 0, chunksize);
+ retval = sctp_make_control(asoc, SCTP_CID_INIT, 0, chunksize);
if (!retval)
goto nodata;
@@ -443,7 +463,7 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
num_ext);
/* Now allocate and fill out the chunk. */
- retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize);
+ retval = sctp_make_control(asoc, SCTP_CID_INIT_ACK, 0, chunksize);
if (!retval)
goto nomem_chunk;
@@ -548,7 +568,7 @@ struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *asoc,
cookie_len = asoc->peer.cookie_len;
/* Build a cookie echo chunk. */
- retval = sctp_make_chunk(asoc, SCTP_CID_COOKIE_ECHO, 0, cookie_len);
+ retval = sctp_make_control(asoc, SCTP_CID_COOKIE_ECHO, 0, cookie_len);
if (!retval)
goto nodata;
retval->subh.cookie_hdr =
@@ -593,7 +613,7 @@ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc,
{
struct sctp_chunk *retval;
- retval = sctp_make_chunk(asoc, SCTP_CID_COOKIE_ACK, 0, 0);
+ retval = sctp_make_control(asoc, SCTP_CID_COOKIE_ACK, 0, 0);
/* RFC 2960 6.4 Multi-homed SCTP Endpoints
*
@@ -641,8 +661,8 @@ struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc,
sctp_cwrhdr_t cwr;
cwr.lowest_tsn = htonl(lowest_tsn);
- retval = sctp_make_chunk(asoc, SCTP_CID_ECN_CWR, 0,
- sizeof(sctp_cwrhdr_t));
+ retval = sctp_make_control(asoc, SCTP_CID_ECN_CWR, 0,
+ sizeof(sctp_cwrhdr_t));
if (!retval)
goto nodata;
@@ -675,8 +695,8 @@ struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc,
sctp_ecnehdr_t ecne;
ecne.lowest_tsn = htonl(lowest_tsn);
- retval = sctp_make_chunk(asoc, SCTP_CID_ECN_ECNE, 0,
- sizeof(sctp_ecnehdr_t));
+ retval = sctp_make_control(asoc, SCTP_CID_ECN_ECNE, 0,
+ sizeof(sctp_ecnehdr_t));
if (!retval)
goto nodata;
retval->subh.ecne_hdr =
@@ -712,7 +732,7 @@ struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc,
dp.ssn = htons(ssn);
chunk_len = sizeof(dp) + data_len;
- retval = sctp_make_chunk(asoc, SCTP_CID_DATA, flags, chunk_len);
+ retval = sctp_make_data(asoc, flags, chunk_len);
if (!retval)
goto nodata;
@@ -759,7 +779,7 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
+ sizeof(__u32) * num_dup_tsns;
/* Create the chunk. */
- retval = sctp_make_chunk(asoc, SCTP_CID_SACK, 0, len);
+ retval = sctp_make_control(asoc, SCTP_CID_SACK, 0, len);
if (!retval)
goto nodata;
@@ -838,8 +858,8 @@ struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc,
ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
shut.cum_tsn_ack = htonl(ctsn);
- retval = sctp_make_chunk(asoc, SCTP_CID_SHUTDOWN, 0,
- sizeof(sctp_shutdownhdr_t));
+ retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN, 0,
+ sizeof(sctp_shutdownhdr_t));
if (!retval)
goto nodata;
@@ -857,7 +877,7 @@ struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc,
{
struct sctp_chunk *retval;
- retval = sctp_make_chunk(asoc, SCTP_CID_SHUTDOWN_ACK, 0, 0);
+ retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN_ACK, 0, 0);
/* RFC 2960 6.4 Multi-homed SCTP Endpoints
*
@@ -886,7 +906,7 @@ struct sctp_chunk *sctp_make_shutdown_complete(
*/
flags |= asoc ? 0 : SCTP_CHUNK_FLAG_T;
- retval = sctp_make_chunk(asoc, SCTP_CID_SHUTDOWN_COMPLETE, flags, 0);
+ retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN_COMPLETE, flags, 0);
/* RFC 2960 6.4 Multi-homed SCTP Endpoints
*
@@ -925,7 +945,7 @@ struct sctp_chunk *sctp_make_abort(const struct sctp_association *asoc,
flags = SCTP_CHUNK_FLAG_T;
}
- retval = sctp_make_chunk(asoc, SCTP_CID_ABORT, flags, hint);
+ retval = sctp_make_control(asoc, SCTP_CID_ABORT, flags, hint);
/* RFC 2960 6.4 Multi-homed SCTP Endpoints
*
@@ -1117,7 +1137,7 @@ struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
struct sctp_chunk *retval;
sctp_sender_hb_info_t hbinfo;
- retval = sctp_make_chunk(asoc, SCTP_CID_HEARTBEAT, 0, sizeof(hbinfo));
+ retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT, 0, sizeof(hbinfo));
if (!retval)
goto nodata;
@@ -1145,7 +1165,7 @@ struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *asoc,
{
struct sctp_chunk *retval;
- retval = sctp_make_chunk(asoc, SCTP_CID_HEARTBEAT_ACK, 0, paylen);
+ retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT_ACK, 0, paylen);
if (!retval)
goto nodata;
@@ -1177,8 +1197,8 @@ static struct sctp_chunk *sctp_make_op_error_space(
{
struct sctp_chunk *retval;
- retval = sctp_make_chunk(asoc, SCTP_CID_ERROR, 0,
- sizeof(sctp_errhdr_t) + size);
+ retval = sctp_make_control(asoc, SCTP_CID_ERROR, 0,
+ sizeof(sctp_errhdr_t) + size);
if (!retval)
goto nodata;
@@ -1248,7 +1268,7 @@ struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc)
if (unlikely(!hmac_desc))
return NULL;
- retval = sctp_make_chunk(asoc, SCTP_CID_AUTH, 0,
+ retval = sctp_make_control(asoc, SCTP_CID_AUTH, 0,
hmac_desc->hmac_len + sizeof(sctp_authhdr_t));
if (!retval)
return NULL;
@@ -1351,8 +1371,8 @@ const union sctp_addr *sctp_source(const struct sctp_chunk *chunk)
/* Create a new chunk, setting the type and flags headers from the
* arguments, reserving enough space for a 'paylen' byte payload.
*/
-static struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc,
- __u8 type, __u8 flags, int paylen)
+static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
+ __u8 type, __u8 flags, int paylen)
{
struct sctp_chunk *retval;
sctp_chunkhdr_t *chunk_hdr;
@@ -1385,14 +1405,27 @@ static struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc,
if (sctp_auth_send_cid(type, asoc))
retval->auth = 1;
- /* Set the skb to the belonging sock for accounting. */
- skb->sk = sk;
-
return retval;
nodata:
return NULL;
}
+static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc,
+ __u8 flags, int paylen)
+{
+ return _sctp_make_chunk(asoc, SCTP_CID_DATA, flags, paylen);
+}
+
+static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc,
+ __u8 type, __u8 flags, int paylen)
+{
+ struct sctp_chunk *chunk = _sctp_make_chunk(asoc, type, flags, paylen);
+
+ if (chunk)
+ sctp_control_set_owner_w(chunk);
+
+ return chunk;
+}
/* Release the memory occupied by a chunk. */
static void sctp_chunk_destroy(struct sctp_chunk *chunk)
@@ -2207,25 +2240,23 @@ int sctp_verify_init(struct net *net, const struct sctp_association *asoc,
struct sctp_chunk **errp)
{
union sctp_params param;
- int has_cookie = 0;
+ bool has_cookie = false;
int result;
- /* Verify stream values are non-zero. */
- if ((0 == peer_init->init_hdr.num_outbound_streams) ||
- (0 == peer_init->init_hdr.num_inbound_streams) ||
- (0 == peer_init->init_hdr.init_tag) ||
- (SCTP_DEFAULT_MINWINDOW > ntohl(peer_init->init_hdr.a_rwnd))) {
-
+ /* Check for missing mandatory parameters. Note: Initial TSN is
+ * also mandatory, but is not checked here since the valid range
+ * is 0..2**32-1. RFC4960, section 3.3.3.
+ */
+ if (peer_init->init_hdr.num_outbound_streams == 0 ||
+ peer_init->init_hdr.num_inbound_streams == 0 ||
+ peer_init->init_hdr.init_tag == 0 ||
+ ntohl(peer_init->init_hdr.a_rwnd) < SCTP_DEFAULT_MINWINDOW)
return sctp_process_inv_mandatory(asoc, chunk, errp);
- }
- /* Check for missing mandatory parameters. */
sctp_walk_params(param, peer_init, init_hdr.params) {
-
- if (SCTP_PARAM_STATE_COOKIE == param.p->type)
- has_cookie = 1;
-
- } /* for (loop through all parameters) */
+ if (param.p->type == SCTP_PARAM_STATE_COOKIE)
+ has_cookie = true;
+ }
/* There is a possibility that a parameter length was bad and
* in that case we would have stopped walking the parameters.
@@ -2733,7 +2764,7 @@ static struct sctp_chunk *sctp_make_asconf(struct sctp_association *asoc,
length += addrlen;
/* Create the chunk. */
- retval = sctp_make_chunk(asoc, SCTP_CID_ASCONF, 0, length);
+ retval = sctp_make_control(asoc, SCTP_CID_ASCONF, 0, length);
if (!retval)
return NULL;
@@ -2917,7 +2948,7 @@ static struct sctp_chunk *sctp_make_asconf_ack(const struct sctp_association *as
int length = sizeof(asconf) + vparam_len;
/* Create the chunk. */
- retval = sctp_make_chunk(asoc, SCTP_CID_ASCONF_ACK, 0, length);
+ retval = sctp_make_control(asoc, SCTP_CID_ASCONF_ACK, 0, length);
if (!retval)
return NULL;
@@ -3448,7 +3479,7 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
hint = (nstreams + 1) * sizeof(__u32);
- retval = sctp_make_chunk(asoc, SCTP_CID_FWD_TSN, 0, hint);
+ retval = sctp_make_control(asoc, SCTP_CID_FWD_TSN, 0, hint);
if (!retval)
return NULL;
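
The sm_make_chunk.c refactor above funnels every allocation through _sctp_make_chunk() and adds two thin front ends: sctp_make_data() for DATA chunks and sctp_make_control() for everything else, with control chunks picking up their own (for now stubbed) skb destructor in place of the blanket skb->sk assignment that was removed. A simplified user-space rendition of the wrapper pattern (names mirror the patch; the bodies are stand-ins):

#include <stdlib.h>

struct chunk {
        int type;
        void (*destructor)(struct chunk *);
};

/* Stand-in for sctp_control_release_owner(): real accounting is TODO. */
static void control_destructor(struct chunk *c)
{
        (void)c;
}

static struct chunk *_make_chunk(int type)      /* common allocator */
{
        struct chunk *c = calloc(1, sizeof(*c));
        if (c)
                c->type = type;
        return c;
}

static struct chunk *make_data(void)            /* DATA keeps the old path */
{
        return _make_chunk(0 /* SCTP_CID_DATA */);
}

static struct chunk *make_control(int type)     /* control gets a destructor */
{
        struct chunk *c = _make_chunk(type);
        if (c)
                c->destructor = control_destructor;
        return c;
}
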
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 9da68852ee94..666c66842799 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -28,10 +28,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -42,9 +39,6 @@
* Daisy Chang <daisyc@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index f6b7109195a6..dfe3f36ff2aa 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -28,10 +28,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -45,9 +42,6 @@
* Ardelle Fan <ardelle.fan@intel.com>
* Ryan Layer <rmlayer@us.ibm.com>
* Kevin Gao <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 84d98d8a5a74..c5999b2dde7d 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -28,10 +28,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -41,9 +38,6 @@
* Daisy Chang <daisyc@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index c6670d2e3f8d..d5d5882a2891 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -34,10 +34,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -52,9 +49,6 @@
* Ryan Layer <rmlayer@us.ibm.com>
* Anup Pemmaiah <pemmaiah@cc.usu.edu>
* Kevin Gao <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/net/sctp/ssnmap.c b/net/sctp/ssnmap.c
index da8603523808..6007124aefa0 100644
--- a/net/sctp/ssnmap.c
+++ b/net/sctp/ssnmap.c
@@ -24,16 +24,10 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/types.h>
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 9a5c4c9eddaf..6b36561a1b3b 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -25,10 +25,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Mingqin Liu <liuming@us.ibm.com>
@@ -36,9 +33,6 @@
* Ardelle Fan <ardelle.fan@intel.com>
* Ryan Layer <rmlayer@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <net/sctp/structs.h>
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 8fdd16046d66..e332efb124cc 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -30,10 +30,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -43,9 +40,6 @@
* Hui Huang <hui.huang@nokia.com>
* Sridhar Samudrala <sri@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c
index b46019568a86..fbda20028285 100644
--- a/net/sctp/tsnmap.c
+++ b/net/sctp/tsnmap.c
@@ -27,19 +27,13 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Jon Grimm <jgrimm@us.ibm.com>
* Karl Knutson <karl@athena.chicago.il.us>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/slab.h>
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 44a45dbee4df..81089ed65456 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -28,19 +28,13 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
* Ardelle Fan <ardelle.fan@intel.com>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/slab.h>
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 04e3d470f877..1c1484ed605d 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -27,18 +27,12 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/slab.h>
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 7762b9f8a8b7..9c9caaa5e0d3 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -442,7 +442,7 @@ static void svc_tcp_write_space(struct sock *sk)
{
struct socket *sock = sk->sk_socket;
- if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock)
+ if (sk_stream_is_writeable(sk) && sock)
clear_bit(SOCK_NOSPACE, &sock->flags);
svc_write_space(sk);
}
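
Both sunrpc call sites switch from the open-coded wspace comparison to the sk_stream_is_writeable() helper introduced in this series, which also consults the new per-protocol stream_memory_free hook so TCP can enforce TCP_NOTSENT_LOWAT. A paraphrased sketch of what the helper folds together (not a verbatim copy of include/net/sock.h):

/* sketch: the old wspace test plus the new per-protocol memory check */
static inline bool sk_stream_is_writeable_sketch(const struct sock *sk)
{
        return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) &&
               sk_stream_memory_free(sk);
}
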
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index ddf0602603bd..d6656d7768f4 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1602,7 +1602,7 @@ static void xs_tcp_write_space(struct sock *sk)
read_lock_bh(&sk->sk_callback_lock);
/* from net/core/stream.c:sk_stream_write_space */
- if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
+ if (sk_stream_is_writeable(sk))
xs_write_space(sk);
read_unlock_bh(&sk->sk_callback_lock);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index c4ce243824bb..86de99ad2976 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1479,7 +1479,8 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
MAX_SKB_FRAGS * PAGE_SIZE);
skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
- msg->msg_flags & MSG_DONTWAIT, &err);
+ msg->msg_flags & MSG_DONTWAIT, &err,
+ PAGE_ALLOC_COSTLY_ORDER);
if (skb == NULL)
goto out;
@@ -1596,6 +1597,10 @@ out:
return err;
}
+/* We use paged skbs for stream sockets, and limit occupancy to 32768
+ * bytes, and a minimum of a full page.
+ */
+#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
struct msghdr *msg, size_t len)
@@ -1609,6 +1614,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
struct scm_cookie tmp_scm;
bool fds_sent = false;
int max_level;
+ int data_len;
if (NULL == siocb->scm)
siocb->scm = &tmp_scm;
@@ -1635,40 +1641,22 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
goto pipe_err;
while (sent < len) {
- /*
- * Optimisation for the fact that under 0.01% of X
- * messages typically need breaking up.
- */
-
- size = len-sent;
+ size = len - sent;
/* Keep two messages in the pipe so it schedules better */
- if (size > ((sk->sk_sndbuf >> 1) - 64))
- size = (sk->sk_sndbuf >> 1) - 64;
+ size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
- if (size > SKB_MAX_ALLOC)
- size = SKB_MAX_ALLOC;
-
- /*
- * Grab a buffer
- */
+ /* allow fallback to order-0 allocations */
+ size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
- skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
- &err);
+ data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
- if (skb == NULL)
+ skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
+ msg->msg_flags & MSG_DONTWAIT, &err,
+ get_order(UNIX_SKB_FRAGS_SZ));
+ if (!skb)
goto out_err;
- /*
- * If you pass two values to the sock_alloc_send_skb
- * it tries to grab the large buffer with GFP_NOFS
- * (which can fail easily), and if it fails grab the
- * fallback size buffer which is under a page and will
- * succeed. [Alan]
- */
- size = min_t(int, size, skb_tailroom(skb));
-
-
/* Only send the fds in the first buffer */
err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
if (err < 0) {
@@ -1678,7 +1666,11 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
max_level = err + 1;
fds_sent = true;
- err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
+ skb_put(skb, size - data_len);
+ skb->data_len = data_len;
+ skb->len = size;
+ err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov,
+ sent, size);
if (err) {
kfree_skb(skb);
goto out_err;
@@ -1890,6 +1882,11 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
return timeo;
}
+static unsigned int unix_skb_len(const struct sk_buff *skb)
+{
+ return skb->len - UNIXCB(skb).consumed;
+}
+
static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size,
int flags)
@@ -1977,8 +1974,8 @@ again:
}
skip = sk_peek_offset(sk, flags);
- while (skip >= skb->len) {
- skip -= skb->len;
+ while (skip >= unix_skb_len(skb)) {
+ skip -= unix_skb_len(skb);
last = skb;
skb = skb_peek_next(skb, &sk->sk_receive_queue);
if (!skb)
@@ -2005,8 +2002,9 @@ again:
sunaddr = NULL;
}
- chunk = min_t(unsigned int, skb->len - skip, size);
- if (memcpy_toiovec(msg->msg_iov, skb->data + skip, chunk)) {
+ chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
+ if (skb_copy_datagram_iovec(skb, UNIXCB(skb).consumed + skip,
+ msg->msg_iov, chunk)) {
if (copied == 0)
copied = -EFAULT;
break;
@@ -2016,14 +2014,14 @@ again:
/* Mark read part of skb as used */
if (!(flags & MSG_PEEK)) {
- skb_pull(skb, chunk);
+ UNIXCB(skb).consumed += chunk;
sk_peek_offset_bwd(sk, chunk);
if (UNIXCB(skb).fp)
unix_detach_fds(siocb->scm, skb);
- if (skb->len)
+ if (unix_skb_len(skb))
break;
skb_unlink(skb, &sk->sk_receive_queue);
@@ -2107,7 +2105,7 @@ long unix_inq_len(struct sock *sk)
if (sk->sk_type == SOCK_STREAM ||
sk->sk_type == SOCK_SEQPACKET) {
skb_queue_walk(&sk->sk_receive_queue, skb)
- amount += skb->len;
+ amount += unix_skb_len(skb);
} else {
skb = skb_peek(&sk->sk_receive_queue);
if (skb)
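
The af_unix changes move stream sockets onto paged skbs: the linear head is capped at SKB_MAX_HEAD(0), the remainder lands in page frags up to UNIX_SKB_FRAGS_SZ (32768 bytes rounded to whole pages), and the receive path tracks a per-skb UNIXCB(skb).consumed offset via unix_skb_len() instead of calling skb_pull(), which would not work on frag data and would break MSG_PEEK offsets. A user-space sketch of the new send-side size split (the constants are illustrative stand-ins, and a sane sndbuf is assumed):

#include <stddef.h>

#define PAGE_SZ        4096u
#define SKB_MAX_HEAD0  (PAGE_SZ - 512u)   /* stand-in for SKB_MAX_HEAD(0) */
#define UNIX_FRAGS_SZ  32768u             /* stand-in for UNIX_SKB_FRAGS_SZ */

static void split_sizes(size_t want, size_t sndbuf_half,
                        size_t *head_len, size_t *frag_len)
{
        size_t size = want;

        if (size > sndbuf_half - 64)              /* keep two msgs queued */
                size = sndbuf_half - 64;
        if (size > SKB_MAX_HEAD0 + UNIX_FRAGS_SZ) /* allow order-0 fallback */
                size = SKB_MAX_HEAD0 + UNIX_FRAGS_SZ;

        *frag_len = size > SKB_MAX_HEAD0 ? size - SKB_MAX_HEAD0 : 0;
        *head_len = size - *frag_len;             /* linear part of the skb */
}
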
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 4d9334683f84..545c08b8a1d4 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -96,8 +96,7 @@
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/sock.h>
-
-#include "af_vsock.h"
+#include <net/af_vsock.h>
static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
static void vsock_sk_destruct(struct sock *sk);
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index ffc11df02af2..9d6986634e0b 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -34,8 +34,8 @@
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/sock.h>
+#include <net/af_vsock.h>
-#include "af_vsock.h"
#include "vmci_transport_notify.h"
static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg);
diff --git a/net/vmw_vsock/vmci_transport.h b/net/vmw_vsock/vmci_transport.h
index fd88ea8924e4..ce6c9623d5f0 100644
--- a/net/vmw_vsock/vmci_transport.h
+++ b/net/vmw_vsock/vmci_transport.h
@@ -19,8 +19,8 @@
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
-#include "vsock_addr.h"
-#include "af_vsock.h"
+#include <net/vsock_addr.h>
+#include <net/af_vsock.h>
/* If the packet format changes in a release then this should change too. */
#define VMCI_TRANSPORT_PACKET_VERSION 1
diff --git a/net/vmw_vsock/vsock_addr.c b/net/vmw_vsock/vsock_addr.c
index ec2611b4ea0e..82486ee55eac 100644
--- a/net/vmw_vsock/vsock_addr.c
+++ b/net/vmw_vsock/vsock_addr.c
@@ -17,8 +17,7 @@
#include <linux/socket.h>
#include <linux/stddef.h>
#include <net/sock.h>
-
-#include "vsock_addr.h"
+#include <net/vsock_addr.h>
void vsock_addr_init(struct sockaddr_vm *addr, u32 cid, u32 port)
{
diff --git a/net/wireless/core.c b/net/wireless/core.c
index a8c29fa4f1b3..67153964aad2 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -462,6 +462,14 @@ int wiphy_register(struct wiphy *wiphy)
return -EINVAL;
#endif
+ if (WARN_ON(wiphy->coalesce &&
+ (!wiphy->coalesce->n_rules ||
+ !wiphy->coalesce->n_patterns) &&
+ (!wiphy->coalesce->pattern_min_len ||
+ wiphy->coalesce->pattern_min_len >
+ wiphy->coalesce->pattern_max_len)))
+ return -EINVAL;
+
if (WARN_ON(wiphy->ap_sme_capa &&
!(wiphy->flags & WIPHY_FLAG_HAVE_AP_SME)))
return -EINVAL;
@@ -668,6 +676,7 @@ void wiphy_unregister(struct wiphy *wiphy)
rdev_set_wakeup(rdev, false);
#endif
cfg80211_rdev_free_wowlan(rdev);
+ cfg80211_rdev_free_coalesce(rdev);
}
EXPORT_SYMBOL(wiphy_unregister);
diff --git a/net/wireless/core.h b/net/wireless/core.h
index a6b45bf00f33..9ad43c619c54 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -79,6 +79,8 @@ struct cfg80211_registered_device {
/* netlink port which started critical protocol (0 means not started) */
u32 crit_proto_nlportid;
+ struct cfg80211_coalesce *coalesce;
+
/* must be last because of the way we do wiphy_priv(),
* and it should at least be aligned to NETDEV_ALIGN */
struct wiphy wiphy __aligned(NETDEV_ALIGN);
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 30c49202ee4d..0553fd4d85ae 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -167,9 +167,12 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
* basic rates
*/
if (!setup->basic_rates) {
+ enum nl80211_bss_scan_width scan_width;
struct ieee80211_supported_band *sband =
rdev->wiphy.bands[setup->chandef.chan->band];
- setup->basic_rates = ieee80211_mandatory_rates(sband);
+ scan_width = cfg80211_chandef_to_scan_width(&setup->chandef);
+ setup->basic_rates = ieee80211_mandatory_rates(sband,
+ scan_width);
}
if (!cfg80211_reg_can_beacon(&rdev->wiphy, &setup->chandef))
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index bfac5e186f57..8d49c1ce3dea 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -621,7 +621,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
}
bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
- const u8 *buf, size_t len, gfp_t gfp)
+ const u8 *buf, size_t len, u32 flags, gfp_t gfp)
{
struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
@@ -664,7 +664,7 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
/* Indicate the received Action frame to user space */
if (nl80211_send_mgmt(rdev, wdev, reg->nlportid,
freq, sig_mbm,
- buf, len, gfp))
+ buf, len, flags, gfp))
continue;
result = true;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 5f6e982cdcf4..af8d84a4a5b2 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -349,6 +349,11 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
[NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY,
.len = IEEE80211_MAX_DATA_LEN },
[NL80211_ATTR_PEER_AID] = { .type = NLA_U16 },
+ [NL80211_ATTR_CH_SWITCH_COUNT] = { .type = NLA_U32 },
+ [NL80211_ATTR_CH_SWITCH_BLOCK_TX] = { .type = NLA_FLAG },
+ [NL80211_ATTR_CSA_IES] = { .type = NLA_NESTED },
+ [NL80211_ATTR_CSA_C_OFF_BEACON] = { .type = NLA_U16 },
+ [NL80211_ATTR_CSA_C_OFF_PRESP] = { .type = NLA_U16 },
};
/* policy for the key attributes */
@@ -403,6 +408,14 @@ nl80211_wowlan_tcp_policy[NUM_NL80211_WOWLAN_TCP] = {
[NL80211_WOWLAN_TCP_WAKE_MASK] = { .len = 1 },
};
+/* policy for coalesce rule attributes */
+static const struct nla_policy
+nl80211_coalesce_policy[NUM_NL80211_ATTR_COALESCE_RULE] = {
+ [NL80211_ATTR_COALESCE_RULE_DELAY] = { .type = NLA_U32 },
+ [NL80211_ATTR_COALESCE_RULE_CONDITION] = { .type = NLA_U32 },
+ [NL80211_ATTR_COALESCE_RULE_PKT_PATTERN] = { .type = NLA_NESTED },
+};
+
/* policy for GTK rekey offload attributes */
static const struct nla_policy
nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = {
@@ -976,7 +989,7 @@ static int nl80211_send_wowlan(struct sk_buff *msg,
return -ENOBUFS;
if (dev->wiphy.wowlan->n_patterns) {
- struct nl80211_wowlan_pattern_support pat = {
+ struct nl80211_pattern_support pat = {
.max_patterns = dev->wiphy.wowlan->n_patterns,
.min_pattern_len = dev->wiphy.wowlan->pattern_min_len,
.max_pattern_len = dev->wiphy.wowlan->pattern_max_len,
@@ -997,6 +1010,27 @@ static int nl80211_send_wowlan(struct sk_buff *msg,
}
#endif
+static int nl80211_send_coalesce(struct sk_buff *msg,
+ struct cfg80211_registered_device *dev)
+{
+ struct nl80211_coalesce_rule_support rule;
+
+ if (!dev->wiphy.coalesce)
+ return 0;
+
+ rule.max_rules = dev->wiphy.coalesce->n_rules;
+ rule.max_delay = dev->wiphy.coalesce->max_delay;
+ rule.pat.max_patterns = dev->wiphy.coalesce->n_patterns;
+ rule.pat.min_pattern_len = dev->wiphy.coalesce->pattern_min_len;
+ rule.pat.max_pattern_len = dev->wiphy.coalesce->pattern_max_len;
+ rule.pat.max_pkt_offset = dev->wiphy.coalesce->max_pkt_offset;
+
+ if (nla_put(msg, NL80211_ATTR_COALESCE_RULE, sizeof(rule), &rule))
+ return -ENOBUFS;
+
+ return 0;
+}
+
static int nl80211_send_band_rateinfo(struct sk_buff *msg,
struct ieee80211_supported_band *sband)
{
@@ -1395,6 +1429,8 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
if (state->split) {
CMD(crit_proto_start, CRIT_PROTOCOL_START);
CMD(crit_proto_stop, CRIT_PROTOCOL_STOP);
+ if (dev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)
+ CMD(channel_switch, CHANNEL_SWITCH);
}
#ifdef CONFIG_NL80211_TESTMODE
@@ -1515,6 +1551,12 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
dev->wiphy.vht_capa_mod_mask))
goto nla_put_failure;
+ state->split_start++;
+ break;
+ case 10:
+ if (nl80211_send_coalesce(msg, dev))
+ goto nla_put_failure;
+
/* done */
state->split_start = 0;
break;
@@ -5580,6 +5622,111 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
return err;
}
+static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_csa_settings params;
+ /* csa_attrs is static to avoid wasting stack space; this function
+ * runs under the RTNL lock, so a single shared buffer is safe.
+ */
+ static struct nlattr *csa_attrs[NL80211_ATTR_MAX+1];
+ u8 radar_detect_width = 0;
+ int err;
+
+ if (!rdev->ops->channel_switch ||
+ !(rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH))
+ return -EOPNOTSUPP;
+
+ /* may add IBSS support later */
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
+ return -EOPNOTSUPP;
+
+ memset(&params, 0, sizeof(params));
+
+ if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] ||
+ !info->attrs[NL80211_ATTR_CH_SWITCH_COUNT])
+ return -EINVAL;
+
+ /* only important for AP; IBSS and mesh create IEs internally */
+ if (!info->attrs[NL80211_ATTR_CSA_IES])
+ return -EINVAL;
+
+ /* useless if AP is not running */
+ if (!wdev->beacon_interval)
+ return -EINVAL;
+
+ params.count = nla_get_u32(info->attrs[NL80211_ATTR_CH_SWITCH_COUNT]);
+
+ err = nl80211_parse_beacon(info->attrs, &params.beacon_after);
+ if (err)
+ return err;
+
+ err = nla_parse_nested(csa_attrs, NL80211_ATTR_MAX,
+ info->attrs[NL80211_ATTR_CSA_IES],
+ nl80211_policy);
+ if (err)
+ return err;
+
+ err = nl80211_parse_beacon(csa_attrs, &params.beacon_csa);
+ if (err)
+ return err;
+
+ if (!csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON])
+ return -EINVAL;
+
+ params.counter_offset_beacon =
+ nla_get_u16(csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON]);
+ if (params.counter_offset_beacon >= params.beacon_csa.tail_len)
+ return -EINVAL;
+
+ /* sanity check - counters should be the same */
+ if (params.beacon_csa.tail[params.counter_offset_beacon] !=
+ params.count)
+ return -EINVAL;
+
+ if (csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]) {
+ params.counter_offset_presp =
+ nla_get_u16(csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]);
+ if (params.counter_offset_presp >=
+ params.beacon_csa.probe_resp_len)
+ return -EINVAL;
+
+ if (params.beacon_csa.probe_resp[params.counter_offset_presp] !=
+ params.count)
+ return -EINVAL;
+ }
+
+ err = nl80211_parse_chandef(rdev, info, &params.chandef);
+ if (err)
+ return err;
+
+ if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef))
+ return -EINVAL;
+
+ err = cfg80211_chandef_dfs_required(wdev->wiphy, &params.chandef);
+ if (err < 0) {
+ return err;
+ } else if (err) {
+ radar_detect_width = BIT(params.chandef.width);
+ params.radar_required = true;
+ }
+
+ err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
+ params.chandef.chan,
+ CHAN_MODE_SHARED,
+ radar_detect_width);
+ if (err)
+ return err;
+
+ if (info->attrs[NL80211_ATTR_CH_SWITCH_BLOCK_TX])
+ params.block_tx = true;
+
+ return rdev_channel_switch(rdev, dev, &params);
+}
+
static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
u32 seq, int flags,
struct cfg80211_registered_device *rdev,
@@ -5641,6 +5788,7 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
goto nla_put_failure;
if (nla_put_u16(msg, NL80211_BSS_CAPABILITY, res->capability) ||
nla_put_u32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq) ||
+ nla_put_u32(msg, NL80211_BSS_CHAN_WIDTH, res->scan_width) ||
nla_put_u32(msg, NL80211_BSS_SEEN_MS_AGO,
jiffies_to_msecs(jiffies - intbss->ts)))
goto nla_put_failure;
@@ -6321,6 +6469,8 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
switch (ibss.chandef.width) {
+ case NL80211_CHAN_WIDTH_5:
+ case NL80211_CHAN_WIDTH_10:
case NL80211_CHAN_WIDTH_20_NOHT:
break;
case NL80211_CHAN_WIDTH_20:
@@ -6348,6 +6498,19 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
return err;
}
+ if (info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK])
+ memcpy(&ibss.ht_capa_mask,
+ nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]),
+ sizeof(ibss.ht_capa_mask));
+
+ if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) {
+ if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK])
+ return -EINVAL;
+ memcpy(&ibss.ht_capa,
+ nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]),
+ sizeof(ibss.ht_capa));
+ }
+
if (info->attrs[NL80211_ATTR_MCAST_RATE] &&
!nl80211_parse_mcast_rate(rdev, ibss.mcast_rate,
nla_get_u32(info->attrs[NL80211_ATTR_MCAST_RATE])))
@@ -6430,19 +6593,30 @@ static struct genl_multicast_group nl80211_testmode_mcgrp = {
static int nl80211_testmode_do(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct wireless_dev *wdev =
+ __cfg80211_wdev_from_attrs(genl_info_net(info), info->attrs);
int err;
+ if (!rdev->ops->testmode_cmd)
+ return -EOPNOTSUPP;
+
+ if (IS_ERR(wdev)) {
+ err = PTR_ERR(wdev);
+ if (err != -EINVAL)
+ return err;
+ wdev = NULL;
+ } else if (wdev->wiphy != &rdev->wiphy) {
+ return -EINVAL;
+ }
+
if (!info->attrs[NL80211_ATTR_TESTDATA])
return -EINVAL;
- err = -EOPNOTSUPP;
- if (rdev->ops->testmode_cmd) {
- rdev->testmode_info = info;
- err = rdev_testmode_cmd(rdev,
+ rdev->testmode_info = info;
+ err = rdev_testmode_cmd(rdev, wdev,
nla_data(info->attrs[NL80211_ATTR_TESTDATA]),
nla_len(info->attrs[NL80211_ATTR_TESTDATA]));
- rdev->testmode_info = NULL;
- }
+ rdev->testmode_info = NULL;
return err;
}
@@ -7404,14 +7578,12 @@ static int nl80211_set_cqm_txe(struct genl_info *info,
u32 rate, u32 pkts, u32 intvl)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
- struct wireless_dev *wdev;
struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
if (rate > 100 || intvl > NL80211_CQM_TXE_MAX_INTVL)
return -EINVAL;
- wdev = dev->ieee80211_ptr;
-
if (!rdev->ops->set_cqm_txe_config)
return -EOPNOTSUPP;
@@ -7426,13 +7598,15 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
s32 threshold, u32 hysteresis)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
- struct wireless_dev *wdev;
struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
if (threshold > 0)
return -EINVAL;
- wdev = dev->ieee80211_ptr;
+ /* disabling - hysteresis should also be zero then */
+ if (threshold == 0)
+ hysteresis = 0;
if (!rdev->ops->set_cqm_rssi_config)
return -EOPNOTSUPP;
@@ -7451,36 +7625,33 @@ static int nl80211_set_cqm(struct sk_buff *skb, struct genl_info *info)
int err;
cqm = info->attrs[NL80211_ATTR_CQM];
- if (!cqm) {
- err = -EINVAL;
- goto out;
- }
+ if (!cqm)
+ return -EINVAL;
err = nla_parse_nested(attrs, NL80211_ATTR_CQM_MAX, cqm,
nl80211_attr_cqm_policy);
if (err)
- goto out;
+ return err;
if (attrs[NL80211_ATTR_CQM_RSSI_THOLD] &&
attrs[NL80211_ATTR_CQM_RSSI_HYST]) {
- s32 threshold;
- u32 hysteresis;
- threshold = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_THOLD]);
- hysteresis = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_HYST]);
- err = nl80211_set_cqm_rssi(info, threshold, hysteresis);
- } else if (attrs[NL80211_ATTR_CQM_TXE_RATE] &&
- attrs[NL80211_ATTR_CQM_TXE_PKTS] &&
- attrs[NL80211_ATTR_CQM_TXE_INTVL]) {
- u32 rate, pkts, intvl;
- rate = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_RATE]);
- pkts = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_PKTS]);
- intvl = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_INTVL]);
- err = nl80211_set_cqm_txe(info, rate, pkts, intvl);
- } else
- err = -EINVAL;
+ s32 threshold = nla_get_s32(attrs[NL80211_ATTR_CQM_RSSI_THOLD]);
+ u32 hysteresis = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_HYST]);
-out:
- return err;
+ return nl80211_set_cqm_rssi(info, threshold, hysteresis);
+ }
+
+ if (attrs[NL80211_ATTR_CQM_TXE_RATE] &&
+ attrs[NL80211_ATTR_CQM_TXE_PKTS] &&
+ attrs[NL80211_ATTR_CQM_TXE_INTVL]) {
+ u32 rate = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_RATE]);
+ u32 pkts = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_PKTS]);
+ u32 intvl = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_INTVL]);
+
+ return nl80211_set_cqm_txe(info, rate, pkts, intvl);
+ }
+
+ return -EINVAL;
}
static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
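
Buried in the set_cqm cleanup is a small correctness fix: the RSSI threshold is now read with nla_get_s32() rather than nla_get_u32(), since thresholds are negative dBm values and the handler rejects anything above zero; hysteresis is also forced to zero when the threshold disables monitoring. An illustrative sketch of the signedness point:

#include <stdint.h>

/* Reading the attribute as signed states the intent explicitly:
 * 0xFFFFFFBA reinterpreted as s32 is -70 (dBm). */
static int32_t parse_rssi_thold(uint32_t raw_attr)
{
        int32_t threshold = (int32_t)raw_attr;

        if (threshold > 0)          /* only <= 0 is meaningful */
                return -1;          /* stand-in for -EINVAL */
        return threshold;
}
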
@@ -7596,12 +7767,11 @@ static int nl80211_send_wowlan_patterns(struct sk_buff *msg,
if (!nl_pat)
return -ENOBUFS;
pat_len = wowlan->patterns[i].pattern_len;
- if (nla_put(msg, NL80211_WOWLAN_PKTPAT_MASK,
- DIV_ROUND_UP(pat_len, 8),
+ if (nla_put(msg, NL80211_PKTPAT_MASK, DIV_ROUND_UP(pat_len, 8),
wowlan->patterns[i].mask) ||
- nla_put(msg, NL80211_WOWLAN_PKTPAT_PATTERN,
- pat_len, wowlan->patterns[i].pattern) ||
- nla_put_u32(msg, NL80211_WOWLAN_PKTPAT_OFFSET,
+ nla_put(msg, NL80211_PKTPAT_PATTERN, pat_len,
+ wowlan->patterns[i].pattern) ||
+ nla_put_u32(msg, NL80211_PKTPAT_OFFSET,
wowlan->patterns[i].pkt_offset))
return -ENOBUFS;
nla_nest_end(msg, nl_pat);
@@ -7942,7 +8112,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
struct nlattr *pat;
int n_patterns = 0;
int rem, pat_len, mask_len, pkt_offset;
- struct nlattr *pat_tb[NUM_NL80211_WOWLAN_PKTPAT];
+ struct nlattr *pat_tb[NUM_NL80211_PKTPAT];
nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN],
rem)
@@ -7961,26 +8131,25 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN],
rem) {
- nla_parse(pat_tb, MAX_NL80211_WOWLAN_PKTPAT,
- nla_data(pat), nla_len(pat), NULL);
+ nla_parse(pat_tb, MAX_NL80211_PKTPAT, nla_data(pat),
+ nla_len(pat), NULL);
err = -EINVAL;
- if (!pat_tb[NL80211_WOWLAN_PKTPAT_MASK] ||
- !pat_tb[NL80211_WOWLAN_PKTPAT_PATTERN])
+ if (!pat_tb[NL80211_PKTPAT_MASK] ||
+ !pat_tb[NL80211_PKTPAT_PATTERN])
goto error;
- pat_len = nla_len(pat_tb[NL80211_WOWLAN_PKTPAT_PATTERN]);
+ pat_len = nla_len(pat_tb[NL80211_PKTPAT_PATTERN]);
mask_len = DIV_ROUND_UP(pat_len, 8);
- if (nla_len(pat_tb[NL80211_WOWLAN_PKTPAT_MASK]) !=
- mask_len)
+ if (nla_len(pat_tb[NL80211_PKTPAT_MASK]) != mask_len)
goto error;
if (pat_len > wowlan->pattern_max_len ||
pat_len < wowlan->pattern_min_len)
goto error;
- if (!pat_tb[NL80211_WOWLAN_PKTPAT_OFFSET])
+ if (!pat_tb[NL80211_PKTPAT_OFFSET])
pkt_offset = 0;
else
pkt_offset = nla_get_u32(
- pat_tb[NL80211_WOWLAN_PKTPAT_OFFSET]);
+ pat_tb[NL80211_PKTPAT_OFFSET]);
if (pkt_offset > wowlan->max_pkt_offset)
goto error;
new_triggers.patterns[i].pkt_offset = pkt_offset;
@@ -7994,11 +8163,11 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
new_triggers.patterns[i].pattern =
new_triggers.patterns[i].mask + mask_len;
memcpy(new_triggers.patterns[i].mask,
- nla_data(pat_tb[NL80211_WOWLAN_PKTPAT_MASK]),
+ nla_data(pat_tb[NL80211_PKTPAT_MASK]),
mask_len);
new_triggers.patterns[i].pattern_len = pat_len;
memcpy(new_triggers.patterns[i].pattern,
- nla_data(pat_tb[NL80211_WOWLAN_PKTPAT_PATTERN]),
+ nla_data(pat_tb[NL80211_PKTPAT_PATTERN]),
pat_len);
i++;
}
@@ -8037,6 +8206,264 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
}
#endif
+static int nl80211_send_coalesce_rules(struct sk_buff *msg,
+ struct cfg80211_registered_device *rdev)
+{
+ struct nlattr *nl_pats, *nl_pat, *nl_rule, *nl_rules;
+ int i, j, pat_len;
+ struct cfg80211_coalesce_rules *rule;
+
+ if (!rdev->coalesce->n_rules)
+ return 0;
+
+ nl_rules = nla_nest_start(msg, NL80211_ATTR_COALESCE_RULE);
+ if (!nl_rules)
+ return -ENOBUFS;
+
+ for (i = 0; i < rdev->coalesce->n_rules; i++) {
+ nl_rule = nla_nest_start(msg, i + 1);
+ if (!nl_rule)
+ return -ENOBUFS;
+
+ rule = &rdev->coalesce->rules[i];
+ if (nla_put_u32(msg, NL80211_ATTR_COALESCE_RULE_DELAY,
+ rule->delay))
+ return -ENOBUFS;
+
+ if (nla_put_u32(msg, NL80211_ATTR_COALESCE_RULE_CONDITION,
+ rule->condition))
+ return -ENOBUFS;
+
+ nl_pats = nla_nest_start(msg,
+ NL80211_ATTR_COALESCE_RULE_PKT_PATTERN);
+ if (!nl_pats)
+ return -ENOBUFS;
+
+ for (j = 0; j < rule->n_patterns; j++) {
+ nl_pat = nla_nest_start(msg, j + 1);
+ if (!nl_pat)
+ return -ENOBUFS;
+ pat_len = rule->patterns[j].pattern_len;
+ if (nla_put(msg, NL80211_PKTPAT_MASK,
+ DIV_ROUND_UP(pat_len, 8),
+ rule->patterns[j].mask) ||
+ nla_put(msg, NL80211_PKTPAT_PATTERN, pat_len,
+ rule->patterns[j].pattern) ||
+ nla_put_u32(msg, NL80211_PKTPAT_OFFSET,
+ rule->patterns[j].pkt_offset))
+ return -ENOBUFS;
+ nla_nest_end(msg, nl_pat);
+ }
+ nla_nest_end(msg, nl_pats);
+ nla_nest_end(msg, nl_rule);
+ }
+ nla_nest_end(msg, nl_rules);
+
+ return 0;
+}
+
+static int nl80211_get_coalesce(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct sk_buff *msg;
+ void *hdr;
+
+ if (!rdev->wiphy.coalesce)
+ return -EOPNOTSUPP;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
+ NL80211_CMD_GET_COALESCE);
+ if (!hdr)
+ goto nla_put_failure;
+
+ if (rdev->coalesce && nl80211_send_coalesce_rules(msg, rdev))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return genlmsg_reply(msg, info);
+
+nla_put_failure:
+ nlmsg_free(msg);
+ return -ENOBUFS;
+}
+
+void cfg80211_rdev_free_coalesce(struct cfg80211_registered_device *rdev)
+{
+ struct cfg80211_coalesce *coalesce = rdev->coalesce;
+ int i, j;
+ struct cfg80211_coalesce_rules *rule;
+
+ if (!coalesce)
+ return;
+
+ for (i = 0; i < coalesce->n_rules; i++) {
+ rule = &coalesce->rules[i];
+ for (j = 0; j < rule->n_patterns; j++)
+ kfree(rule->patterns[j].mask);
+ kfree(rule->patterns);
+ }
+ kfree(coalesce->rules);
+ kfree(coalesce);
+ rdev->coalesce = NULL;
+}
+
+static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev,
+ struct nlattr *rule,
+ struct cfg80211_coalesce_rules *new_rule)
+{
+ int err, i;
+ const struct wiphy_coalesce_support *coalesce = rdev->wiphy.coalesce;
+ struct nlattr *tb[NUM_NL80211_ATTR_COALESCE_RULE], *pat;
+ int rem, pat_len, mask_len, pkt_offset, n_patterns = 0;
+ struct nlattr *pat_tb[NUM_NL80211_PKTPAT];
+
+ err = nla_parse(tb, NL80211_ATTR_COALESCE_RULE_MAX, nla_data(rule),
+ nla_len(rule), nl80211_coalesce_policy);
+ if (err)
+ return err;
+
+ if (tb[NL80211_ATTR_COALESCE_RULE_DELAY])
+ new_rule->delay =
+ nla_get_u32(tb[NL80211_ATTR_COALESCE_RULE_DELAY]);
+ if (new_rule->delay > coalesce->max_delay)
+ return -EINVAL;
+
+ if (tb[NL80211_ATTR_COALESCE_RULE_CONDITION])
+ new_rule->condition =
+ nla_get_u32(tb[NL80211_ATTR_COALESCE_RULE_CONDITION]);
+ if (new_rule->condition != NL80211_COALESCE_CONDITION_MATCH &&
+ new_rule->condition != NL80211_COALESCE_CONDITION_NO_MATCH)
+ return -EINVAL;
+
+ if (!tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN])
+ return -EINVAL;
+
+ nla_for_each_nested(pat, tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN],
+ rem)
+ n_patterns++;
+ if (n_patterns > coalesce->n_patterns)
+ return -EINVAL;
+
+ new_rule->patterns = kcalloc(n_patterns, sizeof(new_rule->patterns[0]),
+ GFP_KERNEL);
+ if (!new_rule->patterns)
+ return -ENOMEM;
+
+ new_rule->n_patterns = n_patterns;
+ i = 0;
+
+ nla_for_each_nested(pat, tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN],
+ rem) {
+ nla_parse(pat_tb, MAX_NL80211_PKTPAT, nla_data(pat),
+ nla_len(pat), NULL);
+ if (!pat_tb[NL80211_PKTPAT_MASK] ||
+ !pat_tb[NL80211_PKTPAT_PATTERN])
+ return -EINVAL;
+ pat_len = nla_len(pat_tb[NL80211_PKTPAT_PATTERN]);
+ mask_len = DIV_ROUND_UP(pat_len, 8);
+ if (nla_len(pat_tb[NL80211_PKTPAT_MASK]) != mask_len)
+ return -EINVAL;
+ if (pat_len > coalesce->pattern_max_len ||
+ pat_len < coalesce->pattern_min_len)
+ return -EINVAL;
+
+ if (!pat_tb[NL80211_PKTPAT_OFFSET])
+ pkt_offset = 0;
+ else
+ pkt_offset = nla_get_u32(pat_tb[NL80211_PKTPAT_OFFSET]);
+ if (pkt_offset > coalesce->max_pkt_offset)
+ return -EINVAL;
+ new_rule->patterns[i].pkt_offset = pkt_offset;
+
+ new_rule->patterns[i].mask =
+ kmalloc(mask_len + pat_len, GFP_KERNEL);
+ if (!new_rule->patterns[i].mask)
+ return -ENOMEM;
+ new_rule->patterns[i].pattern =
+ new_rule->patterns[i].mask + mask_len;
+ memcpy(new_rule->patterns[i].mask,
+ nla_data(pat_tb[NL80211_PKTPAT_MASK]), mask_len);
+ new_rule->patterns[i].pattern_len = pat_len;
+ memcpy(new_rule->patterns[i].pattern,
+ nla_data(pat_tb[NL80211_PKTPAT_PATTERN]), pat_len);
+ i++;
+ }
+
+ return 0;
+}
+
+static int nl80211_set_coalesce(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ const struct wiphy_coalesce_support *coalesce = rdev->wiphy.coalesce;
+ struct cfg80211_coalesce new_coalesce = {};
+ struct cfg80211_coalesce *n_coalesce;
+ int err, rem_rule, n_rules = 0, i, j;
+ struct nlattr *rule;
+ struct cfg80211_coalesce_rules *tmp_rule;
+
+ if (!rdev->wiphy.coalesce || !rdev->ops->set_coalesce)
+ return -EOPNOTSUPP;
+
+ if (!info->attrs[NL80211_ATTR_COALESCE_RULE]) {
+ cfg80211_rdev_free_coalesce(rdev);
+ rdev->ops->set_coalesce(&rdev->wiphy, NULL);
+ return 0;
+ }
+
+ nla_for_each_nested(rule, info->attrs[NL80211_ATTR_COALESCE_RULE],
+ rem_rule)
+ n_rules++;
+ if (n_rules > coalesce->n_rules)
+ return -EINVAL;
+
+ new_coalesce.rules = kcalloc(n_rules, sizeof(new_coalesce.rules[0]),
+ GFP_KERNEL);
+ if (!new_coalesce.rules)
+ return -ENOMEM;
+
+ new_coalesce.n_rules = n_rules;
+ i = 0;
+
+ nla_for_each_nested(rule, info->attrs[NL80211_ATTR_COALESCE_RULE],
+ rem_rule) {
+ err = nl80211_parse_coalesce_rule(rdev, rule,
+ &new_coalesce.rules[i]);
+ if (err)
+ goto error;
+
+ i++;
+ }
+
+ err = rdev->ops->set_coalesce(&rdev->wiphy, &new_coalesce);
+ if (err)
+ goto error;
+
+ n_coalesce = kmemdup(&new_coalesce, sizeof(new_coalesce), GFP_KERNEL);
+ if (!n_coalesce) {
+ err = -ENOMEM;
+ goto error;
+ }
+ cfg80211_rdev_free_coalesce(rdev);
+ rdev->coalesce = n_coalesce;
+
+ return 0;
+error:
+ for (i = 0; i < new_coalesce.n_rules; i++) {
+ tmp_rule = &new_coalesce.rules[i];
+ for (j = 0; j < tmp_rule->n_patterns; j++)
+ kfree(tmp_rule->patterns[j].mask);
+ kfree(tmp_rule->patterns);
+ }
+ kfree(new_coalesce.rules);
+
+ return err;
+}
+
static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -9043,7 +9470,30 @@ static struct genl_ops nl80211_ops[] = {
.flags = GENL_ADMIN_PERM,
.internal_flags = NL80211_FLAG_NEED_WDEV_UP |
NL80211_FLAG_NEED_RTNL,
- }
+ },
+ {
+ .cmd = NL80211_CMD_GET_COALESCE,
+ .doit = nl80211_get_coalesce,
+ .policy = nl80211_policy,
+ .internal_flags = NL80211_FLAG_NEED_WIPHY |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL80211_CMD_SET_COALESCE,
+ .doit = nl80211_set_coalesce,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_WIPHY |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL80211_CMD_CHANNEL_SWITCH,
+ .doit = nl80211_channel_switch,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
};
static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@ -10000,7 +10450,7 @@ EXPORT_SYMBOL(cfg80211_rx_unexpected_4addr_frame);
int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev, u32 nlportid,
int freq, int sig_dbm,
- const u8 *buf, size_t len, gfp_t gfp)
+ const u8 *buf, size_t len, u32 flags, gfp_t gfp)
{
struct net_device *netdev = wdev->netdev;
struct sk_buff *msg;
@@ -10023,7 +10473,9 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) ||
(sig_dbm &&
nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) ||
- nla_put(msg, NL80211_ATTR_FRAME, len, buf))
+ nla_put(msg, NL80211_ATTR_FRAME, len, buf) ||
+ (flags &&
+ nla_put_u32(msg, NL80211_ATTR_RXMGMT_FLAGS, flags)))
goto nla_put_failure;
genlmsg_end(msg, hdr);
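
The new GET/SET_COALESCE plumbing exchanges one nested NL80211_ATTR_COALESCE_RULE attribute per rule, each carrying a delay, a match condition, and a nested list of packet patterns that reuses the NL80211_PKTPAT_* layout shared with WoWLAN. Note the allocation trick in nl80211_parse_coalesce_rule(): each pattern's mask and pattern bytes share a single kmalloc, with the pattern pointer aimed at mask + mask_len, so freeing the mask frees both. A user-space sketch of that layout (hypothetical struct, same idea):

#include <stdlib.h>
#include <string.h>

struct pkt_pattern {
        unsigned char *mask;     /* DIV_ROUND_UP(pat_len, 8) bytes */
        unsigned char *pattern;  /* pat_len bytes, same allocation */
        int pattern_len;
        int pkt_offset;
};

static int pattern_copy(struct pkt_pattern *p,
                        const void *mask, int mask_len,
                        const void *pat, int pat_len, int offset)
{
        p->mask = malloc(mask_len + pat_len);  /* one buffer for both */
        if (!p->mask)
                return -1;
        p->pattern = p->mask + mask_len;       /* pattern rides behind mask */
        memcpy(p->mask, mask, mask_len);
        memcpy(p->pattern, pat, pat_len);
        p->pattern_len = pat_len;
        p->pkt_offset = offset;
        return 0;                              /* free(p->mask) frees both */
}
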
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index a4073e808c13..2c0f2b3c07cb 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -66,7 +66,7 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev, u32 nlpid,
int freq, int sig_dbm,
- const u8 *buf, size_t len, gfp_t gfp);
+ const u8 *buf, size_t len, u32 flags, gfp_t gfp);
void
nl80211_radar_notify(struct cfg80211_registered_device *rdev,
@@ -74,4 +74,6 @@ nl80211_radar_notify(struct cfg80211_registered_device *rdev,
enum nl80211_radar_event event,
struct net_device *netdev, gfp_t gfp);
+void cfg80211_rdev_free_coalesce(struct cfg80211_registered_device *rdev);
+
#endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 9f15f0ac824d..37ce9fdfe934 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -516,11 +516,12 @@ static inline void rdev_rfkill_poll(struct cfg80211_registered_device *rdev)
#ifdef CONFIG_NL80211_TESTMODE
static inline int rdev_testmode_cmd(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev,
void *data, int len)
{
int ret;
- trace_rdev_testmode_cmd(&rdev->wiphy);
- ret = rdev->ops->testmode_cmd(&rdev->wiphy, data, len);
+ trace_rdev_testmode_cmd(&rdev->wiphy, wdev);
+ ret = rdev->ops->testmode_cmd(&rdev->wiphy, wdev, data, len);
trace_rdev_return_int(&rdev->wiphy, ret);
return ret;
}
@@ -923,4 +924,16 @@ static inline void rdev_crit_proto_stop(struct cfg80211_registered_device *rdev,
trace_rdev_return_void(&rdev->wiphy);
}
+static inline int rdev_channel_switch(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct cfg80211_csa_settings *params)
+{
+ int ret;
+
+ trace_rdev_channel_switch(&rdev->wiphy, dev, params);
+ ret = rdev->ops->channel_switch(&rdev->wiphy, dev, params);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+ return ret;
+}
+
#endif /* __CFG80211_RDEV_OPS */
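
rdev_channel_switch() follows this file's usual trace-enter/call/trace-return pattern around the new channel_switch driver op. On the nl80211 side, the handler verifies that the CSA countdown byte userspace points at actually sits inside the supplied beacon template and already holds the advertised count, so the driver can decrement it in place on every beacon. A stand-alone sketch of that offset check:

#include <stdbool.h>
#include <stddef.h>

/* Mirrors the counter_offset_beacon validation in
 * nl80211_channel_switch(); plain buffers instead of beacon data. */
static bool csa_offset_ok(const unsigned char *tail, size_t tail_len,
                          size_t counter_offset, unsigned char count)
{
        if (counter_offset >= tail_len)        /* offset inside template? */
                return false;
        return tail[counter_offset] == count;  /* counter byte agrees? */
}
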
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index ae8c186b50d6..eeb71480f1af 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -465,10 +465,6 @@ static int cmp_bss(struct cfg80211_bss *a,
}
}
- /*
- * we can't use compare_ether_addr here since we need a < > operator.
- * The binary return value of compare_ether_addr isn't enough
- */
r = memcmp(a->bssid, b->bssid, sizeof(a->bssid));
if (r)
return r;
@@ -651,6 +647,8 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev,
continue;
if (bss->pub.channel != new->pub.channel)
continue;
+ if (bss->pub.scan_width != new->pub.scan_width)
+ continue;
if (rcu_access_pointer(bss->pub.beacon_ies))
continue;
ies = rcu_access_pointer(bss->pub.ies);
@@ -870,11 +868,12 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
/* Returned bss is reference counted and must be cleaned up appropriately. */
struct cfg80211_bss*
-cfg80211_inform_bss(struct wiphy *wiphy,
- struct ieee80211_channel *channel,
- const u8 *bssid, u64 tsf, u16 capability,
- u16 beacon_interval, const u8 *ie, size_t ielen,
- s32 signal, gfp_t gfp)
+cfg80211_inform_bss_width(struct wiphy *wiphy,
+ struct ieee80211_channel *channel,
+ enum nl80211_bss_scan_width scan_width,
+ const u8 *bssid, u64 tsf, u16 capability,
+ u16 beacon_interval, const u8 *ie, size_t ielen,
+ s32 signal, gfp_t gfp)
{
struct cfg80211_bss_ies *ies;
struct cfg80211_internal_bss tmp = {}, *res;
@@ -892,6 +891,7 @@ cfg80211_inform_bss(struct wiphy *wiphy,
memcpy(tmp.pub.bssid, bssid, ETH_ALEN);
tmp.pub.channel = channel;
+ tmp.pub.scan_width = scan_width;
tmp.pub.signal = signal;
tmp.pub.beacon_interval = beacon_interval;
tmp.pub.capability = capability;
@@ -924,14 +924,15 @@ cfg80211_inform_bss(struct wiphy *wiphy,
/* cfg80211_bss_update gives us a referenced result */
return &res->pub;
}
-EXPORT_SYMBOL(cfg80211_inform_bss);
+EXPORT_SYMBOL(cfg80211_inform_bss_width);
/* Returned bss is reference counted and must be cleaned up appropriately. */
struct cfg80211_bss *
-cfg80211_inform_bss_frame(struct wiphy *wiphy,
- struct ieee80211_channel *channel,
- struct ieee80211_mgmt *mgmt, size_t len,
- s32 signal, gfp_t gfp)
+cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
+ struct ieee80211_channel *channel,
+ enum nl80211_bss_scan_width scan_width,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ s32 signal, gfp_t gfp)
{
struct cfg80211_internal_bss tmp = {}, *res;
struct cfg80211_bss_ies *ies;
@@ -941,7 +942,8 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.probe_resp.variable) !=
offsetof(struct ieee80211_mgmt, u.beacon.variable));
- trace_cfg80211_inform_bss_frame(wiphy, channel, mgmt, len, signal);
+ trace_cfg80211_inform_bss_width_frame(wiphy, channel, scan_width, mgmt,
+ len, signal);
if (WARN_ON(!mgmt))
return NULL;
@@ -976,6 +978,7 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
memcpy(tmp.pub.bssid, mgmt->bssid, ETH_ALEN);
tmp.pub.channel = channel;
+ tmp.pub.scan_width = scan_width;
tmp.pub.signal = signal;
tmp.pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
@@ -991,7 +994,7 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
/* cfg80211_bss_update gives us a referenced result */
return &res->pub;
}
-EXPORT_SYMBOL(cfg80211_inform_bss_frame);
+EXPORT_SYMBOL(cfg80211_inform_bss_width_frame);
void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
{
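
Renaming the two exports would break every existing caller, so the old names presumably survive as static inline shims in include/net/cfg80211.h (not part of this hunk) that default to regular 20 MHz width, along the lines of:

	static inline struct cfg80211_bss *
	cfg80211_inform_bss(struct wiphy *wiphy,
			    struct ieee80211_channel *channel,
			    const u8 *bssid, u64 tsf, u16 capability,
			    u16 beacon_interval, const u8 *ie, size_t ielen,
			    s32 signal, gfp_t gfp)
	{
		return cfg80211_inform_bss_width(wiphy, channel,
						 NL80211_BSS_CHAN_WIDTH_20,
						 bssid, tsf, capability,
						 beacon_interval, ie, ielen,
						 signal, gfp);
	}

The added scan_width comparison in cfg80211_combine_bsses() likewise keeps 5/10 MHz scan results from being merged with full-width entries for the same BSS.
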
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index e1534baf2ebb..ba5f0d6614d5 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -1293,15 +1293,17 @@ TRACE_EVENT(rdev_return_int_int,
#ifdef CONFIG_NL80211_TESTMODE
TRACE_EVENT(rdev_testmode_cmd,
- TP_PROTO(struct wiphy *wiphy),
- TP_ARGS(wiphy),
+ TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
+ TP_ARGS(wiphy, wdev),
TP_STRUCT__entry(
WIPHY_ENTRY
+ WDEV_ENTRY
),
TP_fast_assign(
WIPHY_ASSIGN;
+ WDEV_ASSIGN;
),
- TP_printk(WIPHY_PR_FMT, WIPHY_PR_ARG)
+ TP_printk(WIPHY_PR_FMT WDEV_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG)
);
TRACE_EVENT(rdev_testmode_dump,
@@ -1841,6 +1843,39 @@ TRACE_EVENT(rdev_crit_proto_stop,
WIPHY_PR_ARG, WDEV_PR_ARG)
);
+TRACE_EVENT(rdev_channel_switch,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_csa_settings *params),
+ TP_ARGS(wiphy, netdev, params),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ CHAN_DEF_ENTRY
+ __field(u16, counter_offset_beacon)
+ __field(u16, counter_offset_presp)
+ __field(bool, radar_required)
+ __field(bool, block_tx)
+ __field(u8, count)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ CHAN_DEF_ASSIGN(&params->chandef);
+ __entry->counter_offset_beacon = params->counter_offset_beacon;
+ __entry->counter_offset_presp = params->counter_offset_presp;
+ __entry->radar_required = params->radar_required;
+ __entry->block_tx = params->block_tx;
+ __entry->count = params->count;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT
+ ", block_tx: %d, count: %u, radar_required: %d"
+ ", counter offsets (beacon/presp): %u/%u",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG,
+ __entry->block_tx, __entry->count, __entry->radar_required,
+ __entry->counter_offset_beacon,
+ __entry->counter_offset_presp)
+);
+
/*************************************************************
* cfg80211 exported functions traces *
*************************************************************/
@@ -2391,26 +2426,30 @@ TRACE_EVENT(cfg80211_get_bss,
__entry->capa_mask, __entry->capa_val)
);
-TRACE_EVENT(cfg80211_inform_bss_frame,
+TRACE_EVENT(cfg80211_inform_bss_width_frame,
TP_PROTO(struct wiphy *wiphy, struct ieee80211_channel *channel,
+ enum nl80211_bss_scan_width scan_width,
struct ieee80211_mgmt *mgmt, size_t len,
s32 signal),
- TP_ARGS(wiphy, channel, mgmt, len, signal),
+ TP_ARGS(wiphy, channel, scan_width, mgmt, len, signal),
TP_STRUCT__entry(
WIPHY_ENTRY
CHAN_ENTRY
+ __field(enum nl80211_bss_scan_width, scan_width)
__dynamic_array(u8, mgmt, len)
__field(s32, signal)
),
TP_fast_assign(
WIPHY_ASSIGN;
CHAN_ASSIGN(channel);
+ __entry->scan_width = scan_width;
if (mgmt)
memcpy(__get_dynamic_array(mgmt), mgmt, len);
__entry->signal = signal;
),
- TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT "signal: %d",
- WIPHY_PR_ARG, CHAN_PR_ARG, __entry->signal)
+ TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT " (scan_width: %d) signal: %d",
+ WIPHY_PR_ARG, CHAN_PR_ARG, __entry->scan_width,
+ __entry->signal)
);
DECLARE_EVENT_CLASS(cfg80211_bss_evt,
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 74458b7f61eb..ce090c1c5e4f 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -33,7 +33,8 @@ ieee80211_get_response_rate(struct ieee80211_supported_band *sband,
}
EXPORT_SYMBOL(ieee80211_get_response_rate);
-u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband)
+u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband,
+ enum nl80211_bss_scan_width scan_width)
{
struct ieee80211_rate *bitrates;
u32 mandatory_rates = 0;
@@ -43,10 +44,15 @@ u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband)
if (WARN_ON(!sband))
return 1;
- if (sband->band == IEEE80211_BAND_2GHZ)
- mandatory_flag = IEEE80211_RATE_MANDATORY_B;
- else
+ if (sband->band == IEEE80211_BAND_2GHZ) {
+ if (scan_width == NL80211_BSS_CHAN_WIDTH_5 ||
+ scan_width == NL80211_BSS_CHAN_WIDTH_10)
+ mandatory_flag = IEEE80211_RATE_MANDATORY_G;
+ else
+ mandatory_flag = IEEE80211_RATE_MANDATORY_B;
+ } else {
mandatory_flag = IEEE80211_RATE_MANDATORY_A;
+ }
bitrates = sband->bitrates;
for (i = 0; i < sband->n_bitrates; i++)
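
The band alone no longer determines the mandatory rate set: 5 and 10 MHz channels cannot carry the DSSS/CCK (802.11b) rates, so narrow-channel operation on 2.4 GHz falls back to the OFDM (802.11g) mandatory set, while 5 GHz keeps the 802.11a set as before. Callers simply pass the scan width along, e.g.:

	/* basic rates for joining a 10 MHz BSS on whichever band sband is */
	u32 basic_rates = ieee80211_mandatory_rates(sband,
						    NL80211_BSS_CHAN_WIDTH_10);
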
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index 66c638730c7a..b8253250d723 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -156,6 +156,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
case X25_FAC_CALLING_AE:
if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
return -1;
+ if (p[2] > X25_MAX_AE_LEN)
+ return -1;
dte_facs->calling_len = p[2];
memcpy(dte_facs->calling_ae, &p[3], p[1] - 1);
*vc_fac_mask |= X25_MASK_CALLING_AE;
@@ -163,6 +165,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
case X25_FAC_CALLED_AE:
if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
return -1;
+ if (p[2] > X25_MAX_AE_LEN)
+ return -1;
dte_facs->called_len = p[2];
memcpy(dte_facs->called_ae, &p[3], p[1] - 1);
*vc_fac_mask |= X25_MASK_CALLED_AE;
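
p[1] already bounds the memcpy() into the AE buffer, but p[2] is the address length that gets stored into calling_len/called_len, and nothing validated it before. A sketch of the downstream pattern the new check hardens, assuming calling_ae is a fixed X25_MAX_AE_LEN-byte array:

	u8 out[X25_MAX_AE_LEN];

	/* any consumer that trusts the stored length would read past the
	 * end of calling_ae if calling_len exceeded X25_MAX_AE_LEN */
	memcpy(out, dte_facs->calling_ae, dte_facs->calling_len);
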
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index f77c371ea72b..ed38d5d81f9e 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -308,7 +308,7 @@ void xfrm_policy_destroy(struct xfrm_policy *policy)
{
BUG_ON(!policy->walk.dead);
- if (del_timer(&policy->timer))
+ if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
BUG();
security_xfrm_policy_free(policy->security);
@@ -658,7 +658,13 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
xfrm_pol_hold(policy);
net->xfrm.policy_count[dir]++;
atomic_inc(&flow_cache_genid);
- rt_genid_bump(net);
+
+ /* After the previous checks, family can only be AF_INET or AF_INET6 */
+ if (policy->family == AF_INET)
+ rt_genid_bump_ipv4(net);
+ else
+ rt_genid_bump_ipv6(net);
+
if (delpol) {
xfrm_policy_requeue(delpol, policy);
__xfrm_policy_unlink(delpol, dir);
@@ -2119,8 +2125,6 @@ restart:
* have the xfrm_state's. We need to wait for KM to
* negotiate new SA's or bail out with error.*/
if (net->xfrm.sysctl_larval_drop) {
- /* EREMOTE tells the caller to generate
- * a one-shot blackhole route. */
dst_release(dst);
xfrm_pols_put(pols, drop_pols);
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
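
Two independent changes here: xfrm_policy_destroy() now also treats a still-pending per-policy queue hold_timer as a bug, mirroring the existing policy->timer check, and inserting a policy invalidates only the cached routes of the affected address family instead of bumping both. The per-family helpers presumably compose like this (see also the selinux hunk below, which uses rt_genid_bump_all()):

	/* presumed shape of the split helpers in net_namespace.h */
	static inline void rt_genid_bump_all(struct net *net)
	{
		rt_genid_bump_ipv4(net);
		rt_genid_bump_ipv6(net);
	}
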
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 54c0acd29468..b9c3f9e943a9 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -496,7 +496,8 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
INIT_HLIST_NODE(&x->bydst);
INIT_HLIST_NODE(&x->bysrc);
INIT_HLIST_NODE(&x->byspi);
- tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+ tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler,
+ CLOCK_BOOTTIME, HRTIMER_MODE_ABS);
setup_timer(&x->rtimer, xfrm_replay_timer_handler,
(unsigned long)x);
x->curlft.add_time = get_seconds();
@@ -987,11 +988,13 @@ void xfrm_state_insert(struct xfrm_state *x)
EXPORT_SYMBOL(xfrm_state_insert);
/* xfrm_state_lock is held */
-static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m,
+static struct xfrm_state *__find_acq_core(struct net *net,
+ const struct xfrm_mark *m,
unsigned short family, u8 mode,
u32 reqid, u8 proto,
const xfrm_address_t *daddr,
- const xfrm_address_t *saddr, int create)
+ const xfrm_address_t *saddr,
+ int create)
{
unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
struct xfrm_state *x;
@@ -1396,9 +1399,9 @@ xfrm_state_lookup_byaddr(struct net *net, u32 mark,
EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
struct xfrm_state *
-xfrm_find_acq(struct net *net, struct xfrm_mark *mark, u8 mode, u32 reqid, u8 proto,
- const xfrm_address_t *daddr, const xfrm_address_t *saddr,
- int create, unsigned short family)
+xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid,
+ u8 proto, const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr, int create, unsigned short family)
{
struct xfrm_state *x;
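
Switching the state lifetime timer from CLOCK_REALTIME to CLOCK_BOOTTIME matters because the timer is armed in absolute mode: a wall-clock step (settimeofday(), NTP) would make a REALTIME-based expiry fire early or late, while BOOTTIME is monotonic and also keeps advancing across suspend. A minimal sketch of the pattern (callback setup omitted):

	struct hrtimer t;

	hrtimer_init(&t, CLOCK_BOOTTIME, HRTIMER_MODE_ABS);
	/* expire one hour from now, immune to wall-clock steps */
	hrtimer_start(&t, ktime_add(ktime_get_boottime(), ktime_set(3600, 0)),
		      HRTIMER_MODE_ABS);

The other two hunks above only constify the xfrm_mark arguments and reflow the prototypes; behaviour is unchanged.
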
diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
index 65f67cb0aefb..6713f04e30ba 100644
--- a/security/selinux/include/xfrm.h
+++ b/security/selinux/include/xfrm.h
@@ -50,8 +50,13 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
static inline void selinux_xfrm_notify_policyload(void)
{
+ struct net *net;
+
atomic_inc(&flow_cache_genid);
- rt_genid_bump(&init_net);
+ rtnl_lock();
+ for_each_net(net)
+ rt_genid_bump_all(net);
+ rtnl_unlock();
}
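/* note: for_each_net() must be called under rtnl_lock() (or equivalent)
 * to walk the namespace list safely, which is why it is taken above;
 * bumping every namespace rather than just init_net ensures cached
 * routes everywhere are revalidated after a SELinux policy (re)load.
 * rt_genid_bump_all() presumably bumps both the IPv4 and IPv6 generation
 * counters, matching the per-family split in the xfrm_policy.c hunk. */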
#else
static inline int selinux_xfrm_enabled(void)