author     Jakub Kicinski <kuba@kernel.org>   2021-04-09 20:46:01 -0700
committer  Jakub Kicinski <kuba@kernel.org>   2021-04-09 20:48:35 -0700
commit     8859a44ea0df92bccdc942ef15781ebbfe0ad9f3 (patch)
tree       bffdd602a7d6ad5060df41789af75965f9d58270
parent     6c5e6b4ccc1bb9ac56579a9aed25d517d2318be6 (diff)
parent     4e04e7513b0fa2fe8966a1c83fb473f1667e2810 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Conflicts: MAINTAINERS - keep Chandrasekar drivers/net/ethernet/mellanox/mlx5/core/en_main.c - simple fix + trust the code re-added to param.c in -next is fine include/linux/bpf.h - trivial include/linux/ethtool.h - trivial, fix kdoc while at it include/linux/skmsg.h - move to relevant place in tcp.c, comment re-wrapped net/core/skmsg.c - add the sk = sk // sk = NULL around calls net/tipc/crypto.c - trivial Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-rw-r--r--Documentation/devicetree/bindings/hwmon/ntc_thermistor.txt2
-rw-r--r--Documentation/devicetree/bindings/iio/adc/ingenic,adc.yaml5
-rw-r--r--Documentation/devicetree/bindings/input/adc-joystick.yaml4
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/resistive-adc-touch.txt5
-rw-r--r--Documentation/devicetree/bindings/mfd/ab8500.txt4
-rw-r--r--Documentation/devicetree/bindings/mfd/motorola-cpcap.txt16
-rw-r--r--Documentation/devicetree/bindings/net/brcm,bcm4908-enet.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/ethernet-controller.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/micrel-ksz90x1.txt96
-rw-r--r--Documentation/networking/ethtool-netlink.rst10
-rw-r--r--MAINTAINERS52
-rw-r--r--Makefile2
-rw-r--r--arch/arc/boot/dts/haps_hs.dts2
-rw-r--r--arch/arc/kernel/signal.c4
-rw-r--r--arch/arc/kernel/unwind.c27
-rw-r--r--arch/arm/boot/dts/am33xx.dtsi3
-rw-r--r--arch/arm/boot/dts/armada-385-turris-omnia.dts4
-rw-r--r--arch/arm/boot/dts/at91-sam9x60ek.dts8
-rw-r--r--arch/arm/boot/dts/at91-sama5d27_som1.dtsi4
-rw-r--r--arch/arm/boot/dts/bcm2711.dtsi12
-rw-r--r--arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6ul-14x14-evk.dtsi22
-rw-r--r--arch/arm/boot/dts/imx6ull-myir-mys-6ulx-eval.dts1
-rw-r--r--arch/arm/boot/dts/omap4.dtsi5
-rw-r--r--arch/arm/boot/dts/omap44xx-clocks.dtsi8
-rw-r--r--arch/arm/boot/dts/omap5.dtsi5
-rw-r--r--arch/arm/boot/dts/sam9x60.dtsi9
-rw-r--r--arch/arm/mach-imx/avic.c16
-rw-r--r--arch/arm/mach-imx/common.h1
-rw-r--r--arch/arm/mach-imx/mach-imx1.c11
-rw-r--r--arch/arm/mach-imx/mach-imx25.c12
-rw-r--r--arch/arm/mach-imx/mach-imx27.c12
-rw-r--r--arch/arm/mach-imx/mach-imx31.c1
-rw-r--r--arch/arm/mach-imx/mach-imx35.c1
-rw-r--r--arch/arm/mach-imx/mm-imx3.c24
-rw-r--r--arch/arm/mach-keystone/keystone.c4
-rw-r--r--arch/arm/mach-omap1/ams-delta-fiq-handler.S1
-rw-r--r--arch/arm/mach-omap2/omap-secure.c39
-rw-r--r--arch/arm/mach-omap2/omap-secure.h1
-rw-r--r--arch/arm/mach-omap2/pmic-cpcap.c4
-rw-r--r--arch/arm/mach-omap2/sr_device.c75
-rw-r--r--arch/arm/mach-pxa/mainstone.c8
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp11x.dtsi6
-rw-r--r--arch/arm64/include/asm/kvm_arm.h1
-rw-r--r--arch/arm64/kernel/cpufeature.c1
-rw-r--r--arch/arm64/kvm/debug.c2
-rw-r--r--arch/arm64/kvm/hyp/vgic-v3-sr.c9
-rw-r--r--arch/mips/kernel/setup.c2
-rw-r--r--arch/parisc/include/asm/cmpxchg.h2
-rw-r--r--arch/parisc/include/asm/processor.h1
-rw-r--r--arch/parisc/math-emu/fpu.h32
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c3
-rw-r--r--arch/powerpc/platforms/pseries/mobility.c48
-rw-r--r--arch/riscv/Kconfig2
-rw-r--r--arch/riscv/include/asm/uaccess.h7
-rw-r--r--arch/riscv/kernel/entry.S1
-rw-r--r--arch/riscv/kernel/stacktrace.c2
-rw-r--r--arch/riscv/mm/kasan_init.c2
-rw-r--r--arch/s390/include/asm/stacktrace.h1
-rw-r--r--arch/s390/include/asm/vdso/data.h2
-rw-r--r--arch/s390/kernel/cpcmd.c6
-rw-r--r--arch/s390/kernel/dumpstack.c12
-rw-r--r--arch/s390/kernel/irq.c2
-rw-r--r--arch/s390/kernel/setup.c2
-rw-r--r--arch/s390/kernel/time.c10
-rw-r--r--arch/x86/Makefile2
-rw-r--r--arch/x86/include/asm/smp.h1
-rw-r--r--arch/x86/include/asm/xen/page.h12
-rw-r--r--arch/x86/kernel/acpi/boot.c25
-rw-r--r--arch/x86/kernel/setup.c8
-rw-r--r--arch/x86/kernel/smpboot.c26
-rw-r--r--arch/x86/kvm/Makefile2
-rw-r--r--arch/x86/kvm/mmu/mmu.c9
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.c26
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.h24
-rw-r--r--arch/x86/kvm/svm/nested.c28
-rw-r--r--arch/x86/kvm/svm/pmu.c8
-rw-r--r--arch/x86/kvm/x86.c57
-rw-r--r--arch/x86/kvm/x86.h1
-rw-r--r--arch/x86/mm/mem_encrypt.c2
-rw-r--r--arch/x86/net/bpf_jit_comp.c11
-rw-r--r--arch/x86/net/bpf_jit_comp32.c11
-rw-r--r--arch/x86/xen/p2m.c7
-rw-r--r--arch/x86/xen/setup.c16
-rw-r--r--arch/xtensa/kernel/coprocessor.S64
-rw-r--r--arch/xtensa/mm/fault.c5
-rw-r--r--block/bio.c23
-rw-r--r--block/blk-merge.c8
-rw-r--r--block/blk-mq-debugfs.c1
-rw-r--r--block/partitions/core.c7
-rw-r--r--drivers/acpi/acpica/nsaccess.c3
-rw-r--r--drivers/acpi/internal.h6
-rw-r--r--drivers/acpi/processor_idle.c5
-rw-r--r--drivers/acpi/scan.c45
-rw-r--r--drivers/acpi/tables.c42
-rw-r--r--drivers/acpi/video_detect.c1
-rw-r--r--drivers/auxdisplay/charlcd.c18
-rw-r--r--drivers/base/dd.c3
-rw-r--r--drivers/base/power/runtime.c55
-rw-r--r--drivers/block/null_blk/main.c26
-rw-r--r--drivers/block/null_blk/null_blk.h1
-rw-r--r--drivers/block/xen-blkback/blkback.c2
-rw-r--r--drivers/bluetooth/btusb.c7
-rw-r--r--drivers/bus/mvebu-mbus.c2
-rw-r--r--drivers/bus/omap_l3_noc.c4
-rw-r--r--drivers/bus/ti-sysc.c4
-rw-r--r--drivers/char/agp/Kconfig2
-rw-r--r--drivers/cpufreq/freq_table.c4
-rw-r--r--drivers/extcon/extcon.c1
-rw-r--r--drivers/firewire/nosy.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c132
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c89
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c15
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c60
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c74
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c24
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c25
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c5
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_acpi.c22
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c75
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c13
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c29
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.h5
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c2
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c12
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_power.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c108
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c12
-rw-r--r--drivers/gpu/drm/msm/dp/dp_aux.c7
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll.h6
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c11
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c7
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c13
-rw-r--r--drivers/gpu/drm/msm/msm_fence.c2
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c13
-rw-r--r--drivers/gpu/drm/panel/panel-dsi-cm.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c31
-rw-r--r--drivers/gpu/drm/tegra/dc.c30
-rw-r--r--drivers/gpu/drm/tegra/sor.c7
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c17
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c1
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.c6
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_conn.h1
-rw-r--r--drivers/gpu/host1x/bus.c10
-rw-r--r--drivers/infiniband/core/addr.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c3
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c21
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h1
-rw-r--r--drivers/infiniband/hw/hfi1/init.c10
-rw-r--r--drivers/infiniband/hw/hfi1/netdev_rx.c3
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c3
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c2
-rw-r--r--drivers/interconnect/bulk.c2
-rw-r--r--drivers/interconnect/core.c2
-rw-r--r--drivers/interconnect/qcom/msm8939.c16
-rw-r--r--drivers/md/dm-ioctl.c2
-rw-r--r--drivers/md/dm-table.c33
-rw-r--r--drivers/md/dm-verity-target.c2
-rw-r--r--drivers/md/dm-zoned-target.c2
-rw-r--r--drivers/md/dm.c5
-rw-r--r--drivers/misc/mei/client.c17
-rw-r--r--drivers/net/can/spi/mcp251x.c24
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c6
-rw-r--r--drivers/net/dsa/lantiq_gswip.c195
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c5
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h6
-rw-r--r--drivers/net/ethernet/broadcom/bcm4908_enet.c1
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c23
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c3
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c55
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c30
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c12
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c38
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_nl.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c53
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c15
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c68
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c7
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c8
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/cmsg.c1
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h8
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c16
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c48
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h12
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c12
-rw-r--r--drivers/net/geneve.c24
-rw-r--r--drivers/net/ieee802154/atusb.c1
-rw-r--r--drivers/net/phy/bcm-phy-lib.c13
-rw-r--r--drivers/net/tun.c48
-rw-r--r--drivers/net/usb/hso.c33
-rw-r--r--drivers/net/virtio_net.c10
-rw-r--r--drivers/net/vxlan.c18
-rw-r--r--drivers/net/wan/hdlc_fr.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rfi.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/regs.h4
-rw-r--r--drivers/net/wireless/virt_wifi.c5
-rw-r--r--drivers/of/fdt.c36
-rw-r--r--drivers/of/of_private.h2
-rw-r--r--drivers/of/overlay.c24
-rw-r--r--drivers/of/property.c11
-rw-r--r--drivers/of/unittest.c22
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c9
-rw-r--r--drivers/pinctrl/pinctrl-microchip-sgpio.c2
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c13
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7280.c16
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx55.c2
-rw-r--r--drivers/platform/x86/intel-hid.c16
-rw-r--r--drivers/regulator/bd9571mwv-regulator.c8
-rw-r--r--drivers/remoteproc/pru_rproc.c20
-rw-r--r--drivers/remoteproc/qcom_pil_info.c2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c67
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c8
-rw-r--r--drivers/scsi/qedi/qedi_main.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c13
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c4
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c14
-rw-r--r--drivers/soc/fsl/qbman/qman.c2
-rw-r--r--drivers/soc/litex/litex_soc_ctrl.c1
-rw-r--r--drivers/soc/qcom/qcom-geni-se.c74
-rw-r--r--drivers/soc/ti/omap_prm.c8
-rw-r--r--drivers/staging/rtl8192e/rtllib.h2
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c2
-rw-r--r--drivers/target/target_core_pscsi.c9
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c7
-rw-r--r--drivers/usb/class/cdc-acm.c120
-rw-r--r--drivers/usb/core/quirks.c4
-rw-r--r--drivers/usb/dwc2/hcd.c5
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c3
-rw-r--r--drivers/usb/dwc3/gadget.c11
-rw-r--r--drivers/usb/gadget/udc/amd5536udc_pci.c10
-rw-r--r--drivers/usb/host/xhci-mtk.c10
-rw-r--r--drivers/usb/musb/musb_core.c12
-rw-r--r--drivers/usb/usbip/vhci_hcd.c2
-rw-r--r--drivers/vdpa/mlx5/core/mlx5_vdpa.h4
-rw-r--r--drivers/vdpa/mlx5/core/mr.c9
-rw-r--r--drivers/vdpa/mlx5/core/resources.c3
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c40
-rw-r--r--drivers/vfio/pci/Kconfig2
-rw-r--r--drivers/vfio/vfio_iommu_type1.c6
-rw-r--r--drivers/video/fbdev/core/fbcon.c3
-rw-r--r--drivers/video/fbdev/hyperv_fb.c3
-rw-r--r--drivers/xen/Kconfig4
-rw-r--r--drivers/xen/events/events_base.c12
-rw-r--r--fs/block_dev.c8
-rw-r--r--fs/btrfs/Makefile10
-rw-r--r--fs/btrfs/dev-replace.c3
-rw-r--r--fs/btrfs/disk-io.c19
-rw-r--r--fs/btrfs/inode.c18
-rw-r--r--fs/btrfs/qgroup.c12
-rw-r--r--fs/btrfs/volumes.c3
-rw-r--r--fs/cifs/Kconfig3
-rw-r--r--fs/cifs/Makefile5
-rw-r--r--fs/cifs/cifsacl.c3
-rw-r--r--fs/cifs/cifsfs.c3
-rw-r--r--fs/cifs/cifsglob.h6
-rw-r--r--fs/cifs/cifspdu.h5
-rw-r--r--fs/cifs/connect.c17
-rw-r--r--fs/cifs/file.c1
-rw-r--r--fs/cifs/smb2glob.h1
-rw-r--r--fs/cifs/smb2misc.c4
-rw-r--r--fs/cifs/smb2ops.c27
-rw-r--r--fs/cifs/smb2transport.c37
-rw-r--r--fs/file.c21
-rw-r--r--fs/gfs2/super.c14
-rw-r--r--fs/hostfs/hostfs_kern.c7
-rw-r--r--fs/io-wq.c40
-rw-r--r--fs/io_uring.c152
-rw-r--r--fs/namei.c14
-rw-r--r--fs/reiserfs/xattr.h2
-rw-r--r--include/acpi/acpi_bus.h1
-rw-r--r--include/linux/acpi.h9
-rw-r--r--include/linux/avf/virtchnl.h2
-rw-r--r--include/linux/blkdev.h2
-rw-r--r--include/linux/bpf.h2
-rw-r--r--include/linux/device-mapper.h15
-rw-r--r--include/linux/ethtool.h23
-rw-r--r--include/linux/extcon.h23
-rw-r--r--include/linux/firmware/intel/stratix10-svc-client.h2
-rw-r--r--include/linux/host1x.h9
-rw-r--r--include/linux/mlx5/mlx5_ifc.h10
-rw-r--r--include/linux/mutex.h2
-rw-r--r--include/linux/qcom-geni-se.h2
-rw-r--r--include/linux/skmsg.h1
-rw-r--r--include/linux/virtio_net.h16
-rw-r--r--include/linux/xarray.h4
-rw-r--r--include/net/act_api.h12
-rw-r--r--include/net/netns/xfrm.h4
-rw-r--r--include/net/red.h4
-rw-r--r--include/net/rtnetlink.h4
-rw-r--r--include/net/sock.h15
-rw-r--r--include/net/xfrm.h4
-rw-r--r--include/scsi/scsi_transport_iscsi.h1
-rw-r--r--include/uapi/linux/blkpg.h28
-rw-r--r--include/uapi/linux/can.h2
-rw-r--r--include/uapi/linux/ethtool.h54
-rw-r--r--include/uapi/linux/rfkill.h80
-rw-r--r--kernel/bpf/disasm.c2
-rw-r--r--kernel/bpf/inode.c4
-rw-r--r--kernel/bpf/stackmap.c12
-rw-r--r--kernel/bpf/trampoline.c30
-rw-r--r--kernel/bpf/verifier.c5
-rw-r--r--kernel/fork.c16
-rw-r--r--kernel/freezer.c2
-rw-r--r--kernel/power/energy_model.c2
-rw-r--r--kernel/ptrace.c2
-rw-r--r--kernel/signal.c20
-rw-r--r--kernel/trace/ftrace.c9
-rw-r--r--kernel/trace/trace.c3
-rw-r--r--kernel/watchdog.c5
-rw-r--r--kernel/workqueue.c19
-rw-r--r--lib/test_xarray.c26
-rw-r--r--lib/xarray.c11
-rw-r--r--mm/memory.c2
-rw-r--r--net/batman-adv/translation-table.c2
-rw-r--r--net/can/bcm.c10
-rw-r--r--net/can/isotp.c11
-rw-r--r--net/can/raw.c14
-rw-r--r--net/core/dev.c3
-rw-r--r--net/core/neighbour.c2
-rw-r--r--net/core/rtnetlink.c2
-rw-r--r--net/core/skmsg.c12
-rw-r--r--net/core/sock.c12
-rw-r--r--net/core/xdp.c3
-rw-r--r--net/dsa/dsa2.c8
-rw-r--r--net/dsa/switch.c15
-rw-r--r--net/ethtool/common.c17
-rw-r--r--net/ethtool/eee.c4
-rw-r--r--net/ethtool/ioctl.c18
-rw-r--r--net/hsr/hsr_device.c1
-rw-r--r--net/hsr/hsr_forward.c6
-rw-r--r--net/ieee802154/nl-mac.c7
-rw-r--r--net/ieee802154/nl802154.c68
-rw-r--r--net/ipv4/ah4.c2
-rw-r--r--net/ipv4/devinet.c3
-rw-r--r--net/ipv4/esp4.c2
-rw-r--r--net/ipv4/esp4_offload.c17
-rw-r--r--net/ipv4/ip_vti.c6
-rw-r--r--net/ipv4/tcp_bpf.c6
-rw-r--r--net/ipv4/udp.c4
-rw-r--r--net/ipv6/addrconf.c32
-rw-r--r--net/ipv6/ah6.c2
-rw-r--r--net/ipv6/esp6.c2
-rw-r--r--net/ipv6/esp6_offload.c17
-rw-r--r--net/ipv6/ip6_vti.c6
-rw-r--r--net/ipv6/raw.c2
-rw-r--r--net/ipv6/route.c8
-rw-r--r--net/mac80211/cfg.c4
-rw-r--r--net/mac80211/mlme.c5
-rw-r--r--net/mac80211/tx.c2
-rw-r--r--net/mac802154/llsec.c2
-rw-r--r--net/mptcp/protocol.c100
-rw-r--r--net/ncsi/ncsi-manage.c20
-rw-r--r--net/nfc/llcp_sock.c10
-rw-r--r--net/openvswitch/conntrack.c8
-rw-r--r--net/qrtr/qrtr.c5
-rw-r--r--net/rds/message.c4
-rw-r--r--net/rds/send.c2
-rw-r--r--net/rfkill/core.c7
-rw-r--r--net/sched/act_api.c48
-rw-r--r--net/sched/cls_api.c16
-rw-r--r--net/sched/sch_htb.c5
-rw-r--r--net/sched/sch_teql.c3
-rw-r--r--net/sctp/ipv6.c7
-rw-r--r--net/tipc/crypto.c3
-rw-r--r--net/tipc/net.c2
-rw-r--r--net/tipc/node.c2
-rw-r--r--net/tipc/socket.c2
-rw-r--r--net/wireless/nl80211.c10
-rw-r--r--net/wireless/scan.c14
-rw-r--r--net/wireless/sme.c2
-rw-r--r--net/xfrm/xfrm_compat.c12
-rw-r--r--net/xfrm/xfrm_device.c2
-rw-r--r--net/xfrm/xfrm_interface.c3
-rw-r--r--net/xfrm/xfrm_output.c23
-rw-r--r--net/xfrm/xfrm_state.c11
-rw-r--r--scripts/module.lds.S2
-rw-r--r--security/integrity/iint.c8
-rw-r--r--security/selinux/ss/avtab.c101
-rw-r--r--security/selinux/ss/avtab.h2
-rw-r--r--security/selinux/ss/conditional.c12
-rw-r--r--security/selinux/ss/services.c157
-rw-r--r--security/selinux/ss/sidtab.c21
-rw-r--r--security/selinux/ss/sidtab.h4
-rw-r--r--security/tomoyo/network.c2
-rw-r--r--sound/drivers/aloop.c11
-rw-r--r--sound/pci/hda/hda_intel.c8
-rw-r--r--sound/pci/hda/patch_conexant.c1
-rw-r--r--sound/pci/hda/patch_realtek.c20
-rw-r--r--sound/soc/bcm/cygnus-ssp.c4
-rw-r--r--sound/soc/codecs/lpass-rx-macro.c2
-rw-r--r--sound/soc/codecs/lpass-tx-macro.c2
-rw-r--r--sound/soc/codecs/max98373-i2c.c1
-rw-r--r--sound/soc/codecs/max98373-sdw.c1
-rw-r--r--sound/soc/codecs/max98373.c2
-rw-r--r--sound/soc/codecs/wm8960.c8
-rw-r--r--sound/soc/fsl/fsl_esai.c8
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform-pcm.c12
-rw-r--r--sound/soc/sof/core.c8
-rw-r--r--sound/soc/sof/intel/apl.c3
-rw-r--r--sound/soc/sof/intel/cnl.c19
-rw-r--r--sound/soc/sof/intel/hda-dsp.c21
-rw-r--r--sound/soc/sof/intel/hda.h1
-rw-r--r--sound/soc/sof/intel/icl.c3
-rw-r--r--sound/soc/sof/intel/pci-tgl.c2
-rw-r--r--sound/soc/sof/intel/tgl.c18
-rw-r--r--sound/soc/sunxi/sun4i-codec.c5
-rw-r--r--sound/usb/quirks.c1
-rw-r--r--tools/include/uapi/linux/kvm.h13
-rw-r--r--tools/kvm/kvm_stat/kvm_stat.service1
-rw-r--r--tools/lib/bpf/ringbuf.c2
-rw-r--r--tools/lib/bpf/xsk.c57
-rw-r--r--tools/perf/builtin-daemon.c57
-rw-r--r--tools/perf/tests/bpf.c9
-rwxr-xr-xtools/perf/tests/shell/daemon.sh2
-rw-r--r--tools/perf/util/auxtrace.c4
-rw-r--r--tools/perf/util/bpf-event.c13
-rw-r--r--tools/perf/util/parse-events.c3
-rw-r--r--tools/perf/util/pmu.c33
-rw-r--r--tools/perf/util/pmu.h3
-rw-r--r--tools/perf/util/synthetic-events.c11
-rw-r--r--tools/perf/util/vdso.c2
-rw-r--r--tools/testing/radix-tree/idr-test.c21
-rw-r--r--tools/testing/radix-tree/linux/compiler_types.h0
-rw-r--r--tools/testing/radix-tree/multiorder.c2
-rw-r--r--tools/testing/radix-tree/xarray.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c44
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_tcp_nogpl.c19
-rw-r--r--tools/testing/selftests/kvm/hardware_disable_test.c10
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_clock.c13
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh13
522 files changed, 4690 insertions, 2217 deletions
diff --git a/Documentation/devicetree/bindings/hwmon/ntc_thermistor.txt b/Documentation/devicetree/bindings/hwmon/ntc_thermistor.txt
index 37f18d684f6a..4c5c3712970e 100644
--- a/Documentation/devicetree/bindings/hwmon/ntc_thermistor.txt
+++ b/Documentation/devicetree/bindings/hwmon/ntc_thermistor.txt
@@ -32,7 +32,7 @@ Optional node properties:
- "#thermal-sensor-cells" Used to expose itself to thermal fw.
Read more about iio bindings at
- Documentation/devicetree/bindings/iio/iio-bindings.txt
+ https://github.com/devicetree-org/dt-schema/blob/master/schemas/iio/
Example:
ncp15wb473@0 {
diff --git a/Documentation/devicetree/bindings/iio/adc/ingenic,adc.yaml b/Documentation/devicetree/bindings/iio/adc/ingenic,adc.yaml
index 9f414dbdae86..433a3fb55a2e 100644
--- a/Documentation/devicetree/bindings/iio/adc/ingenic,adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/ingenic,adc.yaml
@@ -14,8 +14,9 @@ description: >
Industrial I/O subsystem bindings for ADC controller found in
Ingenic JZ47xx SoCs.
- ADC clients must use the format described in iio-bindings.txt, giving
- a phandle and IIO specifier pair ("io-channels") to the ADC controller.
+ ADC clients must use the format described in
+ https://github.com/devicetree-org/dt-schema/blob/master/schemas/iio/iio-consumer.yaml,
+ giving a phandle and IIO specifier pair ("io-channels") to the ADC controller.
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/input/adc-joystick.yaml b/Documentation/devicetree/bindings/input/adc-joystick.yaml
index 054406bbd22b..721878d5b7af 100644
--- a/Documentation/devicetree/bindings/input/adc-joystick.yaml
+++ b/Documentation/devicetree/bindings/input/adc-joystick.yaml
@@ -24,7 +24,9 @@ properties:
description: >
List of phandle and IIO specifier pairs.
Each pair defines one ADC channel to which a joystick axis is connected.
- See Documentation/devicetree/bindings/iio/iio-bindings.txt for details.
+ See
+ https://github.com/devicetree-org/dt-schema/blob/master/schemas/iio/iio-consumer.yaml
+ for details.
'#address-cells':
const: 1
diff --git a/Documentation/devicetree/bindings/input/touchscreen/resistive-adc-touch.txt b/Documentation/devicetree/bindings/input/touchscreen/resistive-adc-touch.txt
index 51456c0e9a27..af5223bb5bdd 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/resistive-adc-touch.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/resistive-adc-touch.txt
@@ -5,7 +5,10 @@ Required properties:
- compatible: must be "resistive-adc-touch"
The device must be connected to an ADC device that provides channels for
position measurement and optional pressure.
-Refer to ../iio/iio-bindings.txt for details
+Refer to
+https://github.com/devicetree-org/dt-schema/blob/master/schemas/iio/iio-consumer.yaml
+for details
+
- iio-channels: must have at least two channels connected to an ADC device.
These should correspond to the channels exposed by the ADC device and should
have the right index as the ADC device registers them. These channels
diff --git a/Documentation/devicetree/bindings/mfd/ab8500.txt b/Documentation/devicetree/bindings/mfd/ab8500.txt
index d2a6e835c257..937b3e5505e0 100644
--- a/Documentation/devicetree/bindings/mfd/ab8500.txt
+++ b/Documentation/devicetree/bindings/mfd/ab8500.txt
@@ -72,7 +72,9 @@ Required child device properties:
pwm|regulator|rtc|sysctrl|usb]";
A few child devices require ADC channels from the GPADC node. Those follow the
- standard bindings from iio/iio-bindings.txt and iio/adc/adc.txt
+ standard bindings from
+ https://github.com/devicetree-org/dt-schema/blob/master/schemas/iio/iio-consumer.yaml
+ and Documentation/devicetree/bindings/iio/adc/adc.yaml
abx500-temp : io-channels "aux1" and "aux2" for measuring external
temperatures.
diff --git a/Documentation/devicetree/bindings/mfd/motorola-cpcap.txt b/Documentation/devicetree/bindings/mfd/motorola-cpcap.txt
index 5ddcc8f4febc..b52e7a33f0f9 100644
--- a/Documentation/devicetree/bindings/mfd/motorola-cpcap.txt
+++ b/Documentation/devicetree/bindings/mfd/motorola-cpcap.txt
@@ -16,14 +16,14 @@ Optional subnodes:
The sub-functions of CPCAP get their own node with their own compatible values,
which are described in the following files:
-- ../power/supply/cpcap-battery.txt
-- ../power/supply/cpcap-charger.txt
-- ../regulator/cpcap-regulator.txt
-- ../phy/phy-cpcap-usb.txt
-- ../input/cpcap-pwrbutton.txt
-- ../rtc/cpcap-rtc.txt
-- ../leds/leds-cpcap.txt
-- ../iio/adc/cpcap-adc.txt
+- Documentation/devicetree/bindings/power/supply/cpcap-battery.txt
+- Documentation/devicetree/bindings/power/supply/cpcap-charger.txt
+- Documentation/devicetree/bindings/regulator/cpcap-regulator.txt
+- Documentation/devicetree/bindings/phy/phy-cpcap-usb.txt
+- Documentation/devicetree/bindings/input/cpcap-pwrbutton.txt
+- Documentation/devicetree/bindings/rtc/cpcap-rtc.txt
+- Documentation/devicetree/bindings/leds/leds-cpcap.txt
+- Documentation/devicetree/bindings/iio/adc/motorola,cpcap-adc.yaml
The only exception is the audio codec. Instead of a compatible value its
node must be named "audio-codec".
diff --git a/Documentation/devicetree/bindings/net/brcm,bcm4908-enet.yaml b/Documentation/devicetree/bindings/net/brcm,bcm4908-enet.yaml
index 2a3be0f9a1a1..2f46e45dcd60 100644
--- a/Documentation/devicetree/bindings/net/brcm,bcm4908-enet.yaml
+++ b/Documentation/devicetree/bindings/net/brcm,bcm4908-enet.yaml
@@ -40,7 +40,7 @@ required:
- interrupts
- interrupt-names
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/net/ethernet-controller.yaml b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
index 4b7d1e5d003c..e8f04687a3e0 100644
--- a/Documentation/devicetree/bindings/net/ethernet-controller.yaml
+++ b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
@@ -49,7 +49,7 @@ properties:
description:
Reference to an nvmem node for the MAC address
- nvmem-cells-names:
+ nvmem-cell-names:
const: mac-address
phy-connection-type:
diff --git a/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt b/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
index b921731cd970..df9e844dd6bc 100644
--- a/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
+++ b/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
@@ -65,6 +65,71 @@ KSZ9031:
step is 60ps. The default value is the neutral setting, so setting
rxc-skew-ps=<0> actually results in -900 picoseconds adjustment.
+ The KSZ9031 hardware supports a range of skew values from negative to
+ positive, where the specific range is property dependent. All values
+ specified in the devicetree are offset by the minimum value so they
+ can be represented as positive integers in the devicetree since it's
+ difficult to represent a negative number in the devictree.
+
+ The following 5-bit values table apply to rxc-skew-ps and txc-skew-ps.
+
+ Pad Skew Value Delay (ps) Devicetree Value
+ ------------------------------------------------------
+ 0_0000 -900ps 0
+ 0_0001 -840ps 60
+ 0_0010 -780ps 120
+ 0_0011 -720ps 180
+ 0_0100 -660ps 240
+ 0_0101 -600ps 300
+ 0_0110 -540ps 360
+ 0_0111 -480ps 420
+ 0_1000 -420ps 480
+ 0_1001 -360ps 540
+ 0_1010 -300ps 600
+ 0_1011 -240ps 660
+ 0_1100 -180ps 720
+ 0_1101 -120ps 780
+ 0_1110 -60ps 840
+ 0_1111 0ps 900
+ 1_0000 60ps 960
+ 1_0001 120ps 1020
+ 1_0010 180ps 1080
+ 1_0011 240ps 1140
+ 1_0100 300ps 1200
+ 1_0101 360ps 1260
+ 1_0110 420ps 1320
+ 1_0111 480ps 1380
+ 1_1000 540ps 1440
+ 1_1001 600ps 1500
+ 1_1010 660ps 1560
+ 1_1011 720ps 1620
+ 1_1100 780ps 1680
+ 1_1101 840ps 1740
+ 1_1110 900ps 1800
+ 1_1111 960ps 1860
+
+ The following 4-bit values table apply to the txdX-skew-ps, rxdX-skew-ps
+ data pads, and the rxdv-skew-ps, txen-skew-ps control pads.
+
+ Pad Skew Value Delay (ps) Devicetree Value
+ ------------------------------------------------------
+ 0000 -420ps 0
+ 0001 -360ps 60
+ 0010 -300ps 120
+ 0011 -240ps 180
+ 0100 -180ps 240
+ 0101 -120ps 300
+ 0110 -60ps 360
+ 0111 0ps 420
+ 1000 60ps 480
+ 1001 120ps 540
+ 1010 180ps 600
+ 1011 240ps 660
+ 1100 300ps 720
+ 1101 360ps 780
+ 1110 420ps 840
+ 1111 480ps 900
+
Optional properties:
Maximum value of 1860, default value 900:
@@ -120,11 +185,21 @@ KSZ9131:
Examples:
+ /* Attach to an Ethernet device with autodetected PHY */
+ &enet {
+ rxc-skew-ps = <1800>;
+ rxdv-skew-ps = <0>;
+ txc-skew-ps = <1800>;
+ txen-skew-ps = <0>;
+ status = "okay";
+ };
+
+ /* Attach to an explicitly-specified PHY */
mdio {
phy0: ethernet-phy@0 {
- rxc-skew-ps = <3000>;
+ rxc-skew-ps = <1800>;
rxdv-skew-ps = <0>;
- txc-skew-ps = <3000>;
+ txc-skew-ps = <1800>;
txen-skew-ps = <0>;
reg = <0>;
};
@@ -133,3 +208,20 @@ Examples:
phy = <&phy0>;
phy-mode = "rgmii-id";
};
+
+References
+
+ Micrel ksz9021rl/rn Data Sheet, Revision 1.2. Dated 2/13/2014.
+ http://www.micrel.com/_PDF/Ethernet/datasheets/ksz9021rl-rn_ds.pdf
+
+ Micrel ksz9031rnx Data Sheet, Revision 2.1. Dated 11/20/2014.
+ http://www.micrel.com/_PDF/Ethernet/datasheets/KSZ9031RNX.pdf
+
+Notes:
+
+ Note that a previous version of the Micrel ksz9021rl/rn Data Sheet
+ was missing extended register 106 (transmit data pad skews), and
+ incorrectly specified the ps per step as 200ps/step instead of
+ 120ps/step. The latest update to this document reflects the latest
+ revision of the Micrel specification even though usage in the kernel
+ still reflects that incorrect document.
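
The skew tables added in the hunk above reduce to a fixed offset: the devicetree value is the desired pad delay minus the pad type's minimum delay (-900 ps for the 5-bit clock pads, -420 ps for the 4-bit data and control pads), in 60 ps steps. The following C sketch of that conversion is purely illustrative, is not part of the patch, and uses hypothetical helper names:

#include <stdio.h>

/*
 * Illustrative only (not from the kernel patch above): map a desired
 * KSZ9031 pad delay in picoseconds to the value written in the
 * devicetree, which is the delay offset by the pad type's minimum.
 */
static int ksz9031_delay_to_dt(int delay_ps, int min_delay_ps)
{
	return delay_ps - min_delay_ps;
}

int main(void)
{
	/* 5-bit clock pads (rxc/txc): minimum is -900 ps, so 0 ps -> 900 */
	printf("rxc-skew-ps for 0 ps delay:  %d\n", ksz9031_delay_to_dt(0, -900));
	/* 4-bit data/control pads: minimum is -420 ps, so 0 ps -> 420 */
	printf("rxd0-skew-ps for 0 ps delay: %d\n", ksz9031_delay_to_dt(0, -420));
	return 0;
}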
diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst
index fd84f4ed898a..ce4a69f8308f 100644
--- a/Documentation/networking/ethtool-netlink.rst
+++ b/Documentation/networking/ethtool-netlink.rst
@@ -980,9 +980,9 @@ constraints on coalescing parameters and their values.
PAUSE_GET
-============
+=========
-Gets channel counts like ``ETHTOOL_GPAUSE`` ioctl request.
+Gets pause frame settings like ``ETHTOOL_GPAUSEPARAM`` ioctl request.
Request contents:
@@ -1011,7 +1011,7 @@ the statistics in the following structure:
Each member has a corresponding attribute defined.
PAUSE_SET
-============
+=========
Sets pause parameters like ``ETHTOOL_GPAUSEPARAM`` ioctl request.
@@ -1028,7 +1028,7 @@ Request contents:
EEE_GET
=======
-Gets channel counts like ``ETHTOOL_GEEE`` ioctl request.
+Gets Energy Efficient Ethernet settings like ``ETHTOOL_GEEE`` ioctl request.
Request contents:
@@ -1058,7 +1058,7 @@ first 32 are provided by the ``ethtool_ops`` callback.
EEE_SET
=======
-Sets pause parameters like ``ETHTOOL_GEEEPARAM`` ioctl request.
+Sets Energy Efficient Ethernet parameters like ``ETHTOOL_SEEE`` ioctl request.
Request contents:
diff --git a/MAINTAINERS b/MAINTAINERS
index 3ea9539821b5..795b9941c151 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2491,7 +2491,7 @@ N: sc27xx
N: sc2731
ARM/STI ARCHITECTURE
-M: Patrice Chotard <patrice.chotard@st.com>
+M: Patrice Chotard <patrice.chotard@foss.st.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
W: http://www.stlinux.com
@@ -2524,7 +2524,7 @@ F: include/linux/remoteproc/st_slim_rproc.h
ARM/STM32 ARCHITECTURE
M: Maxime Coquelin <mcoquelin.stm32@gmail.com>
-M: Alexandre Torgue <alexandre.torgue@st.com>
+M: Alexandre Torgue <alexandre.torgue@foss.st.com>
L: linux-stm32@st-md-mailman.stormreply.com (moderated for non-subscribers)
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@@ -3117,7 +3117,7 @@ C: irc://irc.oftc.net/bcache
F: drivers/md/bcache/
BDISP ST MEDIA DRIVER
-M: Fabien Dessenne <fabien.dessenne@st.com>
+M: Fabien Dessenne <fabien.dessenne@foss.st.com>
L: linux-media@vger.kernel.org
S: Supported
W: https://linuxtv.org
@@ -3679,7 +3679,7 @@ M: bcm-kernel-feedback-list@broadcom.com
L: linux-pm@vger.kernel.org
S: Maintained
T: git git://github.com/broadcom/stblinux.git
-F: drivers/soc/bcm/bcm-pmb.c
+F: drivers/soc/bcm/bcm63xx/bcm-pmb.c
F: include/dt-bindings/soc/bcm-pmb.h
BROADCOM SPECIFIC AMBA DRIVER (BCMA)
@@ -5084,7 +5084,7 @@ S: Maintained
F: drivers/platform/x86/dell/dell-wmi.c
DELTA ST MEDIA DRIVER
-M: Hugues Fruchet <hugues.fruchet@st.com>
+M: Hugues Fruchet <hugues.fruchet@foss.st.com>
L: linux-media@vger.kernel.org
S: Supported
W: https://linuxtv.org
@@ -6010,7 +6010,6 @@ F: drivers/gpu/drm/rockchip/
DRM DRIVERS FOR STI
M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
-M: Vincent Abriou <vincent.abriou@st.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
@@ -6018,10 +6017,9 @@ F: Documentation/devicetree/bindings/display/st,stih4xx.txt
F: drivers/gpu/drm/sti
DRM DRIVERS FOR STM
-M: Yannick Fertre <yannick.fertre@st.com>
-M: Philippe Cornu <philippe.cornu@st.com>
+M: Yannick Fertre <yannick.fertre@foss.st.com>
+M: Philippe Cornu <philippe.cornu@foss.st.com>
M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
-M: Vincent Abriou <vincent.abriou@st.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
@@ -7480,8 +7478,9 @@ F: include/uapi/asm-generic/
GENERIC PHY FRAMEWORK
M: Kishon Vijay Abraham I <kishon@ti.com>
M: Vinod Koul <vkoul@kernel.org>
-L: linux-kernel@vger.kernel.org
+L: linux-phy@lists.infradead.org
S: Supported
+Q: https://patchwork.kernel.org/project/linux-phy/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/phy/linux-phy.git
F: Documentation/devicetree/bindings/phy/
F: drivers/phy/
@@ -8234,7 +8233,7 @@ F: include/linux/hugetlb.h
F: mm/hugetlb.c
HVA ST MEDIA DRIVER
-M: Jean-Christophe Trotin <jean-christophe.trotin@st.com>
+M: Jean-Christophe Trotin <jean-christophe.trotin@foss.st.com>
L: linux-media@vger.kernel.org
S: Supported
W: https://linuxtv.org
@@ -10034,7 +10033,6 @@ F: scripts/leaking_addresses.pl
LED SUBSYSTEM
M: Pavel Machek <pavel@ucw.cz>
-R: Dan Murphy <dmurphy@ti.com>
L: linux-leds@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pavel/linux-leds.git
@@ -11171,7 +11169,7 @@ T: git git://linuxtv.org/media_tree.git
F: drivers/media/dvb-frontends/stv6111*
MEDIA DRIVERS FOR STM32 - DCMI
-M: Hugues Fruchet <hugues.fruchet@st.com>
+M: Hugues Fruchet <hugues.fruchet@foss.st.com>
L: linux-media@vger.kernel.org
S: Supported
T: git git://linuxtv.org/media_tree.git
@@ -14857,6 +14855,14 @@ L: linux-arm-msm@vger.kernel.org
S: Maintained
F: drivers/iommu/arm/arm-smmu/qcom_iommu.c
+QUALCOMM IPC ROUTER (QRTR) DRIVER
+M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+L: linux-arm-msm@vger.kernel.org
+S: Maintained
+F: include/trace/events/qrtr.h
+F: include/uapi/linux/qrtr.h
+F: net/qrtr/
+
QUALCOMM IPCC MAILBOX DRIVER
M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
L: linux-arm-msm@vger.kernel.org
@@ -15206,6 +15212,7 @@ F: fs/reiserfs/
REMOTE PROCESSOR (REMOTEPROC) SUBSYSTEM
M: Ohad Ben-Cohen <ohad@wizery.com>
M: Bjorn Andersson <bjorn.andersson@linaro.org>
+M: Mathieu Poirier <mathieu.poirier@linaro.org>
L: linux-remoteproc@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/andersson/remoteproc.git rproc-next
@@ -15219,6 +15226,7 @@ F: include/linux/remoteproc/
REMOTE PROCESSOR MESSAGING (RPMSG) SUBSYSTEM
M: Ohad Ben-Cohen <ohad@wizery.com>
M: Bjorn Andersson <bjorn.andersson@linaro.org>
+M: Mathieu Poirier <mathieu.poirier@linaro.org>
L: linux-remoteproc@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/andersson/remoteproc.git rpmsg-next
@@ -15635,8 +15643,8 @@ F: Documentation/s390/pci.rst
S390 VFIO AP DRIVER
M: Tony Krowiak <akrowiak@linux.ibm.com>
-M: Pierre Morel <pmorel@linux.ibm.com>
M: Halil Pasic <pasic@linux.ibm.com>
+M: Jason Herne <jjherne@linux.ibm.com>
L: linux-s390@vger.kernel.org
S: Supported
W: http://www.ibm.com/developerworks/linux/linux390/
@@ -15648,6 +15656,7 @@ F: drivers/s390/crypto/vfio_ap_private.h
S390 VFIO-CCW DRIVER
M: Cornelia Huck <cohuck@redhat.com>
M: Eric Farman <farman@linux.ibm.com>
+M: Matthew Rosato <mjrosato@linux.ibm.com>
R: Halil Pasic <pasic@linux.ibm.com>
L: linux-s390@vger.kernel.org
L: kvm@vger.kernel.org
@@ -15658,6 +15667,7 @@ F: include/uapi/linux/vfio_ccw.h
S390 VFIO-PCI DRIVER
M: Matthew Rosato <mjrosato@linux.ibm.com>
+M: Eric Farman <farman@linux.ibm.com>
L: linux-s390@vger.kernel.org
L: kvm@vger.kernel.org
S: Supported
@@ -16944,7 +16954,8 @@ F: Documentation/devicetree/bindings/media/i2c/st,st-mipid02.txt
F: drivers/media/i2c/st-mipid02.c
ST STM32 I2C/SMBUS DRIVER
-M: Pierre-Yves MORDRET <pierre-yves.mordret@st.com>
+M: Pierre-Yves MORDRET <pierre-yves.mordret@foss.st.com>
+M: Alain Volmat <alain.volmat@foss.st.com>
L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/i2c/busses/i2c-stm32*
@@ -17069,7 +17080,7 @@ F: kernel/jump_label.c
F: kernel/static_call.c
STI AUDIO (ASoC) DRIVERS
-M: Arnaud Pouliquen <arnaud.pouliquen@st.com>
+M: Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
@@ -17089,15 +17100,15 @@ T: git git://linuxtv.org/media_tree.git
F: drivers/media/usb/stk1160/
STM32 AUDIO (ASoC) DRIVERS
-M: Olivier Moysan <olivier.moysan@st.com>
-M: Arnaud Pouliquen <arnaud.pouliquen@st.com>
+M: Olivier Moysan <olivier.moysan@foss.st.com>
+M: Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/iio/adc/st,stm32-*.yaml
F: sound/soc/stm/
STM32 TIMER/LPTIMER DRIVERS
-M: Fabrice Gasnier <fabrice.gasnier@st.com>
+M: Fabrice Gasnier <fabrice.gasnier@foss.st.com>
S: Maintained
F: Documentation/ABI/testing/*timer-stm32
F: Documentation/devicetree/bindings/*/*stm32-*timer*
@@ -17107,7 +17118,7 @@ F: include/linux/*/stm32-*tim*
STMMAC ETHERNET DRIVER
M: Giuseppe Cavallaro <peppe.cavallaro@st.com>
-M: Alexandre Torgue <alexandre.torgue@st.com>
+M: Alexandre Torgue <alexandre.torgue@foss.st.com>
M: Jose Abreu <joabreu@synopsys.com>
L: netdev@vger.kernel.org
S: Supported
@@ -17849,7 +17860,6 @@ S: Maintained
F: drivers/thermal/ti-soc-thermal/
TI BQ27XXX POWER SUPPLY DRIVER
-R: Dan Murphy <dmurphy@ti.com>
F: drivers/power/supply/bq27xxx_battery.c
F: drivers/power/supply/bq27xxx_battery_i2c.c
F: include/linux/power/bq27xxx_battery.h
diff --git a/Makefile b/Makefile
index d4784d181123..cc77fd45ca64 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 12
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
NAME = Frozen Wasteland
# *DOCUMENTATION*
diff --git a/arch/arc/boot/dts/haps_hs.dts b/arch/arc/boot/dts/haps_hs.dts
index 60d578e2781f..76ad527a0847 100644
--- a/arch/arc/boot/dts/haps_hs.dts
+++ b/arch/arc/boot/dts/haps_hs.dts
@@ -16,7 +16,7 @@
memory {
device_type = "memory";
/* CONFIG_LINUX_RAM_BASE needs to match low mem start */
- reg = <0x0 0x80000000 0x0 0x20000000 /* 512 MB low mem */
+ reg = <0x0 0x80000000 0x0 0x40000000 /* 1 GB low mem */
0x1 0x00000000 0x0 0x40000000>; /* 1 GB highmem */
};
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index a78d8f745a67..fdbe06c98895 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -96,7 +96,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
sizeof(sf->uc.uc_mcontext.regs.scratch));
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
- return err;
+ return err ? -EFAULT : 0;
}
static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
@@ -110,7 +110,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
&(sf->uc.uc_mcontext.regs.scratch),
sizeof(sf->uc.uc_mcontext.regs.scratch));
if (err)
- return err;
+ return -EFAULT;
set_current_blocked(&set);
regs->bta = uregs.scratch.bta;
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index 74ad4256022e..47bab67f8649 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -187,25 +187,26 @@ static void init_unwind_table(struct unwind_table *table, const char *name,
const void *table_start, unsigned long table_size,
const u8 *header_start, unsigned long header_size)
{
- const u8 *ptr = header_start + 4;
- const u8 *end = header_start + header_size;
-
table->core.pc = (unsigned long)core_start;
table->core.range = core_size;
table->init.pc = (unsigned long)init_start;
table->init.range = init_size;
table->address = table_start;
table->size = table_size;
-
- /* See if the linker provided table looks valid. */
- if (header_size <= 4
- || header_start[0] != 1
- || (void *)read_pointer(&ptr, end, header_start[1]) != table_start
- || header_start[2] == DW_EH_PE_omit
- || read_pointer(&ptr, end, header_start[2]) <= 0
- || header_start[3] == DW_EH_PE_omit)
- header_start = NULL;
-
+ /* To avoid the pointer addition with NULL pointer.*/
+ if (header_start != NULL) {
+ const u8 *ptr = header_start + 4;
+ const u8 *end = header_start + header_size;
+ /* See if the linker provided table looks valid. */
+ if (header_size <= 4
+ || header_start[0] != 1
+ || (void *)read_pointer(&ptr, end, header_start[1])
+ != table_start
+ || header_start[2] == DW_EH_PE_omit
+ || read_pointer(&ptr, end, header_start[2]) <= 0
+ || header_start[3] == DW_EH_PE_omit)
+ header_start = NULL;
+ }
table->hdrsz = header_size;
smp_wmb();
table->header = header_start;
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 5b213a1e68bb..5e33d0e88f5b 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -40,6 +40,9 @@
ethernet1 = &cpsw_emac1;
spi0 = &spi0;
spi1 = &spi1;
+ mmc0 = &mmc1;
+ mmc1 = &mmc2;
+ mmc2 = &mmc3;
};
cpus {
diff --git a/arch/arm/boot/dts/armada-385-turris-omnia.dts b/arch/arm/boot/dts/armada-385-turris-omnia.dts
index 646a06420c77..5bd6a66d2c2b 100644
--- a/arch/arm/boot/dts/armada-385-turris-omnia.dts
+++ b/arch/arm/boot/dts/armada-385-turris-omnia.dts
@@ -32,7 +32,8 @@
ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000
- MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>;
+ MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000
+ MBUS_ID(0x0c, 0x04) 0 0xf1200000 0x100000>;
internal-regs {
@@ -389,6 +390,7 @@
phy1: ethernet-phy@1 {
compatible = "ethernet-phy-ieee802.3-c22";
reg = <1>;
+ marvell,reg-init = <3 18 0 0x4985>;
/* irq is connected to &pcawan pin 7 */
};
diff --git a/arch/arm/boot/dts/at91-sam9x60ek.dts b/arch/arm/boot/dts/at91-sam9x60ek.dts
index 73b6b1f89de9..775ceb3acb6c 100644
--- a/arch/arm/boot/dts/at91-sam9x60ek.dts
+++ b/arch/arm/boot/dts/at91-sam9x60ek.dts
@@ -334,14 +334,6 @@
};
&pinctrl {
- atmel,mux-mask = <
- /* A B C */
- 0xFFFFFE7F 0xC0E0397F 0xEF00019D /* pioA */
- 0x03FFFFFF 0x02FC7E68 0x00780000 /* pioB */
- 0xffffffff 0xF83FFFFF 0xB800F3FC /* pioC */
- 0x003FFFFF 0x003F8000 0x00000000 /* pioD */
- >;
-
adc {
pinctrl_adc_default: adc_default {
atmel,pins = <AT91_PIOB 15 AT91_PERIPH_A AT91_PINCTRL_NONE>;
diff --git a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
index 1b1163858b1d..e3251f3e3eaa 100644
--- a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
+++ b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
@@ -84,8 +84,8 @@
pinctrl-0 = <&pinctrl_macb0_default>;
phy-mode = "rmii";
- ethernet-phy@0 {
- reg = <0x0>;
+ ethernet-phy@7 {
+ reg = <0x7>;
interrupt-parent = <&pioA>;
interrupts = <PIN_PD31 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/bcm2711.dtsi b/arch/arm/boot/dts/bcm2711.dtsi
index 462b1dfb0385..720beec54d61 100644
--- a/arch/arm/boot/dts/bcm2711.dtsi
+++ b/arch/arm/boot/dts/bcm2711.dtsi
@@ -308,14 +308,6 @@
#reset-cells = <1>;
};
- bsc_intr: interrupt-controller@7ef00040 {
- compatible = "brcm,bcm2711-l2-intc", "brcm,l2-intc";
- reg = <0x7ef00040 0x30>;
- interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-controller;
- #interrupt-cells = <1>;
- };
-
aon_intr: interrupt-controller@7ef00100 {
compatible = "brcm,bcm2711-l2-intc", "brcm,l2-intc";
reg = <0x7ef00100 0x30>;
@@ -362,8 +354,6 @@
reg = <0x7ef04500 0x100>, <0x7ef00b00 0x300>;
reg-names = "bsc", "auto-i2c";
clock-frequency = <97500>;
- interrupt-parent = <&bsc_intr>;
- interrupts = <0>;
status = "disabled";
};
@@ -405,8 +395,6 @@
reg = <0x7ef09500 0x100>, <0x7ef05b00 0x300>;
reg-names = "bsc", "auto-i2c";
clock-frequency = <97500>;
- interrupt-parent = <&bsc_intr>;
- interrupts = <1>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
index 7a1e53195785..f28a96fcf23e 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
@@ -433,6 +433,7 @@
pinctrl-0 = <&pinctrl_usdhc2>;
cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+ vmmc-supply = <&vdd_sd1_reg>;
status = "disabled";
};
@@ -442,5 +443,6 @@
&pinctrl_usdhc3_cdwp>;
cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
+ vmmc-supply = <&vdd_sd0_reg>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
index c593597b2119..5a1e10def6ef 100644
--- a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
+++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
@@ -210,9 +210,6 @@
micrel,led-mode = <1>;
clocks = <&clks IMX6UL_CLK_ENET_REF>;
clock-names = "rmii-ref";
- reset-gpios = <&gpio_spi 1 GPIO_ACTIVE_LOW>;
- reset-assert-us = <10000>;
- reset-deassert-us = <100>;
};
@@ -222,9 +219,6 @@
micrel,led-mode = <1>;
clocks = <&clks IMX6UL_CLK_ENET2_REF>;
clock-names = "rmii-ref";
- reset-gpios = <&gpio_spi 2 GPIO_ACTIVE_LOW>;
- reset-assert-us = <10000>;
- reset-deassert-us = <100>;
};
};
};
@@ -243,6 +237,22 @@
status = "okay";
};
+&gpio_spi {
+ eth0-phy-hog {
+ gpio-hog;
+ gpios = <1 GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "eth0-phy";
+ };
+
+ eth1-phy-hog {
+ gpio-hog;
+ gpios = <2 GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "eth1-phy";
+ };
+};
+
&i2c1 {
clock-frequency = <100000>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/imx6ull-myir-mys-6ulx-eval.dts b/arch/arm/boot/dts/imx6ull-myir-mys-6ulx-eval.dts
index ecbb2cc5b9ab..79cc45728cd2 100644
--- a/arch/arm/boot/dts/imx6ull-myir-mys-6ulx-eval.dts
+++ b/arch/arm/boot/dts/imx6ull-myir-mys-6ulx-eval.dts
@@ -14,5 +14,6 @@
};
&gpmi {
+ fsl,use-minimum-ecc;
status = "okay";
};
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 72e4f6481776..4a9f9496a867 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -22,6 +22,11 @@
i2c1 = &i2c2;
i2c2 = &i2c3;
i2c3 = &i2c4;
+ mmc0 = &mmc1;
+ mmc1 = &mmc2;
+ mmc2 = &mmc3;
+ mmc3 = &mmc4;
+ mmc4 = &mmc5;
serial0 = &uart1;
serial1 = &uart2;
serial2 = &uart3;
diff --git a/arch/arm/boot/dts/omap44xx-clocks.dtsi b/arch/arm/boot/dts/omap44xx-clocks.dtsi
index 532868591107..1f1c04d8f472 100644
--- a/arch/arm/boot/dts/omap44xx-clocks.dtsi
+++ b/arch/arm/boot/dts/omap44xx-clocks.dtsi
@@ -770,14 +770,6 @@
ti,max-div = <2>;
};
- sha2md5_fck: sha2md5_fck@15c8 {
- #clock-cells = <0>;
- compatible = "ti,gate-clock";
- clocks = <&l3_div_ck>;
- ti,bit-shift = <1>;
- reg = <0x15c8>;
- };
-
usb_phy_cm_clk32k: usb_phy_cm_clk32k@640 {
#clock-cells = <0>;
compatible = "ti,gate-clock";
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index e025b7c9a357..ee821d0ab364 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -25,6 +25,11 @@
i2c2 = &i2c3;
i2c3 = &i2c4;
i2c4 = &i2c5;
+ mmc0 = &mmc1;
+ mmc1 = &mmc2;
+ mmc2 = &mmc3;
+ mmc3 = &mmc4;
+ mmc4 = &mmc5;
serial0 = &uart1;
serial1 = &uart2;
serial2 = &uart3;
diff --git a/arch/arm/boot/dts/sam9x60.dtsi b/arch/arm/boot/dts/sam9x60.dtsi
index 84066c1298df..ec45ced3cde6 100644
--- a/arch/arm/boot/dts/sam9x60.dtsi
+++ b/arch/arm/boot/dts/sam9x60.dtsi
@@ -606,6 +606,15 @@
compatible = "microchip,sam9x60-pinctrl", "atmel,at91sam9x5-pinctrl", "atmel,at91rm9200-pinctrl", "simple-bus";
ranges = <0xfffff400 0xfffff400 0x800>;
+ /* mux-mask corresponding to sam9x60 SoC in TFBGA228L package */
+ atmel,mux-mask = <
+ /* A B C */
+ 0xffffffff 0xffe03fff 0xef00019d /* pioA */
+ 0x03ffffff 0x02fc7e7f 0x00780000 /* pioB */
+ 0xffffffff 0xffffffff 0xf83fffff /* pioC */
+ 0x003fffff 0x003f8000 0x00000000 /* pioD */
+ >;
+
pioA: gpio@fffff400 {
compatible = "microchip,sam9x60-gpio", "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
reg = <0xfffff400 0x200>;
diff --git a/arch/arm/mach-imx/avic.c b/arch/arm/mach-imx/avic.c
index 322caa21bcb3..21bce4049cec 100644
--- a/arch/arm/mach-imx/avic.c
+++ b/arch/arm/mach-imx/avic.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -162,7 +163,7 @@ static void __exception_irq_entry avic_handle_irq(struct pt_regs *regs)
* interrupts. It registers the interrupt enable and disable functions
* to the kernel for each interrupt source.
*/
-void __init mxc_init_irq(void __iomem *irqbase)
+static void __init mxc_init_irq(void __iomem *irqbase)
{
struct device_node *np;
int irq_base;
@@ -220,3 +221,16 @@ void __init mxc_init_irq(void __iomem *irqbase)
printk(KERN_INFO "MXC IRQ initialized\n");
}
+
+static int __init imx_avic_init(struct device_node *node,
+ struct device_node *parent)
+{
+ void __iomem *avic_base;
+
+ avic_base = of_iomap(node, 0);
+ BUG_ON(!avic_base);
+ mxc_init_irq(avic_base);
+ return 0;
+}
+
+IRQCHIP_DECLARE(imx_avic, "fsl,avic", imx_avic_init);
diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
index 2b004cc4f95e..474dedb73bc7 100644
--- a/arch/arm/mach-imx/common.h
+++ b/arch/arm/mach-imx/common.h
@@ -22,7 +22,6 @@ void mx35_map_io(void);
void imx21_init_early(void);
void imx31_init_early(void);
void imx35_init_early(void);
-void mxc_init_irq(void __iomem *);
void mx31_init_irq(void);
void mx35_init_irq(void);
void mxc_set_cpu_type(unsigned int type);
diff --git a/arch/arm/mach-imx/mach-imx1.c b/arch/arm/mach-imx/mach-imx1.c
index 32df3b8012f9..8eca92d66a2e 100644
--- a/arch/arm/mach-imx/mach-imx1.c
+++ b/arch/arm/mach-imx/mach-imx1.c
@@ -17,16 +17,6 @@ static void __init imx1_init_early(void)
mxc_set_cpu_type(MXC_CPU_MX1);
}
-static void __init imx1_init_irq(void)
-{
- void __iomem *avic_addr;
-
- avic_addr = ioremap(MX1_AVIC_ADDR, SZ_4K);
- WARN_ON(!avic_addr);
-
- mxc_init_irq(avic_addr);
-}
-
static const char * const imx1_dt_board_compat[] __initconst = {
"fsl,imx1",
NULL
@@ -34,7 +24,6 @@ static const char * const imx1_dt_board_compat[] __initconst = {
DT_MACHINE_START(IMX1_DT, "Freescale i.MX1 (Device Tree Support)")
.init_early = imx1_init_early,
- .init_irq = imx1_init_irq,
.dt_compat = imx1_dt_board_compat,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx25.c b/arch/arm/mach-imx/mach-imx25.c
index 95de48a1aa7d..51927bd08aef 100644
--- a/arch/arm/mach-imx/mach-imx25.c
+++ b/arch/arm/mach-imx/mach-imx25.c
@@ -22,17 +22,6 @@ static void __init imx25_dt_init(void)
imx_aips_allow_unprivileged_access("fsl,imx25-aips");
}
-static void __init mx25_init_irq(void)
-{
- struct device_node *np;
- void __iomem *avic_base;
-
- np = of_find_compatible_node(NULL, NULL, "fsl,avic");
- avic_base = of_iomap(np, 0);
- BUG_ON(!avic_base);
- mxc_init_irq(avic_base);
-}
-
static const char * const imx25_dt_board_compat[] __initconst = {
"fsl,imx25",
NULL
@@ -42,6 +31,5 @@ DT_MACHINE_START(IMX25_DT, "Freescale i.MX25 (Device Tree Support)")
.init_early = imx25_init_early,
.init_machine = imx25_dt_init,
.init_late = imx25_pm_init,
- .init_irq = mx25_init_irq,
.dt_compat = imx25_dt_board_compat,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx27.c b/arch/arm/mach-imx/mach-imx27.c
index 262422a9c196..e325c9468105 100644
--- a/arch/arm/mach-imx/mach-imx27.c
+++ b/arch/arm/mach-imx/mach-imx27.c
@@ -56,17 +56,6 @@ static void __init imx27_init_early(void)
mxc_set_cpu_type(MXC_CPU_MX27);
}
-static void __init mx27_init_irq(void)
-{
- void __iomem *avic_base;
- struct device_node *np;
-
- np = of_find_compatible_node(NULL, NULL, "fsl,avic");
- avic_base = of_iomap(np, 0);
- BUG_ON(!avic_base);
- mxc_init_irq(avic_base);
-}
-
static const char * const imx27_dt_board_compat[] __initconst = {
"fsl,imx27",
NULL
@@ -75,7 +64,6 @@ static const char * const imx27_dt_board_compat[] __initconst = {
DT_MACHINE_START(IMX27_DT, "Freescale i.MX27 (Device Tree Support)")
.map_io = mx27_map_io,
.init_early = imx27_init_early,
- .init_irq = mx27_init_irq,
.init_late = imx27_pm_init,
.dt_compat = imx27_dt_board_compat,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx31.c b/arch/arm/mach-imx/mach-imx31.c
index dc69dfe600df..e9a1092b6093 100644
--- a/arch/arm/mach-imx/mach-imx31.c
+++ b/arch/arm/mach-imx/mach-imx31.c
@@ -14,6 +14,5 @@ static const char * const imx31_dt_board_compat[] __initconst = {
DT_MACHINE_START(IMX31_DT, "Freescale i.MX31 (Device Tree Support)")
.map_io = mx31_map_io,
.init_early = imx31_init_early,
- .init_irq = mx31_init_irq,
.dt_compat = imx31_dt_board_compat,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx35.c b/arch/arm/mach-imx/mach-imx35.c
index ec5c3068715c..0fc08218b77d 100644
--- a/arch/arm/mach-imx/mach-imx35.c
+++ b/arch/arm/mach-imx/mach-imx35.c
@@ -27,6 +27,5 @@ DT_MACHINE_START(IMX35_DT, "Freescale i.MX35 (Device Tree Support)")
.l2c_aux_mask = ~0,
.map_io = mx35_map_io,
.init_early = imx35_init_early,
- .init_irq = mx35_init_irq,
.dt_compat = imx35_dt_board_compat,
MACHINE_END
diff --git a/arch/arm/mach-imx/mm-imx3.c b/arch/arm/mach-imx/mm-imx3.c
index 5056438e5b42..28db97289ee8 100644
--- a/arch/arm/mach-imx/mm-imx3.c
+++ b/arch/arm/mach-imx/mm-imx3.c
@@ -109,18 +109,6 @@ void __init imx31_init_early(void)
mx3_ccm_base = of_iomap(np, 0);
BUG_ON(!mx3_ccm_base);
}
-
-void __init mx31_init_irq(void)
-{
- void __iomem *avic_base;
- struct device_node *np;
-
- np = of_find_compatible_node(NULL, NULL, "fsl,imx31-avic");
- avic_base = of_iomap(np, 0);
- BUG_ON(!avic_base);
-
- mxc_init_irq(avic_base);
-}
#endif /* ifdef CONFIG_SOC_IMX31 */
#ifdef CONFIG_SOC_IMX35
@@ -158,16 +146,4 @@ void __init imx35_init_early(void)
mx3_ccm_base = of_iomap(np, 0);
BUG_ON(!mx3_ccm_base);
}
-
-void __init mx35_init_irq(void)
-{
- void __iomem *avic_base;
- struct device_node *np;
-
- np = of_find_compatible_node(NULL, NULL, "fsl,imx35-avic");
- avic_base = of_iomap(np, 0);
- BUG_ON(!avic_base);
-
- mxc_init_irq(avic_base);
-}
#endif /* ifdef CONFIG_SOC_IMX35 */
diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
index cd711bfc591f..2c647bdf8d25 100644
--- a/arch/arm/mach-keystone/keystone.c
+++ b/arch/arm/mach-keystone/keystone.c
@@ -65,7 +65,7 @@ static void __init keystone_init(void)
static long long __init keystone_pv_fixup(void)
{
long long offset;
- phys_addr_t mem_start, mem_end;
+ u64 mem_start, mem_end;
mem_start = memblock_start_of_DRAM();
mem_end = memblock_end_of_DRAM();
@@ -78,7 +78,7 @@ static long long __init keystone_pv_fixup(void)
if (mem_start < KEYSTONE_HIGH_PHYS_START ||
mem_end > KEYSTONE_HIGH_PHYS_END) {
pr_crit("Invalid address space for memory (%08llx-%08llx)\n",
- (u64)mem_start, (u64)mem_end);
+ mem_start, mem_end);
return 0;
}
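
Aside (not part of the patch): the keystone hunk widens mem_start/mem_end to u64, presumably because phys_addr_t is only 32 bits on non-LPAE ARM configurations while the KEYSTONE_HIGH_PHYS_* bounds sit above 4 GiB, so the range check would otherwise truncate or warn. A minimal standalone sketch of the truncation effect, using a hypothetical bound:

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_HIGH_PHYS_START	0x800000000ULL	/* hypothetical >4 GiB bound */

int main(void)
{
	/* A 32-bit "phys_addr_t" silently truncates a 36-bit address ... */
	uint32_t narrow = (uint32_t)0x880000000ULL;
	/* ... while a fixed 64-bit type keeps it intact for the range check. */
	uint64_t wide = 0x880000000ULL;

	printf("narrow below bound: %d\n", narrow < EXAMPLE_HIGH_PHYS_START);
	printf("wide below bound:   %d\n", wide < EXAMPLE_HIGH_PHYS_START);
	return 0;
}
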
diff --git a/arch/arm/mach-omap1/ams-delta-fiq-handler.S b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
index 14a6c3eb3298..f745a65d3bd7 100644
--- a/arch/arm/mach-omap1/ams-delta-fiq-handler.S
+++ b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
@@ -15,6 +15,7 @@
#include <linux/platform_data/gpio-omap.h>
#include <asm/assembler.h>
+#include <asm/irq.h>
#include "ams-delta-fiq.h"
#include "board-ams-delta.h"
diff --git a/arch/arm/mach-omap2/omap-secure.c b/arch/arm/mach-omap2/omap-secure.c
index f70d561f37f7..0659ab4cb0af 100644
--- a/arch/arm/mach-omap2/omap-secure.c
+++ b/arch/arm/mach-omap2/omap-secure.c
@@ -9,6 +9,7 @@
*/
#include <linux/arm-smccc.h>
+#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -20,6 +21,7 @@
#include "common.h"
#include "omap-secure.h"
+#include "soc.h"
static phys_addr_t omap_secure_memblock_base;
@@ -213,3 +215,40 @@ void __init omap_secure_init(void)
{
omap_optee_init_check();
}
+
+/*
+ * Dummy dispatcher call after core OSWR and MPU off. Updates the ROM return
+ * address after MMU has been re-enabled after CPU1 has been woken up again.
+ * Otherwise the ROM code will attempt to use the earlier physical return
+ * address that got set with MMU off when waking up CPU1. Only used on secure
+ * devices.
+ */
+static int cpu_notifier(struct notifier_block *nb, unsigned long cmd, void *v)
+{
+ switch (cmd) {
+ case CPU_CLUSTER_PM_EXIT:
+ omap_secure_dispatcher(OMAP4_PPA_SERVICE_0,
+ FLAG_START_CRITICAL,
+ 0, 0, 0, 0, 0);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block secure_notifier_block = {
+ .notifier_call = cpu_notifier,
+};
+
+static int __init secure_pm_init(void)
+{
+ if (omap_type() == OMAP2_DEVICE_TYPE_GP || !soc_is_omap44xx())
+ return 0;
+
+ cpu_pm_register_notifier(&secure_notifier_block);
+
+ return 0;
+}
+omap_arch_initcall(secure_pm_init);
diff --git a/arch/arm/mach-omap2/omap-secure.h b/arch/arm/mach-omap2/omap-secure.h
index 4aaa95706d39..172069f31616 100644
--- a/arch/arm/mach-omap2/omap-secure.h
+++ b/arch/arm/mach-omap2/omap-secure.h
@@ -50,6 +50,7 @@
#define OMAP5_DRA7_MON_SET_ACR_INDEX 0x107
/* Secure PPA(Primary Protected Application) APIs */
+#define OMAP4_PPA_SERVICE_0 0x21
#define OMAP4_PPA_L2_POR_INDEX 0x23
#define OMAP4_PPA_CPU_ACTRL_SMP_INDEX 0x25
diff --git a/arch/arm/mach-omap2/pmic-cpcap.c b/arch/arm/mach-omap2/pmic-cpcap.c
index 09076ad0576d..668dc84fd31e 100644
--- a/arch/arm/mach-omap2/pmic-cpcap.c
+++ b/arch/arm/mach-omap2/pmic-cpcap.c
@@ -246,10 +246,10 @@ int __init omap4_cpcap_init(void)
omap_voltage_register_pmic(voltdm, &omap443x_max8952_mpu);
if (of_machine_is_compatible("motorola,droid-bionic")) {
- voltdm = voltdm_lookup("mpu");
+ voltdm = voltdm_lookup("core");
omap_voltage_register_pmic(voltdm, &omap_cpcap_core);
- voltdm = voltdm_lookup("mpu");
+ voltdm = voltdm_lookup("iva");
omap_voltage_register_pmic(voltdm, &omap_cpcap_iva);
} else {
voltdm = voltdm_lookup("core");
diff --git a/arch/arm/mach-omap2/sr_device.c b/arch/arm/mach-omap2/sr_device.c
index 62df666c2bd0..17b66f0d0dee 100644
--- a/arch/arm/mach-omap2/sr_device.c
+++ b/arch/arm/mach-omap2/sr_device.c
@@ -88,34 +88,26 @@ static void __init sr_set_nvalues(struct omap_volt_data *volt_data,
extern struct omap_sr_data omap_sr_pdata[];
-static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
+static int __init sr_init_by_name(const char *name, const char *voltdm)
{
struct omap_sr_data *sr_data = NULL;
struct omap_volt_data *volt_data;
- struct omap_smartreflex_dev_attr *sr_dev_attr;
static int i;
- if (!strncmp(oh->name, "smartreflex_mpu_iva", 20) ||
- !strncmp(oh->name, "smartreflex_mpu", 16))
+ if (!strncmp(name, "smartreflex_mpu_iva", 20) ||
+ !strncmp(name, "smartreflex_mpu", 16))
sr_data = &omap_sr_pdata[OMAP_SR_MPU];
- else if (!strncmp(oh->name, "smartreflex_core", 17))
+ else if (!strncmp(name, "smartreflex_core", 17))
sr_data = &omap_sr_pdata[OMAP_SR_CORE];
- else if (!strncmp(oh->name, "smartreflex_iva", 16))
+ else if (!strncmp(name, "smartreflex_iva", 16))
sr_data = &omap_sr_pdata[OMAP_SR_IVA];
if (!sr_data) {
- pr_err("%s: Unknown instance %s\n", __func__, oh->name);
+ pr_err("%s: Unknown instance %s\n", __func__, name);
return -EINVAL;
}
- sr_dev_attr = (struct omap_smartreflex_dev_attr *)oh->dev_attr;
- if (!sr_dev_attr || !sr_dev_attr->sensor_voltdm_name) {
- pr_err("%s: No voltage domain specified for %s. Cannot initialize\n",
- __func__, oh->name);
- goto exit;
- }
-
- sr_data->name = oh->name;
+ sr_data->name = name;
if (cpu_is_omap343x())
sr_data->ip_type = 1;
else
@@ -136,10 +128,10 @@ static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
}
}
- sr_data->voltdm = voltdm_lookup(sr_dev_attr->sensor_voltdm_name);
+ sr_data->voltdm = voltdm_lookup(voltdm);
if (!sr_data->voltdm) {
pr_err("%s: Unable to get voltage domain pointer for VDD %s\n",
- __func__, sr_dev_attr->sensor_voltdm_name);
+ __func__, voltdm);
goto exit;
}
@@ -160,6 +152,20 @@ exit:
return 0;
}
+static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
+{
+ struct omap_smartreflex_dev_attr *sr_dev_attr;
+
+ sr_dev_attr = (struct omap_smartreflex_dev_attr *)oh->dev_attr;
+ if (!sr_dev_attr || !sr_dev_attr->sensor_voltdm_name) {
+ pr_err("%s: No voltage domain specified for %s. Cannot initialize\n",
+ __func__, oh->name);
+ return 0;
+ }
+
+ return sr_init_by_name(oh->name, sr_dev_attr->sensor_voltdm_name);
+}
+
/*
* API to be called from board files to enable smartreflex
* autocompensation at init.
@@ -169,7 +175,42 @@ void __init omap_enable_smartreflex_on_init(void)
sr_enable_on_init = true;
}
+static const char * const omap4_sr_instances[] = {
+ "mpu",
+ "iva",
+ "core",
+};
+
+static const char * const dra7_sr_instances[] = {
+ "mpu",
+ "core",
+};
+
int __init omap_devinit_smartreflex(void)
{
+ const char * const *sr_inst;
+ int i, nr_sr = 0;
+
+ if (soc_is_omap44xx()) {
+ sr_inst = omap4_sr_instances;
+ nr_sr = ARRAY_SIZE(omap4_sr_instances);
+
+ } else if (soc_is_dra7xx()) {
+ sr_inst = dra7_sr_instances;
+ nr_sr = ARRAY_SIZE(dra7_sr_instances);
+ }
+
+ if (nr_sr) {
+ const char *name, *voltdm;
+
+ for (i = 0; i < nr_sr; i++) {
+ name = kasprintf(GFP_KERNEL, "smartreflex_%s", sr_inst[i]);
+ voltdm = sr_inst[i];
+ sr_init_by_name(name, voltdm);
+ }
+
+ return 0;
+ }
+
return omap_hwmod_for_each_by_class("smartreflex", sr_dev_init, NULL);
}
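
Aside (not part of the patch): the new omap_devinit_smartreflex() path walks a per-SoC table of voltage-domain names and derives each SmartReflex instance name with kasprintf(). A rough userspace sketch of that loop, with asprintf() standing in for kasprintf() and a stub init function:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

static const char * const omap4_instances[] = { "mpu", "iva", "core" };

/* Stub standing in for sr_init_by_name(); it only reports its arguments. */
static int init_by_name(const char *name, const char *voltdm)
{
	printf("init %s (voltage domain %s)\n", name, voltdm);
	return 0;
}

int main(void)
{
	for (size_t i = 0; i < sizeof(omap4_instances) / sizeof(omap4_instances[0]); i++) {
		char *name;

		if (asprintf(&name, "smartreflex_%s", omap4_instances[i]) < 0)
			return 1;
		init_by_name(name, omap4_instances[i]);
		free(name);	/* the kernel keeps the name around; the sketch does not */
	}
	return 0;
}
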
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index d1010ec26e9f..d237bd030238 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -502,16 +502,20 @@ static inline void mainstone_init_keypad(void) {}
#endif
static int mst_pcmcia0_irqs[11] = {
- [0 ... 10] = -1,
+ [0 ... 4] = -1,
[5] = MAINSTONE_S0_CD_IRQ,
+ [6 ... 7] = -1,
[8] = MAINSTONE_S0_STSCHG_IRQ,
+ [9] = -1,
[10] = MAINSTONE_S0_IRQ,
};
static int mst_pcmcia1_irqs[11] = {
- [0 ... 10] = -1,
+ [0 ... 4] = -1,
[5] = MAINSTONE_S1_CD_IRQ,
+ [6 ... 7] = -1,
[8] = MAINSTONE_S1_STSCHG_IRQ,
+ [9] = -1,
[10] = MAINSTONE_S1_IRQ,
};
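
Aside (not part of the patch): the mainstone tables are rewritten so that no slot is initialized twice. GNU range designators such as "[0 ... 10] = -1" followed by "[5] = x" are legal but override earlier entries, which -Woverride-init flags; the split ranges initialize each slot exactly once. A standalone sketch with hypothetical IRQ numbers:

#include <stdio.h>

#define EXAMPLE_CD_IRQ		51	/* hypothetical values */
#define EXAMPLE_STSCHG_IRQ	52
#define EXAMPLE_SOCKET_IRQ	53

static int example_irqs[11] = {
	[0 ... 4] = -1,
	[5] = EXAMPLE_CD_IRQ,
	[6 ... 7] = -1,
	[8] = EXAMPLE_STSCHG_IRQ,
	[9] = -1,
	[10] = EXAMPLE_SOCKET_IRQ,
};

int main(void)
{
	for (int i = 0; i < 11; i++)
		printf("irq[%d] = %d\n", i, example_irqs[i]);
	return 0;
}
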
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
index 7de6b376d792..9058cfa4980f 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
@@ -198,6 +198,7 @@
ranges = <0x0 0x00 0x1700000 0x100000>;
reg = <0x00 0x1700000 0x0 0x100000>;
interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+ dma-coherent;
sec_jr0: jr@10000 {
compatible = "fsl,sec-v5.4-job-ring",
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index 5a8a1dc4262d..28c51e521cb2 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -348,6 +348,7 @@
ranges = <0x0 0x00 0x1700000 0x100000>;
reg = <0x00 0x1700000 0x0 0x100000>;
interrupts = <0 75 0x4>;
+ dma-coherent;
sec_jr0: jr@10000 {
compatible = "fsl,sec-v5.4-job-ring",
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
index 1d6dfd189c7f..39458305e333 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
@@ -354,6 +354,7 @@
ranges = <0x0 0x00 0x1700000 0x100000>;
reg = <0x00 0x1700000 0x0 0x100000>;
interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+ dma-coherent;
sec_jr0: jr@10000 {
compatible = "fsl,sec-v5.4-job-ring",
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
index 5ccc4cc91959..a003e6af3353 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
+++ b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
@@ -124,7 +124,7 @@
#define MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x0A4 0x30C 0x000 0x0 0x0
#define MX8MM_IOMUXC_SD1_CMD_GPIO2_IO1 0x0A4 0x30C 0x000 0x5 0x0
#define MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x0A8 0x310 0x000 0x0 0x0
-#define MX8MM_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x31 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x310 0x000 0x5 0x0
#define MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x0AC 0x314 0x000 0x0 0x0
#define MX8MM_IOMUXC_SD1_DATA1_GPIO2_IO3 0x0AC 0x314 0x000 0x5 0x0
#define MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x0B0 0x318 0x000 0x0 0x0
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts b/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts
index 0e1a6d953389..122c95ddad30 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts
@@ -35,7 +35,7 @@
&i2c2 {
clock-frequency = <400000>;
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c2>;
pinctrl-1 = <&pinctrl_i2c2_gpio>;
sda-gpios = <&gpio5 17 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
index 44a8c2337cee..f3965ec5b31d 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
@@ -67,7 +67,7 @@
&i2c1 {
clock-frequency = <400000>;
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c1>;
pinctrl-1 = <&pinctrl_i2c1_gpio>;
sda-gpios = <&gpio5 15 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h
index b94b02080a34..68e8fa172974 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h
+++ b/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h
@@ -130,7 +130,7 @@
#define MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0x0A4 0x30C 0x000 0x0 0x0
#define MX8MQ_IOMUXC_SD1_CMD_GPIO2_IO1 0x0A4 0x30C 0x000 0x5 0x0
#define MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x0A8 0x310 0x000 0x0 0x0
-#define MX8MQ_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x31 0x000 0x5 0x0
+#define MX8MQ_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x310 0x000 0x5 0x0
#define MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x0AC 0x314 0x000 0x0 0x0
#define MX8MQ_IOMUXC_SD1_DATA1_GPIO2_IO3 0x0AC 0x314 0x000 0x5 0x0
#define MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x0B0 0x318 0x000 0x0 0x0
diff --git a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
index 64179a372ecf..c6f5df2deccf 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
@@ -310,9 +310,11 @@
};
CP11X_LABEL(sata0): sata@540000 {
- compatible = "marvell,armada-8k-ahci";
+ compatible = "marvell,armada-8k-ahci",
+ "generic-ahci";
reg = <0x540000 0x30000>;
dma-coherent;
+ interrupts = <107 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&CP11X_LABEL(clk) 1 15>,
<&CP11X_LABEL(clk) 1 16>;
#address-cells = <1>;
@@ -320,12 +322,10 @@
status = "disabled";
sata-port@0 {
- interrupts = <109 IRQ_TYPE_LEVEL_HIGH>;
reg = <0>;
};
sata-port@1 {
- interrupts = <107 IRQ_TYPE_LEVEL_HIGH>;
reg = <1>;
};
};
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 4e90c2debf70..94d4025acc0b 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -278,6 +278,7 @@
#define CPTR_EL2_DEFAULT CPTR_EL2_RES1
/* Hyp Debug Configuration Register bits */
+#define MDCR_EL2_TTRF (1 << 19)
#define MDCR_EL2_TPMS (1 << 14)
#define MDCR_EL2_E2PB_MASK (UL(0x3))
#define MDCR_EL2_E2PB_SHIFT (UL(12))
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 2a5d9854d664..e5281e1c8f1d 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -383,7 +383,6 @@ static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
* of support.
*/
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
ARM64_FTR_END,
};
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
index 7a7e425616b5..dbc890511631 100644
--- a/arch/arm64/kvm/debug.c
+++ b/arch/arm64/kvm/debug.c
@@ -89,6 +89,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
* - Debug ROM Address (MDCR_EL2_TDRA)
* - OS related registers (MDCR_EL2_TDOSA)
* - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
+ * - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
*
* Additionally, KVM only traps guest accesses to the debug registers if
* the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
@@ -112,6 +113,7 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
MDCR_EL2_TPMS |
+ MDCR_EL2_TTRF |
MDCR_EL2_TPMCR |
MDCR_EL2_TDRA |
MDCR_EL2_TDOSA);
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index ee3682b9873c..39f8f7f9227c 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -429,6 +429,13 @@ u64 __vgic_v3_get_gic_config(void)
if (has_vhe())
flags = local_daif_save();
+ /*
+ * Table 11-2 "Permitted ICC_SRE_ELx.SRE settings" indicates
+ * that to be able to set ICC_SRE_EL1.SRE to 0, all the
+ * interrupt overrides must be set. You've got to love this.
+ */
+ sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO);
+ isb();
write_gicreg(0, ICC_SRE_EL1);
isb();
@@ -436,6 +443,8 @@ u64 __vgic_v3_get_gic_config(void)
write_gicreg(sre, ICC_SRE_EL1);
isb();
+ sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0);
+ isb();
if (has_vhe())
local_daif_restore(flags);
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 279be0153f8b..23a140327a0b 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -43,7 +43,7 @@
#include <asm/prom.h>
#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
-const char __section(".appended_dtb") __appended_dtb[0x100000];
+char __section(".appended_dtb") __appended_dtb[0x100000];
#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */
struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
index cf5ee9b0b393..84ee232278a6 100644
--- a/arch/parisc/include/asm/cmpxchg.h
+++ b/arch/parisc/include/asm/cmpxchg.h
@@ -72,7 +72,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
#endif
case 4: return __cmpxchg_u32((unsigned int *)ptr,
(unsigned int)old, (unsigned int)new_);
- case 1: return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_);
+ case 1: return __cmpxchg_u8((u8 *)ptr, old & 0xff, new_ & 0xff);
}
__cmpxchg_called_with_bad_pointer();
return old;
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 11ece0d07374..b5fbcd2c1780 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -272,7 +272,6 @@ on downward growing arches, it looks like this:
regs->gr[23] = 0; \
} while(0)
-struct task_struct;
struct mm_struct;
/* Free all resources held by a thread. */
diff --git a/arch/parisc/math-emu/fpu.h b/arch/parisc/math-emu/fpu.h
index 853c19c03828..dec951d40286 100644
--- a/arch/parisc/math-emu/fpu.h
+++ b/arch/parisc/math-emu/fpu.h
@@ -5,34 +5,10 @@
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
*/
-/*
- * BEGIN_DESC
- *
- * File:
- * @(#) pa/fp/fpu.h $Revision: 1.1 $
- *
- * Purpose:
- * <<please update with a synopis of the functionality provided by this file>>
- *
- *
- * END_DESC
-*/
-
-#ifdef __NO_PA_HDRS
- PA header file -- do not include this header file for non-PA builds.
-#endif
-
#ifndef _MACHINE_FPU_INCLUDED /* allows multiple inclusion */
#define _MACHINE_FPU_INCLUDED
-#if 0
-#ifndef _SYS_STDSYMS_INCLUDED
-# include <sys/stdsyms.h>
-#endif /* _SYS_STDSYMS_INCLUDED */
-#include <machine/pdc/pdc_rqsts.h>
-#endif
-
#define PA83_FPU_FLAG 0x00000001
#define PA89_FPU_FLAG 0x00000002
#define PA2_0_FPU_FLAG 0x00000010
@@ -43,21 +19,19 @@
#define COPR_FP 0x00000080 /* Floating point -- Coprocessor 0 */
#define SFU_MPY_DIVIDE 0x00008000 /* Multiply/Divide __ SFU 0 */
-
#define EM_FPU_TYPE_OFFSET 272
/* version of EMULATION software for COPR,0,0 instruction */
#define EMULATION_VERSION 4
/*
- * The only was to differeniate between TIMEX and ROLEX (or PCX-S and PCX-T)
- * is thorough the potential type field from the PDC_MODEL call. The
- * following flags are used at assist this differeniation.
+ * The only way to differentiate between TIMEX and ROLEX (or PCX-S and PCX-T)
+ * is through the potential type field from the PDC_MODEL call.
+ * The following flags are used to assist this differentiation.
*/
#define ROLEX_POTENTIAL_KEY_FLAGS PDC_MODEL_CPU_KEY_WORD_TO_IO
#define TIMEX_POTENTIAL_KEY_FLAGS (PDC_MODEL_CPU_KEY_QUAD_STORE | \
PDC_MODEL_CPU_KEY_RECIP_SQRT)
-
#endif /* ! _MACHINE_FPU_INCLUDED */
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 764170fdb0f7..3805519a6469 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -887,7 +887,8 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot,
want_v = hpte_encode_avpn(vpn, psize, ssize);
- flags = (newpp & 7) | H_AVPN;
+ flags = (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO)) | H_AVPN;
+ flags |= (newpp & HPTE_R_KEY_HI) >> 48;
if (mmu_has_feature(MMU_FTR_KERNEL_RO))
/* Move pp0 into bit 8 (IBM 55) */
flags |= (newpp & HPTE_R_PP0) >> 55;
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index ea4d6a660e0d..e83e0891272d 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -452,12 +452,28 @@ static int do_suspend(void)
return ret;
}
+/**
+ * struct pseries_suspend_info - State shared between CPUs for join/suspend.
+ * @counter: Threads are to increment this upon resuming from suspend
+ * or if an error is received from H_JOIN. The thread which performs
+ * the first increment (i.e. sets it to 1) is responsible for
+ * waking the other threads.
+ * @done: False if join/suspend is in progress. True if the operation is
+ * complete (successful or not).
+ */
+struct pseries_suspend_info {
+ atomic_t counter;
+ bool done;
+};
+
static int do_join(void *arg)
{
- atomic_t *counter = arg;
+ struct pseries_suspend_info *info = arg;
+ atomic_t *counter = &info->counter;
long hvrc;
int ret;
+retry:
/* Must ensure MSR.EE off for H_JOIN. */
hard_irq_disable();
hvrc = plpar_hcall_norets(H_JOIN);
@@ -473,8 +489,20 @@ static int do_join(void *arg)
case H_SUCCESS:
/*
* The suspend is complete and this cpu has received a
- * prod.
+ * prod, or we've received a stray prod from unrelated
+ * code (e.g. paravirt spinlocks) and we need to join
+ * again.
+ *
+ * This barrier orders the return from H_JOIN above vs
+ * the load of info->done. It pairs with the barrier
+ * in the wakeup/prod path below.
*/
+ smp_mb();
+ if (READ_ONCE(info->done) == false) {
+ pr_info_ratelimited("premature return from H_JOIN on CPU %i, retrying",
+ smp_processor_id());
+ goto retry;
+ }
ret = 0;
break;
case H_BAD_MODE:
@@ -488,6 +516,13 @@ static int do_join(void *arg)
if (atomic_inc_return(counter) == 1) {
pr_info("CPU %u waking all threads\n", smp_processor_id());
+ WRITE_ONCE(info->done, true);
+ /*
+ * This barrier orders the store to info->done vs subsequent
+ * H_PRODs to wake the other CPUs. It pairs with the barrier
+ * in the H_SUCCESS case above.
+ */
+ smp_mb();
prod_others();
}
/*
@@ -535,11 +570,16 @@ static int pseries_suspend(u64 handle)
int ret;
while (true) {
- atomic_t counter = ATOMIC_INIT(0);
+ struct pseries_suspend_info info;
unsigned long vasi_state;
int vasi_err;
- ret = stop_machine(do_join, &counter, cpu_online_mask);
+ info = (struct pseries_suspend_info) {
+ .counter = ATOMIC_INIT(0),
+ .done = false,
+ };
+
+ ret = stop_machine(do_join, &info, cpu_online_mask);
if (ret == 0)
break;
/*
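
Aside (not part of the patch): the added done flag lets a CPU that returns from H_JOIN because of a stray prod simply join again, while the first CPU to resume publishes completion before waking the rest. A loose userspace analogue is the usual wait-in-a-loop condition-variable pattern, sketched below with invented names (the mutex stands in for the explicit memory barriers):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool done;	/* analogue of info->done */

static void *joiner(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!done)	/* a wakeup while !done is treated as stray */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	printf("thread %ld resumed after completion\n", (long)arg);
	return NULL;
}

int main(void)
{
	pthread_t threads[4];

	for (long i = 0; i < 4; i++)
		pthread_create(&threads[i], NULL, joiner, (void *)i);

	/* Publish completion first, then wake everyone (cf. done + prod_others()). */
	pthread_mutex_lock(&lock);
	done = true;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);

	for (int i = 0; i < 4; i++)
		pthread_join(threads[i], NULL);
	return 0;
}
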
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 87d7b52f278f..0d0cf67359cb 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -314,7 +314,7 @@ endchoice
# Common NUMA Features
config NUMA
bool "NUMA Memory Allocation and Scheduler Support"
- depends on SMP
+ depends on SMP && MMU
select GENERIC_ARCH_NUMA
select OF_NUMA
select ARCH_SUPPORTS_NUMA_BALANCING
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index 824b2c9da75b..f944062c9d99 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -306,7 +306,9 @@ do { \
* data types like structures or arrays.
*
* @ptr must have pointer-to-simple-variable type, and @x must be assignable
- * to the result of dereferencing @ptr.
+ * to the result of dereferencing @ptr. The value of @x is copied to avoid
+ * re-ordering where @x is evaluated inside the block that enables user-space
+ * access (thus bypassing user space protection if @x is a function).
*
* Caller must check the pointer with access_ok() before calling this
* function.
@@ -316,12 +318,13 @@ do { \
#define __put_user(x, ptr) \
({ \
__typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
+ __typeof__(*__gu_ptr) __val = (x); \
long __pu_err = 0; \
\
__chk_user_ptr(__gu_ptr); \
\
__enable_user_access(); \
- __put_user_nocheck(x, __gu_ptr, __pu_err); \
+ __put_user_nocheck(__val, __gu_ptr, __pu_err); \
__disable_user_access(); \
\
__pu_err; \
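
Aside (not part of the patch): the __put_user() change copies the macro argument into a local of the pointee's type before the user-access window is opened, so arbitrary expressions, including function calls, are evaluated while user access is still disabled. A standalone sketch of the same macro shape with the enable/disable calls stubbed out:

#include <stdio.h>

static void enable_user_access(void)  { puts("access enabled"); }
static void disable_user_access(void) { puts("access disabled"); }

#define put_user_sketch(x, ptr)						\
({									\
	__typeof__(*(ptr)) __val = (x);	/* evaluated with access disabled */ \
	enable_user_access();						\
	*(ptr) = __val;							\
	disable_user_access();						\
	0;								\
})

/* A value-producing call that must not run inside the access window. */
static int compute_value(void)
{
	puts("computing value");
	return 42;
}

int main(void)
{
	int dst;

	put_user_sketch(compute_value(), &dst);
	printf("dst = %d\n", dst);
	return 0;
}
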
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 744f3209c48d..76274a4a1d8e 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -447,6 +447,7 @@ ENDPROC(__switch_to)
#endif
.section ".rodata"
+ .align LGREG
/* Exception vector table */
ENTRY(excp_vect_table)
RISCV_PTR do_trap_insn_misaligned
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 3f893c9d9d85..2b3e0cb90d78 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -14,7 +14,7 @@
#include <asm/stacktrace.h>
-register const unsigned long sp_in_global __asm__("sp");
+register unsigned long sp_in_global __asm__("sp");
#ifdef CONFIG_FRAME_POINTER
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index 4f85c6d0ddf8..937d13ce9ab8 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -216,7 +216,7 @@ void __init kasan_init(void)
break;
kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
- };
+ }
for (i = 0; i < PTRS_PER_PTE; i++)
set_pte(&kasan_early_shadow_pte[i],
diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
index ee056f4a4fa3..2b543163d90a 100644
--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -12,6 +12,7 @@ enum stack_type {
STACK_TYPE_IRQ,
STACK_TYPE_NODAT,
STACK_TYPE_RESTART,
+ STACK_TYPE_MCCK,
};
struct stack_info {
diff --git a/arch/s390/include/asm/vdso/data.h b/arch/s390/include/asm/vdso/data.h
index 7b3cdb4a5f48..73ee89142666 100644
--- a/arch/s390/include/asm/vdso/data.h
+++ b/arch/s390/include/asm/vdso/data.h
@@ -6,7 +6,7 @@
#include <vdso/datapage.h>
struct arch_vdso_data {
- __u64 tod_steering_delta;
+ __s64 tod_steering_delta;
__u64 tod_steering_end;
};
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index af013b4244d3..2da027359798 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -37,10 +37,12 @@ static int diag8_noresponse(int cmdlen)
static int diag8_response(int cmdlen, char *response, int *rlen)
{
+ unsigned long _cmdlen = cmdlen | 0x40000000L;
+ unsigned long _rlen = *rlen;
register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
register unsigned long reg3 asm ("3") = (addr_t) response;
- register unsigned long reg4 asm ("4") = cmdlen | 0x40000000L;
- register unsigned long reg5 asm ("5") = *rlen;
+ register unsigned long reg4 asm ("4") = _cmdlen;
+ register unsigned long reg5 asm ("5") = _rlen;
asm volatile(
" diag %2,%0,0x8\n"
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 0dc4b258b98d..db1bc00229ca 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -79,6 +79,15 @@ static bool in_nodat_stack(unsigned long sp, struct stack_info *info)
return in_stack(sp, info, STACK_TYPE_NODAT, top - THREAD_SIZE, top);
}
+static bool in_mcck_stack(unsigned long sp, struct stack_info *info)
+{
+ unsigned long frame_size, top;
+
+ frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
+ top = S390_lowcore.mcck_stack + frame_size;
+ return in_stack(sp, info, STACK_TYPE_MCCK, top - THREAD_SIZE, top);
+}
+
static bool in_restart_stack(unsigned long sp, struct stack_info *info)
{
unsigned long frame_size, top;
@@ -108,7 +117,8 @@ int get_stack_info(unsigned long sp, struct task_struct *task,
/* Check per-cpu stacks */
if (!in_irq_stack(sp, info) &&
!in_nodat_stack(sp, info) &&
- !in_restart_stack(sp, info))
+ !in_restart_stack(sp, info) &&
+ !in_mcck_stack(sp, info))
goto unknown;
recursion_check:
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 601c21791338..714269e10eec 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -174,7 +174,7 @@ void noinstr do_ext_irq(struct pt_regs *regs)
memcpy(&regs->int_code, &S390_lowcore.ext_cpu_addr, 4);
regs->int_parm = S390_lowcore.ext_params;
- regs->int_parm_long = *(unsigned long *)S390_lowcore.ext_params2;
+ regs->int_parm_long = S390_lowcore.ext_params2;
from_idle = !user_mode(regs) && regs->psw.addr == (unsigned long)psw_idle_exit;
if (from_idle)
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 60da976eee6f..72134f9f6ff5 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -354,7 +354,7 @@ static int __init stack_realloc(void)
if (!new)
panic("Couldn't allocate machine check stack");
WRITE_ONCE(S390_lowcore.mcck_stack, new + STACK_INIT_OFFSET);
- memblock_free(old, THREAD_SIZE);
+ memblock_free_late(old, THREAD_SIZE);
return 0;
}
early_initcall(stack_realloc);
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 165da961f901..326cb8f75f58 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -80,10 +80,12 @@ void __init time_early_init(void)
{
struct ptff_qto qto;
struct ptff_qui qui;
+ int cs;
/* Initialize TOD steering parameters */
tod_steering_end = tod_clock_base.tod;
- vdso_data->arch_data.tod_steering_end = tod_steering_end;
+ for (cs = 0; cs < CS_BASES; cs++)
+ vdso_data[cs].arch_data.tod_steering_end = tod_steering_end;
if (!test_facility(28))
return;
@@ -366,6 +368,7 @@ static void clock_sync_global(unsigned long delta)
{
unsigned long now, adj;
struct ptff_qto qto;
+ int cs;
/* Fixup the monotonic sched clock. */
tod_clock_base.eitod += delta;
@@ -381,7 +384,10 @@ static void clock_sync_global(unsigned long delta)
panic("TOD clock sync offset %li is too large to drift\n",
tod_steering_delta);
tod_steering_end = now + (abs(tod_steering_delta) << 15);
- vdso_data->arch_data.tod_steering_end = tod_steering_end;
+ for (cs = 0; cs < CS_BASES; cs++) {
+ vdso_data[cs].arch_data.tod_steering_end = tod_steering_end;
+ vdso_data[cs].arch_data.tod_steering_delta = tod_steering_delta;
+ }
/* Update LPAR offset. */
if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 2d6d5a28c3bf..9a85eae37b17 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -27,7 +27,7 @@ endif
REALMODE_CFLAGS := -m16 -g -Os -DDISABLE_BRANCH_PROFILING \
-Wall -Wstrict-prototypes -march=i386 -mregparm=3 \
-fno-strict-aliasing -fomit-frame-pointer -fno-pic \
- -mno-mmx -mno-sse
+ -mno-mmx -mno-sse $(call cc-option,-fcf-protection=none)
REALMODE_CFLAGS += -ffreestanding
REALMODE_CFLAGS += -fno-stack-protector
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index c0538f82c9a2..630ff08532be 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -132,6 +132,7 @@ void native_play_dead(void);
void play_dead_common(void);
void wbinvd_on_cpu(int cpu);
int wbinvd_on_all_cpus(void);
+void cond_wakeup_cpu0(void);
void native_smp_send_reschedule(int cpu);
void native_send_call_func_ipi(const struct cpumask *mask);
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 7068e4bb057d..1a162e559753 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -87,18 +87,6 @@ clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
#endif
/*
- * The maximum amount of extra memory compared to the base size. The
- * main scaling factor is the size of struct page. At extreme ratios
- * of base:extra, all the base memory can be filled with page
- * structures for the extra memory, leaving no space for anything
- * else.
- *
- * 10x seems like a reasonable balance between scaling flexibility and
- * leaving a practically usable system.
- */
-#define XEN_EXTRA_MEM_RATIO (10)
-
-/*
* Helper functions to write or read unsigned long values to/from
* memory, when the access may fault.
*/
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 7bdc0239a943..14cd3186dc77 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1554,10 +1554,18 @@ void __init acpi_boot_table_init(void)
/*
* Initialize the ACPI boot-time table parser.
*/
- if (acpi_table_init()) {
+ if (acpi_locate_initial_tables())
disable_acpi();
- return;
- }
+ else
+ acpi_reserve_initial_tables();
+}
+
+int __init early_acpi_boot_init(void)
+{
+ if (acpi_disabled)
+ return 1;
+
+ acpi_table_init_complete();
acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
@@ -1570,18 +1578,9 @@ void __init acpi_boot_table_init(void)
} else {
printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
disable_acpi();
- return;
+ return 1;
}
}
-}
-
-int __init early_acpi_boot_init(void)
-{
- /*
- * If acpi_disabled, bail out
- */
- if (acpi_disabled)
- return 1;
/*
* Process the Multiple APIC Description Table (MADT), if present
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index d883176ef2ce..5ecd69a48393 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1045,6 +1045,9 @@ void __init setup_arch(char **cmdline_p)
cleanup_highmap();
+ /* Look for ACPI tables and reserve memory occupied by them. */
+ acpi_boot_table_init();
+
memblock_set_current_limit(ISA_END_ADDRESS);
e820__memblock_setup();
@@ -1136,11 +1139,6 @@ void __init setup_arch(char **cmdline_p)
early_platform_quirks();
- /*
- * Parse the ACPI tables for possible boot-time SMP configuration.
- */
- acpi_boot_table_init();
-
early_acpi_boot_init();
initmem_init();
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 02813a7f3a7c..16703c35a944 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1659,13 +1659,17 @@ void play_dead_common(void)
local_irq_disable();
}
-static bool wakeup_cpu0(void)
+/**
+ * cond_wakeup_cpu0 - Wake up CPU0 if needed.
+ *
+ * If NMI wants to wake up CPU0, start CPU0.
+ */
+void cond_wakeup_cpu0(void)
{
if (smp_processor_id() == 0 && enable_start_cpu0)
- return true;
-
- return false;
+ start_cpu0();
}
+EXPORT_SYMBOL_GPL(cond_wakeup_cpu0);
/*
* We need to flush the caches before going to sleep, lest we have
@@ -1734,11 +1738,8 @@ static inline void mwait_play_dead(void)
__monitor(mwait_ptr, 0, 0);
mb();
__mwait(eax, 0);
- /*
- * If NMI wants to wake up CPU0, start CPU0.
- */
- if (wakeup_cpu0())
- start_cpu0();
+
+ cond_wakeup_cpu0();
}
}
@@ -1749,11 +1750,8 @@ void hlt_play_dead(void)
while (1) {
native_halt();
- /*
- * If NMI wants to wake up CPU0, start CPU0.
- */
- if (wakeup_cpu0())
- start_cpu0();
+
+ cond_wakeup_cpu0();
}
}
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 1b4766fe1de2..eafc4d601f25 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-ccflags-y += -Iarch/x86/kvm
+ccflags-y += -I $(srctree)/arch/x86/kvm
ccflags-$(CONFIG_KVM_WERROR) += -Werror
ifeq ($(CONFIG_FRAME_POINTER),y)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index d75524bc8423..951dae4e7175 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5884,6 +5884,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
struct kvm_mmu_page *sp;
unsigned int ratio;
LIST_HEAD(invalid_list);
+ bool flush = false;
ulong to_zap;
rcu_idx = srcu_read_lock(&kvm->srcu);
@@ -5905,19 +5906,19 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
lpage_disallowed_link);
WARN_ON_ONCE(!sp->lpage_disallowed);
if (is_tdp_mmu_page(sp)) {
- kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn,
- sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level));
+ flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
} else {
kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
WARN_ON_ONCE(sp->lpage_disallowed);
}
if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
- kvm_mmu_commit_zap_page(kvm, &invalid_list);
+ kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
cond_resched_rwlock_write(&kvm->mmu_lock);
+ flush = false;
}
}
- kvm_mmu_commit_zap_page(kvm, &invalid_list);
+ kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
write_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, rcu_idx);
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 462b1f71c77f..018d82e73e31 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -86,7 +86,7 @@ static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
- gfn_t start, gfn_t end, bool can_yield);
+ gfn_t start, gfn_t end, bool can_yield, bool flush);
void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
{
@@ -99,7 +99,7 @@ void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
list_del(&root->link);
- zap_gfn_range(kvm, root, 0, max_gfn, false);
+ zap_gfn_range(kvm, root, 0, max_gfn, false, false);
free_page((unsigned long)root->spt);
kmem_cache_free(mmu_page_header_cache, root);
@@ -668,20 +668,21 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
* scheduler needs the CPU or there is contention on the MMU lock. If this
* function cannot yield, it will not release the MMU lock or reschedule and
* the caller must ensure it does not supply too large a GFN range, or the
- * operation can cause a soft lockup.
+ * operation can cause a soft lockup. Note, in some use cases a flush may be
+ * required by prior actions. Ensure the pending flush is performed prior to
+ * yielding.
*/
static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
- gfn_t start, gfn_t end, bool can_yield)
+ gfn_t start, gfn_t end, bool can_yield, bool flush)
{
struct tdp_iter iter;
- bool flush_needed = false;
rcu_read_lock();
tdp_root_for_each_pte(iter, root, start, end) {
if (can_yield &&
- tdp_mmu_iter_cond_resched(kvm, &iter, flush_needed)) {
- flush_needed = false;
+ tdp_mmu_iter_cond_resched(kvm, &iter, flush)) {
+ flush = false;
continue;
}
@@ -699,11 +700,11 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
continue;
tdp_mmu_set_spte(kvm, &iter, 0);
- flush_needed = true;
+ flush = true;
}
rcu_read_unlock();
- return flush_needed;
+ return flush;
}
/*
@@ -712,13 +713,14 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
* SPTEs have been cleared and a TLB flush is needed before releasing the
* MMU lock.
*/
-bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
+bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end,
+ bool can_yield)
{
struct kvm_mmu_page *root;
bool flush = false;
for_each_tdp_mmu_root_yield_safe(kvm, root)
- flush |= zap_gfn_range(kvm, root, start, end, true);
+ flush = zap_gfn_range(kvm, root, start, end, can_yield, flush);
return flush;
}
@@ -930,7 +932,7 @@ static int zap_gfn_range_hva_wrapper(struct kvm *kvm,
struct kvm_mmu_page *root, gfn_t start,
gfn_t end, unsigned long unused)
{
- return zap_gfn_range(kvm, root, start, end, false);
+ return zap_gfn_range(kvm, root, start, end, false, false);
}
int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,
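
Aside (not part of the patch): zap_gfn_range() now threads the caller's pending-flush state through the loop, so any TLB flush owed by earlier work is issued before yielding and whatever is still pending is handed back to the caller. A standalone sketch of that flush-accumulation shape with stubbed work:

#include <stdbool.h>
#include <stdio.h>

static void flush_tlb_stub(void) { puts("flush"); }
static void yield_stub(void)     { puts("yield"); }
static bool should_yield(int i)  { return i && (i % 4 == 0); }

/* Returns true if the caller still owes a flush for work done here. */
static bool zap_range(int start, int end, bool flush)
{
	for (int i = start; i < end; i++) {
		if (should_yield(i)) {
			/* Perform any pending flush before giving up the CPU. */
			if (flush)
				flush_tlb_stub();
			flush = false;
			yield_stub();
		}
		printf("zap %d\n", i);
		flush = true;	/* something was zapped, a flush is now owed */
	}
	return flush;
}

int main(void)
{
	bool flush = false;

	flush = zap_range(0, 10, flush);
	if (flush)
		flush_tlb_stub();	/* final flush for the last batch */
	return 0;
}
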
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index 3b761c111bff..31096ece9b14 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -8,7 +8,29 @@
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root);
-bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end);
+bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end,
+ bool can_yield);
+static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start,
+ gfn_t end)
+{
+ return __kvm_tdp_mmu_zap_gfn_range(kvm, start, end, true);
+}
+static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+ gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level);
+
+ /*
+ * Don't allow yielding, as the caller may have a flush pending. Note,
+ * if mmu_lock is held for write, zapping will never yield in this case,
+ * but explicitly disallow it for safety. The TDP MMU does not yield
+ * until it has made forward progress (steps sideways), and when zapping
+ * a single shadow page that it's guaranteed to see (thus the mmu_lock
+ * requirement), its "step sideways" will always step beyond the bounds
+ * of the shadow page's gfn range and stop iterating before yielding.
+ */
+ lockdep_assert_held_write(&kvm->mmu_lock);
+ return __kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn, end, false);
+}
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 35891d9a1099..fb204eaa8bb3 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -246,11 +246,18 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
return true;
}
-static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
+static bool nested_vmcb_check_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
struct kvm_vcpu *vcpu = &svm->vcpu;
bool vmcb12_lma;
+ /*
+ * FIXME: these should be done after copying the fields,
+ * to avoid TOC/TOU races. For these save area checks
+ * the possible damage is limited since kvm_set_cr0 and
+ * kvm_set_cr4 handle failure; EFER_SVME is an exception
+ * so it is force-set later in nested_prepare_vmcb_save.
+ */
if ((vmcb12->save.efer & EFER_SVME) == 0)
return false;
@@ -271,7 +278,7 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
return false;
- return nested_vmcb_check_controls(&vmcb12->control);
+ return true;
}
static void load_nested_vmcb_control(struct vcpu_svm *svm,
@@ -396,7 +403,14 @@ static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
svm->vmcb->save.gdtr = vmcb12->save.gdtr;
svm->vmcb->save.idtr = vmcb12->save.idtr;
kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
- svm_set_efer(&svm->vcpu, vmcb12->save.efer);
+
+ /*
+ * Force-set EFER_SVME even though it is checked earlier on the
+ * VMCB12, because the guest can flip the bit between the check
+ * and now. Clearing EFER_SVME would call svm_free_nested.
+ */
+ svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);
+
svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2;
@@ -468,7 +482,6 @@ int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
svm->nested.vmcb12_gpa = vmcb12_gpa;
- load_nested_vmcb_control(svm, &vmcb12->control);
nested_prepare_vmcb_control(svm);
nested_prepare_vmcb_save(svm, vmcb12);
@@ -515,7 +528,10 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
if (WARN_ON_ONCE(!svm->nested.initialized))
return -EINVAL;
- if (!nested_vmcb_checks(svm, vmcb12)) {
+ load_nested_vmcb_control(svm, &vmcb12->control);
+
+ if (!nested_vmcb_check_save(svm, vmcb12) ||
+ !nested_vmcb_check_controls(&svm->nested.ctl)) {
vmcb12->control.exit_code = SVM_EXIT_ERR;
vmcb12->control.exit_code_hi = 0;
vmcb12->control.exit_info_1 = 0;
@@ -1209,6 +1225,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
*/
if (!(save->cr0 & X86_CR0_PG))
goto out_free;
+ if (!(save->efer & EFER_SVME))
+ goto out_free;
/*
* All checks done, we can enter guest mode. L1 control fields
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 035da07500e8..fdf587f19c5f 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -98,6 +98,8 @@ static enum index msr_to_index(u32 msr)
static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
enum pmu_type type)
{
+ struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
+
switch (msr) {
case MSR_F15H_PERF_CTL0:
case MSR_F15H_PERF_CTL1:
@@ -105,6 +107,9 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
case MSR_F15H_PERF_CTL3:
case MSR_F15H_PERF_CTL4:
case MSR_F15H_PERF_CTL5:
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
+ return NULL;
+ fallthrough;
case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
if (type != PMU_TYPE_EVNTSEL)
return NULL;
@@ -115,6 +120,9 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
case MSR_F15H_PERF_CTR3:
case MSR_F15H_PERF_CTR4:
case MSR_F15H_PERF_CTR5:
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
+ return NULL;
+ fallthrough;
case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
if (type != PMU_TYPE_COUNTER)
return NULL;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index fe806e894212..eca63625aee4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -271,8 +271,7 @@ static struct kmem_cache *x86_emulator_cache;
* When called, it means the previous get/set msr reached an invalid msr.
* Return true if we want to ignore/silent this failed msr access.
*/
-static bool kvm_msr_ignored_check(struct kvm_vcpu *vcpu, u32 msr,
- u64 data, bool write)
+static bool kvm_msr_ignored_check(u32 msr, u64 data, bool write)
{
const char *op = write ? "wrmsr" : "rdmsr";
@@ -1445,7 +1444,7 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
if (r == KVM_MSR_RET_INVALID) {
/* Unconditionally clear the output for simplicity */
*data = 0;
- if (kvm_msr_ignored_check(vcpu, index, 0, false))
+ if (kvm_msr_ignored_check(index, 0, false))
r = 0;
}
@@ -1620,7 +1619,7 @@ static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
int ret = __kvm_set_msr(vcpu, index, data, host_initiated);
if (ret == KVM_MSR_RET_INVALID)
- if (kvm_msr_ignored_check(vcpu, index, data, true))
+ if (kvm_msr_ignored_check(index, data, true))
ret = 0;
return ret;
@@ -1658,7 +1657,7 @@ static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
if (ret == KVM_MSR_RET_INVALID) {
/* Unconditionally clear *data for simplicity */
*data = 0;
- if (kvm_msr_ignored_check(vcpu, index, 0, false))
+ if (kvm_msr_ignored_check(index, 0, false))
ret = 0;
}
@@ -2329,7 +2328,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
kvm_vcpu_write_tsc_offset(vcpu, offset);
raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
- spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
+ spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
if (!matched) {
kvm->arch.nr_vcpus_matched_tsc = 0;
} else if (!already_matched) {
@@ -2337,7 +2336,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
}
kvm_track_tsc_matching(vcpu);
- spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
+ spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);
}
static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
@@ -2559,13 +2558,16 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
int i;
struct kvm_vcpu *vcpu;
struct kvm_arch *ka = &kvm->arch;
+ unsigned long flags;
kvm_hv_invalidate_tsc_page(kvm);
- spin_lock(&ka->pvclock_gtod_sync_lock);
kvm_make_mclock_inprogress_request(kvm);
+
/* no guest entries from this point */
+ spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
pvclock_update_vm_gtod_copy(kvm);
+ spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
kvm_for_each_vcpu(i, vcpu, kvm)
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -2573,8 +2575,6 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
/* guest entries allowed */
kvm_for_each_vcpu(i, vcpu, kvm)
kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
-
- spin_unlock(&ka->pvclock_gtod_sync_lock);
#endif
}
@@ -2582,17 +2582,18 @@ u64 get_kvmclock_ns(struct kvm *kvm)
{
struct kvm_arch *ka = &kvm->arch;
struct pvclock_vcpu_time_info hv_clock;
+ unsigned long flags;
u64 ret;
- spin_lock(&ka->pvclock_gtod_sync_lock);
+ spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
if (!ka->use_master_clock) {
- spin_unlock(&ka->pvclock_gtod_sync_lock);
+ spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
return get_kvmclock_base_ns() + ka->kvmclock_offset;
}
hv_clock.tsc_timestamp = ka->master_cycle_now;
hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
- spin_unlock(&ka->pvclock_gtod_sync_lock);
+ spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
/* both __this_cpu_read() and rdtsc() should be on the same cpu */
get_cpu();
@@ -2686,13 +2687,13 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
* If the host uses TSC clock, then passthrough TSC as stable
* to the guest.
*/
- spin_lock(&ka->pvclock_gtod_sync_lock);
+ spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
use_master_clock = ka->use_master_clock;
if (use_master_clock) {
host_tsc = ka->master_cycle_now;
kernel_ns = ka->master_kernel_ns;
}
- spin_unlock(&ka->pvclock_gtod_sync_lock);
+ spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
/* Keep irq disabled to prevent changes to the clock */
local_irq_save(flags);
@@ -5726,6 +5727,7 @@ set_pit2_out:
}
#endif
case KVM_SET_CLOCK: {
+ struct kvm_arch *ka = &kvm->arch;
struct kvm_clock_data user_ns;
u64 now_ns;
@@ -5744,8 +5746,22 @@ set_pit2_out:
* pvclock_update_vm_gtod_copy().
*/
kvm_gen_update_masterclock(kvm);
- now_ns = get_kvmclock_ns(kvm);
- kvm->arch.kvmclock_offset += user_ns.clock - now_ns;
+
+ /*
+ * This pairs with kvm_guest_time_update(): when masterclock is
+ * in use, we use master_kernel_ns + kvmclock_offset to set
+ * unsigned 'system_time' so if we use get_kvmclock_ns() (which
+ * is slightly ahead) here we risk going negative on unsigned
+ * 'system_time' when 'user_ns.clock' is very small.
+ */
+ spin_lock_irq(&ka->pvclock_gtod_sync_lock);
+ if (kvm->arch.use_master_clock)
+ now_ns = ka->master_kernel_ns;
+ else
+ now_ns = get_kvmclock_base_ns();
+ ka->kvmclock_offset = user_ns.clock - now_ns;
+ spin_unlock_irq(&ka->pvclock_gtod_sync_lock);
+
kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE);
break;
}
@@ -7724,6 +7740,7 @@ static void kvm_hyperv_tsc_notifier(void)
struct kvm *kvm;
struct kvm_vcpu *vcpu;
int cpu;
+ unsigned long flags;
mutex_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list)
@@ -7739,17 +7756,15 @@ static void kvm_hyperv_tsc_notifier(void)
list_for_each_entry(kvm, &vm_list, vm_list) {
struct kvm_arch *ka = &kvm->arch;
- spin_lock(&ka->pvclock_gtod_sync_lock);
-
+ spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
pvclock_update_vm_gtod_copy(kvm);
+ spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
kvm_for_each_vcpu(cpu, vcpu, kvm)
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
kvm_for_each_vcpu(cpu, vcpu, kvm)
kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
-
- spin_unlock(&ka->pvclock_gtod_sync_lock);
}
mutex_unlock(&kvm_lock);
}
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 39eb04887141..9035e34aa156 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -250,7 +250,6 @@ static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu)
void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs);
void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
-void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
u64 get_kvmclock_ns(struct kvm *kvm);
int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 4b01f7dbaf30..ae78cef79980 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -262,7 +262,7 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
if (pgprot_val(old_prot) == pgprot_val(new_prot))
return;
- pa = pfn << page_level_shift(level);
+ pa = pfn << PAGE_SHIFT;
size = page_level_size(level);
/*
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 9eead60f0301..1a467b2a5467 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1689,7 +1689,16 @@ emit_jmp:
}
if (image) {
- if (unlikely(proglen + ilen > oldproglen)) {
+ /*
+ * When populating the image, assert that:
+ *
+ * i) We do not write beyond the allocated space, and
+ * ii) addrs[i] did not change from the prior run, in order
+ * to validate assumptions made for computing branch
+ * displacements.
+ */
+ if (unlikely(proglen + ilen > oldproglen ||
+ proglen + ilen != addrs[i])) {
pr_err("bpf_jit: fatal error\n");
return -EFAULT;
}
diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
index 0a7a2870f111..3da88ded6ee3 100644
--- a/arch/x86/net/bpf_jit_comp32.c
+++ b/arch/x86/net/bpf_jit_comp32.c
@@ -2469,7 +2469,16 @@ notyet:
}
if (image) {
- if (unlikely(proglen + ilen > oldproglen)) {
+ /*
+ * When populating the image, assert that:
+ *
+ * i) We do not write beyond the allocated space, and
+ * ii) addrs[i] did not change from the prior run, in order
+ * to validate assumptions made for computing branch
+ * displacements.
+ */
+ if (unlikely(proglen + ilen > oldproglen ||
+ proglen + ilen != addrs[i])) {
pr_err("bpf_jit: fatal error\n");
return -EFAULT;
}
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 17d80f751fcb..ac06ca32e9ef 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -98,8 +98,8 @@ EXPORT_SYMBOL_GPL(xen_p2m_size);
unsigned long xen_max_p2m_pfn __read_mostly;
EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);
-#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
-#define P2M_LIMIT CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
+#ifdef CONFIG_XEN_MEMORY_HOTPLUG_LIMIT
+#define P2M_LIMIT CONFIG_XEN_MEMORY_HOTPLUG_LIMIT
#else
#define P2M_LIMIT 0
#endif
@@ -416,9 +416,6 @@ void __init xen_vmalloc_p2m_tree(void)
xen_p2m_last_pfn = xen_max_p2m_pfn;
p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
- if (!p2m_limit && IS_ENABLED(CONFIG_XEN_UNPOPULATED_ALLOC))
- p2m_limit = xen_start_info->nr_pages * XEN_EXTRA_MEM_RATIO;
-
vm.flags = VM_ALLOC;
vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
PMD_SIZE * PMDS_PER_MID_PAGE);
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 1a3b75652fa4..8bfc10330107 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -59,6 +59,18 @@ static struct {
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
+/*
+ * The maximum amount of extra memory compared to the base size. The
+ * main scaling factor is the size of struct page. At extreme ratios
+ * of base:extra, all the base memory can be filled with page
+ * structures for the extra memory, leaving no space for anything
+ * else.
+ *
+ * 10x seems like a reasonable balance between scaling flexibility and
+ * leaving a practically usable system.
+ */
+#define EXTRA_MEM_RATIO (10)
+
static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);
static void __init xen_parse_512gb(void)
@@ -778,13 +790,13 @@ char * __init xen_memory_setup(void)
extra_pages += max_pages - max_pfn;
/*
- * Clamp the amount of extra memory to a XEN_EXTRA_MEM_RATIO
+ * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
* factor the base size.
*
* Make sure we have no memory above max_pages, as this area
* isn't handled by the p2m management.
*/
- extra_pages = min3(XEN_EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
+ extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
extra_pages, max_pages - max_pfn);
i = 0;
addr = xen_e820_table.entries[0].addr;
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
index c426b846beef..45cc0ae0af6f 100644
--- a/arch/xtensa/kernel/coprocessor.S
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -100,37 +100,6 @@
LOAD_CP_REGS_TAB(7)
/*
- * coprocessor_flush(struct thread_info*, index)
- * a2 a3
- *
- * Save coprocessor registers for coprocessor 'index'.
- * The register values are saved to or loaded from the coprocessor area
- * inside the task_info structure.
- *
- * Note that this function doesn't update the coprocessor_owner information!
- *
- */
-
-ENTRY(coprocessor_flush)
-
- /* reserve 4 bytes on stack to save a0 */
- abi_entry(4)
-
- s32i a0, a1, 0
- movi a0, .Lsave_cp_regs_jump_table
- addx8 a3, a3, a0
- l32i a4, a3, 4
- l32i a3, a3, 0
- add a2, a2, a4
- beqz a3, 1f
- callx0 a3
-1: l32i a0, a1, 0
-
- abi_ret(4)
-
-ENDPROC(coprocessor_flush)
-
-/*
* Entry condition:
*
* a0: trashed, original value saved on stack (PT_AREG0)
@@ -245,6 +214,39 @@ ENTRY(fast_coprocessor)
ENDPROC(fast_coprocessor)
+ .text
+
+/*
+ * coprocessor_flush(struct thread_info*, index)
+ * a2 a3
+ *
+ * Save coprocessor registers for coprocessor 'index'.
+ * The register values are saved to or loaded from the coprocessor area
+ * inside the task_info structure.
+ *
+ * Note that this function doesn't update the coprocessor_owner information!
+ *
+ */
+
+ENTRY(coprocessor_flush)
+
+ /* reserve 4 bytes on stack to save a0 */
+ abi_entry(4)
+
+ s32i a0, a1, 0
+ movi a0, .Lsave_cp_regs_jump_table
+ addx8 a3, a3, a0
+ l32i a4, a3, 4
+ l32i a3, a3, 0
+ add a2, a2, a4
+ beqz a3, 1f
+ callx0 a3
+1: l32i a0, a1, 0
+
+ abi_ret(4)
+
+ENDPROC(coprocessor_flush)
+
.data
ENTRY(coprocessor_owner)
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 7666408ce12a..95a74890c7e9 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -112,8 +112,11 @@ good_area:
*/
fault = handle_mm_fault(vma, address, flags, regs);
- if (fault_signal_pending(fault, regs))
+ if (fault_signal_pending(fault, regs)) {
+ if (!user_mode(regs))
+ goto bad_page_fault;
return;
+ }
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
diff --git a/block/bio.c b/block/bio.c
index 26b7f721cda8..50e579088aca 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -277,7 +277,7 @@ static struct bio *__bio_chain_endio(struct bio *bio)
{
struct bio *parent = bio->bi_private;
- if (!parent->bi_status)
+ if (bio->bi_status && !parent->bi_status)
parent->bi_status = bio->bi_status;
bio_put(bio);
return parent;
@@ -949,7 +949,7 @@ void bio_release_pages(struct bio *bio, bool mark_dirty)
}
EXPORT_SYMBOL_GPL(bio_release_pages);
-static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
+static void __bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
{
WARN_ON_ONCE(bio->bi_max_vecs);
@@ -959,11 +959,26 @@ static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
bio->bi_iter.bi_size = iter->count;
bio_set_flag(bio, BIO_NO_PAGE_REF);
bio_set_flag(bio, BIO_CLONED);
+}
+static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
+{
+ __bio_iov_bvec_set(bio, iter);
iov_iter_advance(iter, iter->count);
return 0;
}
+static int bio_iov_bvec_set_append(struct bio *bio, struct iov_iter *iter)
+{
+ struct request_queue *q = bio->bi_bdev->bd_disk->queue;
+ struct iov_iter i = *iter;
+
+ iov_iter_truncate(&i, queue_max_zone_append_sectors(q) << 9);
+ __bio_iov_bvec_set(bio, &i);
+ iov_iter_advance(iter, i.count);
+ return 0;
+}
+
#define PAGE_PTRS_PER_BVEC (sizeof(struct bio_vec) / sizeof(struct page *))
/**
@@ -1094,8 +1109,8 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
int ret = 0;
if (iov_iter_is_bvec(iter)) {
- if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
- return -EINVAL;
+ if (bio_op(bio) == REQ_OP_ZONE_APPEND)
+ return bio_iov_bvec_set_append(bio, iter);
return bio_iov_bvec_set(bio, iter);
}
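
A minimal userspace sketch of the byte-size clamp performed by bio_iov_bvec_set_append() above (the queue limit and I/O size are hypothetical): the local iterator copy is truncated to the zone-append limit, converted from 512-byte sectors to bytes, and only the truncated amount is consumed from the caller's iterator.

#include <stdio.h>

int main(void)
{
	unsigned long long max_zone_append_sectors = 128;         /* hypothetical queue limit: 64 KiB */
	unsigned long long iter_count = 1ULL << 20;               /* caller submits a 1 MiB bvec iter */
	unsigned long long limit = max_zone_append_sectors << 9;  /* sectors to bytes                 */

	/* iov_iter_truncate() keeps at most 'limit' bytes in the local copy */
	unsigned long long issued = iter_count < limit ? iter_count : limit;

	printf("this bio carries %llu bytes, %llu bytes remain for later bios\n",
	       issued, iter_count - issued);
	return 0;
}
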
diff --git a/block/blk-merge.c b/block/blk-merge.c
index ffb4aa0ea68b..4d97fb6dd226 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -382,6 +382,14 @@ unsigned int blk_recalc_rq_segments(struct request *rq)
switch (bio_op(rq->bio)) {
case REQ_OP_DISCARD:
case REQ_OP_SECURE_ERASE:
+ if (queue_max_discard_segments(rq->q) > 1) {
+ struct bio *bio = rq->bio;
+
+ for_each_bio(bio)
+ nr_phys_segs++;
+ return nr_phys_segs;
+ }
+ return 1;
case REQ_OP_WRITE_ZEROES:
return 0;
case REQ_OP_WRITE_SAME:
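
A minimal userspace sketch of the segment counting added above (struct fake_bio is an illustrative stand-in for struct bio and its bi_next chaining): when the queue supports multi-segment discards, every chained bio counts as one segment, otherwise the whole request counts as a single segment.

#include <stdio.h>

struct fake_bio {
	struct fake_bio *bi_next;
};

static unsigned int discard_segments(struct fake_bio *head,
				     unsigned int max_discard_segments)
{
	unsigned int nr_phys_segs = 0;
	struct fake_bio *bio;

	if (max_discard_segments > 1) {
		for (bio = head; bio; bio = bio->bi_next)   /* mirrors for_each_bio() */
			nr_phys_segs++;
		return nr_phys_segs;
	}
	return 1;
}

int main(void)
{
	struct fake_bio c = { NULL }, b = { &c }, a = { &b };

	printf("multi-segment queue: %u\n", discard_segments(&a, 8));   /* prints 3 */
	printf("single-segment queue: %u\n", discard_segments(&a, 1));  /* prints 1 */
	return 0;
}
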
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 9ebb344e2585..271f6596435b 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -302,7 +302,6 @@ static const char *const rqf_name[] = {
RQF_NAME(QUIET),
RQF_NAME(ELVPRIV),
RQF_NAME(IO_STAT),
- RQF_NAME(ALLOCED),
RQF_NAME(PM),
RQF_NAME(HASHED),
RQF_NAME(STATS),
diff --git a/block/partitions/core.c b/block/partitions/core.c
index 1a7558917c47..46f055bc7ecb 100644
--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -323,6 +323,13 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
int err;
/*
+ * disk_max_parts() won't be zero, since either GENHD_FL_EXT_DEVT is set
+ * or 'minors' is passed to alloc_disk().
+ */
+ if (partno >= disk_max_parts(disk))
+ return ERR_PTR(-EINVAL);
+
+ /*
* Partitions are not supported on zoned block devices that are used as
* such.
*/
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index 3f045b5953b2..a0c1a665dfc1 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -99,13 +99,12 @@ acpi_status acpi_ns_root_initialize(void)
* just create and link the new node(s) here.
*/
new_node =
- ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_namespace_node));
+ acpi_ns_create_node(*ACPI_CAST_PTR(u32, init_val->name));
if (!new_node) {
status = AE_NO_MEMORY;
goto unlock_and_exit;
}
- ACPI_COPY_NAMESEG(new_node->name.ascii, init_val->name);
new_node->descriptor_type = ACPI_DESC_TYPE_NAMED;
new_node->type = init_val->type;
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index e6a5d997241c..cb8f70842249 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -9,6 +9,8 @@
#ifndef _ACPI_INTERNAL_H_
#define _ACPI_INTERNAL_H_
+#include <linux/idr.h>
+
#define PREFIX "ACPI: "
int early_acpi_osi_init(void);
@@ -96,9 +98,11 @@ void acpi_scan_table_handler(u32 event, void *table, void *context);
extern struct list_head acpi_bus_id_list;
+#define ACPI_MAX_DEVICE_INSTANCES 4096
+
struct acpi_device_bus_id {
const char *bus_id;
- unsigned int instance_no;
+ struct ida instance_ida;
struct list_head node;
};
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index d93e400940a3..4e2d76b8b697 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -29,6 +29,7 @@
*/
#ifdef CONFIG_X86
#include <asm/apic.h>
+#include <asm/cpu.h>
#endif
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
@@ -541,6 +542,10 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
wait_for_freeze();
} else
return -ENODEV;
+
+#if defined(CONFIG_X86) && defined(CONFIG_HOTPLUG_CPU)
+ cond_wakeup_cpu0();
+#endif
}
/* Never reached */
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index a184529d8fa4..6efe7edd7b1e 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -479,9 +479,8 @@ static void acpi_device_del(struct acpi_device *device)
list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node)
if (!strcmp(acpi_device_bus_id->bus_id,
acpi_device_hid(device))) {
- if (acpi_device_bus_id->instance_no > 0)
- acpi_device_bus_id->instance_no--;
- else {
+ ida_simple_remove(&acpi_device_bus_id->instance_ida, device->pnp.instance_no);
+ if (ida_is_empty(&acpi_device_bus_id->instance_ida)) {
list_del(&acpi_device_bus_id->node);
kfree_const(acpi_device_bus_id->bus_id);
kfree(acpi_device_bus_id);
@@ -631,6 +630,21 @@ static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id)
return NULL;
}
+static int acpi_device_set_name(struct acpi_device *device,
+ struct acpi_device_bus_id *acpi_device_bus_id)
+{
+ struct ida *instance_ida = &acpi_device_bus_id->instance_ida;
+ int result;
+
+ result = ida_simple_get(instance_ida, 0, ACPI_MAX_DEVICE_INSTANCES, GFP_KERNEL);
+ if (result < 0)
+ return result;
+
+ device->pnp.instance_no = result;
+ dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, result);
+ return 0;
+}
+
int acpi_device_add(struct acpi_device *device,
void (*release)(struct device *))
{
@@ -665,7 +679,9 @@ int acpi_device_add(struct acpi_device *device,
acpi_device_bus_id = acpi_device_bus_id_match(acpi_device_hid(device));
if (acpi_device_bus_id) {
- acpi_device_bus_id->instance_no++;
+ result = acpi_device_set_name(device, acpi_device_bus_id);
+ if (result)
+ goto err_unlock;
} else {
acpi_device_bus_id = kzalloc(sizeof(*acpi_device_bus_id),
GFP_KERNEL);
@@ -681,9 +697,16 @@ int acpi_device_add(struct acpi_device *device,
goto err_unlock;
}
+ ida_init(&acpi_device_bus_id->instance_ida);
+
+ result = acpi_device_set_name(device, acpi_device_bus_id);
+ if (result) {
+ kfree(acpi_device_bus_id);
+ goto err_unlock;
+ }
+
list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
}
- dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, acpi_device_bus_id->instance_no);
if (device->parent)
list_add_tail(&device->node, &device->parent->children);
@@ -1647,6 +1670,8 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
device_initialize(&device->dev);
dev_set_uevent_suppress(&device->dev, true);
acpi_init_coherency(device);
+ /* Assume there are unmet deps to start with. */
+ device->dep_unmet = 1;
}
void acpi_device_add_finalize(struct acpi_device *device)
@@ -1910,6 +1935,8 @@ static void acpi_scan_dep_init(struct acpi_device *adev)
{
struct acpi_dep_data *dep;
+ adev->dep_unmet = 0;
+
mutex_lock(&acpi_dep_list_lock);
list_for_each_entry(dep, &acpi_dep_list, node) {
@@ -1957,7 +1984,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
return AE_CTRL_DEPTH;
acpi_scan_init_hotplug(device);
- if (!check_dep)
+ /*
+ * If check_dep is true at this point, the device has no dependencies,
+ * or the creation of the device object would have been postponed above.
+ */
+ if (check_dep)
+ device->dep_unmet = 0;
+ else
acpi_scan_dep_init(device);
out:
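
A minimal userspace sketch of the lowest-free-ID behaviour the patch relies on from ida_simple_get()/ida_simple_remove() (the bitmap allocator and the PNP0C0A HID are illustrative only): freed instance numbers are handed out again, so names formatted as "%s:%02x" stay dense instead of the counter drifting as devices come and go.

#include <stdio.h>

#define MAX_INSTANCES 64   /* stand-in for ACPI_MAX_DEVICE_INSTANCES */

static unsigned char used[MAX_INSTANCES];

static int instance_get(void)
{
	for (int i = 0; i < MAX_INSTANCES; i++)
		if (!used[i]) {
			used[i] = 1;
			return i;            /* lowest free instance number */
		}
	return -1;                           /* like ida_simple_get() hitting the cap */
}

static void instance_put(int id)
{
	used[id] = 0;                        /* freed IDs become available again */
}

int main(void)
{
	int a = instance_get(), b = instance_get(), c = instance_get();

	printf("PNP0C0A:%02x PNP0C0A:%02x PNP0C0A:%02x\n", a, b, c);
	instance_put(b);                     /* middle device goes away */
	printf("next device becomes PNP0C0A:%02x\n", instance_get());
	return 0;
}
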
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index e48690a006a4..9d581045acff 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -780,7 +780,7 @@ acpi_status acpi_os_table_override(struct acpi_table_header *existing_table,
}
/*
- * acpi_table_init()
+ * acpi_locate_initial_tables()
*
* find RSDP, find and checksum SDT/XSDT.
* checksum all tables, print SDT/XSDT
@@ -788,7 +788,7 @@ acpi_status acpi_os_table_override(struct acpi_table_header *existing_table,
* result: sdt_entry[] is initialized
*/
-int __init acpi_table_init(void)
+int __init acpi_locate_initial_tables(void)
{
acpi_status status;
@@ -803,9 +803,45 @@ int __init acpi_table_init(void)
status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
if (ACPI_FAILURE(status))
return -EINVAL;
- acpi_table_initrd_scan();
+ return 0;
+}
+
+void __init acpi_reserve_initial_tables(void)
+{
+ int i;
+
+ for (i = 0; i < ACPI_MAX_TABLES; i++) {
+ struct acpi_table_desc *table_desc = &initial_tables[i];
+ u64 start = table_desc->address;
+ u64 size = table_desc->length;
+
+ if (!start || !size)
+ break;
+
+ pr_info("Reserving %4s table memory at [mem 0x%llx-0x%llx]\n",
+ table_desc->signature.ascii, start, start + size - 1);
+
+ memblock_reserve(start, size);
+ }
+}
+
+void __init acpi_table_init_complete(void)
+{
+ acpi_table_initrd_scan();
check_multiple_madt();
+}
+
+int __init acpi_table_init(void)
+{
+ int ret;
+
+ ret = acpi_locate_initial_tables();
+ if (ret)
+ return ret;
+
+ acpi_table_init_complete();
+
return 0;
}
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 811d298637cb..83cd4c95faf0 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -147,6 +147,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
},
{
+ .callback = video_detect_force_vendor,
.ident = "Sony VPCEH3U1E",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index f43430e9dcee..24fd6f369ebe 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -470,12 +470,14 @@ static ssize_t charlcd_write(struct file *file, const char __user *buf,
char c;
for (; count-- > 0; (*ppos)++, tmp++) {
- if (!in_interrupt() && (((count + 1) & 0x1f) == 0))
+ if (((count + 1) & 0x1f) == 0) {
/*
- * let's be a little nice with other processes
- * that need some CPU
+ * charlcd_write() is invoked as a VFS->write() callback
+ * and as such it is always invoked from preemptible
+ * context and may sleep.
*/
- schedule();
+ cond_resched();
+ }
if (get_user(c, tmp))
return -EFAULT;
@@ -537,12 +539,8 @@ static void charlcd_puts(struct charlcd *lcd, const char *s)
int count = strlen(s);
for (; count-- > 0; tmp++) {
- if (!in_interrupt() && (((count + 1) & 0x1f) == 0))
- /*
- * let's be a little nice with other processes
- * that need some CPU
- */
- schedule();
+ if (((count + 1) & 0x1f) == 0)
+ cond_resched();
charlcd_write_char(lcd, *tmp);
}
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 9179825ff646..e2cf3b29123e 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -97,6 +97,9 @@ static void deferred_probe_work_func(struct work_struct *work)
get_device(dev);
+ kfree(dev->p->deferred_probe_reason);
+ dev->p->deferred_probe_reason = NULL;
+
/*
* Drop the mutex while probing each device; the probe path may
* manipulate the deferred list
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index a46a7e30881b..fe1dad68aee4 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -305,7 +305,7 @@ static int rpm_get_suppliers(struct device *dev)
return 0;
}
-static void rpm_put_suppliers(struct device *dev)
+static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
{
struct device_link *link;
@@ -313,10 +313,30 @@ static void rpm_put_suppliers(struct device *dev)
device_links_read_lock_held()) {
while (refcount_dec_not_one(&link->rpm_active))
- pm_runtime_put(link->supplier);
+ pm_runtime_put_noidle(link->supplier);
+
+ if (try_to_suspend)
+ pm_request_idle(link->supplier);
}
}
+static void rpm_put_suppliers(struct device *dev)
+{
+ __rpm_put_suppliers(dev, true);
+}
+
+static void rpm_suspend_suppliers(struct device *dev)
+{
+ struct device_link *link;
+ int idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+ device_links_read_lock_held())
+ pm_request_idle(link->supplier);
+
+ device_links_read_unlock(idx);
+}
+
/**
* __rpm_callback - Run a given runtime PM callback for a given device.
* @cb: Runtime PM callback to run.
@@ -344,8 +364,10 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
idx = device_links_read_lock();
retval = rpm_get_suppliers(dev);
- if (retval)
+ if (retval) {
+ rpm_put_suppliers(dev);
goto fail;
+ }
device_links_read_unlock(idx);
}
@@ -368,9 +390,9 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
|| (dev->power.runtime_status == RPM_RESUMING && retval))) {
idx = device_links_read_lock();
- fail:
- rpm_put_suppliers(dev);
+ __rpm_put_suppliers(dev, false);
+fail:
device_links_read_unlock(idx);
}
@@ -642,8 +664,11 @@ static int rpm_suspend(struct device *dev, int rpmflags)
goto out;
}
+ if (dev->power.irq_safe)
+ goto out;
+
/* Maybe the parent is now able to suspend. */
- if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
+ if (parent && !parent->power.ignore_children) {
spin_unlock(&dev->power.lock);
spin_lock(&parent->power.lock);
@@ -652,6 +677,14 @@ static int rpm_suspend(struct device *dev, int rpmflags)
spin_lock(&dev->power.lock);
}
+ /* Maybe the suppliers are now able to suspend. */
+ if (dev->power.links_count > 0) {
+ spin_unlock_irq(&dev->power.lock);
+
+ rpm_suspend_suppliers(dev);
+
+ spin_lock_irq(&dev->power.lock);
+ }
out:
trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
@@ -1657,8 +1690,8 @@ void pm_runtime_get_suppliers(struct device *dev)
device_links_read_lock_held())
if (link->flags & DL_FLAG_PM_RUNTIME) {
link->supplier_preactivated = true;
- refcount_inc(&link->rpm_active);
pm_runtime_get_sync(link->supplier);
+ refcount_inc(&link->rpm_active);
}
device_links_read_unlock(idx);
@@ -1671,6 +1704,8 @@ void pm_runtime_get_suppliers(struct device *dev)
void pm_runtime_put_suppliers(struct device *dev)
{
struct device_link *link;
+ unsigned long flags;
+ bool put;
int idx;
idx = device_links_read_lock();
@@ -1679,7 +1714,11 @@ void pm_runtime_put_suppliers(struct device *dev)
device_links_read_lock_held())
if (link->supplier_preactivated) {
link->supplier_preactivated = false;
- if (refcount_dec_not_one(&link->rpm_active))
+ spin_lock_irqsave(&dev->power.lock, flags);
+ put = pm_runtime_status_suspended(dev) &&
+ refcount_dec_not_one(&link->rpm_active);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+ if (put)
pm_runtime_put(link->supplier);
}
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index d6c821d48090..51bfd7737552 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1369,10 +1369,13 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
}
if (dev->zoned)
- cmd->error = null_process_zoned_cmd(cmd, op,
- sector, nr_sectors);
+ sts = null_process_zoned_cmd(cmd, op, sector, nr_sectors);
else
- cmd->error = null_process_cmd(cmd, op, sector, nr_sectors);
+ sts = null_process_cmd(cmd, op, sector, nr_sectors);
+
+ /* Do not overwrite errors (e.g. timeout errors) */
+ if (cmd->error == BLK_STS_OK)
+ cmd->error = sts;
out:
nullb_complete_cmd(cmd);
@@ -1451,8 +1454,20 @@ static bool should_requeue_request(struct request *rq)
static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
{
+ struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
+
pr_info("rq %p timed out\n", rq);
- blk_mq_complete_request(rq);
+
+ /*
+ * If the device is marked as blocking (i.e. memory backed or zoned
+ * device), the submission path may be blocked waiting for resources
+ * and cause real timeouts. For these real timeouts, the submission
+ * path will complete the request using blk_mq_complete_request().
+ * Only fake timeouts need to execute blk_mq_complete_request() here.
+ */
+ cmd->error = BLK_STS_TIMEOUT;
+ if (cmd->fake_timeout)
+ blk_mq_complete_request(rq);
return BLK_EH_DONE;
}
@@ -1473,6 +1488,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
cmd->rq = bd->rq;
cmd->error = BLK_STS_OK;
cmd->nq = nq;
+ cmd->fake_timeout = should_timeout_request(bd->rq);
blk_mq_start_request(bd->rq);
@@ -1489,7 +1505,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
}
- if (should_timeout_request(bd->rq))
+ if (cmd->fake_timeout)
return BLK_STS_OK;
return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index 83504f3cc9d6..4876d5adb12d 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -22,6 +22,7 @@ struct nullb_cmd {
blk_status_t error;
struct nullb_queue *nq;
struct hrtimer timer;
+ bool fake_timeout;
};
struct nullb_queue {
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 1cdf09ff67b6..14e452896d04 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -891,7 +891,7 @@ next:
out:
for (i = last_map; i < num; i++) {
/* Don't zap current batch's valid persistent grants. */
- if(i >= last_map + segs_to_map)
+ if(i >= map_until)
pages[i]->persistent_gnt = NULL;
pages[i]->handle = BLKBACK_INVALID_HANDLE;
}
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 192cb8c191bc..5d603ef39bad 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -4767,8 +4767,8 @@ static int btusb_probe(struct usb_interface *intf,
data->diag = NULL;
}
- if (!enable_autosuspend)
- usb_disable_autosuspend(data->udev);
+ if (enable_autosuspend)
+ usb_enable_autosuspend(data->udev);
err = hci_register_dev(hdev);
if (err < 0)
@@ -4828,9 +4828,6 @@ static void btusb_disconnect(struct usb_interface *intf)
gpiod_put(data->reset_gpio);
hci_free_dev(hdev);
-
- if (!enable_autosuspend)
- usb_enable_autosuspend(data->udev);
}
#ifdef CONFIG_PM
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index dd9e7343a5e3..ea0424922de7 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -618,7 +618,7 @@ mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end)
* This part of the memory is above 4 GB, so we don't
* care for the MBus bridge hole.
*/
- if (reg_start >= 0x100000000ULL)
+ if ((u64)reg_start >= 0x100000000ULL)
continue;
/*
diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c
index b040447575ad..dcfb32ee5cb6 100644
--- a/drivers/bus/omap_l3_noc.c
+++ b/drivers/bus/omap_l3_noc.c
@@ -285,7 +285,7 @@ static int omap_l3_probe(struct platform_device *pdev)
*/
l3->debug_irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler,
- 0x0, "l3-dbg-irq", l3);
+ IRQF_NO_THREAD, "l3-dbg-irq", l3);
if (ret) {
dev_err(l3->dev, "request_irq failed for %d\n",
l3->debug_irq);
@@ -294,7 +294,7 @@ static int omap_l3_probe(struct platform_device *pdev)
l3->app_irq = platform_get_irq(pdev, 1);
ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler,
- 0x0, "l3-app-irq", l3);
+ IRQF_NO_THREAD, "l3-app-irq", l3);
if (ret)
dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq);
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index a27d751cf219..3d74f237f005 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -3053,7 +3053,9 @@ static int sysc_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- reset_control_assert(ddata->rsts);
+
+ if (!reset_control_status(ddata->rsts))
+ reset_control_assert(ddata->rsts);
unprepare:
sysc_unprepare(ddata);
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index a086dd34f932..4f501e4842ab 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -125,7 +125,7 @@ config AGP_HP_ZX1
config AGP_PARISC
tristate "HP Quicksilver AGP support"
- depends on AGP && PARISC && 64BIT
+ depends on AGP && PARISC && 64BIT && IOMMU_SBA
help
This option gives you AGP GART support for the HP Quicksilver
AGP bus adapter on HP PA-RISC machines (Ok, just on the C8000
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index d3f756f7b5a0..67e56cf638ef 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -267,7 +267,7 @@ struct freq_attr cpufreq_freq_attr_##_name##_freqs = \
__ATTR_RO(_name##_frequencies)
/*
- * show_scaling_available_frequencies - show available normal frequencies for
+ * scaling_available_frequencies_show - show available normal frequencies for
* the specified CPU
*/
static ssize_t scaling_available_frequencies_show(struct cpufreq_policy *policy,
@@ -279,7 +279,7 @@ cpufreq_attr_available_freq(scaling_available);
EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
/*
- * show_available_boost_freqs - show available boost frequencies for
+ * scaling_boost_frequencies_show - show available boost frequencies for
* the specified CPU
*/
static ssize_t scaling_boost_frequencies_show(struct cpufreq_policy *policy,
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 0a6438cbb3f3..e7a9561a826d 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -1241,6 +1241,7 @@ int extcon_dev_register(struct extcon_dev *edev)
sizeof(*edev->nh), GFP_KERNEL);
if (!edev->nh) {
ret = -ENOMEM;
+ device_unregister(&edev->dev);
goto err_dev;
}
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index 5fd6a60b6741..88ed971e32c0 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -346,6 +346,7 @@ nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct client *client = file->private_data;
spinlock_t *client_list_lock = &client->lynx->client_list_lock;
struct nosy_stats stats;
+ int ret;
switch (cmd) {
case NOSY_IOC_GET_STATS:
@@ -360,11 +361,15 @@ nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return 0;
case NOSY_IOC_START:
+ ret = -EBUSY;
spin_lock_irq(client_list_lock);
- list_add_tail(&client->link, &client->lynx->client_list);
+ if (list_empty(&client->link)) {
+ list_add_tail(&client->link, &client->lynx->client_list);
+ ret = 0;
+ }
spin_unlock_irq(client_list_lock);
- return 0;
+ return ret;
case NOSY_IOC_STOP:
spin_lock_irq(client_list_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 49267eb64302..29885febc0b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1007,13 +1007,9 @@ struct amdgpu_device {
/* s3/s4 mask */
bool in_suspend;
- bool in_hibernate;
-
- /*
- * The combination flag in_poweroff_reboot_com used to identify the poweroff
- * and reboot opt in the s0i3 system-wide suspend.
- */
- bool in_poweroff_reboot_com;
+ bool in_s3;
+ bool in_s4;
+ bool in_s0ix;
atomic_t in_gpu_reset;
enum pp_mp1_state mp1_state;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6447cd6ca5a8..8a5a8ff5d362 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2371,6 +2371,10 @@ static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
if (!adev->ip_blocks[i].status.late_initialized)
continue;
+ /* skip CG for GFX on S0ix */
+ if (adev->in_s0ix &&
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
+ continue;
/* skip CG for VCE/UVD, it's handled specially */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
@@ -2402,6 +2406,10 @@ static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_power
i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
if (!adev->ip_blocks[i].status.late_initialized)
continue;
+ /* skip PG for GFX on S0ix */
+ if (adev->in_s0ix &&
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
+ continue;
/* skip CG for VCE/UVD, it's handled specially */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
@@ -2678,11 +2686,8 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
{
int i, r;
- if (adev->in_poweroff_reboot_com ||
- !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) {
- amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
- amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
- }
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.valid)
@@ -2722,6 +2727,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
{
int i, r;
+ if (adev->in_s0ix)
+ amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
+
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.valid)
continue;
@@ -2734,6 +2742,17 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
adev->ip_blocks[i].status.hw = false;
continue;
}
+
+ /* skip suspend of gfx and psp for S0ix
+ * gfx is in gfxoff state, so on resume it will exit gfxoff just
+ * like at runtime. PSP is also part of the always-on hardware,
+ * so there is no need to suspend it.
+ */
+ if (adev->in_s0ix &&
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
+ continue;
+
/* XXX handle errors */
r = adev->ip_blocks[i].version->funcs->suspend(adev);
/* XXX handle errors */
@@ -3673,14 +3692,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
*/
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
{
- struct amdgpu_device *adev;
- struct drm_crtc *crtc;
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int r;
- adev = drm_to_adev(dev);
-
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -3692,61 +3706,19 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
cancel_delayed_work_sync(&adev->delayed_init_work);
- if (!amdgpu_device_has_dc_support(adev)) {
- /* turn off display hw */
- drm_modeset_lock_all(dev);
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter)
- drm_helper_connector_dpms(connector,
- DRM_MODE_DPMS_OFF);
- drm_connector_list_iter_end(&iter);
- drm_modeset_unlock_all(dev);
- /* unpin the front buffers and cursors */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_framebuffer *fb = crtc->primary->fb;
- struct amdgpu_bo *robj;
-
- if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
- struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
- r = amdgpu_bo_reserve(aobj, true);
- if (r == 0) {
- amdgpu_bo_unpin(aobj);
- amdgpu_bo_unreserve(aobj);
- }
- }
-
- if (fb == NULL || fb->obj[0] == NULL) {
- continue;
- }
- robj = gem_to_amdgpu_bo(fb->obj[0]);
- /* don't unpin kernel fb objects */
- if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
- r = amdgpu_bo_reserve(robj, true);
- if (r == 0) {
- amdgpu_bo_unpin(robj);
- amdgpu_bo_unreserve(robj);
- }
- }
- }
- }
-
amdgpu_ras_suspend(adev);
r = amdgpu_device_ip_suspend_phase1(adev);
- amdgpu_amdkfd_suspend(adev, adev->in_runpm);
+ if (!adev->in_s0ix)
+ amdgpu_amdkfd_suspend(adev, adev->in_runpm);
/* evict vram memory */
amdgpu_bo_evict_vram(adev);
amdgpu_fence_driver_suspend(adev);
- if (adev->in_poweroff_reboot_com ||
- !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev))
- r = amdgpu_device_ip_suspend_phase2(adev);
- else
- amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
+ r = amdgpu_device_ip_suspend_phase2(adev);
/* evict remaining vram memory
* This second call to evict vram is to evict the gart page table
* using the CPU.
@@ -3768,16 +3740,13 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
*/
int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
{
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
struct amdgpu_device *adev = drm_to_adev(dev);
- struct drm_crtc *crtc;
int r = 0;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- if (amdgpu_acpi_is_s0ix_supported(adev))
+ if (adev->in_s0ix)
amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);
/* post card */
@@ -3802,50 +3771,17 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
queue_delayed_work(system_wq, &adev->delayed_init_work,
msecs_to_jiffies(AMDGPU_RESUME_MS));
- if (!amdgpu_device_has_dc_support(adev)) {
- /* pin cursors */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
- if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
- struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
- r = amdgpu_bo_reserve(aobj, true);
- if (r == 0) {
- r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
- if (r != 0)
- dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
- amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
- amdgpu_bo_unreserve(aobj);
- }
- }
- }
+ if (!adev->in_s0ix) {
+ r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
+ if (r)
+ return r;
}
- r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
- if (r)
- return r;
/* Make sure IB tests flushed */
flush_delayed_work(&adev->delayed_init_work);
- /* blat the mode back in */
- if (fbcon) {
- if (!amdgpu_device_has_dc_support(adev)) {
- /* pre DCE11 */
- drm_helper_resume_force_mode(dev);
-
- /* turn on display hw */
- drm_modeset_lock_all(dev);
-
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter)
- drm_helper_connector_dpms(connector,
- DRM_MODE_DPMS_ON);
- drm_connector_list_iter_end(&iter);
-
- drm_modeset_unlock_all(dev);
- }
+ if (fbcon)
amdgpu_fbdev_set_suspend(adev, 0);
- }
drm_kms_helper_poll_enable(dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 48cb33e5b382..f753e04fee99 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -1310,3 +1310,92 @@ bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
stime, etime, mode);
}
+
+int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev_to_drm(adev);
+ struct drm_crtc *crtc;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ int r;
+
+ /* turn off display hw */
+ drm_modeset_lock_all(dev);
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter)
+ drm_helper_connector_dpms(connector,
+ DRM_MODE_DPMS_OFF);
+ drm_connector_list_iter_end(&iter);
+ drm_modeset_unlock_all(dev);
+ /* unpin the front buffers and cursors */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct amdgpu_bo *robj;
+
+ if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
+ struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+ r = amdgpu_bo_reserve(aobj, true);
+ if (r == 0) {
+ amdgpu_bo_unpin(aobj);
+ amdgpu_bo_unreserve(aobj);
+ }
+ }
+
+ if (fb == NULL || fb->obj[0] == NULL) {
+ continue;
+ }
+ robj = gem_to_amdgpu_bo(fb->obj[0]);
+ /* don't unpin kernel fb objects */
+ if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
+ r = amdgpu_bo_reserve(robj, true);
+ if (r == 0) {
+ amdgpu_bo_unpin(robj);
+ amdgpu_bo_unreserve(robj);
+ }
+ }
+ }
+ return r;
+}
+
+int amdgpu_display_resume_helper(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev_to_drm(adev);
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ struct drm_crtc *crtc;
+ int r;
+
+ /* pin cursors */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
+ struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+ r = amdgpu_bo_reserve(aobj, true);
+ if (r == 0) {
+ r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
+ if (r != 0)
+ dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
+ amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
+ amdgpu_bo_unreserve(aobj);
+ }
+ }
+ }
+
+ drm_helper_resume_force_mode(dev);
+
+ /* turn on display hw */
+ drm_modeset_lock_all(dev);
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter)
+ drm_helper_connector_dpms(connector,
+ DRM_MODE_DPMS_ON);
+ drm_connector_list_iter_end(&iter);
+
+ drm_modeset_unlock_all(dev);
+
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
index dc7b7d116549..7b6d83e2b13c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
@@ -47,4 +47,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
const struct drm_format_info *
amdgpu_lookup_format_info(u32 format, uint64_t modifier);
+int amdgpu_display_suspend_helper(struct amdgpu_device *adev);
+int amdgpu_display_resume_helper(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b26e2fd1c538..e92e7dea71da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1107,6 +1107,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
/* Van Gogh */
@@ -1274,24 +1275,35 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
*/
if (!amdgpu_passthrough(adev))
adev->mp1_state = PP_MP1_STATE_UNLOAD;
- adev->in_poweroff_reboot_com = true;
amdgpu_device_ip_suspend(adev);
- adev->in_poweroff_reboot_com = false;
adev->mp1_state = PP_MP1_STATE_NONE;
}
static int amdgpu_pmops_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ int r;
- return amdgpu_device_suspend(drm_dev, true);
+ if (amdgpu_acpi_is_s0ix_supported(adev))
+ adev->in_s0ix = true;
+ adev->in_s3 = true;
+ r = amdgpu_device_suspend(drm_dev, true);
+ adev->in_s3 = false;
+
+ return r;
}
static int amdgpu_pmops_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ int r;
- return amdgpu_device_resume(drm_dev, true);
+ r = amdgpu_device_resume(drm_dev, true);
+ if (amdgpu_acpi_is_s0ix_supported(adev))
+ adev->in_s0ix = false;
+ return r;
}
static int amdgpu_pmops_freeze(struct device *dev)
@@ -1300,9 +1312,9 @@ static int amdgpu_pmops_freeze(struct device *dev)
struct amdgpu_device *adev = drm_to_adev(drm_dev);
int r;
- adev->in_hibernate = true;
+ adev->in_s4 = true;
r = amdgpu_device_suspend(drm_dev, true);
- adev->in_hibernate = false;
+ adev->in_s4 = false;
if (r)
return r;
return amdgpu_asic_reset(adev);
@@ -1318,13 +1330,8 @@ static int amdgpu_pmops_thaw(struct device *dev)
static int amdgpu_pmops_poweroff(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_to_adev(drm_dev);
- int r;
- adev->in_poweroff_reboot_com = true;
- r = amdgpu_device_suspend(drm_dev, true);
- adev->in_poweroff_reboot_com = false;
- return r;
+ return amdgpu_device_suspend(drm_dev, true);
}
static int amdgpu_pmops_restore(struct device *dev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 64beb3399604..a4e2cf7cada1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -778,9 +778,9 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
dev_info->high_va_offset = AMDGPU_GMC_HOLE_END;
dev_info->high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
}
- dev_info->virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
+ dev_info->virtual_address_alignment = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
dev_info->pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
- dev_info->gart_page_size = AMDGPU_GPU_PAGE_SIZE;
+ dev_info->gart_page_size = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
dev_info->cu_active_number = adev->gfx.cu_info.number;
dev_info->cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
dev_info->ce_ram_size = adev->gfx.ce_ram_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 4b29b8205442..072050429a2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1028,13 +1028,10 @@ int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
struct ttm_resource_manager *man;
- /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
-#ifndef CONFIG_HIBERNATION
- if (adev->flags & AMD_IS_APU) {
- /* Useless to evict on IGP chips */
+ if (adev->in_s3 && (adev->flags & AMD_IS_APU)) {
+ /* No need to evict vram on APUs for suspend to ram */
return 0;
}
-#endif
man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 9fd2157b133a..5efa331e3ee8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -906,7 +906,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
/* Allocate an SG array and squash pages into it */
r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
- ttm->num_pages << PAGE_SHIFT,
+ (u64)ttm->num_pages << PAGE_SHIFT,
GFP_KERNEL);
if (r)
goto release_sg;
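
A minimal userspace sketch of why the (u64) cast above matters (unsigned int stands in for a 32-bit ttm->num_pages): shifting before widening wraps the byte count to zero for a 4 GiB buffer, while widening first preserves the full size passed to sg_alloc_table_from_pages().

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int num_pages = 1u << 20;   /* 4 GiB worth of 4 KiB pages */
	unsigned int page_shift = 12;

	uint64_t wraps  = (uint64_t)(num_pages << page_shift);  /* shift done in 32 bits   */
	uint64_t widens = (uint64_t)num_pages << page_shift;    /* widen first, then shift */

	printf("shift-then-cast: %llu bytes, cast-then-shift: %llu bytes\n",
	       (unsigned long long)wraps, (unsigned long long)widens);
	return 0;
}
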
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index ad91c0c3c423..7d2c8b169827 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2197,8 +2197,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
uint64_t eaddr;
/* validate the parameters */
- if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
- size == 0 || size & AMDGPU_GPU_PAGE_MASK)
+ if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
+ size == 0 || size & ~PAGE_MASK)
return -EINVAL;
/* make sure object fit at this offset */
@@ -2263,8 +2263,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
int r;
/* validate the parameters */
- if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
- size == 0 || size & AMDGPU_GPU_PAGE_MASK)
+ if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
+ size == 0 || size & ~PAGE_MASK)
return -EINVAL;
/* make sure object fit at this offset */
@@ -2409,7 +2409,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
after->start = eaddr + 1;
after->last = tmp->last;
after->offset = tmp->offset;
- after->offset += after->start - tmp->start;
+ after->offset += (after->start - tmp->start) << PAGE_SHIFT;
after->flags = tmp->flags;
after->bo_va = tmp->bo_va;
list_add(&after->list, &tmp->bo_va->invalids);
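
A minimal userspace sketch of the alignment test used above (PAGE_SHIFT of 12 and the sample addresses are hypothetical): PAGE_MASK clears the low PAGE_SHIFT bits, so 'addr & ~PAGE_MASK' is the offset inside a page and must be zero for a mapping to be CPU-page aligned.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

static int page_aligned(unsigned long addr)
{
	/* non-zero low bits mean the address is not page aligned */
	return (addr & ~PAGE_MASK) == 0;
}

int main(void)
{
	printf("0x100000 aligned: %d\n", page_aligned(0x100000));  /* prints 1 */
	printf("0x100200 aligned: %d\n", page_aligned(0x100200));  /* prints 0 */
	return 0;
}
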
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 7944781e1086..19abb740a169 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2897,6 +2897,11 @@ static int dce_v10_0_hw_fini(void *handle)
static int dce_v10_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = amdgpu_display_suspend_helper(adev);
+ if (r)
+ return r;
adev->mode_info.bl_level =
amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
@@ -2921,8 +2926,10 @@ static int dce_v10_0_resume(void *handle)
amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
bl_level);
}
+ if (ret)
+ return ret;
- return ret;
+ return amdgpu_display_resume_helper(adev);
}
static bool dce_v10_0_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 1b6ff0470011..320ec35bfd37 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -3027,6 +3027,11 @@ static int dce_v11_0_hw_fini(void *handle)
static int dce_v11_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = amdgpu_display_suspend_helper(adev);
+ if (r)
+ return r;
adev->mode_info.bl_level =
amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
@@ -3051,8 +3056,10 @@ static int dce_v11_0_resume(void *handle)
amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
bl_level);
}
+ if (ret)
+ return ret;
- return ret;
+ return amdgpu_display_resume_helper(adev);
}
static bool dce_v11_0_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 83a88385b762..13322000ebd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2770,7 +2770,11 @@ static int dce_v6_0_hw_fini(void *handle)
static int dce_v6_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+ r = amdgpu_display_suspend_helper(adev);
+ if (r)
+ return r;
adev->mode_info.bl_level =
amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
@@ -2794,8 +2798,10 @@ static int dce_v6_0_resume(void *handle)
amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
bl_level);
}
+ if (ret)
+ return ret;
- return ret;
+ return amdgpu_display_resume_helper(adev);
}
static bool dce_v6_0_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 224b30214427..04ebf02e5b8c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2796,6 +2796,11 @@ static int dce_v8_0_hw_fini(void *handle)
static int dce_v8_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = amdgpu_display_suspend_helper(adev);
+ if (r)
+ return r;
adev->mode_info.bl_level =
amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
@@ -2820,8 +2825,10 @@ static int dce_v8_0_resume(void *handle)
amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
bl_level);
}
+ if (ret)
+ return ret;
- return ret;
+ return amdgpu_display_resume_helper(adev);
}
static bool dce_v8_0_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 9810af712cc0..5c11144da051 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -39,6 +39,7 @@
#include "dce_v11_0.h"
#include "dce_virtual.h"
#include "ivsrcid/ivsrcid_vislands30.h"
+#include "amdgpu_display.h"
#define DCE_VIRTUAL_VBLANK_PERIOD 16666666
@@ -491,12 +492,24 @@ static int dce_virtual_hw_fini(void *handle)
static int dce_virtual_suspend(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = amdgpu_display_suspend_helper(adev);
+ if (r)
+ return r;
return dce_virtual_hw_fini(handle);
}
static int dce_virtual_resume(void *handle)
{
- return dce_virtual_hw_init(handle);
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = dce_virtual_hw_init(handle);
+ if (r)
+ return r;
+ return amdgpu_display_resume_helper(adev);
}
static bool dce_virtual_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index b258a3dae767..159add0f5aaa 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -155,7 +155,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
/* Wait till CP writes sync code: */
status = amdkfd_fence_wait_timeout(
- (unsigned int *) rm_state,
+ rm_state,
QUEUESTATE__ACTIVE, 1500);
kfd_gtt_sa_free(dbgdev->dev, mem_obj);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index e686ce2bf3b3..4598a9a58125 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1167,7 +1167,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
if (retval)
goto fail_allocate_vidmem;
- dqm->fence_addr = dqm->fence_mem->cpu_ptr;
+ dqm->fence_addr = (uint64_t *)dqm->fence_mem->cpu_ptr;
dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
init_interrupts(dqm);
@@ -1340,8 +1340,8 @@ out:
return retval;
}
-int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
- unsigned int fence_value,
+int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
+ uint64_t fence_value,
unsigned int timeout_ms)
{
unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 7351dd195274..45f815946554 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -192,7 +192,7 @@ struct device_queue_manager {
uint16_t vmid_pasid[VMID_NUM];
uint64_t pipelines_addr;
uint64_t fence_gpu_addr;
- unsigned int *fence_addr;
+ uint64_t *fence_addr;
struct kfd_mem_obj *fence_mem;
bool active_runlist;
int sched_policy;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 5d541e0cc8ca..f71a7fa6680c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -347,7 +347,7 @@ fail_create_runlist_ib:
}
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
- uint32_t fence_value)
+ uint64_t fence_value)
{
uint32_t *buffer, size;
int retval = 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
index dfaf771a42e6..e3ba0cd3b6fa 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
@@ -283,7 +283,7 @@ static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
}
static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer,
- uint64_t fence_address, uint32_t fence_value)
+ uint64_t fence_address, uint64_t fence_value)
{
struct pm4_mes_query_status *packet;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
index a852e0d7d804..08442e7d9944 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
@@ -263,7 +263,7 @@ static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
}
static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
- uint64_t fence_address, uint32_t fence_value)
+ uint64_t fence_address, uint64_t fence_value)
{
struct pm4_mes_query_status *packet;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 09599efa41fc..f304d1f8df5f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -1003,8 +1003,8 @@ int pqm_get_wave_state(struct process_queue_manager *pqm,
u32 *ctl_stack_used_size,
u32 *save_area_used_size);
-int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
- unsigned int fence_value,
+int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
+ uint64_t fence_value,
unsigned int timeout_ms);
/* Packet Manager */
@@ -1040,7 +1040,7 @@ struct packet_manager_funcs {
uint32_t filter_param, bool reset,
unsigned int sdma_engine);
int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
- uint64_t fence_address, uint32_t fence_value);
+ uint64_t fence_address, uint64_t fence_value);
int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
/* Packet sizes */
@@ -1062,7 +1062,7 @@ int pm_send_set_resources(struct packet_manager *pm,
struct scheduling_resources *res);
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
- uint32_t fence_value);
+ uint64_t fence_value);
int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
enum kfd_unmap_queues_filter mode,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
index fa013496e26b..2f9bfaeaba8d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
@@ -341,8 +341,7 @@ void enc2_hw_init(struct link_encoder *enc)
} else {
AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);
- AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c4d);
-
+ AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a);
}
//AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
index 705fbfc37502..8a32772d4e91 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
@@ -134,6 +134,7 @@
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_EN, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_BLK, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_BLK_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK, mask_sh),\
HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\
HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\
HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index a2681fe875ed..c0565a932a12 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -587,6 +587,48 @@ static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
tmp, MC_CG_ARB_FREQ_F0);
}
+static uint16_t smu7_override_pcie_speed(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ uint16_t pcie_gen = 0;
+
+ if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 &&
+ adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4)
+ pcie_gen = 3;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 &&
+ adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3)
+ pcie_gen = 2;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 &&
+ adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2)
+ pcie_gen = 1;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 &&
+ adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1)
+ pcie_gen = 0;
+
+ return pcie_gen;
+}
+
+static uint16_t smu7_override_pcie_width(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ uint16_t pcie_width = 0;
+
+ if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+ pcie_width = 16;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
+ pcie_width = 12;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
+ pcie_width = 8;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
+ pcie_width = 4;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
+ pcie_width = 2;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
+ pcie_width = 1;
+
+ return pcie_width;
+}
+
static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -683,6 +725,11 @@ static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
PP_Min_PCIEGen),
get_pcie_lane_support(data->pcie_lane_cap,
PP_Max_PCIELane));
+
+ if (data->pcie_dpm_key_disabled)
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
+ data->dpm_table.pcie_speed_table.count,
+ smu7_override_pcie_speed(hwmgr), smu7_override_pcie_width(hwmgr));
}
return 0;
}
@@ -1177,7 +1224,8 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
(hwmgr->chip_id == CHIP_POLARIS10) ||
(hwmgr->chip_id == CHIP_POLARIS11) ||
(hwmgr->chip_id == CHIP_POLARIS12) ||
- (hwmgr->chip_id == CHIP_TONGA))
+ (hwmgr->chip_id == CHIP_TONGA) ||
+ (hwmgr->chip_id == CHIP_TOPAZ))
PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
@@ -1248,6 +1296,13 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
NULL)),
"Failed to enable pcie DPM during DPM Start Function!",
return -EINVAL);
+ } else {
+ PP_ASSERT_WITH_CODE(
+ (0 == smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_PCIeDPM_Disable,
+ NULL)),
+ "Failed to disble pcie DPM during DPM Start Function!",
+ return -EINVAL);
}
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -3276,7 +3331,8 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
disable_mclk_switching_for_display = ((1 < hwmgr->display_config->num_display) &&
!hwmgr->display_config->multi_monitor_in_sync) ||
- smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time);
+ (hwmgr->display_config->num_display &&
+ smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time));
disable_mclk_switching = disable_mclk_switching_for_frame_lock ||
disable_mclk_switching_for_display;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index 22b636e2b89b..599ec9726601 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -54,6 +54,9 @@
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"
+#define smnPCIE_LC_SPEED_CNTL 0x11140290
+#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
+
#define HBM_MEMORY_CHANNEL_WIDTH 128
static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
@@ -443,8 +446,7 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
if (PP_CAP(PHM_PlatformCaps_VCEDPM))
data->smu_features[GNLD_DPM_VCE].supported = true;
- if (!data->registry_data.pcie_dpm_key_disabled)
- data->smu_features[GNLD_DPM_LINK].supported = true;
+ data->smu_features[GNLD_DPM_LINK].supported = true;
if (!data->registry_data.dcefclk_dpm_key_disabled)
data->smu_features[GNLD_DPM_DCEFCLK].supported = true;
@@ -1544,6 +1546,13 @@ static int vega10_override_pcie_parameters(struct pp_hwmgr *hwmgr)
pp_table->PcieLaneCount[i] = pcie_width;
}
+ if (data->registry_data.pcie_dpm_key_disabled) {
+ for (i = 0; i < NUM_LINK_LEVELS; i++) {
+ pp_table->PcieGenSpeed[i] = pcie_gen;
+ pp_table->PcieLaneCount[i] = pcie_width;
+ }
+ }
+
return 0;
}
@@ -2966,6 +2975,14 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
}
}
+ if (data->registry_data.pcie_dpm_key_disabled) {
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
+ false, data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap),
+ "Attempt to Disable Link DPM feature Failed!", return -EINVAL);
+ data->smu_features[GNLD_DPM_LINK].enabled = false;
+ data->smu_features[GNLD_DPM_LINK].supported = false;
+ }
+
return 0;
}
@@ -4584,6 +4601,24 @@ static int vega10_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
return 0;
}
+static int vega10_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
+ PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
+ >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+}
+
+static int vega10_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
+ PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
+ >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+}
+
static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, char *buf)
{
@@ -4592,8 +4627,9 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
struct vega10_single_dpm_table *soc_table = &(data->dpm_table.soc_table);
struct vega10_single_dpm_table *dcef_table = &(data->dpm_table.dcef_table);
- struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL;
+ uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
+ PPTable_t *pptable = &(data->smc_state_table.pp_table);
int i, now, size = 0, count = 0;
@@ -4650,15 +4686,31 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
"*" : "");
break;
case PP_PCIE:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex, &now);
-
- for (i = 0; i < pcie_table->count; i++)
- size += sprintf(buf + size, "%d: %s %s\n", i,
- (pcie_table->pcie_gen[i] == 0) ? "2.5GT/s, x1" :
- (pcie_table->pcie_gen[i] == 1) ? "5.0GT/s, x16" :
- (pcie_table->pcie_gen[i] == 2) ? "8.0GT/s, x16" : "",
- (i == now) ? "*" : "");
+ current_gen_speed =
+ vega10_get_current_pcie_link_speed_level(hwmgr);
+ current_lane_width =
+ vega10_get_current_pcie_link_width_level(hwmgr);
+ for (i = 0; i < NUM_LINK_LEVELS; i++) {
+ gen_speed = pptable->PcieGenSpeed[i];
+ lane_width = pptable->PcieLaneCount[i];
+
+ size += sprintf(buf + size, "%d: %s %s %s\n", i,
+ (gen_speed == 0) ? "2.5GT/s," :
+ (gen_speed == 1) ? "5.0GT/s," :
+ (gen_speed == 2) ? "8.0GT/s," :
+ (gen_speed == 3) ? "16.0GT/s," : "",
+ (lane_width == 1) ? "x1" :
+ (lane_width == 2) ? "x2" :
+ (lane_width == 3) ? "x4" :
+ (lane_width == 4) ? "x8" :
+ (lane_width == 5) ? "x12" :
+ (lane_width == 6) ? "x16" : "",
+ (current_gen_speed == gen_speed) &&
+ (current_lane_width == lane_width) ?
+ "*" : "");
+ }
break;
+
case OD_SCLK:
if (hwmgr->od_enabled) {
size = sprintf(buf, "%s:\n", "OD_SCLK");
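Aside on the two helpers added to vega10_hwmgr.c above: they read back the current PCIe gen speed and lane width with the usual read-mask-shift pattern, the masks and shifts coming from the PCIE_LC_* register headers. A stand-alone sketch of that pattern (the register value, mask and shift below are made up purely for illustration):

#include <stdint.h>
#include <stdio.h>

/* Extract one bitfield the way the new link width/speed helpers do. */
static uint32_t field_get(uint32_t reg, uint32_t mask, unsigned int shift)
{
        return (reg & mask) >> shift;
}

int main(void)
{
        uint32_t reg = 0x00000640;                /* pretend register readback */
        uint32_t width = field_get(reg, 0x70, 4); /* hypothetical mask/shift */

        printf("link width field = %u\n", width); /* prints 4 */
        return 0;
}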
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
index 43e01d880f7c..4f6da11e8f10 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
@@ -133,6 +133,7 @@ static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr)
data->registry_data.auto_wattman_debug = 0;
data->registry_data.auto_wattman_sample_period = 100;
data->registry_data.auto_wattman_threshold = 50;
+ data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
}
static int vega12_set_features_platform_caps(struct pp_hwmgr *hwmgr)
@@ -539,6 +540,29 @@ static int vega12_override_pcie_parameters(struct pp_hwmgr *hwmgr)
pp_table->PcieLaneCount[i] = pcie_width_arg;
}
+ /* override to the highest if it's disabled from ppfeaturemask */
+ if (data->registry_data.pcie_dpm_key_disabled) {
+ for (i = 0; i < NUM_LINK_LEVELS; i++) {
+ smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width;
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
+ NULL);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[OverridePcieParameters] Attempt to override pcie params failed!",
+ return ret);
+
+ pp_table->PcieGenSpeed[i] = pcie_gen;
+ pp_table->PcieLaneCount[i] = pcie_width;
+ }
+ ret = vega12_enable_smc_features(hwmgr,
+ false,
+ data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap);
+ PP_ASSERT_WITH_CODE(!ret,
+ "Attempt to Disable DPM LINK Failed!",
+ return ret);
+ data->smu_features[GNLD_DPM_LINK].enabled = false;
+ data->smu_features[GNLD_DPM_LINK].supported = false;
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
index f19964c69a00..b6ee3a285c9d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
@@ -171,6 +171,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
data->registry_data.gfxoff_controlled_by_driver = 1;
data->gfxoff_allowed = false;
data->counter_gfxoff = 0;
+ data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
}
static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
@@ -884,6 +885,30 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
pp_table->PcieLaneCount[i] = pcie_width_arg;
}
+ /* override to the highest if it's disabled from ppfeaturemask */
+ if (data->registry_data.pcie_dpm_key_disabled) {
+ for (i = 0; i < NUM_LINK_LEVELS; i++) {
+ smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width;
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
+ NULL);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[OverridePcieParameters] Attempt to override pcie params failed!",
+ return ret);
+
+ pp_table->PcieGenSpeed[i] = pcie_gen;
+ pp_table->PcieLaneCount[i] = pcie_width;
+ }
+ ret = vega20_enable_smc_features(hwmgr,
+ false,
+ data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap);
+ PP_ASSERT_WITH_CODE(!ret,
+ "Attempt to Disable DPM LINK Failed!",
+ return ret);
+ data->smu_features[GNLD_DPM_LINK].enabled = false;
+ data->smu_features[GNLD_DPM_LINK].supported = false;
+ }
+
return 0;
}
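The override loops added to vega12_hwmgr.c and vega20_hwmgr.c above pack the link level, PCIe gen and lane width into the single 32-bit argument handed to PPSMC_MSG_OverridePcieParameters. A minimal stand-alone sketch of that packing (the level/gen/width numbers below are example values, not taken from a real pptable):

#include <stdint.h>
#include <stdio.h>

/* Same layout as smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width. */
static uint32_t pack_pcie_arg(uint32_t level, uint32_t gen, uint32_t width)
{
        return (level << 16) | (gen << 8) | width;
}

int main(void)
{
        uint32_t arg = pack_pcie_arg(1, 2, 6);  /* level 1, gen value 2, width value 6 */

        printf("smu_pcie_arg = 0x%08x\n", arg); /* 0x00010206 */
        return 0;
}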
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index d143ef1b460b..cd905e41080e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1294,7 +1294,7 @@ static int smu_disable_dpms(struct smu_context *smu)
bool use_baco = !smu->is_apu &&
((amdgpu_in_reset(adev) &&
(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
- ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));
+ ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
/*
* For custom pptable uploading, skip the DPM features
@@ -1431,7 +1431,8 @@ static int smu_suspend(void *handle)
smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
- if (smu->is_apu)
+ /* skip CGPG when in S0ix */
+ if (smu->is_apu && !adev->in_s0ix)
smu_set_gfx_cgpg(&adev->smu, false);
return 0;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 7ddbaecb11c2..101eaa20db9b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -384,10 +384,15 @@ static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
static bool vangogh_is_dpm_running(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
uint32_t feature_mask[2];
uint64_t feature_enabled;
+ /* we need to re-init after suspend so return false */
+ if (adev->in_suspend)
+ return false;
+
ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
if (ret)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 6d38c5c17f23..db69f19ab5bc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -689,7 +689,8 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
struct page **pages = pvec + pinned;
ret = pin_user_pages_fast(ptr, num_pages,
- !userptr->ro ? FOLL_WRITE : 0, pages);
+ FOLL_WRITE | FOLL_FORCE | FOLL_LONGTERM,
+ pages);
if (ret < 0) {
unpin_user_pages(pvec, pinned);
kvfree(pvec);
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 1f79bc2a881e..1510e4e2973c 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -13,7 +13,6 @@
#include <linux/irq.h>
#include <linux/mfd/syscon.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
index e21fb14d5e07..833d0c1be4f1 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.c
+++ b/drivers/gpu/drm/i915/display/intel_acpi.c
@@ -84,13 +84,31 @@ static void intel_dsm_platform_mux_info(acpi_handle dhandle)
return;
}
+ if (!pkg->package.count) {
+ DRM_DEBUG_DRIVER("no connection in _DSM\n");
+ return;
+ }
+
connector_count = &pkg->package.elements[0];
DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
(unsigned long long)connector_count->integer.value);
for (i = 1; i < pkg->package.count; i++) {
union acpi_object *obj = &pkg->package.elements[i];
- union acpi_object *connector_id = &obj->package.elements[0];
- union acpi_object *info = &obj->package.elements[1];
+ union acpi_object *connector_id;
+ union acpi_object *info;
+
+ if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < 2) {
+ DRM_DEBUG_DRIVER("Invalid object for MUX #%d\n", i);
+ continue;
+ }
+
+ connector_id = &obj->package.elements[0];
+ info = &obj->package.elements[1];
+ if (info->type != ACPI_TYPE_BUFFER || info->buffer.length < 4) {
+ DRM_DEBUG_DRIVER("Invalid info for MUX obj #%d\n", i);
+ continue;
+ }
+
DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
(unsigned long long)connector_id->integer.value);
DRM_DEBUG_DRIVER(" port id: %s\n",
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 4683f98f7e54..c3f2962aa1eb 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -317,12 +317,13 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc)
return 0;
- new_crtc_state->enabled_planes |= BIT(plane->id);
-
ret = plane->check_plane(new_crtc_state, new_plane_state);
if (ret)
return ret;
+ if (fb)
+ new_crtc_state->enabled_planes |= BIT(plane->id);
+
/* FIXME pre-g4x don't work like this */
if (new_plane_state->uapi.visible)
new_crtc_state->active_planes |= BIT(plane->id);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 8c12d5375607..775d89b6c3fc 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -3619,9 +3619,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
int ret;
- intel_dp_lttpr_init(intel_dp);
-
- if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd))
+ if (intel_dp_init_lttpr_and_dprx_caps(intel_dp) < 0)
return false;
/*
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index eaebf123310a..10fe17b7280d 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -133,6 +133,7 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
else
precharge = 5;
+ /* Max timeout value on G4x-BDW: 1.6ms */
if (IS_BROADWELL(dev_priv))
timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
else
@@ -159,6 +160,12 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
u32 ret;
+ /*
+ * Max timeout values:
+ * SKL-GLK: 1.6ms
+ * CNL: 3.2ms
+ * ICL+: 4ms
+ */
ret = DP_AUX_CH_CTL_SEND_BUSY |
DP_AUX_CH_CTL_DONE |
DP_AUX_CH_CTL_INTERRUPT |
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 892d7db7d94f..be6ac0dd846e 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -34,6 +34,11 @@ intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
link_status[3], link_status[4], link_status[5]);
}
+static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp)
+{
+ memset(&intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps));
+}
+
static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
{
intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT -
@@ -81,19 +86,36 @@ static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp)
{
- if (drm_dp_read_lttpr_common_caps(&intel_dp->aux,
- intel_dp->lttpr_common_caps) < 0) {
- memset(intel_dp->lttpr_common_caps, 0,
- sizeof(intel_dp->lttpr_common_caps));
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ if (intel_dp_is_edp(intel_dp))
return false;
- }
+
+ /*
+ * Detecting LTTPRs must be avoided on platforms with an AUX timeout
+ * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
+ */
+ if (INTEL_GEN(i915) < 10)
+ return false;
+
+ if (drm_dp_read_lttpr_common_caps(&intel_dp->aux,
+ intel_dp->lttpr_common_caps) < 0)
+ goto reset_caps;
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
"LTTPR common capabilities: %*ph\n",
(int)sizeof(intel_dp->lttpr_common_caps),
intel_dp->lttpr_common_caps);
+ /* The minimum value of LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV is 1.4 */
+ if (intel_dp->lttpr_common_caps[0] < 0x14)
+ goto reset_caps;
+
return true;
+
+reset_caps:
+ intel_dp_reset_lttpr_common_caps(intel_dp);
+ return false;
}
static bool
@@ -106,33 +128,49 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
}
/**
- * intel_dp_lttpr_init - detect LTTPRs and init the LTTPR link training mode
+ * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
* @intel_dp: Intel DP struct
*
- * Read the LTTPR common capabilities, switch to non-transparent link training
- * mode if any is detected and read the PHY capabilities for all detected
- * LTTPRs. In case of an LTTPR detection error or if the number of
+ * Read the LTTPR common and DPRX capabilities and switch to non-transparent
+ * link training mode if any is detected and read the PHY capabilities for all
+ * detected LTTPRs. In case of an LTTPR detection error or if the number of
* LTTPRs is more than is supported (8), fall back to the no-LTTPR,
* transparent mode link training mode.
*
* Returns:
- * >0 if LTTPRs were detected and the non-transparent LT mode was set
+ * >0 if LTTPRs were detected and the non-transparent LT mode was set. The
+ * DPRX capabilities are read out.
* 0 if no LTTPRs or more than 8 LTTPRs were detected or in case of a
- * detection failure and the transparent LT mode was set
+ * detection failure and the transparent LT mode was set. The DPRX
+ * capabilities are read out.
+ * <0 Reading out the DPRX capabilities failed.
*/
-int intel_dp_lttpr_init(struct intel_dp *intel_dp)
+int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
{
int lttpr_count;
bool ret;
int i;
- if (intel_dp_is_edp(intel_dp))
- return 0;
-
ret = intel_dp_read_lttpr_common_caps(intel_dp);
+
+ /* The DPTX shall read the DPRX caps after LTTPR detection. */
+ if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
+ intel_dp_reset_lttpr_common_caps(intel_dp);
+ return -EIO;
+ }
+
if (!ret)
return 0;
+ /*
+ * The 0xF0000-0xF02FF range is only valid if the DPCD revision is
+ * at least 1.4.
+ */
+ if (intel_dp->dpcd[DP_DPCD_REV] < 0x14) {
+ intel_dp_reset_lttpr_common_caps(intel_dp);
+ return 0;
+ }
+
lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
/*
* Prevent setting LTTPR transparent mode explicitly if no LTTPRs are
@@ -172,7 +210,7 @@ int intel_dp_lttpr_init(struct intel_dp *intel_dp)
return lttpr_count;
}
-EXPORT_SYMBOL(intel_dp_lttpr_init);
+EXPORT_SYMBOL(intel_dp_init_lttpr_and_dprx_caps);
static u8 dp_voltage_max(u8 preemph)
{
@@ -807,7 +845,10 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp,
* TODO: Reiniting LTTPRs here won't be needed once proper connector
* HW state readout is added.
*/
- int lttpr_count = intel_dp_lttpr_init(intel_dp);
+ int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);
+
+ if (lttpr_count < 0)
+ return;
if (!intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count))
intel_dp_schedule_fallback_link_training(intel_dp, crtc_state);
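One detail behind the new ordering above: DP_DPCD_REV encodes the revision as a major nibble and a minor nibble, so the < 0x14 test reads "older than DPCD 1.4", the first revision for which the 0xF0000 LTTPR register range is defined. A tiny stand-alone illustration of that encoding (values are examples):

#include <stdint.h>
#include <stdio.h>

/* DP_DPCD_REV packs the version as <major nibble><minor nibble>: 0x14 == 1.4. */
static void print_dpcd_rev(uint8_t rev)
{
        printf("DPCD %u.%u\n", rev >> 4, rev & 0xf);
}

int main(void)
{
        print_dpcd_rev(0x14);   /* minimum revision for the LTTPR register range */
        print_dpcd_rev(0x12);   /* older sink: the LTTPR caps are reset instead */
        return 0;
}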
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
index 6a1f76bd8c75..9cb7c28027f0 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -11,7 +11,7 @@
struct intel_crtc_state;
struct intel_dp;
-int intel_dp_lttpr_init(struct intel_dp *intel_dp);
+int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp);
void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index f58cc5700784..a86c57d117f2 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -1014,20 +1014,14 @@ static i915_reg_t dss_ctl1_reg(const struct intel_crtc_state *crtc_state)
{
enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
- if (crtc_state->cpu_transcoder == TRANSCODER_EDP)
- return DSS_CTL1;
-
- return ICL_PIPE_DSS_CTL1(pipe);
+ return is_pipe_dsc(crtc_state) ? ICL_PIPE_DSS_CTL1(pipe) : DSS_CTL1;
}
static i915_reg_t dss_ctl2_reg(const struct intel_crtc_state *crtc_state)
{
enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
- if (crtc_state->cpu_transcoder == TRANSCODER_EDP)
- return DSS_CTL2;
-
- return ICL_PIPE_DSS_CTL2(pipe);
+ return is_pipe_dsc(crtc_state) ? ICL_PIPE_DSS_CTL2(pipe) : DSS_CTL2;
}
void intel_dsc_enable(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index a357bb431815..67de2b189598 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -316,7 +316,18 @@ void i915_vma_revoke_fence(struct i915_vma *vma)
WRITE_ONCE(fence->vma, NULL);
vma->fence = NULL;
- with_intel_runtime_pm_if_in_use(fence_to_uncore(fence)->rpm, wakeref)
+ /*
+ * Skip the write to HW if and only if the device is currently
+ * suspended.
+ *
+ * If the driver does not currently hold a wakeref (if_in_use == 0),
+ * the device may currently be runtime suspended, or it may be woken
+ * up before the suspend takes place. If the device is not suspended
+ * (powered down) and we skip clearing the fence register, the HW is
+ * left in an undefined state where we may end up with multiple
+ * registers overlapping.
+ */
+ with_intel_runtime_pm_if_active(fence_to_uncore(fence)->rpm, wakeref)
fence_write(fence);
}
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 153ca9e65382..8b725efb2254 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -412,12 +412,20 @@ intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
}
/**
- * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
+ * __intel_runtime_pm_get_if_active - grab a runtime pm reference if device is active
* @rpm: the intel_runtime_pm structure
+ * @ignore_usecount: get a ref even if dev->power.usage_count is 0
*
* This function grabs a device-level runtime pm reference if the device is
- * already in use and ensures that it is powered up. It is illegal to try
- * and access the HW should intel_runtime_pm_get_if_in_use() report failure.
+ * already active and ensures that it is powered up. It is illegal to try
+ * and access the HW should intel_runtime_pm_get_if_active() report failure.
+ *
+ * If @ignore_usecount=true, a reference will be acquired even if there is no
+ * user requiring the device to be powered up (dev->power.usage_count == 0).
+ * If the function returns false in this case then it's guaranteed that the
+ * device's runtime suspend hook has been called already or that it will be
+ * called (and hence it's also guaranteed that the device's runtime resume
+ * hook will be called eventually).
*
* Any runtime pm reference obtained by this function must have a symmetric
* call to intel_runtime_pm_put() to release the reference again.
@@ -425,7 +433,8 @@ intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
* Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
* as True if the wakeref was acquired, or False otherwise.
*/
-intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
+static intel_wakeref_t __intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm,
+ bool ignore_usecount)
{
if (IS_ENABLED(CONFIG_PM)) {
/*
@@ -434,7 +443,7 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
* function, since the power state is undefined. This applies
* atm to the late/early system suspend/resume handlers.
*/
- if (pm_runtime_get_if_in_use(rpm->kdev) <= 0)
+ if (pm_runtime_get_if_active(rpm->kdev, ignore_usecount) <= 0)
return 0;
}
@@ -443,6 +452,16 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
return track_intel_runtime_pm_wakeref(rpm);
}
+intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
+{
+ return __intel_runtime_pm_get_if_active(rpm, false);
+}
+
+intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm)
+{
+ return __intel_runtime_pm_get_if_active(rpm, true);
+}
+
/**
* intel_runtime_pm_get_noresume - grab a runtime pm reference
* @rpm: the intel_runtime_pm structure
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
index ae64ff14c642..1e4ddd11c12b 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
@@ -177,6 +177,7 @@ void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm);
+intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);
@@ -188,6 +189,10 @@ intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);
for ((wf) = intel_runtime_pm_get_if_in_use(rpm); (wf); \
intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
+#define with_intel_runtime_pm_if_active(rpm, wf) \
+ for ((wf) = intel_runtime_pm_get_if_active(rpm); (wf); \
+ intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
+
void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref);
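The with_intel_runtime_pm_if_active() macro added above reuses the existing scoped-wakeref for-loop idiom: the reference is taken in the init clause, the body runs at most once while the wakeref is non-zero, and the put happens in the increment clause when the loop exits. A stand-alone analogue of that idiom with a toy resource (this is not the i915 API, just the same control-flow shape):

#include <stdbool.h>
#include <stdio.h>

static bool try_get(void) { puts("get"); return true; }
static void put(void)     { puts("put"); }

/* Same shape as the with_intel_runtime_pm_if_*() helpers: the body executes
 * at most once and the release is guaranteed when the loop terminates. */
#define with_resource(token) \
        for ((token) = try_get(); (token); put(), (token) = false)

int main(void)
{
        bool wf;

        with_resource(wf)
                puts("body runs only while the reference is held");
        return 0;
}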
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index d1a9841adeed..e6a88c8cbd69 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -215,7 +215,7 @@ static int imx_drm_bind(struct device *dev)
ret = drmm_mode_config_init(drm);
if (ret)
- return ret;
+ goto err_kms;
ret = drm_vblank_init(drm, MAX_CRTC);
if (ret)
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index dbfe39e2f7f6..ffdc492c5bc5 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -197,6 +197,11 @@ static void imx_ldb_encoder_enable(struct drm_encoder *encoder)
int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
+ if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) {
+ dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux);
+ return;
+ }
+
drm_panel_prepare(imx_ldb_ch->panel);
if (dual) {
@@ -255,6 +260,11 @@ imx_ldb_encoder_atomic_mode_set(struct drm_encoder *encoder,
int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
u32 bus_format = imx_ldb_ch->bus_format;
+ if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) {
+ dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux);
+ return;
+ }
+
if (mode->clock > 170000) {
dev_warn(ldb->dev,
"%s: mode exceeds 170 MHz pixel clock\n", __func__);
@@ -583,7 +593,7 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
struct imx_ldb_channel *channel = &imx_ldb->channel[i];
if (!channel->ldb)
- break;
+ continue;
ret = imx_ldb_register(drm, channel);
if (ret)
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 7e553d3efeb2..ce13d49e615b 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1386,8 +1386,8 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
- *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO,
- REG_A5XX_RBBM_PERFCTR_CP_0_HI);
+ *value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO,
+ REG_A5XX_RBBM_ALWAYSON_COUNTER_HI);
return 0;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index 5ccc9da455a1..c35b06b46fcc 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -304,7 +304,7 @@ int a5xx_power_init(struct msm_gpu *gpu)
/* Set up the limits management */
if (adreno_is_a530(adreno_gpu))
a530_lm_setup(gpu);
- else
+ else if (adreno_is_a540(adreno_gpu))
a540_lm_setup(gpu);
/* Set up SP/TP power collapse */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 71c917f909af..91cf46f84025 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -339,7 +339,7 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
else
bit = a6xx_gmu_oob_bits[state].ack_new;
- gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, bit);
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
}
/* Enable CPU control of SPTP power collapse */
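The one-line a6xx_gmu.c fix above writes a bit mask (1 << bit) into the HOST2GMU interrupt set register rather than the raw bit number, since the register apparently takes mask semantics. A trivial stand-alone illustration of the difference (bit number chosen arbitrarily):

#include <stdio.h>

int main(void)
{
        unsigned int bit = 4;   /* example ack bit number */

        printf("bit number: 0x%x  bit mask: 0x%x\n", bit, 1u << bit);  /* 0x4 vs 0x10 */
        return 0;
}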
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index ba8e9d3cf0fe..d553f62f4eeb 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -522,28 +522,73 @@ static int a6xx_cp_init(struct msm_gpu *gpu)
return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
}
-static void a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
+/*
+ * Check that the microcode version is new enough to include several key
+ * security fixes. Return true if the ucode is safe.
+ */
+static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
struct drm_gem_object *obj)
{
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
u32 *buf = msm_gem_get_vaddr(obj);
+ bool ret = false;
if (IS_ERR(buf))
- return;
+ return false;
/*
- * If the lowest nibble is 0xa that is an indication that this microcode
- * has been patched. The actual version is in dword [3] but we only care
- * about the patchlevel which is the lowest nibble of dword [3]
- *
- * Otherwise check that the firmware is greater than or equal to 1.90
- * which was the first version that had this fix built in
+ * Targets up to a640 (a618, a630 and a640) need to check for a
+ * microcode version that is patched to support the whereami opcode or
+ * one that is new enough to include it by default.
*/
- if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1)
- a6xx_gpu->has_whereami = true;
- else if ((buf[0] & 0xfff) > 0x190)
- a6xx_gpu->has_whereami = true;
+ if (adreno_is_a618(adreno_gpu) || adreno_is_a630(adreno_gpu) ||
+ adreno_is_a640(adreno_gpu)) {
+ /*
+ * If the lowest nibble is 0xa that is an indication that this
+ * microcode has been patched. The actual version is in dword
+ * [3] but we only care about the patchlevel which is the lowest
+ * nibble of dword [3]
+ *
+ * Otherwise check that the firmware is greater than or equal
+ * to 1.90 which was the first version that had this fix built
+ * in
+ */
+ if ((((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1) ||
+ (buf[0] & 0xfff) >= 0x190) {
+ a6xx_gpu->has_whereami = true;
+ ret = true;
+ goto out;
+ }
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "a630 SQE ucode is too old. Have version %x need at least %x\n",
+ buf[0] & 0xfff, 0x190);
+ } else {
+ /*
+ * a650 tier targets don't need whereami but still need to be
+ * equal to or newer than 0.95 for other security fixes
+ */
+ if (adreno_is_a650(adreno_gpu)) {
+ if ((buf[0] & 0xfff) >= 0x095) {
+ ret = true;
+ goto out;
+ }
+
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "a650 SQE ucode is too old. Have version %x need at least %x\n",
+ buf[0] & 0xfff, 0x095);
+ }
+
+ /*
+ * When a660 is added those targets should return true here
+ * since those have all the critical security fixes built in
+ * from the start
+ */
+ }
+out:
msm_gem_put_vaddr(obj);
+ return ret;
}
static int a6xx_ucode_init(struct msm_gpu *gpu)
@@ -566,7 +611,13 @@ static int a6xx_ucode_init(struct msm_gpu *gpu)
}
msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
- a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo);
+ if (!a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo)) {
+ msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+ drm_gem_object_put(a6xx_gpu->sqe_bo);
+
+ a6xx_gpu->sqe_bo = NULL;
+ return -EPERM;
+ }
}
gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
@@ -1177,8 +1228,8 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
/* Force the GPU power on so we can read this register */
a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
- *value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
- REG_A6XX_RBBM_PERFCTR_CP_0_HI);
+ *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+ REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
mutex_unlock(&perfcounter_oob);
@@ -1350,35 +1401,26 @@ static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu,
u32 revn)
{
struct opp_table *opp_table;
- struct nvmem_cell *cell;
u32 supp_hw = UINT_MAX;
- void *buf;
+ u16 speedbin;
+ int ret;
- cell = nvmem_cell_get(dev, "speed_bin");
+ ret = nvmem_cell_read_u16(dev, "speed_bin", &speedbin);
/*
* -ENOENT means that the platform doesn't support speedbin which is
* fine
*/
- if (PTR_ERR(cell) == -ENOENT)
+ if (ret == -ENOENT) {
return 0;
- else if (IS_ERR(cell)) {
- DRM_DEV_ERROR(dev,
- "failed to read speed-bin. Some OPPs may not be supported by hardware");
- goto done;
- }
-
- buf = nvmem_cell_read(cell, NULL);
- if (IS_ERR(buf)) {
- nvmem_cell_put(cell);
+ } else if (ret) {
DRM_DEV_ERROR(dev,
- "failed to read speed-bin. Some OPPs may not be supported by hardware");
+ "failed to read speed-bin (%d). Some OPPs may not be supported by hardware",
+ ret);
goto done;
}
+ speedbin = le16_to_cpu(speedbin);
- supp_hw = fuse_to_supp_hw(dev, revn, *((u32 *) buf));
-
- kfree(buf);
- nvmem_cell_put(cell);
+ supp_hw = fuse_to_supp_hw(dev, revn, speedbin);
done:
opp_table = dev_pm_opp_set_supported_hw(dev, &supp_hw, 1);
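For reference, the rewritten a6xx_ucode_check_version() above keys off two encodings in the first dword of the SQE firmware: a patched build flags itself with 0xa in the lowest nibble (its patch level then sits in the lowest nibble of a later dword, buf[2] here), otherwise the low 12 bits carry the version and must be at least 0x190 (0x095 for a650). A stand-alone sketch of the a618/a630/a640-class check, fed with made-up firmware headers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the version test used for a618/a630/a640-class firmware above. */
static bool sqe_ucode_ok(const uint32_t *buf)
{
        return (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1) ||
               (buf[0] & 0xfff) >= 0x190;
}

int main(void)
{
        uint32_t old_fw[3] = { 0x016de187, 0, 0 };      /* example: version 0x187 */
        uint32_t new_fw[3] = { 0x016de190, 0, 0 };      /* example: version 0x190 */

        printf("old ok? %d  new ok? %d\n", sqe_ucode_ok(old_fw), sqe_ucode_ok(new_fw));
        return 0;
}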
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 8981cfa9dbc3..92e6f1b94738 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -496,7 +496,9 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
DPU_REG_WRITE(c, CTL_TOP, mode_sel);
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
- DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, BIT(cfg->merge_3d - MERGE_3D_0));
+ if (cfg->merge_3d)
+ DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
+ BIT(cfg->merge_3d - MERGE_3D_0));
}
static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 5a8e3e1fc48c..85f2c3564c96 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -43,6 +43,8 @@
#define DPU_DEBUGFS_DIR "msm_dpu"
#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
+#define MIN_IB_BW 400000000ULL /* Min ib vote 400 MBps */
+
static int dpu_kms_hw_init(struct msm_kms *kms);
static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
@@ -931,6 +933,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
DPU_DEBUG("REG_DMA is not defined");
}
+ if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss"))
+ dpu_kms_parse_data_bus_icc_path(dpu_kms);
+
pm_runtime_get_sync(&dpu_kms->pdev->dev);
dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
@@ -1032,9 +1037,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dpu_vbif_init_memtypes(dpu_kms);
- if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss"))
- dpu_kms_parse_data_bus_icc_path(dpu_kms);
-
pm_runtime_put_sync(&dpu_kms->pdev->dev);
return 0;
@@ -1191,10 +1193,10 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
ddev = dpu_kms->dev;
+ WARN_ON(!(dpu_kms->num_paths));
/* Min vote of BW is required before turning on AXI clk */
for (i = 0; i < dpu_kms->num_paths; i++)
- icc_set_bw(dpu_kms->path[i], 0,
- dpu_kms->catalog->perf.min_dram_ib);
+ icc_set_bw(dpu_kms->path[i], 0, Bps_to_icc(MIN_IB_BW));
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
if (rc) {
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index 1c6e1d2b947c..7c22bfe0fc7d 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -32,6 +32,8 @@ struct dp_aux_private {
struct drm_dp_aux dp_aux;
};
+#define MAX_AUX_RETRIES 5
+
static const char *dp_aux_get_error(u32 aux_error)
{
switch (aux_error) {
@@ -377,6 +379,11 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
ret = dp_aux_cmd_fifo_tx(aux, msg);
if (ret < 0) {
+ if (aux->native) {
+ aux->retry_cnt++;
+ if (!(aux->retry_cnt % MAX_AUX_RETRIES))
+ dp_catalog_aux_update_cfg(aux->catalog);
+ }
usleep_range(400, 500); /* at least 400us to next try */
goto unlock_exit;
}
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
index a45fe95aff49..3dc65877fa10 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
@@ -163,7 +163,7 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
break;
case MSM_DSI_PHY_7NM:
case MSM_DSI_PHY_7NM_V4_1:
- pll = msm_dsi_pll_7nm_init(pdev, id);
+ pll = msm_dsi_pll_7nm_init(pdev, type, id);
break;
default:
pll = ERR_PTR(-ENXIO);
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
index 3405982a092c..bbecb1de5678 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
@@ -117,10 +117,12 @@ msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
}
#endif
#ifdef CONFIG_DRM_MSM_DSI_7NM_PHY
-struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id);
+struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev,
+ enum msm_dsi_phy_type type, int id);
#else
static inline struct msm_dsi_pll *
-msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
+msm_dsi_pll_7nm_init(struct platform_device *pdev,
+ enum msm_dsi_phy_type type, int id)
{
return ERR_PTR(-ENODEV);
}
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c
index 93bf142e4a4e..e29b3bfd63d1 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c
@@ -325,7 +325,7 @@ static void dsi_pll_commit(struct dsi_pll_7nm *pll)
pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1, reg->frac_div_start_low);
pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1, reg->frac_div_start_mid);
pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1, reg->frac_div_start_high);
- pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, reg->pll_lockdet_rate);
pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
pll_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, 0x10); /* TODO: 0x00 for CPHY */
pll_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS, reg->pll_clock_inverters);
@@ -509,6 +509,7 @@ static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
{
struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+ struct dsi_pll_config *config = &pll_7nm->pll_configuration;
void __iomem *base = pll_7nm->mmio;
u64 ref_clk = pll_7nm->vco_ref_clk_rate;
u64 vco_rate = 0x0;
@@ -529,9 +530,8 @@ static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
/*
* TODO:
* 1. Assumes prescaler is disabled
- * 2. Multiplier is 2^18. it should be 2^(num_of_frac_bits)
*/
- multiplier = 1 << 18;
+ multiplier = 1 << config->frac_bits;
pll_freq = dec * (ref_clk * 2);
tmp64 = (ref_clk * 2 * frac);
pll_freq += div_u64(tmp64, multiplier);
@@ -852,7 +852,8 @@ err_base_clk_hw:
return ret;
}
-struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
+struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev,
+ enum msm_dsi_phy_type type, int id)
{
struct dsi_pll_7nm *pll_7nm;
struct msm_dsi_pll *pll;
@@ -885,7 +886,7 @@ struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
pll = &pll_7nm->base;
pll->min_rate = 1000000000UL;
pll->max_rate = 3500000000UL;
- if (pll->type == MSM_DSI_PHY_7NM_V4_1) {
+ if (type == MSM_DSI_PHY_7NM_V4_1) {
pll->min_rate = 600000000UL;
pll->max_rate = (unsigned long)5000000000ULL;
/* workaround for max rate overflowing on 32-bit builds: */
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 6a326761dc4a..edcaccaa27e6 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -57,10 +57,13 @@ static void vblank_put(struct msm_kms *kms, unsigned crtc_mask)
static void lock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
{
+ int crtc_index;
struct drm_crtc *crtc;
- for_each_crtc_mask(kms->dev, crtc, crtc_mask)
- mutex_lock(&kms->commit_lock[drm_crtc_index(crtc)]);
+ for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
+ crtc_index = drm_crtc_index(crtc);
+ mutex_lock_nested(&kms->commit_lock[crtc_index], crtc_index);
+ }
}
static void unlock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 94525ac76d4e..196907689c82 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -570,6 +570,7 @@ err_free_priv:
kfree(priv);
err_put_drm_dev:
drm_dev_put(ddev);
+ platform_set_drvdata(pdev, NULL);
return ret;
}
@@ -1072,6 +1073,10 @@ static int __maybe_unused msm_pm_resume(struct device *dev)
static int __maybe_unused msm_pm_prepare(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
+ struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL;
+
+ if (!priv || !priv->kms)
+ return 0;
return drm_mode_config_helper_suspend(ddev);
}
@@ -1079,6 +1084,10 @@ static int __maybe_unused msm_pm_prepare(struct device *dev)
static void __maybe_unused msm_pm_complete(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
+ struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL;
+
+ if (!priv || !priv->kms)
+ return;
drm_mode_config_helper_resume(ddev);
}
@@ -1311,6 +1320,10 @@ static int msm_pdev_remove(struct platform_device *pdev)
static void msm_pdev_shutdown(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
+ struct msm_drm_private *priv = drm ? drm->dev_private : NULL;
+
+ if (!priv || !priv->kms)
+ return;
drm_atomic_helper_shutdown(drm);
}
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index ad2703698b05..cd59a5918038 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -45,7 +45,7 @@ int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence,
int ret;
if (fence > fctx->last_fence) {
- DRM_ERROR("%s: waiting on invalid fence: %u (of %u)\n",
+ DRM_ERROR_RATELIMITED("%s: waiting on invalid fence: %u (of %u)\n",
fctx->name, fence, fctx->last_fence);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 4735251a394d..d8151a89e163 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -157,7 +157,6 @@ struct msm_kms {
* from the crtc's pending_timer close to end of the frame:
*/
struct mutex commit_lock[MAX_CRTCS];
- struct lock_class_key commit_lock_keys[MAX_CRTCS];
unsigned pending_crtc_mask;
struct msm_pending_timer pending_timers[MAX_CRTCS];
};
@@ -167,11 +166,8 @@ static inline int msm_kms_init(struct msm_kms *kms,
{
unsigned i, ret;
- for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++) {
- lockdep_register_key(&kms->commit_lock_keys[i]);
- __mutex_init(&kms->commit_lock[i], "&kms->commit_lock[i]",
- &kms->commit_lock_keys[i]);
- }
+ for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++)
+ mutex_init(&kms->commit_lock[i]);
kms->funcs = funcs;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 196612addfd6..1c9c0cdf85db 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -2693,9 +2693,20 @@ nv50_display_create(struct drm_device *dev)
else
nouveau_display(dev)->format_modifiers = disp50xx_modifiers;
- if (disp->disp->object.oclass >= GK104_DISP) {
+ /* FIXME: 256x256 cursors are supported on Kepler, however unlike Maxwell and later
+ * generations Kepler requires that we use small pages (4K) for cursor scanout surfaces. The
+ * proper fix for this is to teach nouveau to migrate fbs being used for the cursor plane to
+ * small page allocations in prepare_fb(). When this is implemented, we should also force
+ * large pages (128K) for ovly fbs in order to fix Kepler ovlys.
+ * But until then, just limit cursors to 128x128 - which is small enough to avoid ever using
+ * large pages.
+ */
+ if (disp->disp->object.oclass >= GM107_DISP) {
dev->mode_config.cursor_width = 256;
dev->mode_config.cursor_height = 256;
+ } else if (disp->disp->object.oclass >= GK104_DISP) {
+ dev->mode_config.cursor_width = 128;
+ dev->mode_config.cursor_height = 128;
} else {
dev->mode_config.cursor_width = 64;
dev->mode_config.cursor_height = 64;
diff --git a/drivers/gpu/drm/panel/panel-dsi-cm.c b/drivers/gpu/drm/panel/panel-dsi-cm.c
index af381d756ac1..5fbfb71ca3d9 100644
--- a/drivers/gpu/drm/panel/panel-dsi-cm.c
+++ b/drivers/gpu/drm/panel/panel-dsi-cm.c
@@ -37,6 +37,7 @@ struct dsic_panel_data {
u32 height_mm;
u32 max_hs_rate;
u32 max_lp_rate;
+ bool te_support;
};
struct panel_drv_data {
@@ -334,9 +335,11 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
if (r)
goto err;
- r = mipi_dsi_dcs_set_tear_on(ddata->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (r)
- goto err;
+ if (ddata->panel_data->te_support) {
+ r = mipi_dsi_dcs_set_tear_on(ddata->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (r)
+ goto err;
+ }
/* possible panel bug */
msleep(100);
@@ -619,6 +622,7 @@ static const struct dsic_panel_data taal_data = {
.height_mm = 0,
.max_hs_rate = 300000000,
.max_lp_rate = 10000000,
+ .te_support = true,
};
static const struct dsic_panel_data himalaya_data = {
@@ -629,6 +633,7 @@ static const struct dsic_panel_data himalaya_data = {
.height_mm = 88,
.max_hs_rate = 300000000,
.max_lp_rate = 10000000,
+ .te_support = false,
};
static const struct dsic_panel_data droid4_data = {
@@ -639,6 +644,7 @@ static const struct dsic_panel_data droid4_data = {
.height_mm = 89,
.max_hs_rate = 300000000,
.max_lp_rate = 10000000,
+ .te_support = false,
};
static const struct of_device_id dsicm_of_match[] = {
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index e8c66d10478f..78893bea85ae 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -364,7 +364,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *
if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
/* check that we only pin down anonymous memory
to prevent problems with writeback */
- unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
+ unsigned long end = gtt->userptr + (u64)ttm->num_pages * PAGE_SIZE;
struct vm_area_struct *vma;
vma = find_vma(gtt->usermm, gtt->userptr);
if (!vma || vma->vm_file || vma->vm_end < end)
@@ -386,7 +386,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *
} while (pinned < ttm->num_pages);
r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
- ttm->num_pages << PAGE_SHIFT,
+ (u64)ttm->num_pages << PAGE_SHIFT,
GFP_KERNEL);
if (r)
goto release_sg;
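Both radeon hunks above cast num_pages to u64 before the multiply/shift because, in 32-bit arithmetic, num_pages * PAGE_SIZE can wrap for very large buffers. A small stand-alone demonstration of the wrap and the fix (the page count is picked only to make the overflow visible):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        uint32_t num_pages = 0x120000;                  /* ~4.5 GiB worth of pages */
        uint32_t wrapped   = num_pages << PAGE_SHIFT;   /* 32-bit math: truncates */
        uint64_t correct   = (uint64_t)num_pages << PAGE_SHIFT;

        printf("wrapped=0x%x correct=0x%llx\n", wrapped,
               (unsigned long long)correct);
        return 0;
}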
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index ba8c6038cd63..ca3761772211 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -48,21 +48,12 @@ static unsigned int rcar_du_encoder_count_ports(struct device_node *node)
static const struct drm_encoder_funcs rcar_du_encoder_funcs = {
};
-static void rcar_du_encoder_release(struct drm_device *dev, void *res)
-{
- struct rcar_du_encoder *renc = res;
-
- drm_encoder_cleanup(&renc->base);
- kfree(renc);
-}
-
int rcar_du_encoder_init(struct rcar_du_device *rcdu,
enum rcar_du_output output,
struct device_node *enc_node)
{
struct rcar_du_encoder *renc;
struct drm_bridge *bridge;
- int ret;
/*
* Locate the DRM bridge from the DT node. For the DPAD outputs, if the
@@ -101,26 +92,16 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
return -ENOLINK;
}
- renc = kzalloc(sizeof(*renc), GFP_KERNEL);
- if (renc == NULL)
- return -ENOMEM;
-
- renc->output = output;
-
dev_dbg(rcdu->dev, "initializing encoder %pOF for output %u\n",
enc_node, output);
- ret = drm_encoder_init(&rcdu->ddev, &renc->base, &rcar_du_encoder_funcs,
- DRM_MODE_ENCODER_NONE, NULL);
- if (ret < 0) {
- kfree(renc);
- return ret;
- }
+ renc = drmm_encoder_alloc(&rcdu->ddev, struct rcar_du_encoder, base,
+ &rcar_du_encoder_funcs, DRM_MODE_ENCODER_NONE,
+ NULL);
+ if (!renc)
+ return -ENOMEM;
- ret = drmm_add_action_or_reset(&rcdu->ddev, rcar_du_encoder_release,
- renc);
- if (ret)
- return ret;
+ renc->output = output;
/*
* Attach the bridge to the encoder. The bridge will create the
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 0ae3a025efe9..134986dc2783 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1688,6 +1688,11 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
dev_err(dc->dev,
"failed to set clock rate to %lu Hz\n",
state->pclk);
+
+ err = clk_set_rate(dc->clk, state->pclk);
+ if (err < 0)
+ dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n",
+ dc->clk, state->pclk, err);
}
DRM_DEBUG_KMS("rate: %lu, div: %u\n", clk_get_rate(dc->clk),
@@ -1698,11 +1703,6 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
value = SHIFT_CLK_DIVIDER(state->div) | PIXEL_CLK_DIVIDER_PCD1;
tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
}
-
- err = clk_set_rate(dc->clk, state->pclk);
- if (err < 0)
- dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n",
- dc->clk, state->pclk, err);
}
static void tegra_dc_stop(struct tegra_dc *dc)
@@ -2501,22 +2501,18 @@ static int tegra_dc_couple(struct tegra_dc *dc)
* POWER_CONTROL registers during CRTC enabling.
*/
if (dc->soc->coupled_pm && dc->pipe == 1) {
- u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER;
- struct device_link *link;
- struct device *partner;
+ struct device *companion;
+ struct tegra_dc *parent;
- partner = driver_find_device(dc->dev->driver, NULL, NULL,
- tegra_dc_match_by_pipe);
- if (!partner)
+ companion = driver_find_device(dc->dev->driver, NULL, NULL,
+ tegra_dc_match_by_pipe);
+ if (!companion)
return -EPROBE_DEFER;
- link = device_link_add(dc->dev, partner, flags);
- if (!link) {
- dev_err(dc->dev, "failed to link controllers\n");
- return -EINVAL;
- }
+ parent = dev_get_drvdata(companion);
+ dc->client.parent = &parent->client;
- dev_dbg(dc->dev, "coupled to %s\n", dev_name(partner));
+ dev_dbg(dc->dev, "coupled to %s\n", dev_name(companion));
}
return 0;
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index f02a035dda45..7b88261f57bb 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -3115,6 +3115,12 @@ static int tegra_sor_init(struct host1x_client *client)
* kernel is possible.
*/
if (sor->rst) {
+ err = pm_runtime_resume_and_get(sor->dev);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to get runtime PM: %d\n", err);
+ return err;
+ }
+
err = reset_control_acquire(sor->rst);
if (err < 0) {
dev_err(sor->dev, "failed to acquire SOR reset: %d\n",
@@ -3148,6 +3154,7 @@ static int tegra_sor_init(struct host1x_client *client)
}
reset_control_release(sor->rst);
+ pm_runtime_put(sor->dev);
}
err = clk_prepare_enable(sor->clk_safe);
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 269390bc586e..76657dcdf9b0 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -210,6 +210,7 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
{
const struct vc4_crtc_data *crtc_data = vc4_crtc_to_vc4_crtc_data(vc4_crtc);
const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc);
+ struct vc4_dev *vc4 = to_vc4_dev(vc4_crtc->base.dev);
u32 fifo_len_bytes = pv_data->fifo_depth;
/*
@@ -238,6 +239,22 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
if (crtc_data->hvs_output == 5)
return 32;
+ /*
+ * It looks like in some situations, we will overflow
+ * the PixelValve FIFO (with the bit 10 of PV stat being
+ * set) and stall the HVS / PV, eventually resulting in
+ * a page flip timeout.
+ *
+ * Displaying the video overlay during a playback with
+ * Kodi on an RPi3 is a reliable way to reproduce the
+ * issue, with a failure rate around 50%.
+ *
+ * Removing 1 from the FIFO full level however
+ * seems to completely remove that issue.
+ */
+ if (!vc4->hvs->hvs5)
+ return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX - 1;
+
return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX;
}
}
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 7322169c0682..1e9c84cf614a 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -1146,7 +1146,6 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
plane->state->src_y = state->src_y;
plane->state->src_w = state->src_w;
plane->state->src_h = state->src_h;
- plane->state->src_h = state->src_h;
plane->state->alpha = state->alpha;
plane->state->pixel_blend_mode = state->pixel_blend_mode;
plane->state->rotation = state->rotation;
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 30d9adf31c84..9f14d99c763c 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -521,7 +521,7 @@ static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
if (IS_ERR(drm_dev)) {
ret = PTR_ERR(drm_dev);
- goto fail;
+ goto fail_dev;
}
drm_info->drm_dev = drm_dev;
@@ -551,8 +551,10 @@ fail_modeset:
drm_kms_helper_poll_fini(drm_dev);
drm_mode_config_cleanup(drm_dev);
drm_dev_put(drm_dev);
-fail:
+fail_dev:
kfree(drm_info);
+ front_info->drm_info = NULL;
+fail:
return ret;
}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_conn.h b/drivers/gpu/drm/xen/xen_drm_front_conn.h
index 3adacba9a23b..e5f4314899ee 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_conn.h
+++ b/drivers/gpu/drm/xen/xen_drm_front_conn.h
@@ -16,7 +16,6 @@
struct drm_connector;
struct xen_drm_front_drm_info;
-struct xen_drm_front_drm_info;
int xen_drm_front_conn_init(struct xen_drm_front_drm_info *drm_info,
struct drm_connector *connector);
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 347fb962b6c9..68a766ff0e9d 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -705,8 +705,9 @@ void host1x_driver_unregister(struct host1x_driver *driver)
EXPORT_SYMBOL(host1x_driver_unregister);
/**
- * host1x_client_register() - register a host1x client
+ * __host1x_client_register() - register a host1x client
* @client: host1x client
+ * @key: lock class key for the client-specific mutex
*
* Registers a host1x client with each host1x controller instance. Note that
* each client will only match their parent host1x controller and will only be
@@ -715,13 +716,14 @@ EXPORT_SYMBOL(host1x_driver_unregister);
* device and call host1x_device_init(), which will in turn call each client's
* &host1x_client_ops.init implementation.
*/
-int host1x_client_register(struct host1x_client *client)
+int __host1x_client_register(struct host1x_client *client,
+ struct lock_class_key *key)
{
struct host1x *host1x;
int err;
INIT_LIST_HEAD(&client->list);
- mutex_init(&client->lock);
+ __mutex_init(&client->lock, "host1x client lock", key);
client->usecount = 0;
mutex_lock(&devices_lock);
@@ -742,7 +744,7 @@ int host1x_client_register(struct host1x_client *client)
return 0;
}
-EXPORT_SYMBOL(host1x_client_register);
+EXPORT_SYMBOL(__host1x_client_register);
/**
* host1x_client_unregister() - unregister a host1x client
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 0abce004a959..65e3e7df8a4b 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -76,7 +76,9 @@ static struct workqueue_struct *addr_wq;
static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = {
[LS_NLA_TYPE_DGID] = {.type = NLA_BINARY,
- .len = sizeof(struct rdma_nla_ls_gid)},
+ .len = sizeof(struct rdma_nla_ls_gid),
+ .validation_type = NLA_VALIDATE_MIN,
+ .min = sizeof(struct rdma_nla_ls_gid)},
};
static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh)
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 81903749d241..e42c812e74c3 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -3616,7 +3616,8 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
c4iw_init_wr_wait(ep->com.wr_waitp);
err = cxgb4_remove_server(
ep->com.dev->rdev.lldi.ports[0], ep->stid,
- ep->com.dev->rdev.lldi.rxq_ids[0], true);
+ ep->com.dev->rdev.lldi.rxq_ids[0],
+ ep->com.local_addr.ss_family == AF_INET6);
if (err)
goto done;
err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp,
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 2a91b8d95e12..04b1e8f021f6 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -632,22 +632,11 @@ static void _dev_comp_vect_cpu_mask_clean_up(struct hfi1_devdata *dd,
*/
int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
{
- int node = pcibus_to_node(dd->pcidev->bus);
struct hfi1_affinity_node *entry;
const struct cpumask *local_mask;
int curr_cpu, possible, i, ret;
bool new_entry = false;
- /*
- * If the BIOS does not have the NUMA node information set, select
- * NUMA 0 so we get consistent performance.
- */
- if (node < 0) {
- dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
- node = 0;
- }
- dd->node = node;
-
local_mask = cpumask_of_node(dd->node);
if (cpumask_first(local_mask) >= nr_cpu_ids)
local_mask = topology_core_cpumask(0);
@@ -660,7 +649,7 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
* create an entry in the global affinity structure and initialize it.
*/
if (!entry) {
- entry = node_affinity_allocate(node);
+ entry = node_affinity_allocate(dd->node);
if (!entry) {
dd_dev_err(dd,
"Unable to allocate global affinity node\n");
@@ -751,6 +740,7 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
if (new_entry)
node_affinity_add_tail(entry);
+ dd->affinity_entry = entry;
mutex_unlock(&node_affinity.lock);
return 0;
@@ -766,10 +756,9 @@ void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd)
{
struct hfi1_affinity_node *entry;
- if (dd->node < 0)
- return;
-
mutex_lock(&node_affinity.lock);
+ if (!dd->affinity_entry)
+ goto unlock;
entry = node_affinity_lookup(dd->node);
if (!entry)
goto unlock;
@@ -780,8 +769,8 @@ void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd)
*/
_dev_comp_vect_cpu_mask_clean_up(dd, entry);
unlock:
+ dd->affinity_entry = NULL;
mutex_unlock(&node_affinity.lock);
- dd->node = NUMA_NO_NODE;
}
/*
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index e09e8244a94c..2a9a040569eb 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1409,6 +1409,7 @@ struct hfi1_devdata {
spinlock_t irq_src_lock;
int vnic_num_vports;
struct net_device *dummy_netdev;
+ struct hfi1_affinity_node *affinity_entry;
/* Keeps track of IPoIB RSM rule users */
atomic_t ipoib_rsm_usr_num;
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index cb7ad1288821..786c6316273f 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1277,7 +1277,6 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
dd->pport = (struct hfi1_pportdata *)(dd + 1);
dd->pcidev = pdev;
pci_set_drvdata(pdev, dd);
- dd->node = NUMA_NO_NODE;
ret = xa_alloc_irq(&hfi1_dev_table, &dd->unit, dd, xa_limit_32b,
GFP_KERNEL);
@@ -1287,6 +1286,15 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
goto bail;
}
rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);
+ /*
+ * If the BIOS does not have the NUMA node information set, select
+ * NUMA 0 so we get consistent performance.
+ */
+ dd->node = pcibus_to_node(pdev->bus);
+ if (dd->node == NUMA_NO_NODE) {
+ dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
+ dd->node = 0;
+ }
/*
* Initialize all locks for the device. This needs to be as early as
diff --git a/drivers/infiniband/hw/hfi1/netdev_rx.c b/drivers/infiniband/hw/hfi1/netdev_rx.c
index 1fb6e1a0e4e1..1bcab992ac26 100644
--- a/drivers/infiniband/hw/hfi1/netdev_rx.c
+++ b/drivers/infiniband/hw/hfi1/netdev_rx.c
@@ -173,8 +173,7 @@ u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
return 0;
}
- cpumask_and(node_cpu_mask, cpu_mask,
- cpumask_of_node(pcibus_to_node(dd->pcidev->bus)));
+ cpumask_and(node_cpu_mask, cpu_mask, cpumask_of_node(dd->node));
available_cpus = cpumask_weight(node_cpu_mask);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 0eb6a7a618e0..9ea542270ed4 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1244,7 +1244,8 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
* TGT QP isn't associated with RQ/SQ
*/
if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created) &&
- (attrs->qp_type != IB_QPT_XRC_TGT)) {
+ (attrs->qp_type != IB_QPT_XRC_TGT) &&
+ (attrs->qp_type != IB_QPT_XRC_INI)) {
struct qedr_cq *send_cq = get_qedr_cq(attrs->send_cq);
struct qedr_cq *recv_cq = get_qedr_cq(attrs->recv_cq);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 0a08b4b742a3..6734329cca33 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -2720,8 +2720,8 @@ void rtrs_clt_close(struct rtrs_clt *clt)
/* Now it is safe to iterate over all paths without locks */
list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
- rtrs_clt_destroy_sess_files(sess, NULL);
rtrs_clt_close_conns(sess, true);
+ rtrs_clt_destroy_sess_files(sess, NULL);
kobject_put(&sess->kobj);
}
free_clt(clt);
diff --git a/drivers/interconnect/bulk.c b/drivers/interconnect/bulk.c
index 73e2c8d0a412..448cc536aa79 100644
--- a/drivers/interconnect/bulk.c
+++ b/drivers/interconnect/bulk.c
@@ -53,7 +53,7 @@ void icc_bulk_put(int num_paths, struct icc_bulk_data *paths)
EXPORT_SYMBOL_GPL(icc_bulk_put);
/**
- * icc_bulk_set() - set bandwidth to a set of paths
+ * icc_bulk_set_bw() - set bandwidth to a set of paths
* @num_paths: the number of icc_bulk_data
* @paths: the icc_bulk_data table containing the paths and bandwidth
*
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 5ad519c9f239..8a1e70e00876 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -942,6 +942,8 @@ int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
GFP_KERNEL);
if (new)
src->links = new;
+ else
+ ret = -ENOMEM;
out:
mutex_unlock(&icc_lock);
diff --git a/drivers/interconnect/qcom/msm8939.c b/drivers/interconnect/qcom/msm8939.c
index dfbec30ed149..20f31a1b4192 100644
--- a/drivers/interconnect/qcom/msm8939.c
+++ b/drivers/interconnect/qcom/msm8939.c
@@ -131,7 +131,7 @@ DEFINE_QNODE(mas_pcnoc_sdcc_1, MSM8939_MASTER_SDCC_1, 8, -1, -1, MSM8939_PNOC_IN
DEFINE_QNODE(mas_pcnoc_sdcc_2, MSM8939_MASTER_SDCC_2, 8, -1, -1, MSM8939_PNOC_INT_1);
DEFINE_QNODE(mas_qdss_bam, MSM8939_MASTER_QDSS_BAM, 8, -1, -1, MSM8939_SNOC_QDSS_INT);
DEFINE_QNODE(mas_qdss_etr, MSM8939_MASTER_QDSS_ETR, 8, -1, -1, MSM8939_SNOC_QDSS_INT);
-DEFINE_QNODE(mas_snoc_cfg, MSM8939_MASTER_SNOC_CFG, 4, 20, -1, MSM8939_SLAVE_SRVC_SNOC);
+DEFINE_QNODE(mas_snoc_cfg, MSM8939_MASTER_SNOC_CFG, 4, -1, -1, MSM8939_SLAVE_SRVC_SNOC);
DEFINE_QNODE(mas_spdm, MSM8939_MASTER_SPDM, 4, -1, -1, MSM8939_PNOC_MAS_0);
DEFINE_QNODE(mas_tcu0, MSM8939_MASTER_TCU0, 16, -1, -1, MSM8939_SLAVE_EBI_CH0, MSM8939_BIMC_SNOC_MAS, MSM8939_SLAVE_AMPSS_L2);
DEFINE_QNODE(mas_usb_hs1, MSM8939_MASTER_USB_HS1, 4, -1, -1, MSM8939_PNOC_MAS_1);
@@ -156,14 +156,14 @@ DEFINE_QNODE(pcnoc_snoc_mas, MSM8939_PNOC_SNOC_MAS, 8, 29, -1, MSM8939_PNOC_SNOC
DEFINE_QNODE(pcnoc_snoc_slv, MSM8939_PNOC_SNOC_SLV, 8, -1, 45, MSM8939_SNOC_INT_0, MSM8939_SNOC_INT_BIMC, MSM8939_SNOC_INT_1);
DEFINE_QNODE(qdss_int, MSM8939_SNOC_QDSS_INT, 8, -1, -1, MSM8939_SNOC_INT_0, MSM8939_SNOC_INT_BIMC);
DEFINE_QNODE(slv_apps_l2, MSM8939_SLAVE_AMPSS_L2, 16, -1, -1, 0);
-DEFINE_QNODE(slv_apss, MSM8939_SLAVE_APSS, 4, -1, 20, 0);
+DEFINE_QNODE(slv_apss, MSM8939_SLAVE_APSS, 4, -1, -1, 0);
DEFINE_QNODE(slv_audio, MSM8939_SLAVE_LPASS, 4, -1, -1, 0);
DEFINE_QNODE(slv_bimc_cfg, MSM8939_SLAVE_BIMC_CFG, 4, -1, -1, 0);
DEFINE_QNODE(slv_blsp_1, MSM8939_SLAVE_BLSP_1, 4, -1, -1, 0);
DEFINE_QNODE(slv_boot_rom, MSM8939_SLAVE_BOOT_ROM, 4, -1, -1, 0);
DEFINE_QNODE(slv_camera_cfg, MSM8939_SLAVE_CAMERA_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_cats_0, MSM8939_SLAVE_CATS_128, 16, -1, 106, 0);
-DEFINE_QNODE(slv_cats_1, MSM8939_SLAVE_OCMEM_64, 8, -1, 107, 0);
+DEFINE_QNODE(slv_cats_0, MSM8939_SLAVE_CATS_128, 16, -1, -1, 0);
+DEFINE_QNODE(slv_cats_1, MSM8939_SLAVE_OCMEM_64, 8, -1, -1, 0);
DEFINE_QNODE(slv_clk_ctl, MSM8939_SLAVE_CLK_CTL, 4, -1, -1, 0);
DEFINE_QNODE(slv_crypto_0_cfg, MSM8939_SLAVE_CRYPTO_0_CFG, 4, -1, -1, 0);
DEFINE_QNODE(slv_dehr_cfg, MSM8939_SLAVE_DEHR_CFG, 4, -1, -1, 0);
@@ -187,20 +187,20 @@ DEFINE_QNODE(slv_sdcc_2, MSM8939_SLAVE_SDCC_2, 4, -1, -1, 0);
DEFINE_QNODE(slv_security, MSM8939_SLAVE_SECURITY, 4, -1, -1, 0);
DEFINE_QNODE(slv_snoc_cfg, MSM8939_SLAVE_SNOC_CFG, 4, -1, -1, 0);
DEFINE_QNODE(slv_spdm, MSM8939_SLAVE_SPDM, 4, -1, -1, 0);
-DEFINE_QNODE(slv_srvc_snoc, MSM8939_SLAVE_SRVC_SNOC, 8, -1, 29, 0);
+DEFINE_QNODE(slv_srvc_snoc, MSM8939_SLAVE_SRVC_SNOC, 8, -1, -1, 0);
DEFINE_QNODE(slv_tcsr, MSM8939_SLAVE_TCSR, 4, -1, -1, 0);
DEFINE_QNODE(slv_tlmm, MSM8939_SLAVE_TLMM, 4, -1, -1, 0);
DEFINE_QNODE(slv_usb_hs1, MSM8939_SLAVE_USB_HS1, 4, -1, -1, 0);
DEFINE_QNODE(slv_usb_hs2, MSM8939_SLAVE_USB_HS2, 4, -1, -1, 0);
DEFINE_QNODE(slv_venus_cfg, MSM8939_SLAVE_VENUS_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(snoc_bimc_0_mas, MSM8939_SNOC_BIMC_0_MAS, 16, 3, -1, MSM8939_SNOC_BIMC_0_SLV);
-DEFINE_QNODE(snoc_bimc_0_slv, MSM8939_SNOC_BIMC_0_SLV, 16, -1, 24, MSM8939_SLAVE_EBI_CH0);
+DEFINE_QNODE(snoc_bimc_0_mas, MSM8939_SNOC_BIMC_0_MAS, 16, -1, -1, MSM8939_SNOC_BIMC_0_SLV);
+DEFINE_QNODE(snoc_bimc_0_slv, MSM8939_SNOC_BIMC_0_SLV, 16, -1, -1, MSM8939_SLAVE_EBI_CH0);
DEFINE_QNODE(snoc_bimc_1_mas, MSM8939_SNOC_BIMC_1_MAS, 16, 76, -1, MSM8939_SNOC_BIMC_1_SLV);
DEFINE_QNODE(snoc_bimc_1_slv, MSM8939_SNOC_BIMC_1_SLV, 16, -1, 104, MSM8939_SLAVE_EBI_CH0);
DEFINE_QNODE(snoc_bimc_2_mas, MSM8939_SNOC_BIMC_2_MAS, 16, -1, -1, MSM8939_SNOC_BIMC_2_SLV);
DEFINE_QNODE(snoc_bimc_2_slv, MSM8939_SNOC_BIMC_2_SLV, 16, -1, -1, MSM8939_SLAVE_EBI_CH0);
DEFINE_QNODE(snoc_int_0, MSM8939_SNOC_INT_0, 8, 99, 130, MSM8939_SLAVE_QDSS_STM, MSM8939_SLAVE_IMEM, MSM8939_SNOC_PNOC_MAS);
-DEFINE_QNODE(snoc_int_1, MSM8939_SNOC_INT_1, 8, 100, 131, MSM8939_SLAVE_APSS, MSM8939_SLAVE_CATS_128, MSM8939_SLAVE_OCMEM_64);
+DEFINE_QNODE(snoc_int_1, MSM8939_SNOC_INT_1, 8, -1, -1, MSM8939_SLAVE_APSS, MSM8939_SLAVE_CATS_128, MSM8939_SLAVE_OCMEM_64);
DEFINE_QNODE(snoc_int_bimc, MSM8939_SNOC_INT_BIMC, 8, 101, 132, MSM8939_SNOC_BIMC_1_MAS);
DEFINE_QNODE(snoc_pcnoc_mas, MSM8939_SNOC_PNOC_MAS, 8, -1, -1, MSM8939_SNOC_PNOC_SLV);
DEFINE_QNODE(snoc_pcnoc_slv, MSM8939_SNOC_PNOC_SLV, 8, -1, -1, MSM8939_PNOC_INT_0);
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 5e306bba4375..1ca65b434f1f 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -529,7 +529,7 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
* Grab our output buffer.
*/
nl = orig_nl = get_result_buffer(param, param_size, &len);
- if (len < needed) {
+ if (len < needed || len < sizeof(nl->dev)) {
param->flags |= DM_BUFFER_FULL_FLAG;
goto out;
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 95391f78b8d5..e5f0f1703c5d 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1594,6 +1594,13 @@ static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev,
return blk_queue_zoned_model(q) != *zoned_model;
}
+/*
+ * Check the device zoned model based on the target feature flag. If the target
+ * has the DM_TARGET_ZONED_HM feature flag set, host-managed zoned devices are
+ * also accepted but all devices must have the same zoned model. If the target
+ * has the DM_TARGET_MIXED_ZONED_MODEL feature set, the devices can have any
+ * zoned model with all zoned devices having the same zone size.
+ */
static bool dm_table_supports_zoned_model(struct dm_table *t,
enum blk_zoned_model zoned_model)
{
@@ -1603,13 +1610,15 @@ static bool dm_table_supports_zoned_model(struct dm_table *t,
for (i = 0; i < dm_table_get_num_targets(t); i++) {
ti = dm_table_get_target(t, i);
- if (zoned_model == BLK_ZONED_HM &&
- !dm_target_supports_zoned_hm(ti->type))
- return false;
-
- if (!ti->type->iterate_devices ||
- ti->type->iterate_devices(ti, device_not_zoned_model, &zoned_model))
- return false;
+ if (dm_target_supports_zoned_hm(ti->type)) {
+ if (!ti->type->iterate_devices ||
+ ti->type->iterate_devices(ti, device_not_zoned_model,
+ &zoned_model))
+ return false;
+ } else if (!dm_target_supports_mixed_zoned_model(ti->type)) {
+ if (zoned_model == BLK_ZONED_HM)
+ return false;
+ }
}
return true;
@@ -1621,9 +1630,17 @@ static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *
struct request_queue *q = bdev_get_queue(dev->bdev);
unsigned int *zone_sectors = data;
+ if (!blk_queue_is_zoned(q))
+ return 0;
+
return blk_queue_zone_sectors(q) != *zone_sectors;
}
+/*
+ * Check consistency of zoned model and zone sectors across all targets. For
+ * zone sectors, if the destination device is a zoned block device, it shall
+ * have the specified zone_sectors.
+ */
static int validate_hardware_zoned_model(struct dm_table *table,
enum blk_zoned_model zoned_model,
unsigned int zone_sectors)
@@ -1642,7 +1659,7 @@ static int validate_hardware_zoned_model(struct dm_table *table,
return -EINVAL;
if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) {
- DMERR("%s: zone sectors is not consistent across all devices",
+ DMERR("%s: zone sectors is not consistent across all zoned devices",
dm_device_name(table->md));
return -EINVAL;
}
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 6b8e5bdd8526..808a98ef624c 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -34,7 +34,7 @@
#define DM_VERITY_OPT_IGN_ZEROES "ignore_zero_blocks"
#define DM_VERITY_OPT_AT_MOST_ONCE "check_at_most_once"
-#define DM_VERITY_OPTS_MAX (2 + DM_VERITY_OPTS_FEC + \
+#define DM_VERITY_OPTS_MAX (3 + DM_VERITY_OPTS_FEC + \
DM_VERITY_ROOT_HASH_VERIFICATION_OPTS)
static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 697f9de37355..7e88df64d197 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -1143,7 +1143,7 @@ static int dmz_message(struct dm_target *ti, unsigned int argc, char **argv,
static struct target_type dmz_type = {
.name = "zoned",
.version = {2, 0, 0},
- .features = DM_TARGET_SINGLETON | DM_TARGET_ZONED_HM,
+ .features = DM_TARGET_SINGLETON | DM_TARGET_MIXED_ZONED_MODEL,
.module = THIS_MODULE,
.ctr = dmz_ctr,
.dtr = dmz_dtr,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 50b693d776d6..3f3be9408afa 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2036,7 +2036,10 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
if (size != dm_get_size(md))
memset(&md->geometry, 0, sizeof(md->geometry));
- set_capacity_and_notify(md->disk, size);
+ if (!get_capacity(md->disk))
+ set_capacity(md->disk, size);
+ else
+ set_capacity_and_notify(md->disk, size);
dm_table_event_callback(t, event_callback, md);
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 4378a9b25848..2cc370adb238 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -2286,8 +2286,8 @@ int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp,
if (buffer_id == 0)
return -EINVAL;
- if (!mei_cl_is_connected(cl))
- return -ENODEV;
+ if (mei_cl_is_connected(cl))
+ return -EPROTO;
if (cl->dma_mapped)
return -EPROTO;
@@ -2327,9 +2327,7 @@ int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp,
mutex_unlock(&dev->device_lock);
wait_event_timeout(cl->wait,
- cl->dma_mapped ||
- cl->status ||
- !mei_cl_is_connected(cl),
+ cl->dma_mapped || cl->status,
mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
mutex_lock(&dev->device_lock);
@@ -2376,8 +2374,9 @@ int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
return -EOPNOTSUPP;
}
- if (!mei_cl_is_connected(cl))
- return -ENODEV;
+ /* do not allow unmap for connected client */
+ if (mei_cl_is_connected(cl))
+ return -EPROTO;
if (!cl->dma_mapped)
return -EPROTO;
@@ -2405,9 +2404,7 @@ int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
mutex_unlock(&dev->device_lock);
wait_event_timeout(cl->wait,
- !cl->dma_mapped ||
- cl->status ||
- !mei_cl_is_connected(cl),
+ !cl->dma_mapped || cl->status,
mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
mutex_lock(&dev->device_lock);
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 80ab1593ca31..492f1bcb0516 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -314,6 +314,18 @@ static int mcp251x_spi_trans(struct spi_device *spi, int len)
return ret;
}
+static int mcp251x_spi_write(struct spi_device *spi, int len)
+{
+ struct mcp251x_priv *priv = spi_get_drvdata(spi);
+ int ret;
+
+ ret = spi_write(spi, priv->spi_tx_buf, len);
+ if (ret)
+ dev_err(&spi->dev, "spi write failed: ret = %d\n", ret);
+
+ return ret;
+}
+
static u8 mcp251x_read_reg(struct spi_device *spi, u8 reg)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
@@ -361,7 +373,7 @@ static void mcp251x_write_reg(struct spi_device *spi, u8 reg, u8 val)
priv->spi_tx_buf[1] = reg;
priv->spi_tx_buf[2] = val;
- mcp251x_spi_trans(spi, 3);
+ mcp251x_spi_write(spi, 3);
}
static void mcp251x_write_2regs(struct spi_device *spi, u8 reg, u8 v1, u8 v2)
@@ -373,7 +385,7 @@ static void mcp251x_write_2regs(struct spi_device *spi, u8 reg, u8 v1, u8 v2)
priv->spi_tx_buf[2] = v1;
priv->spi_tx_buf[3] = v2;
- mcp251x_spi_trans(spi, 4);
+ mcp251x_spi_write(spi, 4);
}
static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
@@ -386,7 +398,7 @@ static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
priv->spi_tx_buf[2] = mask;
priv->spi_tx_buf[3] = val;
- mcp251x_spi_trans(spi, 4);
+ mcp251x_spi_write(spi, 4);
}
static u8 mcp251x_read_stat(struct spi_device *spi)
@@ -618,7 +630,7 @@ static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
buf[i]);
} else {
memcpy(priv->spi_tx_buf, buf, TXBDAT_OFF + len);
- mcp251x_spi_trans(spi, TXBDAT_OFF + len);
+ mcp251x_spi_write(spi, TXBDAT_OFF + len);
}
}
@@ -650,7 +662,7 @@ static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
/* use INSTRUCTION_RTS, to avoid "repeated frame problem" */
priv->spi_tx_buf[0] = INSTRUCTION_RTS(1 << tx_buf_idx);
- mcp251x_spi_trans(priv->spi, 1);
+ mcp251x_spi_write(priv->spi, 1);
}
static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
@@ -888,7 +900,7 @@ static int mcp251x_hw_reset(struct spi_device *spi)
mdelay(MCP251X_OST_DELAY_MS);
priv->spi_tx_buf[0] = INSTRUCTION_RESET;
- ret = mcp251x_spi_trans(spi, 1);
+ ret = mcp251x_spi_write(spi, 1);
if (ret)
return ret;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index ad006edf474d..e69b005be068 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -861,7 +861,7 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
if (dev->adapter->dev_set_bus) {
err = dev->adapter->dev_set_bus(dev, 0);
if (err)
- goto lbl_unregister_candev;
+ goto adap_dev_free;
}
/* get device number early */
@@ -873,6 +873,10 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
return 0;
+adap_dev_free:
+ if (dev->adapter->dev_free)
+ dev->adapter->dev_free(dev);
+
lbl_unregister_candev:
unregister_candev(netdev);
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 26d0e3bb5dea..314ae78bbdd6 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -93,8 +93,12 @@
/* GSWIP MII Registers */
#define GSWIP_MII_CFGp(p) (0x2 * (p))
+#define GSWIP_MII_CFG_RESET BIT(15)
#define GSWIP_MII_CFG_EN BIT(14)
+#define GSWIP_MII_CFG_ISOLATE BIT(13)
#define GSWIP_MII_CFG_LDCLKDIS BIT(12)
+#define GSWIP_MII_CFG_RGMII_IBS BIT(8)
+#define GSWIP_MII_CFG_RMII_CLK BIT(7)
#define GSWIP_MII_CFG_MODE_MIIP 0x0
#define GSWIP_MII_CFG_MODE_MIIM 0x1
#define GSWIP_MII_CFG_MODE_RMIIP 0x2
@@ -191,6 +195,23 @@
#define GSWIP_PCE_DEFPVID(p) (0x486 + ((p) * 0xA))
#define GSWIP_MAC_FLEN 0x8C5
+#define GSWIP_MAC_CTRL_0p(p) (0x903 + ((p) * 0xC))
+#define GSWIP_MAC_CTRL_0_PADEN BIT(8)
+#define GSWIP_MAC_CTRL_0_FCS_EN BIT(7)
+#define GSWIP_MAC_CTRL_0_FCON_MASK 0x0070
+#define GSWIP_MAC_CTRL_0_FCON_AUTO 0x0000
+#define GSWIP_MAC_CTRL_0_FCON_RX 0x0010
+#define GSWIP_MAC_CTRL_0_FCON_TX 0x0020
+#define GSWIP_MAC_CTRL_0_FCON_RXTX 0x0030
+#define GSWIP_MAC_CTRL_0_FCON_NONE 0x0040
+#define GSWIP_MAC_CTRL_0_FDUP_MASK 0x000C
+#define GSWIP_MAC_CTRL_0_FDUP_AUTO 0x0000
+#define GSWIP_MAC_CTRL_0_FDUP_EN 0x0004
+#define GSWIP_MAC_CTRL_0_FDUP_DIS 0x000C
+#define GSWIP_MAC_CTRL_0_GMII_MASK 0x0003
+#define GSWIP_MAC_CTRL_0_GMII_AUTO 0x0000
+#define GSWIP_MAC_CTRL_0_GMII_MII 0x0001
+#define GSWIP_MAC_CTRL_0_GMII_RGMII 0x0002
#define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC))
#define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Length */
@@ -655,16 +676,13 @@ static int gswip_port_enable(struct dsa_switch *ds, int port,
GSWIP_SDMA_PCTRLp(port));
if (!dsa_is_cpu_port(ds, port)) {
- u32 macconf = GSWIP_MDIO_PHY_LINK_AUTO |
- GSWIP_MDIO_PHY_SPEED_AUTO |
- GSWIP_MDIO_PHY_FDUP_AUTO |
- GSWIP_MDIO_PHY_FCONTX_AUTO |
- GSWIP_MDIO_PHY_FCONRX_AUTO |
- (phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK);
-
- gswip_mdio_w(priv, macconf, GSWIP_MDIO_PHYp(port));
- /* Activate MDIO auto polling */
- gswip_mdio_mask(priv, 0, BIT(port), GSWIP_MDIO_MDC_CFG0);
+ u32 mdio_phy = 0;
+
+ if (phydev)
+ mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
+
+ gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy,
+ GSWIP_MDIO_PHYp(port));
}
return 0;
@@ -677,14 +695,6 @@ static void gswip_port_disable(struct dsa_switch *ds, int port)
if (!dsa_is_user_port(ds, port))
return;
- if (!dsa_is_cpu_port(ds, port)) {
- gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_DOWN,
- GSWIP_MDIO_PHY_LINK_MASK,
- GSWIP_MDIO_PHYp(port));
- /* Deactivate MDIO auto polling */
- gswip_mdio_mask(priv, BIT(port), 0, GSWIP_MDIO_MDC_CFG0);
- }
-
gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0,
GSWIP_FDMA_PCTRLp(port));
gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
@@ -796,14 +806,32 @@ static int gswip_setup(struct dsa_switch *ds)
gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2);
gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3);
- /* disable PHY auto polling */
+ /* Deactivate MDIO PHY auto polling. Some PHYs, such as the AR8030, have an
+ * interoperability problem with this auto polling mechanism because
+ * their status registers think that the link is in a different state
+ * than it actually is. For the AR8030 it has the BMSR_ESTATEN bit set
+ * as well as ESTATUS_1000_TFULL and ESTATUS_1000_XFULL. This makes the
+ * auto polling state machine consider the link being negotiated with
+ * 1Gbit/s. Since the PHY itself is a Fast Ethernet RMII PHY this leads
+ * to the switch port being completely dead (RX and TX are both not
+ * working).
+ * Also with various other PHY / port combinations (PHY11G GPHY, PHY22F
+ * GPHY, external RGMII PEF7071/7072) any traffic would stop. Sometimes
* it would work fine for a few minutes to hours and then stop; on
* other devices no traffic could be sent or received at all.
+ * Testing shows that when PHY auto polling is disabled these problems
+ * go away.
+ */
gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0);
+
/* Configure the MDIO Clock 2.5 MHz */
gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
- /* Disable the xMII link */
+ /* Disable the xMII interface and clear its isolation bit */
for (i = 0; i < priv->hw_info->max_ports; i++)
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, i);
+ gswip_mii_mask_cfg(priv,
+ GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE,
+ 0, i);
/* enable special tag insertion on cpu port */
gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
@@ -1498,6 +1526,112 @@ unsupported:
phy_modes(state->interface), port);
}
+static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
+{
+ u32 mdio_phy;
+
+ if (link)
+ mdio_phy = GSWIP_MDIO_PHY_LINK_UP;
+ else
+ mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN;
+
+ gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_MASK, mdio_phy,
+ GSWIP_MDIO_PHYp(port));
+}
+
+static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed,
+ phy_interface_t interface)
+{
+ u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0;
+
+ switch (speed) {
+ case SPEED_10:
+ mdio_phy = GSWIP_MDIO_PHY_SPEED_M10;
+
+ if (interface == PHY_INTERFACE_MODE_RMII)
+ mii_cfg = GSWIP_MII_CFG_RATE_M50;
+ else
+ mii_cfg = GSWIP_MII_CFG_RATE_M2P5;
+
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
+ break;
+
+ case SPEED_100:
+ mdio_phy = GSWIP_MDIO_PHY_SPEED_M100;
+
+ if (interface == PHY_INTERFACE_MODE_RMII)
+ mii_cfg = GSWIP_MII_CFG_RATE_M50;
+ else
+ mii_cfg = GSWIP_MII_CFG_RATE_M25;
+
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
+ break;
+
+ case SPEED_1000:
+ mdio_phy = GSWIP_MDIO_PHY_SPEED_G1;
+
+ mii_cfg = GSWIP_MII_CFG_RATE_M125;
+
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII;
+ break;
+ }
+
+ gswip_mdio_mask(priv, GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy,
+ GSWIP_MDIO_PHYp(port));
+ gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port);
+ gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0,
+ GSWIP_MAC_CTRL_0p(port));
+}
+
+static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex)
+{
+ u32 mac_ctrl_0, mdio_phy;
+
+ if (duplex == DUPLEX_FULL) {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN;
+ mdio_phy = GSWIP_MDIO_PHY_FDUP_EN;
+ } else {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS;
+ mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS;
+ }
+
+ gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0,
+ GSWIP_MAC_CTRL_0p(port));
+ gswip_mdio_mask(priv, GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy,
+ GSWIP_MDIO_PHYp(port));
+}
+
+static void gswip_port_set_pause(struct gswip_priv *priv, int port,
+ bool tx_pause, bool rx_pause)
+{
+ u32 mac_ctrl_0, mdio_phy;
+
+ if (tx_pause && rx_pause) {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX;
+ mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
+ GSWIP_MDIO_PHY_FCONRX_EN;
+ } else if (tx_pause) {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX;
+ mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
+ GSWIP_MDIO_PHY_FCONRX_DIS;
+ } else if (rx_pause) {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX;
+ mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
+ GSWIP_MDIO_PHY_FCONRX_EN;
+ } else {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE;
+ mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
+ GSWIP_MDIO_PHY_FCONRX_DIS;
+ }
+
+ gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FCON_MASK,
+ mac_ctrl_0, GSWIP_MAC_CTRL_0p(port));
+ gswip_mdio_mask(priv,
+ GSWIP_MDIO_PHY_FCONTX_MASK |
+ GSWIP_MDIO_PHY_FCONRX_MASK,
+ mdio_phy, GSWIP_MDIO_PHYp(port));
+}
+
static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
unsigned int mode,
const struct phylink_link_state *state)
@@ -1517,6 +1651,9 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
break;
case PHY_INTERFACE_MODE_RMII:
miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
+
+ /* Configure the RMII clock as output: */
+ miicfg |= GSWIP_MII_CFG_RMII_CLK;
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
@@ -1532,7 +1669,11 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
"Unsupported interface: %d\n", state->interface);
return;
}
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_MODE_MASK, miicfg, port);
+
+ gswip_mii_mask_cfg(priv,
+ GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK |
+ GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS,
+ miicfg, port);
switch (state->interface) {
case PHY_INTERFACE_MODE_RGMII_ID:
@@ -1557,6 +1698,9 @@ static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port,
struct gswip_priv *priv = ds->priv;
gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);
+
+ if (!dsa_is_cpu_port(ds, port))
+ gswip_port_set_link(priv, port, false);
}
static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
@@ -1568,6 +1712,13 @@ static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
{
struct gswip_priv *priv = ds->priv;
+ if (!dsa_is_cpu_port(ds, port)) {
+ gswip_port_set_link(priv, port, true);
+ gswip_port_set_speed(priv, port, speed, interface);
+ gswip_port_set_duplex(priv, port, duplex);
+ gswip_port_set_pause(priv, port, tx_pause, rx_pause);
+ }
+
gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
}
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 187b0b9a6e1d..f78daba60b35 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1534,8 +1534,7 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
- ioaddr = pci_resource_start(pdev, 0);
- if (!ioaddr) {
+ if (!pci_resource_len(pdev, 0)) {
if (pcnet32_debug & NETIF_MSG_PROBE)
pr_err("card has no PCI IO resources, aborting\n");
err = -ENODEV;
@@ -1548,6 +1547,8 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
pr_err("architecture does not support 32bit PCI busmaster DMA\n");
goto err_disable_dev;
}
+
+ ioaddr = pci_resource_start(pdev, 0);
if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
if (pcnet32_debug & NETIF_MSG_PROBE)
pr_err("io address range already allocated\n");
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index ba8321ec1ee7..3305979a9f7c 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -180,9 +180,9 @@
#define XGBE_DMA_SYS_AWCR 0x30303030
/* DMA cache settings - PCI device */
-#define XGBE_DMA_PCI_ARCR 0x00000003
-#define XGBE_DMA_PCI_AWCR 0x13131313
-#define XGBE_DMA_PCI_AWARCR 0x00000313
+#define XGBE_DMA_PCI_ARCR 0x000f0f0f
+#define XGBE_DMA_PCI_AWCR 0x0f0f0f0f
+#define XGBE_DMA_PCI_AWARCR 0x00000f0f
/* DMA channel interrupt modes */
#define XGBE_IRQ_MODE_EDGE 0
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index cbfed1d1477b..b7afac5c7ca7 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -181,6 +181,7 @@ static int bcm4908_dma_alloc_buf_descs(struct bcm4908_enet *enet,
err_free_buf_descs:
dma_free_coherent(dev, size, ring->cpu_addr, ring->dma_addr);
+ ring->cpu_addr = NULL;
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index f56f3dbbc015..ffd56a23f8b0 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -3269,6 +3269,9 @@ static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
bool cmp_b = false;
bool cmp_c = false;
+ if (!macb_is_gem(bp))
+ return;
+
tp4sp_v = &(fs->h_u.tcp_ip4_spec);
tp4sp_m = &(fs->m_u.tcp_ip4_spec);
@@ -3637,6 +3640,7 @@ static void macb_restore_features(struct macb *bp)
{
struct net_device *netdev = bp->dev;
netdev_features_t features = netdev->features;
+ struct ethtool_rx_fs_item *item;
/* TX checksum offload */
macb_set_txcsum_feature(bp, features);
@@ -3645,6 +3649,9 @@ static void macb_restore_features(struct macb *bp)
macb_set_rxcsum_feature(bp, features);
/* RX Flow Filters */
+ list_for_each_entry(item, &bp->rx_fs_list.list, list)
+ gem_prog_cmp_regs(bp, &item->fs);
+
macb_set_rxflow_feature(bp, features);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index d2ba40c19696..a7f291c89702 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -1794,11 +1794,25 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
struct cudbg_buffer temp_buff = { 0 };
struct sge_qbase_reg_field *sge_qbase;
struct ireg_buf *ch_sge_dbg;
+ u8 padap_running = 0;
int i, rc;
+ u32 size;
- rc = cudbg_get_buff(pdbg_init, dbg_buff,
- sizeof(*ch_sge_dbg) * 2 + sizeof(*sge_qbase),
- &temp_buff);
+ /* Accessing SGE_QBASE_MAP[0-3] and SGE_QBASE_INDEX regs can
+ * lead to SGE missing doorbells under heavy traffic. So, only
+ * collect them when adapter is idle.
+ */
+ for_each_port(padap, i) {
+ padap_running = netif_running(padap->port[i]);
+ if (padap_running)
+ break;
+ }
+
+ size = sizeof(*ch_sge_dbg) * 2;
+ if (!padap_running)
+ size += sizeof(*sge_qbase);
+
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1820,7 +1834,8 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
ch_sge_dbg++;
}
- if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
+ if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5 &&
+ !padap_running) {
sge_qbase = (struct sge_qbase_reg_field *)ch_sge_dbg;
/* 1 addr reg SGE_QBASE_INDEX and 4 data reg
* SGE_QBASE_MAP[0-3]
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 98829e482bfa..80882cfc370f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2090,7 +2090,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x1190, 0x1194,
0x11a0, 0x11a4,
0x11b0, 0x11b4,
- 0x11fc, 0x1274,
+ 0x11fc, 0x123c,
+ 0x1254, 0x1274,
0x1280, 0x133c,
0x1800, 0x18fc,
0x3000, 0x302c,
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 1cf8ef717453..3ec4d9fddd52 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -363,7 +363,11 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
static int gfar_set_mac_addr(struct net_device *dev, void *p)
{
- eth_mac_addr(dev, p);
+ int ret;
+
+ ret = eth_mac_addr(dev, p);
+ if (ret)
+ return ret;
gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index c446b63be503..1c17fdc780e9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -4118,7 +4118,6 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
* normalcy is to reset.
* 2. A new reset request from the stack due to timeout
*
- * For the first case,error event might not have ae handle available.
* check if this is a new reset request and we are not here just because
* last reset attempt did not succeed and watchdog hit us again. We will
* know this if last reset request did not occur very recently (watchdog
@@ -4128,14 +4127,14 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
* want to make sure we throttle the reset request. Therefore, we will
* not allow it again before 3*HZ times.
*/
- if (!handle)
- handle = &hdev->vport[0].nic;
if (time_before(jiffies, (hdev->last_reset_time +
HCLGE_RESET_INTERVAL))) {
mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
return;
- } else if (hdev->default_reset_request) {
+ }
+
+ if (hdev->default_reset_request) {
hdev->reset_level =
hclge_get_reset_level(ae_dev,
&hdev->default_reset_request);
@@ -11791,7 +11790,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
if (ret)
return ret;
- /* RSS indirection table has been configuared by user */
+ /* RSS indirection table has been configured by user */
if (rxfh_configured)
goto out;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index c7d5c1726499..07aa26ba0966 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2226,7 +2226,7 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
&hdev->reset_state)) {
- /* PF has initmated that it is about to reset the hardware.
+ /* PF has intimated that it is about to reset the hardware.
* We now have to poll & check if hardware has actually
* completed the reset sequence. On hardware reset completion,
* VF needs to reset the client and ae device.
@@ -2656,14 +2656,14 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+
hclgevf_reset_tqp_stats(handle);
hclgevf_request_link_info(hdev);
hclgevf_update_link_mode(hdev);
- clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
-
return 0;
}
@@ -3526,7 +3526,7 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
if (ret)
return ret;
- /* RSS indirection table has been configuared by user */
+ /* RSS indirection table has been configured by user */
if (rxfh_configured)
goto out;
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index cd53981fa5e0..15f93b355099 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -142,6 +142,7 @@ enum i40e_state_t {
__I40E_VIRTCHNL_OP_PENDING,
__I40E_RECOVERY_MODE,
__I40E_VF_RESETS_DISABLED, /* disable resets during i40e_remove */
+ __I40E_VFS_RELEASING,
/* This must be last as it determines the size of the BITMAP */
__I40E_STATE_SIZE__,
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index e8230da29f05..291e61ac3e44 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -578,6 +578,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
case RING_TYPE_XDP:
ring = kmemdup(vsi->xdp_rings[ring_id], sizeof(*ring), GFP_KERNEL);
break;
+ default:
+ ring = NULL;
+ break;
}
if (!ring)
return;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index c4c167650b6b..040a01400b85 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -232,6 +232,8 @@ static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],
I40E_STAT(struct i40e_vsi, _name, _stat)
#define I40E_VEB_STAT(_name, _stat) \
I40E_STAT(struct i40e_veb, _name, _stat)
+#define I40E_VEB_TC_STAT(_name, _stat) \
+ I40E_STAT(struct i40e_cp_veb_tc_stats, _name, _stat)
#define I40E_PFC_STAT(_name, _stat) \
I40E_STAT(struct i40e_pfc_stats, _name, _stat)
#define I40E_QUEUE_STAT(_name, _stat) \
@@ -266,11 +268,18 @@ static const struct i40e_stats i40e_gstrings_veb_stats[] = {
I40E_VEB_STAT("veb.rx_unknown_protocol", stats.rx_unknown_protocol),
};
+struct i40e_cp_veb_tc_stats {
+ u64 tc_rx_packets;
+ u64 tc_rx_bytes;
+ u64 tc_tx_packets;
+ u64 tc_tx_bytes;
+};
+
static const struct i40e_stats i40e_gstrings_veb_tc_stats[] = {
- I40E_VEB_STAT("veb.tc_%u_tx_packets", tc_stats.tc_tx_packets),
- I40E_VEB_STAT("veb.tc_%u_tx_bytes", tc_stats.tc_tx_bytes),
- I40E_VEB_STAT("veb.tc_%u_rx_packets", tc_stats.tc_rx_packets),
- I40E_VEB_STAT("veb.tc_%u_rx_bytes", tc_stats.tc_rx_bytes),
+ I40E_VEB_TC_STAT("veb.tc_%u_tx_packets", tc_tx_packets),
+ I40E_VEB_TC_STAT("veb.tc_%u_tx_bytes", tc_tx_bytes),
+ I40E_VEB_TC_STAT("veb.tc_%u_rx_packets", tc_rx_packets),
+ I40E_VEB_TC_STAT("veb.tc_%u_rx_bytes", tc_rx_bytes),
};
static const struct i40e_stats i40e_gstrings_misc_stats[] = {
@@ -1101,6 +1110,7 @@ static int i40e_get_link_ksettings(struct net_device *netdev,
/* Set flow control settings */
ethtool_link_ksettings_add_link_mode(ks, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(ks, supported, Asym_Pause);
switch (hw->fc.requested_mode) {
case I40E_FC_FULL:
@@ -2217,6 +2227,29 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
}
/**
+ * i40e_get_veb_tc_stats - copy VEB TC statistics to formatted structure
+ * @tc: the TC statistics in VEB structure (veb->tc_stats)
+ * @i: the index of traffic class in (veb->tc_stats) structure to copy
+ *
+ * Copy VEB TC statistics from structure of arrays (veb->tc_stats) to
+ * one dimensional structure i40e_cp_veb_tc_stats.
+ * Produce formatted i40e_cp_veb_tc_stats structure of the VEB TC
+ * statistics for the given TC.
+ **/
+static struct i40e_cp_veb_tc_stats
+i40e_get_veb_tc_stats(struct i40e_veb_tc_stats *tc, unsigned int i)
+{
+ struct i40e_cp_veb_tc_stats veb_tc = {
+ .tc_rx_packets = tc->tc_rx_packets[i],
+ .tc_rx_bytes = tc->tc_rx_bytes[i],
+ .tc_tx_packets = tc->tc_tx_packets[i],
+ .tc_tx_bytes = tc->tc_tx_bytes[i],
+ };
+
+ return veb_tc;
+}
+
+/**
* i40e_get_pfc_stats - copy HW PFC statistics to formatted structure
* @pf: the PF device structure
* @i: the priority value to copy
@@ -2300,8 +2333,16 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
i40e_gstrings_veb_stats);
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
- i40e_add_ethtool_stats(&data, veb_stats ? veb : NULL,
- i40e_gstrings_veb_tc_stats);
+ if (veb_stats) {
+ struct i40e_cp_veb_tc_stats veb_tc =
+ i40e_get_veb_tc_stats(&veb->tc_stats, i);
+
+ i40e_add_ethtool_stats(&data, &veb_tc,
+ i40e_gstrings_veb_tc_stats);
+ } else {
+ i40e_add_ethtool_stats(&data, NULL,
+ i40e_gstrings_veb_tc_stats);
+ }
i40e_add_ethtool_stats(&data, pf, i40e_gstrings_stats);
@@ -5433,7 +5474,7 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
status = i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
- true, addr, offset, &value, NULL);
+ addr, true, offset, &value, NULL);
if (status)
return -EIO;
data[i] = value;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 1555d6009bf5..9502e043a0b7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -2560,8 +2560,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
i40e_stat_str(hw, aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
} else {
- dev_info(&pf->pdev->dev, "%s is %s allmulti mode.\n",
- vsi->netdev->name,
+ dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
cur_multipromisc ? "entering" : "leaving");
}
}
@@ -6738,9 +6737,9 @@ out:
set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
}
- /* registers are set, lets apply */
- if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB)
- ret = i40e_hw_set_dcb_config(pf, new_cfg);
+ /* registers are set, lets apply */
+ if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB)
+ ret = i40e_hw_set_dcb_config(pf, new_cfg);
}
err:
@@ -10573,12 +10572,6 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
goto end_core_reset;
}
- if (!lock_acquired)
- rtnl_lock();
- ret = i40e_setup_pf_switch(pf, reinit);
- if (ret)
- goto end_unlock;
-
#ifdef CONFIG_I40E_DCB
/* Enable FW to write a default DCB config on link-up
* unless I40E_FLAG_TC_MQPRIO was enabled or DCB
@@ -10593,7 +10586,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
i40e_aq_set_dcb_parameters(hw, false, NULL);
dev_warn(&pf->pdev->dev,
"DCB is not supported for X710-T*L 2.5/5G speeds\n");
- pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
} else {
i40e_aq_set_dcb_parameters(hw, true, NULL);
ret = i40e_init_pf_dcb(pf);
@@ -10607,6 +10600,11 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
}
#endif /* CONFIG_I40E_DCB */
+ if (!lock_acquired)
+ rtnl_lock();
+ ret = i40e_setup_pf_switch(pf, reinit);
+ if (ret)
+ goto end_unlock;
/* The driver only wants link up/down and module qualification
* reports from firmware. Note the negative logic.
@@ -15140,12 +15138,16 @@ static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
* in order to register the netdev
*/
v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
- if (v_idx < 0)
+ if (v_idx < 0) {
+ err = v_idx;
goto err_switch_setup;
+ }
pf->lan_vsi = v_idx;
vsi = pf->vsi[v_idx];
- if (!vsi)
+ if (!vsi) {
+ err = -EFAULT;
goto err_switch_setup;
+ }
vsi->alloc_queue_pairs = 1;
err = i40e_config_netdev(vsi);
if (err)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index fc20afc23bfa..121cd99fdeff 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2295,8 +2295,7 @@ int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
* @rx_ring: Rx ring being processed
* @xdp: XDP buffer containing the frame
**/
-static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
- struct xdp_buff *xdp)
+static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
{
int err, result = I40E_XDP_PASS;
struct i40e_ring *xdp_ring;
@@ -2335,7 +2334,7 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
}
xdp_out:
rcu_read_unlock();
- return ERR_PTR(-result);
+ return result;
}
/**
@@ -2448,6 +2447,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
unsigned int xdp_xmit = 0;
bool failure = false;
struct xdp_buff xdp;
+ int xdp_res = 0;
#if (PAGE_SIZE < 8192)
frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
@@ -2513,12 +2513,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);
#endif
- skb = i40e_run_xdp(rx_ring, &xdp);
+ xdp_res = i40e_run_xdp(rx_ring, &xdp);
}
- if (IS_ERR(skb)) {
- unsigned int xdp_res = -PTR_ERR(skb);
-
+ if (xdp_res) {
if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
xdp_xmit |= xdp_res;
i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 1b6ec9be155a..5d301a466f5c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -137,6 +137,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
**/
static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
{
+ struct i40e_pf *pf = vf->pf;
int i;
i40e_vc_notify_vf_reset(vf);
@@ -147,6 +148,11 @@ static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
* ensure a reset.
*/
for (i = 0; i < 20; i++) {
+ /* If the PF is in the VFs releasing state, resetting the VF is
+ * impossible, so leave it.
+ */
+ if (test_bit(__I40E_VFS_RELEASING, pf->state))
+ return;
if (i40e_reset_vf(vf, false))
return;
usleep_range(10000, 20000);
@@ -1574,6 +1580,8 @@ void i40e_free_vfs(struct i40e_pf *pf)
if (!pf->vf)
return;
+
+ set_bit(__I40E_VFS_RELEASING, pf->state);
while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
usleep_range(1000, 2000);
@@ -1631,6 +1639,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
}
}
clear_bit(__I40E_VF_DISABLE, pf->state);
+ clear_bit(__I40E_VFS_RELEASING, pf->state);
}
#ifdef CONFIG_PCI_IOV
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index d89c22347d9d..46d884417c63 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -474,7 +474,7 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, descs, budget);
if (!nb_pkts)
- return false;
+ return true;
if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
nb_processed = xdp_ring->count - xdp_ring->next_to_use;
@@ -491,7 +491,7 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes);
- return true;
+ return nb_pkts < budget;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 721afa0f0a88..07777ac4f098 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -199,7 +199,6 @@ enum ice_pf_state {
__ICE_NEEDS_RESTART,
__ICE_PREPARED_FOR_RESET, /* set by driver when prepared */
__ICE_RESET_OICR_RECV, /* set by driver after rcv reset OICR */
- __ICE_DCBNL_DEVRESET, /* set by dcbnl devreset */
__ICE_PFR_REQ, /* set by driver and peers */
__ICE_CORER_REQ, /* set by driver and peers */
__ICE_GLOBR_REQ, /* set by driver and peers */
@@ -630,7 +629,7 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
const char *ice_stat_str(enum ice_status stat_err);
const char *ice_aq_str(enum ice_aq_err aq_err);
-bool ice_is_wol_supported(struct ice_pf *pf);
+bool ice_is_wol_supported(struct ice_hw *hw);
int
ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
bool is_tun);
@@ -648,6 +647,7 @@ int ice_fdir_create_dflt_rules(struct ice_pf *pf);
int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
struct ice_rq_event_info *event);
int ice_open(struct net_device *netdev);
+int ice_open_internal(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
void ice_service_task_schedule(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index b13a630ea1b7..e93b1e40f627 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -721,8 +721,8 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
if (!data) {
data = devm_kcalloc(ice_hw_to_dev(hw),
- sizeof(*data),
ICE_AQC_FW_LOG_ID_MAX,
+ sizeof(*data),
GFP_KERNEL);
if (!data)
return ICE_ERR_NO_MEMORY;
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index 7d0905f25ddc..77c2307d4fb8 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -31,8 +31,8 @@ enum ice_ctl_q {
ICE_CTL_Q_MAILBOX,
};
-/* Control Queue timeout settings - max delay 250ms */
-#define ICE_CTL_Q_SQ_CMD_TIMEOUT 2500 /* Count 2500 times */
+/* Control Queue timeout settings - max delay 1s */
+#define ICE_CTL_Q_SQ_CMD_TIMEOUT 10000 /* Count 10000 times */
#define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */
#define ICE_CTL_Q_ADMIN_INIT_TIMEOUT 10 /* Count 10 times */
#define ICE_CTL_Q_ADMIN_INIT_MSEC 100 /* Check every 100msec */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 85c9eccfdae8..43c6af42de8a 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -738,22 +738,27 @@ ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
/**
* ice_cee_to_dcb_cfg
* @cee_cfg: pointer to CEE configuration struct
- * @dcbcfg: DCB configuration struct
+ * @pi: port information structure
*
* Convert CEE configuration from firmware to DCB configuration
*/
static void
ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
- struct ice_dcbx_cfg *dcbcfg)
+ struct ice_port_info *pi)
{
u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status);
u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift;
+ u8 i, j, err, sync, oper, app_index, ice_app_sel_type;
u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
- u8 i, err, sync, oper, app_index, ice_app_sel_type;
u16 ice_aqc_cee_app_mask, ice_aqc_cee_app_shift;
+ struct ice_dcbx_cfg *cmp_dcbcfg, *dcbcfg;
u16 ice_app_prot_id_type;
- /* CEE PG data to ETS config */
+ dcbcfg = &pi->qos_cfg.local_dcbx_cfg;
+ dcbcfg->dcbx_mode = ICE_DCBX_MODE_CEE;
+ dcbcfg->tlv_status = tlv_status;
+
+ /* CEE PG data */
dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
/* Note that the FW creates the oper_prio_tc nibbles reversed
@@ -780,10 +785,16 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
}
}
- /* CEE PFC data to ETS config */
+ /* CEE PFC data */
dcbcfg->pfc.pfcena = cee_cfg->oper_pfc_en;
dcbcfg->pfc.pfccap = ICE_MAX_TRAFFIC_CLASS;
+ /* CEE APP TLV data */
+ if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING)
+ cmp_dcbcfg = &pi->qos_cfg.desired_dcbx_cfg;
+ else
+ cmp_dcbcfg = &pi->qos_cfg.remote_dcbx_cfg;
+
app_index = 0;
for (i = 0; i < 3; i++) {
if (i == 0) {
@@ -802,6 +813,18 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_ISCSI_S;
ice_app_sel_type = ICE_APP_SEL_TCPIP;
ice_app_prot_id_type = ICE_APP_PROT_ID_ISCSI;
+
+ for (j = 0; j < cmp_dcbcfg->numapps; j++) {
+ u16 prot_id = cmp_dcbcfg->app[j].prot_id;
+ u8 sel = cmp_dcbcfg->app[j].selector;
+
+ if (sel == ICE_APP_SEL_TCPIP &&
+ (prot_id == ICE_APP_PROT_ID_ISCSI ||
+ prot_id == ICE_APP_PROT_ID_ISCSI_860)) {
+ ice_app_prot_id_type = prot_id;
+ break;
+ }
+ }
} else {
/* FIP APP */
ice_aqc_cee_status_mask = ICE_AQC_CEE_FIP_STATUS_M;
@@ -892,11 +915,8 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL);
if (!ret) {
/* CEE mode */
- dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
- dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_CEE;
- dcbx_cfg->tlv_status = le32_to_cpu(cee_cfg.tlv_status);
- ice_cee_to_dcb_cfg(&cee_cfg, dcbx_cfg);
ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_CEE);
+ ice_cee_to_dcb_cfg(&cee_cfg, pi);
} else if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) {
/* CEE mode not enabled try querying IEEE data */
dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index 468a63f7eff9..4180f1f35fb8 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -18,12 +18,10 @@ static void ice_dcbnl_devreset(struct net_device *netdev)
while (ice_is_reset_in_progress(pf->state))
usleep_range(1000, 2000);
- set_bit(__ICE_DCBNL_DEVRESET, pf->state);
dev_close(netdev);
netdev_state_change(netdev);
dev_open(netdev, NULL);
netdev_state_change(netdev);
- clear_bit(__ICE_DCBNL_DEVRESET, pf->state);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 51d3a929ecfd..a39e890100d9 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -3442,7 +3442,7 @@ static void ice_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
netdev_warn(netdev, "Wake on LAN is not supported on this interface!\n");
/* Get WoL settings based on the HW capability */
- if (ice_is_wol_supported(pf)) {
+ if (ice_is_wol_supported(&pf->hw)) {
wol->supported = WAKE_MAGIC;
wol->wolopts = pf->wol_ena ? WAKE_MAGIC : 0;
} else {
@@ -3462,7 +3462,7 @@ static int ice_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
- if (vsi->type != ICE_VSI_PF || !ice_is_wol_supported(pf))
+ if (vsi->type != ICE_VSI_PF || !ice_is_wol_supported(&pf->hw))
return -EOPNOTSUPP;
/* only magic packet is supported */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 5edc0da8b8c3..16d0ee5b48a5 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2620,7 +2620,7 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
if (!locked)
rtnl_lock();
- err = ice_open(vsi->netdev);
+ err = ice_open_internal(vsi->netdev);
if (!locked)
rtnl_unlock();
@@ -2649,7 +2649,7 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
if (!locked)
rtnl_lock();
- ice_stop(vsi->netdev);
+ ice_vsi_close(vsi);
if (!locked)
rtnl_unlock();
@@ -3152,7 +3152,6 @@ err_vsi:
bool ice_is_reset_in_progress(unsigned long *state)
{
return test_bit(__ICE_RESET_OICR_RECV, state) ||
- test_bit(__ICE_DCBNL_DEVRESET, state) ||
test_bit(__ICE_PFR_REQ, state) ||
test_bit(__ICE_CORER_REQ, state) ||
test_bit(__ICE_GLOBR_REQ, state);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 30935aaa8935..1b2f1e258e5c 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -3493,15 +3493,14 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
}
/**
- * ice_is_wol_supported - get NVM state of WoL
- * @pf: board private structure
+ * ice_is_wol_supported - check if WoL is supported
+ * @hw: pointer to hardware info
*
* Check if WoL is supported based on the HW configuration.
* Returns true if NVM supports and enables WoL for this port, false otherwise
*/
-bool ice_is_wol_supported(struct ice_pf *pf)
+bool ice_is_wol_supported(struct ice_hw *hw)
{
- struct ice_hw *hw = &pf->hw;
u16 wol_ctrl;
/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
@@ -3510,7 +3509,7 @@ bool ice_is_wol_supported(struct ice_pf *pf)
if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
return false;
- return !(BIT(hw->pf_id) & wol_ctrl);
+ return !(BIT(hw->port_info->lport) & wol_ctrl);
}
/**
@@ -4182,28 +4181,25 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
goto err_send_version_unroll;
}
+ /* not a fatal error if this fails */
err = ice_init_nvm_phy_type(pf->hw.port_info);
- if (err) {
+ if (err)
dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
- goto err_send_version_unroll;
- }
+ /* not a fatal error if this fails */
err = ice_update_link_info(pf->hw.port_info);
- if (err) {
+ if (err)
dev_err(dev, "ice_update_link_info failed: %d\n", err);
- goto err_send_version_unroll;
- }
ice_init_link_dflt_override(pf->hw.port_info);
/* if media available, initialize PHY settings */
if (pf->hw.port_info->phy.link_info.link_info &
ICE_AQ_MEDIA_AVAILABLE) {
+ /* not a fatal error if this fails */
err = ice_init_phy_user_cfg(pf->hw.port_info);
- if (err) {
+ if (err)
dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
- goto err_send_version_unroll;
- }
if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
struct ice_vsi *vsi = ice_get_main_vsi(pf);
@@ -4564,6 +4560,7 @@ static int __maybe_unused ice_suspend(struct device *dev)
continue;
ice_vsi_free_q_vectors(pf->vsi[v]);
}
+ ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
ice_clear_interrupt_scheme(pf);
pci_save_state(pdev);
@@ -6660,6 +6657,28 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
int ice_open(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+
+ if (ice_is_reset_in_progress(pf->state)) {
+ netdev_err(netdev, "can't open net device while reset is in progress");
+ return -EBUSY;
+ }
+
+ return ice_open_internal(netdev);
+}
+
+/**
+ * ice_open_internal - Called when a network interface becomes active
+ * @netdev: network interface device structure
+ *
+ * Internal ice_open implementation. Should not be called directly except by
+ * ice_open and the reset handling routine.
+ *
+ * Returns 0 on success, negative value on failure
+ */
+int ice_open_internal(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_port_info *pi;
@@ -6729,6 +6748,12 @@ int ice_stop(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+
+ if (ice_is_reset_in_progress(pf->state)) {
+ netdev_err(netdev, "can't stop net device while reset is in progress");
+ return -EBUSY;
+ }
ice_vsi_close(vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 5e5683a3eb23..357d3073d814 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -1238,6 +1238,9 @@ ice_add_update_vsi_list(struct ice_hw *hw,
ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
vsi_list_id);
+ if (!m_entry->vsi_list_info)
+ return ICE_ERR_NO_MEMORY;
+
/* If this entry was large action then the large action needs
* to be updated to point to FWD to VSI list
*/
@@ -2220,6 +2223,7 @@ ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
fm_entry->fltr_info.vsi_handle == vsi_handle) ||
(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
+ fm_entry->vsi_list_info &&
(test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
}
@@ -2292,14 +2296,12 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
return ICE_ERR_PARAM;
list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
- struct ice_fltr_info *fi;
-
- fi = &fm_entry->fltr_info;
- if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
+ if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
continue;
status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
- vsi_list_head, fi);
+ vsi_list_head,
+ &fm_entry->fltr_info);
if (status)
return status;
}
@@ -2622,7 +2624,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
&remove_list_head);
mutex_unlock(rule_lock);
if (status)
- return;
+ goto free_fltr_list;
switch (lkup) {
case ICE_SW_LKUP_MAC:
@@ -2645,6 +2647,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
break;
}
+free_fltr_list:
list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
list_del(&fm_entry->list_entry);
devm_kfree(ice_hw_to_dev(hw), fm_entry);
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 2efc91b58c9e..7ead1c13f16f 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -553,6 +553,7 @@ struct ice_dcb_app_priority_table {
#define ICE_TLV_STATUS_ERR 0x4
#define ICE_APP_PROT_ID_FCOE 0x8906
#define ICE_APP_PROT_ID_ISCSI 0x0cbc
+#define ICE_APP_PROT_ID_ISCSI_860 0x035c
#define ICE_APP_PROT_ID_FIP 0x8914
#define ICE_APP_SEL_ETHTYPE 0x1
#define ICE_APP_SEL_TCPIP 0x2
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 4def64d0e669..a9166cd85013 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -188,12 +188,12 @@ static bool is_ib_supported(struct mlx5_core_dev *dev)
}
enum {
- MLX5_INTERFACE_PROTOCOL_ETH_REP,
MLX5_INTERFACE_PROTOCOL_ETH,
+ MLX5_INTERFACE_PROTOCOL_ETH_REP,
+ MLX5_INTERFACE_PROTOCOL_IB,
MLX5_INTERFACE_PROTOCOL_IB_REP,
MLX5_INTERFACE_PROTOCOL_MPIB,
- MLX5_INTERFACE_PROTOCOL_IB,
MLX5_INTERFACE_PROTOCOL_VNET,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index b425b4a539bf..e1c51eabe8fe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -517,6 +517,7 @@ struct mlx5e_icosq {
struct mlx5_wq_cyc wq;
void __iomem *uar_map;
u32 sqn;
+ u16 reserved_room;
unsigned long state;
/* control path */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 1c44000ad675..5da5e5323a44 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -188,6 +188,28 @@ mlx5_tc_ct_entry_has_nat(struct mlx5_ct_entry *entry)
}
static int
+mlx5_get_label_mapping(struct mlx5_tc_ct_priv *ct_priv,
+ u32 *labels, u32 *id)
+{
+ if (!memchr_inv(labels, 0, sizeof(u32) * 4)) {
+ *id = 0;
+ return 0;
+ }
+
+ if (mapping_add(ct_priv->labels_mapping, labels, id))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static void
+mlx5_put_label_mapping(struct mlx5_tc_ct_priv *ct_priv, u32 id)
+{
+ if (id)
+ mapping_remove(ct_priv->labels_mapping, id);
+}
+
+static int
mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
{
struct flow_match_control control;
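The new wrappers above reserve mapping id 0 for the all-zero conntrack label, so flows that never set ct_label do not consume entries in the labels mapping, and the release side skips id 0 for the same reason. A small stand-alone sketch of the all-zero short-circuit (the counter and names are stand-ins for mapping_add()/mapping_remove()):

#include <stdint.h>
#include <string.h>

/* Illustrative sketch: id 0 means "no label"; real ids start at 1. */
static int get_label_mapping(const uint32_t labels[4], uint32_t *id)
{
	static const uint32_t zero[4];
	static uint32_t next_id = 1;	/* stand-in for the driver's mapping table */

	if (!memcmp(labels, zero, sizeof(zero))) {
		*id = 0;		/* nothing to map */
		return 0;
	}
	*id = next_id++;		/* real code calls mapping_add() here */
	return 0;
}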
@@ -438,7 +460,7 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
mlx5_tc_rule_delete(netdev_priv(ct_priv->netdev), zone_rule->rule, attr);
mlx5e_mod_hdr_detach(ct_priv->dev,
ct_priv->mod_hdr_tbl, zone_rule->mh);
- mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
+ mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
kfree(attr);
}
@@ -641,8 +663,8 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
if (!meta)
return -EOPNOTSUPP;
- err = mapping_add(ct_priv->labels_mapping, meta->ct_metadata.labels,
- &attr->ct_attr.ct_labels_id);
+ err = mlx5_get_label_mapping(ct_priv, meta->ct_metadata.labels,
+ &attr->ct_attr.ct_labels_id);
if (err)
return -EOPNOTSUPP;
if (nat) {
@@ -679,7 +701,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
err_mapping:
dealloc_mod_hdr_actions(&mod_acts);
- mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
+ mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
return err;
}
@@ -747,7 +769,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
err_rule:
mlx5e_mod_hdr_detach(ct_priv->dev,
ct_priv->mod_hdr_tbl, zone_rule->mh);
- mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
+ mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
err_mod_hdr:
kfree(attr);
err_attr:
@@ -1199,7 +1221,7 @@ void mlx5_tc_ct_match_del(struct mlx5_tc_ct_priv *priv, struct mlx5_ct_attr *ct_
if (!priv || !ct_attr->ct_labels_id)
return;
- mapping_remove(priv->labels_mapping, ct_attr->ct_labels_id);
+ mlx5_put_label_mapping(priv, ct_attr->ct_labels_id);
}
int
@@ -1302,7 +1324,7 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
ct_labels[1] = key->ct_labels[1] & mask->ct_labels[1];
ct_labels[2] = key->ct_labels[2] & mask->ct_labels[2];
ct_labels[3] = key->ct_labels[3] & mask->ct_labels[3];
- if (mapping_add(priv->labels_mapping, ct_labels, &ct_attr->ct_labels_id))
+ if (mlx5_get_label_mapping(priv, ct_labels, &ct_attr->ct_labels_id))
return -EOPNOTSUPP;
mlx5e_tc_match_to_reg_match(spec, LABELS_TO_REG, ct_attr->ct_labels_id,
MLX5_CT_LABELS_MASK);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index 89d5ca91566e..9350ca05ce65 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -21,6 +21,11 @@ enum {
MLX5E_TC_TUNNEL_TYPE_MPLSOUDP,
};
+struct mlx5e_encap_key {
+ const struct ip_tunnel_key *ip_tun_key;
+ struct mlx5e_tc_tunnel *tc_tunnel;
+};
+
struct mlx5e_tc_tunnel {
int tunnel_type;
enum mlx5_flow_match_level match_level;
@@ -44,6 +49,8 @@ struct mlx5e_tc_tunnel {
struct flow_cls_offload *f,
void *headers_c,
void *headers_v);
+ bool (*encap_info_equal)(struct mlx5e_encap_key *a,
+ struct mlx5e_encap_key *b);
};
extern struct mlx5e_tc_tunnel vxlan_tunnel;
@@ -103,6 +110,9 @@ int mlx5e_tc_tun_parse_udp_ports(struct mlx5e_priv *priv,
void *headers_c,
void *headers_v);
+bool mlx5e_tc_tun_encap_info_equal_generic(struct mlx5e_encap_key *a,
+ struct mlx5e_encap_key *b);
+
#endif /* CONFIG_MLX5_ESWITCH */
#endif //__MLX5_EN_TC_TUNNEL_H__
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 01d435e15ad3..593503bc4d07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -477,16 +477,11 @@ void mlx5e_detach_decap(struct mlx5e_priv *priv,
mlx5e_decap_dealloc(priv, d);
}
-struct encap_key {
- const struct ip_tunnel_key *ip_tun_key;
- struct mlx5e_tc_tunnel *tc_tunnel;
-};
-
-static int cmp_encap_info(struct encap_key *a,
- struct encap_key *b)
+bool mlx5e_tc_tun_encap_info_equal_generic(struct mlx5e_encap_key *a,
+ struct mlx5e_encap_key *b)
{
- return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
- a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
+ return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) == 0 &&
+ a->tc_tunnel->tunnel_type == b->tc_tunnel->tunnel_type;
}
static int cmp_decap_info(struct mlx5e_decap_key *a,
@@ -495,7 +490,7 @@ static int cmp_decap_info(struct mlx5e_decap_key *a,
return memcmp(&a->key, &b->key, sizeof(b->key));
}
-static int hash_encap_info(struct encap_key *key)
+static int hash_encap_info(struct mlx5e_encap_key *key)
{
return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
key->tc_tunnel->tunnel_type);
@@ -517,18 +512,18 @@ static bool mlx5e_decap_take(struct mlx5e_decap_entry *e)
}
static struct mlx5e_encap_entry *
-mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
+mlx5e_encap_get(struct mlx5e_priv *priv, struct mlx5e_encap_key *key,
uintptr_t hash_key)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_encap_key e_key;
struct mlx5e_encap_entry *e;
- struct encap_key e_key;
hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
encap_hlist, hash_key) {
e_key.ip_tun_key = &e->tun_info->key;
e_key.tc_tunnel = e->tunnel;
- if (!cmp_encap_info(&e_key, key) &&
+ if (e->tunnel->encap_info_equal(&e_key, key) &&
mlx5e_encap_take(e))
return e;
}
@@ -695,8 +690,8 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr = flow->attr;
const struct ip_tunnel_info *tun_info;
unsigned long tbl_time_before = 0;
- struct encap_key key;
struct mlx5e_encap_entry *e;
+ struct mlx5e_encap_key key;
bool entry_created = false;
unsigned short family;
uintptr_t hash_key;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
index 7ed3f9f79f11..f5b26f5a7de4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
@@ -329,6 +329,34 @@ static int mlx5e_tc_tun_parse_geneve(struct mlx5e_priv *priv,
return mlx5e_tc_tun_parse_geneve_options(priv, spec, f);
}
+static bool mlx5e_tc_tun_encap_info_equal_geneve(struct mlx5e_encap_key *a,
+ struct mlx5e_encap_key *b)
+{
+ struct ip_tunnel_info *a_info;
+ struct ip_tunnel_info *b_info;
+ bool a_has_opts, b_has_opts;
+
+ if (!mlx5e_tc_tun_encap_info_equal_generic(a, b))
+ return false;
+
+ a_has_opts = !!(a->ip_tun_key->tun_flags & TUNNEL_GENEVE_OPT);
+ b_has_opts = !!(b->ip_tun_key->tun_flags & TUNNEL_GENEVE_OPT);
+
+ /* keys are equal when both don't have any options attached */
+ if (!a_has_opts && !b_has_opts)
+ return true;
+
+ if (a_has_opts != b_has_opts)
+ return false;
+
+ /* geneve options stored in memory next to ip_tunnel_info struct */
+ a_info = container_of(a->ip_tun_key, struct ip_tunnel_info, key);
+ b_info = container_of(b->ip_tun_key, struct ip_tunnel_info, key);
+
+ return a_info->options_len == b_info->options_len &&
+ memcmp(a_info + 1, b_info + 1, a_info->options_len) == 0;
+}
+
struct mlx5e_tc_tunnel geneve_tunnel = {
.tunnel_type = MLX5E_TC_TUNNEL_TYPE_GENEVE,
.match_level = MLX5_MATCH_L4,
@@ -338,4 +366,5 @@ struct mlx5e_tc_tunnel geneve_tunnel = {
.generate_ip_tun_hdr = mlx5e_gen_ip_tunnel_header_geneve,
.parse_udp_ports = mlx5e_tc_tun_parse_udp_ports_geneve,
.parse_tunnel = mlx5e_tc_tun_parse_geneve,
+ .encap_info_equal = mlx5e_tc_tun_encap_info_equal_geneve,
};
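The GENEVE comparison first reuses the generic tunnel-key check and then compares the TLV options, which the stack stores immediately behind struct ip_tunnel_info, hence the a_info + 1 pointer arithmetic. A reduced sketch of comparing a fixed header plus trailing variable-length data (struct and names are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct tun_info {
	uint32_t key;
	uint16_t options_len;
	/* options_len bytes of option TLVs follow this struct in memory */
};

/* Illustrative sketch: equal only if the fixed part and the trailing options match. */
static bool tun_info_equal(const struct tun_info *a, const struct tun_info *b)
{
	if (a->key != b->key || a->options_len != b->options_len)
		return false;
	return memcmp(a + 1, b + 1, a->options_len) == 0;
}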
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
index 2805416c32a3..ada14f0574dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
@@ -94,4 +94,5 @@ struct mlx5e_tc_tunnel gre_tunnel = {
.generate_ip_tun_hdr = mlx5e_gen_ip_tunnel_header_gretap,
.parse_udp_ports = NULL,
.parse_tunnel = mlx5e_tc_tun_parse_gretap,
+ .encap_info_equal = mlx5e_tc_tun_encap_info_equal_generic,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
index 3479672e84cf..60952b33b568 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
@@ -131,4 +131,5 @@ struct mlx5e_tc_tunnel mplsoudp_tunnel = {
.generate_ip_tun_hdr = generate_ip_tun_hdr,
.parse_udp_ports = parse_udp_ports,
.parse_tunnel = parse_tunnel,
+ .encap_info_equal = mlx5e_tc_tun_encap_info_equal_generic,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
index 038a0f1cecec..4267f3a1059e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
@@ -150,4 +150,5 @@ struct mlx5e_tc_tunnel vxlan_tunnel = {
.generate_ip_tun_hdr = mlx5e_gen_ip_tunnel_header_vxlan,
.parse_udp_ports = mlx5e_tc_tun_parse_udp_ports_vxlan,
.parse_tunnel = mlx5e_tc_tun_parse_vxlan,
+ .encap_info_equal = mlx5e_tc_tun_encap_info_equal_generic,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 2371b83dad9c..055c3bc23733 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -441,4 +441,10 @@ static inline u16 mlx5e_stop_room_for_wqe(u16 wqe_size)
return wqe_size * 2 - 1;
}
+static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size)
+{
+ u16 room = sq->reserved_room + mlx5e_stop_room_for_wqe(wqe_size);
+
+ return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room);
+}
#endif
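mlx5e_icosq_can_post_wqe() layers a per-SQ reserved_room on top of the usual per-WQE stop room, so the TLS resync work that now shares the ICOSQ cannot starve the slots other users count on. A toy ring-buffer version of the check (illustrative only; the real test is mlx5e_wqc_has_room_for()):

#include <stdbool.h>
#include <stdint.h>

struct ring {
	uint16_t size;		/* total slots */
	uint16_t cc, pc;	/* consumer and producer counters, free-running */
	uint16_t reserved;	/* slots held back for other producers */
};

/* Illustrative sketch: post only if the free space covers the reservation too. */
static bool can_post(const struct ring *r, uint16_t wqe_size)
{
	uint16_t used = (uint16_t)(r->pc - r->cc);
	uint16_t need = (uint16_t)(r->reserved + (2 * wqe_size - 1));	/* stop-room rule above */

	return (uint16_t)(r->size - used) >= need;
}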
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index f7c880edae37..8c0f78c09215 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -46,7 +46,8 @@ struct mlx5e_ktls_offload_context_rx {
struct tls12_crypto_info_aes_gcm_128 crypto_info;
struct accel_rule rule;
struct sock *sk;
- struct mlx5e_rq_stats *stats;
+ struct mlx5e_rq_stats *rq_stats;
+ struct mlx5e_tls_sw_stats *sw_stats;
struct completion add_ctx;
u32 tirn;
u32 key_id;
@@ -137,11 +138,10 @@ post_static_params(struct mlx5e_icosq *sq,
{
struct mlx5e_set_tls_static_params_wqe *wqe;
struct mlx5e_icosq_wqe_info wi;
- u16 pi, num_wqebbs, room;
+ u16 pi, num_wqebbs;
num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS;
- room = mlx5e_stop_room_for_wqe(num_wqebbs);
- if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room)))
+ if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs)))
return ERR_PTR(-ENOSPC);
pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
@@ -168,11 +168,10 @@ post_progress_params(struct mlx5e_icosq *sq,
{
struct mlx5e_set_tls_progress_params_wqe *wqe;
struct mlx5e_icosq_wqe_info wi;
- u16 pi, num_wqebbs, room;
+ u16 pi, num_wqebbs;
num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
- room = mlx5e_stop_room_for_wqe(num_wqebbs);
- if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room)))
+ if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs)))
return ERR_PTR(-ENOSPC);
pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
@@ -218,7 +217,7 @@ unlock:
return err;
err_out:
- priv_rx->stats->tls_resync_req_skip++;
+ priv_rx->rq_stats->tls_resync_req_skip++;
err = PTR_ERR(cseg);
complete(&priv_rx->add_ctx);
goto unlock;
@@ -277,17 +276,15 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
buf->priv_rx = priv_rx;
- BUILD_BUG_ON(MLX5E_KTLS_GET_PROGRESS_WQEBBS != 1);
-
spin_lock_bh(&sq->channel->async_icosq_lock);
- if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
+ if (unlikely(!mlx5e_icosq_can_post_wqe(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS))) {
spin_unlock_bh(&sq->channel->async_icosq_lock);
err = -ENOSPC;
goto err_dma_unmap;
}
- pi = mlx5e_icosq_get_next_pi(sq, 1);
+ pi = mlx5e_icosq_get_next_pi(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS);
wqe = MLX5E_TLS_FETCH_GET_PROGRESS_PARAMS_WQE(sq, pi);
#define GET_PSV_DS_CNT (DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS))
@@ -307,7 +304,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
wi = (struct mlx5e_icosq_wqe_info) {
.wqe_type = MLX5E_ICOSQ_WQE_GET_PSV_TLS,
- .num_wqebbs = 1,
+ .num_wqebbs = MLX5E_KTLS_GET_PROGRESS_WQEBBS,
.tls_get_params.buf = buf,
};
icosq_fill_wi(sq, pi, &wi);
@@ -322,7 +319,7 @@ err_dma_unmap:
err_free:
kfree(buf);
err_out:
- priv_rx->stats->tls_resync_req_skip++;
+ priv_rx->rq_stats->tls_resync_req_skip++;
return err;
}
@@ -378,13 +375,13 @@ static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx
cseg = post_static_params(sq, priv_rx);
if (IS_ERR(cseg)) {
- priv_rx->stats->tls_resync_res_skip++;
+ priv_rx->rq_stats->tls_resync_res_skip++;
err = PTR_ERR(cseg);
goto unlock;
}
/* Do not increment priv_rx refcnt, CQE handling is empty */
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
- priv_rx->stats->tls_resync_res_ok++;
+ priv_rx->rq_stats->tls_resync_res_ok++;
unlock:
spin_unlock_bh(&c->async_icosq_lock);
@@ -420,13 +417,13 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
auth_state = MLX5_GET(tls_progress_params, ctx, auth_state);
if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING ||
auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) {
- priv_rx->stats->tls_resync_req_skip++;
+ priv_rx->rq_stats->tls_resync_req_skip++;
goto out;
}
hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn);
tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
- priv_rx->stats->tls_resync_req_end++;
+ priv_rx->rq_stats->tls_resync_req_end++;
out:
mlx5e_ktls_priv_rx_put(priv_rx);
dma_unmap_single(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
@@ -609,7 +606,8 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
priv_rx->rxq = rxq;
priv_rx->sk = sk;
- priv_rx->stats = &priv->channel_stats[rxq].rq;
+ priv_rx->rq_stats = &priv->channel_stats[rxq].rq;
+ priv_rx->sw_stats = &priv->tls->sw_stats;
mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);
rqtn = priv->direct_tir[rxq].rqt.rqtn;
@@ -630,7 +628,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
if (err)
goto err_post_wqes;
- priv_rx->stats->tls_ctx++;
+ atomic64_inc(&priv_rx->sw_stats->rx_tls_ctx);
return 0;
@@ -666,7 +664,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
if (cancel_work_sync(&resync->work))
mlx5e_ktls_priv_rx_put(priv_rx);
- priv_rx->stats->tls_del++;
+ atomic64_inc(&priv_rx->sw_stats->rx_tls_del);
if (priv_rx->rule.rule)
mlx5e_accel_fs_del_sk(priv_rx->rule.rule);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index d16def68ecff..51bdf71073f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.
+#include "en_accel/tls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"
@@ -50,6 +51,7 @@ static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
struct mlx5e_ktls_offload_context_tx {
struct tls_offload_context_tx *tx_ctx;
struct tls12_crypto_info_aes_gcm_128 crypto_info;
+ struct mlx5e_tls_sw_stats *sw_stats;
u32 expected_seq;
u32 tisn;
u32 key_id;
@@ -99,6 +101,7 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
if (err)
goto err_create_key;
+ priv_tx->sw_stats = &priv->tls->sw_stats;
priv_tx->expected_seq = start_offload_tcp_sn;
priv_tx->crypto_info =
*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
@@ -111,6 +114,7 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
goto err_create_tis;
priv_tx->ctx_post_pending = true;
+ atomic64_inc(&priv_tx->sw_stats->tx_tls_ctx);
return 0;
@@ -452,7 +456,6 @@ bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *s
if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
- stats->tls_ctx++;
}
seq = ntohl(tcp_hdr(skb)->seq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
index bd270a85c804..4c9274d390da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
@@ -41,10 +41,13 @@
#include "en.h"
struct mlx5e_tls_sw_stats {
+ atomic64_t tx_tls_ctx;
atomic64_t tx_tls_drop_metadata;
atomic64_t tx_tls_drop_resync_alloc;
atomic64_t tx_tls_drop_no_sync_data;
atomic64_t tx_tls_drop_bypass_required;
+ atomic64_t rx_tls_ctx;
+ atomic64_t rx_tls_del;
atomic64_t rx_tls_drop_resync_request;
atomic64_t rx_tls_resync_request;
atomic64_t rx_tls_resync_reply;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
index b949b9a7538b..29463bdb7715 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
@@ -45,49 +45,60 @@ static const struct counter_desc mlx5e_tls_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_bypass_required) },
};
+static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_ctx) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_ctx) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_del) },
+};
+
#define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))
-#define NUM_TLS_SW_COUNTERS ARRAY_SIZE(mlx5e_tls_sw_stats_desc)
-
-static bool is_tls_atomic_stats(struct mlx5e_priv *priv)
+static const struct counter_desc *get_tls_atomic_stats(struct mlx5e_priv *priv)
{
- return priv->tls && !mlx5_accel_is_ktls_device(priv->mdev);
+ if (!priv->tls)
+ return NULL;
+ if (mlx5_accel_is_ktls_device(priv->mdev))
+ return mlx5e_ktls_sw_stats_desc;
+ return mlx5e_tls_sw_stats_desc;
}
int mlx5e_tls_get_count(struct mlx5e_priv *priv)
{
- if (!is_tls_atomic_stats(priv))
+ if (!priv->tls)
return 0;
-
- return NUM_TLS_SW_COUNTERS;
+ if (mlx5_accel_is_ktls_device(priv->mdev))
+ return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc);
+ return ARRAY_SIZE(mlx5e_tls_sw_stats_desc);
}
int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
{
- unsigned int i, idx = 0;
+ const struct counter_desc *stats_desc;
+ unsigned int i, n, idx = 0;
- if (!is_tls_atomic_stats(priv))
- return 0;
+ stats_desc = get_tls_atomic_stats(priv);
+ n = mlx5e_tls_get_count(priv);
- for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
+ for (i = 0; i < n; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
- mlx5e_tls_sw_stats_desc[i].format);
+ stats_desc[i].format);
- return NUM_TLS_SW_COUNTERS;
+ return n;
}
int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data)
{
- int i, idx = 0;
+ const struct counter_desc *stats_desc;
+ unsigned int i, n, idx = 0;
- if (!is_tls_atomic_stats(priv))
- return 0;
+ stats_desc = get_tls_atomic_stats(priv);
+ n = mlx5e_tls_get_count(priv);
- for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
+ for (i = 0; i < n; i++)
data[idx++] =
MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,
- mlx5e_tls_sw_stats_desc, i);
+ stats_desc, i);
- return NUM_TLS_SW_COUNTERS;
+ return n;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 964558086ad6..b185a0452629 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -759,11 +759,11 @@ static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
return 0;
}
-static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings *link_ksettings,
- u32 eth_proto_cap,
- u8 connector_type, bool ext)
+static void ptys2ethtool_supported_advertised_port(struct mlx5_core_dev *mdev,
+ struct ethtool_link_ksettings *link_ksettings,
+ u32 eth_proto_cap, u8 connector_type)
{
- if ((!connector_type && !ext) || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) {
+ if (!MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type)) {
if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
| MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
| MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
@@ -899,9 +899,9 @@ static int ptys2connector_type[MLX5E_CONNECTOR_TYPE_NUMBER] = {
[MLX5E_PORT_OTHER] = PORT_OTHER,
};
-static u8 get_connector_port(u32 eth_proto, u8 connector_type, bool ext)
+static u8 get_connector_port(struct mlx5_core_dev *mdev, u32 eth_proto, u8 connector_type)
{
- if ((connector_type || ext) && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER)
+ if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type))
return ptys2connector_type[connector_type];
if (eth_proto &
@@ -1002,11 +1002,11 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
data_rate_oper, link_ksettings);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
-
- link_ksettings->base.port = get_connector_port(eth_proto_oper,
- connector_type, ext);
- ptys2ethtool_supported_advertised_port(link_ksettings, eth_proto_admin,
- connector_type, ext);
+ connector_type = connector_type < MLX5E_CONNECTOR_TYPE_NUMBER ?
+ connector_type : MLX5E_PORT_UNKNOWN;
+ link_ksettings->base.port = get_connector_port(mdev, eth_proto_oper, connector_type);
+ ptys2ethtool_supported_advertised_port(mdev, link_ksettings, eth_proto_admin,
+ connector_type);
get_lp_advertising(mdev, eth_proto_lp, link_ksettings);
if (an_status == MLX5_AN_COMPLETE)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 773449c1424b..2f47608bb9b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1041,6 +1041,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
sq->channel = c;
sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+ sq->reserved_room = param->stop_room;
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 58a2c1abb195..e58ef8c713e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -972,8 +972,9 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
mlx5e_rep_tc_enable(priv);
- mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
- 0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
+ if (MLX5_CAP_GEN(mdev, uplink_follow))
+ mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
+ 0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
mlx5_lag_add(mdev, netdev);
priv->events_nb.notifier_call = uplink_rep_async_event;
mlx5_notifier_register(mdev, &priv->events_nb);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index f67e51d8291a..ae0570ea08bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -116,7 +116,6 @@ static const struct counter_desc sw_stats_desc[] = {
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
@@ -180,8 +179,6 @@ static const struct counter_desc sw_stats_desc[] = {
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_ctx) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_del) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
@@ -342,8 +339,6 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
#ifdef CONFIG_MLX5_EN_TLS
s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
- s->rx_tls_ctx += rq_stats->tls_ctx;
- s->rx_tls_del += rq_stats->tls_del;
s->rx_tls_resync_req_pkt += rq_stats->tls_resync_req_pkt;
s->rx_tls_resync_req_start += rq_stats->tls_resync_req_start;
s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end;
@@ -390,7 +385,6 @@ static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
#ifdef CONFIG_MLX5_EN_TLS
s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes;
- s->tx_tls_ctx += sq_stats->tls_ctx;
s->tx_tls_ooo += sq_stats->tls_ooo;
s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
@@ -1630,8 +1624,6 @@ static const struct counter_desc rq_stats_desc[] = {
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_ctx) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_del) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
@@ -1658,7 +1650,6 @@ static const struct counter_desc sq_stats_desc[] = {
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
@@ -1816,7 +1807,6 @@ static const struct counter_desc qos_sq_stats_desc[] = {
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
- { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index ca398eac09c1..21d3b8747f93 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -192,7 +192,6 @@ struct mlx5e_sw_stats {
#ifdef CONFIG_MLX5_EN_TLS
u64 tx_tls_encrypted_packets;
u64 tx_tls_encrypted_bytes;
- u64 tx_tls_ctx;
u64 tx_tls_ooo;
u64 tx_tls_dump_packets;
u64 tx_tls_dump_bytes;
@@ -203,8 +202,6 @@ struct mlx5e_sw_stats {
u64 rx_tls_decrypted_packets;
u64 rx_tls_decrypted_bytes;
- u64 rx_tls_ctx;
- u64 rx_tls_del;
u64 rx_tls_resync_req_pkt;
u64 rx_tls_resync_req_start;
u64 rx_tls_resync_req_end;
@@ -335,8 +332,6 @@ struct mlx5e_rq_stats {
#ifdef CONFIG_MLX5_EN_TLS
u64 tls_decrypted_packets;
u64 tls_decrypted_bytes;
- u64 tls_ctx;
- u64 tls_del;
u64 tls_resync_req_pkt;
u64 tls_resync_req_start;
u64 tls_resync_req_end;
@@ -365,7 +360,6 @@ struct mlx5e_sq_stats {
#ifdef CONFIG_MLX5_EN_TLS
u64 tls_encrypted_packets;
u64 tls_encrypted_bytes;
- u64 tls_ctx;
u64 tls_ooo;
u64 tls_dump_packets;
u64 tls_dump_bytes;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 4e8381030d77..77c0ca655975 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -936,13 +936,24 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
mutex_unlock(&table->lock);
}
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+#define MLX5_MAX_ASYNC_EQS 4
+#else
+#define MLX5_MAX_ASYNC_EQS 3
+#endif
+
int mlx5_eq_table_create(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *eq_table = dev->priv.eq_table;
+ int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
+ MLX5_CAP_GEN(dev, max_num_eqs) :
+ 1 << MLX5_CAP_GEN(dev, log_max_eq);
int err;
eq_table->num_comp_eqs =
- mlx5_irq_get_num_comp(eq_table->irq_table);
+ min_t(int,
+ mlx5_irq_get_num_comp(eq_table->irq_table),
+ num_eqs - MLX5_MAX_ASYNC_EQS);
err = create_async_eqs(dev);
if (err) {
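The completion-EQ count is now clamped so that, together with the asynchronous EQs (four with on-demand paging, three otherwise), it can never exceed the EQ budget the device advertises through max_num_eqs or log_max_eq. A back-of-the-envelope sketch of that clamp with assumed numbers:

#include <stdio.h>

#define MAX_ASYNC_EQS 4			/* assuming the ODP-enabled case */

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int device_max_eqs = 1 << 6;	/* pretend log_max_eq = 6, i.e. 64 EQs total */
	int irq_comp_vectors = 63;	/* pretend one completion vector per CPU */
	int comp_eqs = min_int(irq_comp_vectors, device_max_eqs - MAX_ASYNC_EQS);

	printf("completion EQs: %d\n", comp_eqs);	/* prints 60 */
	return 0;
}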
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 8b82f44bd6a7..ab32f685cbb7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -400,6 +400,14 @@ esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *
return i;
}
+static bool
+esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
+{
+ return MLX5_CAP_GEN(esw->dev, reg_c_preserve) &&
+ mlx5_eswitch_vport_match_metadata_enabled(esw) &&
+ MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level);
+}
+
static int
esw_setup_dests(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
@@ -413,9 +421,7 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
int err = 0;
if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
- MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve) &&
- mlx5_eswitch_vport_match_metadata_enabled(esw) &&
- MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
+ esw_src_port_rewrite_supported(esw))
attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE;
if (attr->flags & MLX5_ESW_ATTR_FLAG_SAMPLE) {
@@ -1636,36 +1642,40 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
}
esw->fdb_table.offloads.send_to_vport_grp = g;
- /* meta send to vport */
- memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
- MLX5_MATCH_MISC_PARAMETERS_2);
-
- match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+ if (esw_src_port_rewrite_supported(esw)) {
+ /* meta send to vport */
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS_2);
- MLX5_SET(fte_match_param, match_criteria,
- misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
- MLX5_SET(fte_match_param, match_criteria,
- misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
- num_vfs = esw->esw_funcs.num_vfs;
- if (num_vfs) {
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + num_vfs - 1);
- ix += num_vfs;
+ MLX5_SET(fte_match_param, match_criteria,
+ misc_parameters_2.metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
+ MLX5_SET(fte_match_param, match_criteria,
+ misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
- g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n",
- err);
- goto send_vport_meta_err;
+ num_vfs = esw->esw_funcs.num_vfs;
+ if (num_vfs) {
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ end_flow_index, ix + num_vfs - 1);
+ ix += num_vfs;
+
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n",
+ err);
+ goto send_vport_meta_err;
+ }
+ esw->fdb_table.offloads.send_to_vport_meta_grp = g;
+
+ err = mlx5_eswitch_add_send_to_vport_meta_rules(esw);
+ if (err)
+ goto meta_rule_err;
}
- esw->fdb_table.offloads.send_to_vport_meta_grp = g;
-
- err = mlx5_eswitch_add_send_to_vport_meta_rules(esw);
- if (err)
- goto meta_rule_err;
}
if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 97d074d7b78d..f99db88ee884 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -22,6 +22,7 @@
#include <net/red.h>
#include <net/vxlan.h>
#include <net/flow_offload.h>
+#include <net/inet_ecn.h>
#include "port.h"
#include "core.h"
@@ -367,6 +368,20 @@ struct mlxsw_sp_port_type_speed_ops {
u32 (*ptys_proto_cap_masked_get)(u32 eth_proto_cap);
};
+static inline u8 mlxsw_sp_tunnel_ecn_decap(u8 outer_ecn, u8 inner_ecn,
+ bool *trap_en)
+{
+ bool set_ce = false;
+
+ *trap_en = !!__INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
+ if (set_ce)
+ return INET_ECN_CE;
+ else if (outer_ecn == INET_ECN_ECT_1 && inner_ecn == INET_ECN_ECT_0)
+ return INET_ECN_ECT_1;
+ else
+ return inner_ecn;
+}
+
static inline struct net_device *
mlxsw_sp_bridge_vxlan_dev_find(struct net_device *br_dev)
{
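mlxsw_sp_tunnel_ecn_decap() centralizes the RFC 6040 decapsulation rule both tunnel flavours now share: a congestion-marked outer header propagates CE to an ECN-capable inner header, combinations the inner Not-ECT packet cannot absorb are flagged for trapping to the CPU, and ECT(1) outer over ECT(0) inner yields ECT(1). A compact user-space sketch of that decision (codepoint values and names are illustrative; the kernel helper defers to __INET_ECN_decapsulate()):

#include <stdbool.h>
#include <stdint.h>

enum { NOT_ECT = 0, ECT_1 = 1, ECT_0 = 2, CE = 3 };	/* RFC 3168 codepoints */

/* Illustrative sketch: returns the rewritten inner codepoint; *trap asks the
 * caller to punt the packet when hardware decap would lose information. */
static uint8_t ecn_decap(uint8_t outer, uint8_t inner, bool *trap)
{
	*trap = (inner == NOT_ECT) && (outer != NOT_ECT);
	if (inner != NOT_ECT && outer == CE)
		return CE;
	if (outer == ECT_1 && inner == ECT_0)
		return ECT_1;
	return inner;
}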
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 0bd64169bf81..078601d31cde 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -1230,16 +1230,22 @@ mlxsw_sp1_from_ptys_link_mode(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
u32 ptys_eth_proto,
struct ethtool_link_ksettings *cmd)
{
+ struct mlxsw_sp1_port_link_mode link;
int i;
- cmd->link_mode = -1;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->lanes = 0;
if (!carrier_ok)
return;
for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
- if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
- cmd->link_mode = mlxsw_sp1_port_link_mode[i].mask_ethtool;
+ if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) {
+ link = mlxsw_sp1_port_link_mode[i];
+ ethtool_params_from_link_mode(cmd,
+ link.mask_ethtool);
+ }
}
}
@@ -1672,7 +1678,9 @@ mlxsw_sp2_from_ptys_link_mode(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
struct mlxsw_sp2_port_link_mode link;
int i;
- cmd->link_mode = -1;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->lanes = 0;
if (!carrier_ok)
return;
@@ -1680,7 +1688,8 @@ mlxsw_sp2_from_ptys_link_mode(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) {
link = mlxsw_sp2_port_link_mode[i];
- cmd->link_mode = link.mask_ethtool[1];
+ ethtool_params_from_link_mode(cmd,
+ link.mask_ethtool[1]);
}
}
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index b8b08a6a1d10..5facabd86882 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -337,12 +337,11 @@ static int mlxsw_sp_ipip_ecn_decap_init_one(struct mlxsw_sp *mlxsw_sp,
u8 inner_ecn, u8 outer_ecn)
{
char tidem_pl[MLXSW_REG_TIDEM_LEN];
- bool trap_en, set_ce = false;
u8 new_inner_ecn;
+ bool trap_en;
- trap_en = __INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
- new_inner_ecn = set_ce ? INET_ECN_CE : inner_ecn;
-
+ new_inner_ecn = mlxsw_sp_tunnel_ecn_decap(outer_ecn, inner_ecn,
+ &trap_en);
mlxsw_reg_tidem_pack(tidem_pl, outer_ecn, inner_ecn, new_inner_ecn,
trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tidem), tidem_pl);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
index e5ec595593f4..9eba8fa684ae 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -909,12 +909,11 @@ static int __mlxsw_sp_nve_ecn_decap_init(struct mlxsw_sp *mlxsw_sp,
u8 inner_ecn, u8 outer_ecn)
{
char tndem_pl[MLXSW_REG_TNDEM_LEN];
- bool trap_en, set_ce = false;
u8 new_inner_ecn;
+ bool trap_en;
- trap_en = !!__INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
- new_inner_ecn = set_ce ? INET_ECN_CE : inner_ecn;
-
+ new_inner_ecn = mlxsw_sp_tunnel_ecn_decap(outer_ecn, inner_ecn,
+ &trap_en);
mlxsw_reg_tndem_pack(tndem_pl, outer_ecn, inner_ecn, new_inner_ecn,
trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tndem), tndem_pl);
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index e7ab5f3f73fd..11a1dc4c436d 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -885,8 +885,8 @@ static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu)
}
mac_rx &= ~(MAC_RX_MAX_SIZE_MASK_);
- mac_rx |= (((new_mtu + ETH_HLEN + 4) << MAC_RX_MAX_SIZE_SHIFT_) &
- MAC_RX_MAX_SIZE_MASK_);
+ mac_rx |= (((new_mtu + ETH_HLEN + ETH_FCS_LEN)
+ << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
lan743x_csr_write(adapter, MAC_RX, mac_rx);
if (enabled) {
@@ -1944,7 +1944,7 @@ static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index)
struct sk_buff *skb;
dma_addr_t dma_ptr;
- buffer_length = netdev->mtu + ETH_HLEN + 4 + RX_HEAD_PADDING;
+ buffer_length = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + RX_HEAD_PADDING;
descriptor = &rx->ring_cpu_ptr[index];
buffer_info = &rx->buffer_info[index];
@@ -2040,7 +2040,7 @@ lan743x_rx_trim_skb(struct sk_buff *skb, int frame_length)
dev_kfree_skb_irq(skb);
return NULL;
}
- frame_length = max_t(int, 0, frame_length - RX_HEAD_PADDING - 4);
+ frame_length = max_t(int, 0, frame_length - ETH_FCS_LEN);
if (skb->len > frame_length) {
skb->tail -= skb->len - frame_length;
skb->len = frame_length;
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 1634ca6d4a8f..c84c8bf2bc20 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -2897,7 +2897,7 @@ static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
dev_kfree_skb_any(curr);
if (segs != NULL) {
curr = segs;
- segs = segs->next;
+ segs = next;
curr->next = NULL;
dev_kfree_skb_any(segs);
}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
index 0e2db6ea79e9..2ec62c8d86e1 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -454,6 +454,7 @@ void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
dev_consume_skb_any(skb);
else
dev_kfree_skb_any(skb);
+ return;
}
nfp_ccm_rx(&bpf->ccm, skb);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index e13e26e72ca0..31377923ea3d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -192,6 +192,7 @@ struct nfp_fl_internal_ports {
* @qos_rate_limiters: Current active qos rate limiters
* @qos_stats_lock: Lock on qos stats updates
* @pre_tun_rule_cnt: Number of pre-tunnel rules offloaded
+ * @merge_table: Hash table to store merged flows
*/
struct nfp_flower_priv {
struct nfp_app *app;
@@ -225,6 +226,7 @@ struct nfp_flower_priv {
unsigned int qos_rate_limiters;
spinlock_t qos_stats_lock; /* Protect the qos stats */
int pre_tun_rule_cnt;
+ struct rhashtable merge_table;
};
/**
@@ -352,6 +354,12 @@ struct nfp_fl_payload_link {
};
extern const struct rhashtable_params nfp_flower_table_params;
+extern const struct rhashtable_params merge_table_params;
+
+struct nfp_merge_info {
+ u64 parent_ctx;
+ struct rhash_head ht_node;
+};
struct nfp_fl_stats_frame {
__be32 stats_con_id;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index aa06fcb38f8b..327bb56b3ef5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -490,6 +490,12 @@ const struct rhashtable_params nfp_flower_table_params = {
.automatic_shrinking = true,
};
+const struct rhashtable_params merge_table_params = {
+ .key_offset = offsetof(struct nfp_merge_info, parent_ctx),
+ .head_offset = offsetof(struct nfp_merge_info, ht_node),
+ .key_len = sizeof(u64),
+};
+
int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
unsigned int host_num_mems)
{
@@ -506,6 +512,10 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
if (err)
goto err_free_flow_table;
+ err = rhashtable_init(&priv->merge_table, &merge_table_params);
+ if (err)
+ goto err_free_stats_ctx_table;
+
get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));
/* Init ring buffer and unallocated mask_ids. */
@@ -513,7 +523,7 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
if (!priv->mask_ids.mask_id_free_list.buf)
- goto err_free_stats_ctx_table;
+ goto err_free_merge_table;
priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
@@ -550,6 +560,8 @@ err_free_last_used:
kfree(priv->mask_ids.last_used);
err_free_mask_id:
kfree(priv->mask_ids.mask_id_free_list.buf);
+err_free_merge_table:
+ rhashtable_destroy(&priv->merge_table);
err_free_stats_ctx_table:
rhashtable_destroy(&priv->stats_ctx_table);
err_free_flow_table:
@@ -568,6 +580,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app)
nfp_check_rhashtable_empty, NULL);
rhashtable_free_and_destroy(&priv->stats_ctx_table,
nfp_check_rhashtable_empty, NULL);
+ rhashtable_free_and_destroy(&priv->merge_table,
+ nfp_check_rhashtable_empty, NULL);
kvfree(priv->stats);
kfree(priv->mask_ids.mask_id_free_list.buf);
kfree(priv->mask_ids.last_used);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index d72225d64a75..e95969c462e4 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1009,6 +1009,8 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
struct netlink_ext_ack *extack = NULL;
struct nfp_fl_payload *merge_flow;
struct nfp_fl_key_ls merge_key_ls;
+ struct nfp_merge_info *merge_info;
+ u64 parent_ctx = 0;
int err;
ASSERT_RTNL();
@@ -1019,6 +1021,15 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
nfp_flower_is_merge_flow(sub_flow2))
return -EINVAL;
+ /* check if the two flows are already merged */
+ parent_ctx = (u64)(be32_to_cpu(sub_flow1->meta.host_ctx_id)) << 32;
+ parent_ctx |= (u64)(be32_to_cpu(sub_flow2->meta.host_ctx_id));
+ if (rhashtable_lookup_fast(&priv->merge_table,
+ &parent_ctx, merge_table_params)) {
+ nfp_flower_cmsg_warn(app, "The two flows are already merged.\n");
+ return 0;
+ }
+
err = nfp_flower_can_merge(sub_flow1, sub_flow2);
if (err)
return err;
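The merge table keys each merged pair by one 64-bit value built from the two sub-flows' host context ids, first flow in the upper half and second flow in the lower half, so an already-merged pair is detected with a single rhashtable lookup. A stand-alone sketch of building that key (the function name is made up):

#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch: pack two 32-bit flow context ids into one lookup key. */
static uint64_t merge_key(uint32_t ctx_id1, uint32_t ctx_id2)
{
	return ((uint64_t)ctx_id1 << 32) | (uint64_t)ctx_id2;
}

int main(void)
{
	printf("key = %#llx\n", (unsigned long long)merge_key(0x10, 0x20));	/* 0x1000000020 */
	return 0;
}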
@@ -1060,16 +1071,33 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
if (err)
goto err_release_metadata;
+ merge_info = kmalloc(sizeof(*merge_info), GFP_KERNEL);
+ if (!merge_info) {
+ err = -ENOMEM;
+ goto err_remove_rhash;
+ }
+ merge_info->parent_ctx = parent_ctx;
+ err = rhashtable_insert_fast(&priv->merge_table, &merge_info->ht_node,
+ merge_table_params);
+ if (err)
+ goto err_destroy_merge_info;
+
err = nfp_flower_xmit_flow(app, merge_flow,
NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
if (err)
- goto err_remove_rhash;
+ goto err_remove_merge_info;
merge_flow->in_hw = true;
sub_flow1->in_hw = false;
return 0;
+err_remove_merge_info:
+ WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
+ &merge_info->ht_node,
+ merge_table_params));
+err_destroy_merge_info:
+ kfree(merge_info);
err_remove_rhash:
WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
&merge_flow->fl_node,
@@ -1359,7 +1387,9 @@ nfp_flower_remove_merge_flow(struct nfp_app *app,
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload_link *link, *temp;
+ struct nfp_merge_info *merge_info;
struct nfp_fl_payload *origin;
+ u64 parent_ctx = 0;
bool mod = false;
int err;
@@ -1396,8 +1426,22 @@ nfp_flower_remove_merge_flow(struct nfp_app *app,
err_free_links:
/* Clean any links connected with the merged flow. */
list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
- merge_flow.list)
+ merge_flow.list) {
+ u32 ctx_id = be32_to_cpu(link->sub_flow.flow->meta.host_ctx_id);
+
+ parent_ctx = (parent_ctx << 32) | (u64)(ctx_id);
nfp_flower_unlink_flow(link);
+ }
+
+ merge_info = rhashtable_lookup_fast(&priv->merge_table,
+ &parent_ctx,
+ merge_table_params);
+ if (merge_info) {
+ WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
+ &merge_info->ht_node,
+ merge_table_params));
+ kfree(merge_info);
+ }
kfree(merge_flow->action_data);
kfree(merge_flow->mask_data);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 708769349f76..5b4d153b1492 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -508,6 +508,18 @@ static inline u32 axinet_ior_read_mcr(struct axienet_local *lp)
return axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
}
+static inline void axienet_lock_mii(struct axienet_local *lp)
+{
+ if (lp->mii_bus)
+ mutex_lock(&lp->mii_bus->mdio_lock);
+}
+
+static inline void axienet_unlock_mii(struct axienet_local *lp)
+{
+ if (lp->mii_bus)
+ mutex_unlock(&lp->mii_bus->mdio_lock);
+}
+
/**
* axienet_iow - Memory mapped Axi Ethernet register write
* @lp: Pointer to axienet local structure
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 92cf9051d557..feb1aa4ec927 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1053,9 +1053,9 @@ static int axienet_open(struct net_device *ndev)
* including the MDIO. MDIO must be disabled before resetting.
* Hold MDIO bus lock to avoid MDIO accesses during the reset.
*/
- mutex_lock(&lp->mii_bus->mdio_lock);
+ axienet_lock_mii(lp);
ret = axienet_device_reset(ndev);
- mutex_unlock(&lp->mii_bus->mdio_lock);
+ axienet_unlock_mii(lp);
ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
if (ret) {
@@ -1148,9 +1148,9 @@ static int axienet_stop(struct net_device *ndev)
}
/* Do a reset to ensure DMA is really stopped */
- mutex_lock(&lp->mii_bus->mdio_lock);
+ axienet_lock_mii(lp);
__axienet_device_reset(lp);
- mutex_unlock(&lp->mii_bus->mdio_lock);
+ axienet_unlock_mii(lp);
cancel_work_sync(&lp->dma_err_task);
@@ -1709,9 +1709,9 @@ static void axienet_dma_err_handler(struct work_struct *work)
* including the MDIO. MDIO must be disabled before resetting.
* Hold MDIO bus lock to avoid MDIO accesses during the reset.
*/
- mutex_lock(&lp->mii_bus->mdio_lock);
+ axienet_lock_mii(lp);
__axienet_device_reset(lp);
- mutex_unlock(&lp->mii_bus->mdio_lock);
+ axienet_unlock_mii(lp);
for (i = 0; i < lp->tx_bd_num; i++) {
cur_p = &lp->tx_bd_v[i];
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 5d7a2b1469f4..e3b2375ac5eb 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -909,8 +909,16 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
info = skb_tunnel_info(skb);
if (info) {
- info->key.u.ipv4.dst = fl4.saddr;
- info->key.u.ipv4.src = fl4.daddr;
+ struct ip_tunnel_info *unclone;
+
+ unclone = skb_tunnel_info_unclone(skb);
+ if (unlikely(!unclone)) {
+ dst_release(&rt->dst);
+ return -ENOMEM;
+ }
+
+ unclone->key.u.ipv4.dst = fl4.saddr;
+ unclone->key.u.ipv4.src = fl4.daddr;
}
if (!pskb_may_pull(skb, ETH_HLEN)) {
@@ -994,8 +1002,16 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct ip_tunnel_info *info = skb_tunnel_info(skb);
if (info) {
- info->key.u.ipv6.dst = fl6.saddr;
- info->key.u.ipv6.src = fl6.daddr;
+ struct ip_tunnel_info *unclone;
+
+ unclone = skb_tunnel_info_unclone(skb);
+ if (unlikely(!unclone)) {
+ dst_release(dst);
+ return -ENOMEM;
+ }
+
+ unclone->key.u.ipv6.dst = fl6.saddr;
+ unclone->key.u.ipv6.src = fl6.daddr;
}
if (!pskb_may_pull(skb, ETH_HLEN)) {
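Writing the resolved addresses back into the tunnel metadata now goes through skb_tunnel_info_unclone(), because the dst metadata attached to the skb can be shared with other packets; mutating it in place would leak the rewrite into them. A minimal copy-on-write sketch of the idea (plain malloc-based, not the skb API):

#include <stdlib.h>
#include <string.h>

struct meta {
	int refcnt;
	unsigned int saddr, daddr;
};

/* Illustrative sketch: clone shared metadata before the first in-place write. */
static struct meta *meta_unclone(struct meta **slot)
{
	struct meta *m = *slot;
	struct meta *copy;

	if (m->refcnt == 1)
		return m;		/* sole owner, safe to modify */

	copy = malloc(sizeof(*copy));
	if (!copy)
		return NULL;		/* caller drops the packet, as geneve does above */
	memcpy(copy, m, sizeof(*copy));
	copy->refcnt = 1;
	m->refcnt--;
	*slot = copy;
	return copy;
}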
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
index 0dd0ba915ab9..23ee0b14cbfa 100644
--- a/drivers/net/ieee802154/atusb.c
+++ b/drivers/net/ieee802154/atusb.c
@@ -365,6 +365,7 @@ static int atusb_alloc_urbs(struct atusb *atusb, int n)
return -ENOMEM;
}
usb_anchor_urb(urb, &atusb->idle_urbs);
+ usb_free_urb(urb);
n--;
}
return 0;
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index 53282a6d5928..287cccf8f7f4 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -369,7 +369,7 @@ EXPORT_SYMBOL_GPL(bcm_phy_enable_apd);
int bcm_phy_set_eee(struct phy_device *phydev, bool enable)
{
- int val;
+ int val, mask = 0;
/* Enable EEE at PHY level */
val = phy_read_mmd(phydev, MDIO_MMD_AN, BRCM_CL45VEN_EEE_CONTROL);
@@ -388,10 +388,17 @@ int bcm_phy_set_eee(struct phy_device *phydev, bool enable)
if (val < 0)
return val;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->supported))
+ mask |= MDIO_EEE_1000T;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ phydev->supported))
+ mask |= MDIO_EEE_100TX;
+
if (enable)
- val |= (MDIO_EEE_100TX | MDIO_EEE_1000T);
+ val |= mask;
else
- val &= ~(MDIO_EEE_100TX | MDIO_EEE_1000T);
+ val &= ~mask;
phy_write_mmd(phydev, MDIO_MMD_AN, BCM_CL45VEN_EEE_ADV, (u32)val);
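The EEE advertisement register is now masked by the link modes the PHY actually supports, so a PHY without 1000BASE-T no longer advertises 1000BASE-T EEE. A reduced sketch of deriving the advertisement mask from the supported modes (bit values mirror the MDIO_EEE_* definitions but are only illustrative here):

#include <stdbool.h>
#include <stdint.h>

#define EEE_100TX 0x0002	/* cf. MDIO_EEE_100TX */
#define EEE_1000T 0x0004	/* cf. MDIO_EEE_1000T */

/* Illustrative sketch: only set or clear the bits the PHY can actually do. */
static uint16_t eee_adv(uint16_t cur, bool sup_100_full, bool sup_1000_full, bool enable)
{
	uint16_t mask = 0;

	if (sup_100_full)
		mask |= EEE_100TX;
	if (sup_1000_full)
		mask |= EEE_1000T;

	return enable ? (uint16_t)(cur | mask) : (uint16_t)(cur & ~mask);
}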
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 6e55697315de..36443d506b67 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -69,6 +69,14 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mutex.h>
+#include <linux/ieee802154.h>
+#include <linux/if_ltalk.h>
+#include <uapi/linux/if_fddi.h>
+#include <uapi/linux/if_hippi.h>
+#include <uapi/linux/if_fc.h>
+#include <net/ax25.h>
+#include <net/rose.h>
+#include <net/6lowpan.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
@@ -2922,6 +2930,45 @@ static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p,
return __tun_set_ebpf(tun, prog_p, prog);
}
+/* Return correct value for tun->dev->addr_len based on tun->dev->type. */
+static unsigned char tun_get_addr_len(unsigned short type)
+{
+ switch (type) {
+ case ARPHRD_IP6GRE:
+ case ARPHRD_TUNNEL6:
+ return sizeof(struct in6_addr);
+ case ARPHRD_IPGRE:
+ case ARPHRD_TUNNEL:
+ case ARPHRD_SIT:
+ return 4;
+ case ARPHRD_ETHER:
+ return ETH_ALEN;
+ case ARPHRD_IEEE802154:
+ case ARPHRD_IEEE802154_MONITOR:
+ return IEEE802154_EXTENDED_ADDR_LEN;
+ case ARPHRD_PHONET_PIPE:
+ case ARPHRD_PPP:
+ case ARPHRD_NONE:
+ return 0;
+ case ARPHRD_6LOWPAN:
+ return EUI64_ADDR_LEN;
+ case ARPHRD_FDDI:
+ return FDDI_K_ALEN;
+ case ARPHRD_HIPPI:
+ return HIPPI_ALEN;
+ case ARPHRD_IEEE802:
+ return FC_ALEN;
+ case ARPHRD_ROSE:
+ return ROSE_ADDR_LEN;
+ case ARPHRD_NETROM:
+ return AX25_ADDR_LEN;
+ case ARPHRD_LOCALTLK:
+ return LTALK_ALEN;
+ default:
+ return 0;
+ }
+}
+
static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
unsigned long arg, int ifreq_len)
{
@@ -3085,6 +3132,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
break;
}
tun->dev->type = (int) arg;
+ tun->dev->addr_len = tun_get_addr_len(tun->dev->type);
netif_info(tun, drv, tun->dev, "linktype set to %d\n",
tun->dev->type);
call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
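Changing the link type through TUNSETLINK now also updates dev->addr_len, so the hardware-address length userspace reads back stays consistent with the type (16 bytes for IPv6 tunnels, 4 for IPv4 tunnels, 6 for Ethernet, and so on). A reduced sketch of the mapping, covering only a few of the types handled above:

#include <stdint.h>

enum link_type { LT_ETHER, LT_TUNNEL4, LT_TUNNEL6, LT_NONE };	/* illustrative subset */

/* Illustrative sketch: the address length follows the link-layer type. */
static uint8_t addr_len_for(enum link_type t)
{
	switch (t) {
	case LT_ETHER:   return 6;	/* ETH_ALEN */
	case LT_TUNNEL4: return 4;	/* an IPv4 address */
	case LT_TUNNEL6: return 16;	/* sizeof(struct in6_addr) */
	default:         return 0;	/* point-to-point types carry no hw address */
	}
}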
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 31d51346786a..9bc58e64b5b7 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -611,7 +611,7 @@ static struct hso_serial *get_serial_by_index(unsigned index)
return serial;
}
-static int get_free_serial_index(void)
+static int obtain_minor(struct hso_serial *serial)
{
int index;
unsigned long flags;
@@ -619,8 +619,10 @@ static int get_free_serial_index(void)
spin_lock_irqsave(&serial_table_lock, flags);
for (index = 0; index < HSO_SERIAL_TTY_MINORS; index++) {
if (serial_table[index] == NULL) {
+ serial_table[index] = serial->parent;
+ serial->minor = index;
spin_unlock_irqrestore(&serial_table_lock, flags);
- return index;
+ return 0;
}
}
spin_unlock_irqrestore(&serial_table_lock, flags);
@@ -629,15 +631,12 @@ static int get_free_serial_index(void)
return -1;
}
-static void set_serial_by_index(unsigned index, struct hso_serial *serial)
+static void release_minor(struct hso_serial *serial)
{
unsigned long flags;
spin_lock_irqsave(&serial_table_lock, flags);
- if (serial)
- serial_table[index] = serial->parent;
- else
- serial_table[index] = NULL;
+ serial_table[serial->minor] = NULL;
spin_unlock_irqrestore(&serial_table_lock, flags);
}
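obtain_minor() now finds a free slot and records the owner in a single pass under serial_table_lock, closing the window in which two probing devices could pick the same index, and release_minor() is the symmetric teardown. A small pthread-based sketch of allocate-and-publish under one lock (illustrative, not the driver code):

#include <pthread.h>
#include <stddef.h>

#define MINORS 8

static void *table[MINORS];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Illustrative sketch: claim the slot while still holding the lock so no
 * other thread can return the same index. */
static int obtain_minor(void *owner, int *minor)
{
	pthread_mutex_lock(&table_lock);
	for (int i = 0; i < MINORS; i++) {
		if (!table[i]) {
			table[i] = owner;
			*minor = i;
			pthread_mutex_unlock(&table_lock);
			return 0;
		}
	}
	pthread_mutex_unlock(&table_lock);
	return -1;
}

static void release_minor(int minor)
{
	pthread_mutex_lock(&table_lock);
	table[minor] = NULL;
	pthread_mutex_unlock(&table_lock);
}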
@@ -2230,6 +2229,7 @@ static int hso_stop_serial_device(struct hso_device *hso_dev)
static void hso_serial_tty_unregister(struct hso_serial *serial)
{
tty_unregister_device(tty_drv, serial->minor);
+ release_minor(serial);
}
static void hso_serial_common_free(struct hso_serial *serial)
@@ -2253,24 +2253,22 @@ static void hso_serial_common_free(struct hso_serial *serial)
static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
int rx_size, int tx_size)
{
- int minor;
int i;
tty_port_init(&serial->port);
- minor = get_free_serial_index();
- if (minor < 0)
+ if (obtain_minor(serial))
goto exit2;
/* register our minor number */
serial->parent->dev = tty_port_register_device_attr(&serial->port,
- tty_drv, minor, &serial->parent->interface->dev,
+ tty_drv, serial->minor, &serial->parent->interface->dev,
serial->parent, hso_serial_dev_groups);
- if (IS_ERR(serial->parent->dev))
+ if (IS_ERR(serial->parent->dev)) {
+ release_minor(serial);
goto exit2;
+ }
- /* fill in specific data for later use */
- serial->minor = minor;
serial->magic = HSO_SERIAL_MAGIC;
spin_lock_init(&serial->serial_lock);
serial->num_rx_urbs = num_urbs;
@@ -2667,9 +2665,6 @@ static struct hso_device *hso_create_bulk_serial_device(
serial->write_data = hso_std_serial_write_data;
- /* and record this serial */
- set_serial_by_index(serial->minor, serial);
-
/* setup the proc dirs and files if needed */
hso_log_port(hso_dev);
@@ -2726,9 +2721,6 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface,
serial->shared_int->ref_count++;
mutex_unlock(&serial->shared_int->shared_int_lock);
- /* and record this serial */
- set_serial_by_index(serial->minor, serial);
-
/* setup the proc dirs and files if needed */
hso_log_port(hso_dev);
@@ -3113,7 +3105,6 @@ static void hso_free_interface(struct usb_interface *interface)
cancel_work_sync(&serial_table[i]->async_get_intf);
hso_serial_tty_unregister(serial);
kref_put(&serial_table[i]->ref, hso_serial_ref_free);
- set_serial_by_index(i, NULL);
}
}
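The hso.c rework above reserves a free minor and records its owner inside one locked section, so two probing devices can no longer race to the same slot. A small pthread-based sketch of that allocate-and-record-under-the-lock pattern (table size and names are illustrative):

#include <pthread.h>
#include <stdio.h>

#define NUM_MINORS 8

static void *minor_table[NUM_MINORS];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Reserve a free slot and record its owner while still holding the lock,
 * so no other thread can claim the same index in between. */
static int obtain_minor(void *owner, int *minor_out)
{
	int i, ret = -1;

	pthread_mutex_lock(&table_lock);
	for (i = 0; i < NUM_MINORS; i++) {
		if (!minor_table[i]) {
			minor_table[i] = owner;
			*minor_out = i;
			ret = 0;
			break;
		}
	}
	pthread_mutex_unlock(&table_lock);
	return ret;
}

static void release_minor(int minor)
{
	pthread_mutex_lock(&table_lock);
	minor_table[minor] = NULL;
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	int dummy, minor;

	if (!obtain_minor(&dummy, &minor)) {
		printf("got minor %d\n", minor);
		release_minor(minor);
	}
	return 0;
}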
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index bb4ea9dbc16b..101659cd4b87 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -409,9 +409,13 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
offset += hdr_padded_len;
p += hdr_padded_len;
- copy = len;
- if (copy > skb_tailroom(skb))
- copy = skb_tailroom(skb);
+ /* Copy all frame if it fits skb->head, otherwise
+ * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
+ */
+ if (len <= skb_tailroom(skb))
+ copy = len;
+ else
+ copy = ETH_HLEN + metasize;
skb_put_data(skb, p, copy);
if (metasize) {
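Per the new comment in the virtio_net hunk, the receive path copies the whole frame into the skb head only when it fits there; otherwise it copies just the link header plus metadata and lets later header pulls fetch the rest. A one-function sketch of that decision (parameter names are illustrative):

#include <stddef.h>
#include <stdio.h>

/* Pick how many bytes to copy into the linear head: the whole frame if the
 * available tailroom can hold it, otherwise only the link header + metadata. */
static size_t head_copy_len(size_t frame_len, size_t tailroom,
			    size_t link_hdr_len, size_t metasize)
{
	return frame_len <= tailroom ? frame_len : link_hdr_len + metasize;
}

int main(void)
{
	printf("%zu\n", head_copy_len(1500, 256, 14, 0));	/* prints 14 */
	return 0;
}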
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 39ee1300cdd9..02a14f1b938a 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2725,12 +2725,17 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
goto tx_error;
} else if (err) {
if (info) {
+ struct ip_tunnel_info *unclone;
struct in_addr src, dst;
+ unclone = skb_tunnel_info_unclone(skb);
+ if (unlikely(!unclone))
+ goto tx_error;
+
src = remote_ip.sin.sin_addr;
dst = local_ip.sin.sin_addr;
- info->key.u.ipv4.src = src.s_addr;
- info->key.u.ipv4.dst = dst.s_addr;
+ unclone->key.u.ipv4.src = src.s_addr;
+ unclone->key.u.ipv4.dst = dst.s_addr;
}
vxlan_encap_bypass(skb, vxlan, vxlan, vni, false);
dst_release(ndst);
@@ -2781,12 +2786,17 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
goto tx_error;
} else if (err) {
if (info) {
+ struct ip_tunnel_info *unclone;
struct in6_addr src, dst;
+ unclone = skb_tunnel_info_unclone(skb);
+ if (unlikely(!unclone))
+ goto tx_error;
+
src = remote_ip.sin6.sin6_addr;
dst = local_ip.sin6.sin6_addr;
- info->key.u.ipv6.src = src;
- info->key.u.ipv6.dst = dst;
+ unclone->key.u.ipv6.src = src;
+ unclone->key.u.ipv6.dst = dst;
}
vxlan_encap_bypass(skb, vxlan, vxlan, vni, false);
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 0720f5f92caa..4d9dc7d15908 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -415,7 +415,7 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
if (pad > 0) { /* Pad the frame with zeros */
if (__skb_pad(skb, pad, false))
- goto drop;
+ goto out;
skb_put(skb, pad);
}
}
@@ -448,8 +448,9 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
drop:
- dev->stats.tx_dropped++;
kfree_skb(skb);
+out:
+ dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
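In the hdlc_fr.c hunk above, the labels are reordered so the kfree_skb() path falls through into the shared drop accounting, while the padding-failure path skips the free and only bumps the counter. A standalone sketch of that fall-through error-label layout (the outcome codes are illustrative):

#include <stdio.h>
#include <stdlib.h>

static unsigned long tx_dropped;

/* 0 = sent; 1 = failed, buffer already consumed elsewhere;
 * 2 = failed, we still own the buffer. Values are illustrative. */
static int xmit(void *buf, int outcome)
{
	if (outcome == 1)
		goto out;	/* do not free, just account the drop */
	if (outcome == 2)
		goto drop;	/* we must free buf ourselves */

	printf("sent %p\n", buf);
	return 0;

drop:
	free(buf);
	/* fall through: both failure paths count as a drop */
out:
	tx_dropped++;
	return 0;
}

int main(void)
{
	xmit(malloc(64), 2);
	printf("tx_dropped = %lu\n", tx_dropped);
	return 0;
}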
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 6d30a0fcecea..34cd8a7401fe 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -2439,7 +2439,7 @@ void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool locked)
vif = ifp->vif;
cfg = wdev_to_cfg(&vif->wdev);
cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
- if (locked) {
+ if (!locked) {
rtnl_lock();
wiphy_lock(cfg->wiphy);
cfg80211_unregister_wdev(&vif->wdev);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c
index 3dbc6f3f92cc..231d2517f398 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014 Intel Corporation
+ * Copyright (C) 2005-2014, 2021 Intel Corporation
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
#include <linux/sched.h>
@@ -26,7 +26,7 @@ bool iwl_notification_wait(struct iwl_notif_wait_data *notif_wait,
if (!list_empty(&notif_wait->notif_waits)) {
struct iwl_notification_wait *w;
- spin_lock(&notif_wait->notif_wait_lock);
+ spin_lock_bh(&notif_wait->notif_wait_lock);
list_for_each_entry(w, &notif_wait->notif_waits, list) {
int i;
bool found = false;
@@ -59,7 +59,7 @@ bool iwl_notification_wait(struct iwl_notif_wait_data *notif_wait,
triggered = true;
}
}
- spin_unlock(&notif_wait->notif_wait_lock);
+ spin_unlock_bh(&notif_wait->notif_wait_lock);
}
return triggered;
@@ -70,10 +70,10 @@ void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
{
struct iwl_notification_wait *wait_entry;
- spin_lock(&notif_wait->notif_wait_lock);
+ spin_lock_bh(&notif_wait->notif_wait_lock);
list_for_each_entry(wait_entry, &notif_wait->notif_waits, list)
wait_entry->aborted = true;
- spin_unlock(&notif_wait->notif_wait_lock);
+ spin_unlock_bh(&notif_wait->notif_wait_lock);
wake_up_all(&notif_wait->notif_waitq);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 75f99ff7f908..c4f5da76f1c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -414,6 +414,7 @@ struct iwl_cfg {
#define IWL_CFG_MAC_TYPE_QNJ 0x36
#define IWL_CFG_MAC_TYPE_SO 0x37
#define IWL_CFG_MAC_TYPE_SNJ 0x42
+#define IWL_CFG_MAC_TYPE_SOF 0x43
#define IWL_CFG_MAC_TYPE_MA 0x44
#define IWL_CFG_RF_TYPE_TH 0x105
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index af684f80b0cc..c5a1e84dc1ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -232,7 +232,7 @@ enum iwl_reg_capa_flags_v2 {
REG_CAPA_V2_MCS_9_ALLOWED = BIT(6),
REG_CAPA_V2_WEATHER_DISABLED = BIT(7),
REG_CAPA_V2_40MHZ_ALLOWED = BIT(8),
- REG_CAPA_V2_11AX_DISABLED = BIT(13),
+ REG_CAPA_V2_11AX_DISABLED = BIT(10),
};
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 130760572262..34ddef97b099 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1786,10 +1786,13 @@ static ssize_t iwl_dbgfs_rfi_freq_table_write(struct iwl_mvm *mvm, char *buf,
return -EINVAL;
/* value zero triggers re-sending the default table to the device */
- if (!op_id)
+ if (!op_id) {
+ mutex_lock(&mvm->mutex);
ret = iwl_rfi_send_config_cmd(mvm, NULL);
- else
+ mutex_unlock(&mvm->mutex);
+ } else {
ret = -EOPNOTSUPP; /* in the future a new table will be added */
+ }
return ret ?: count;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
index 873919048143..0b818067067c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2020 Intel Corporation
+ * Copyright (C) 2020 - 2021 Intel Corporation
*/
#include "mvm.h"
@@ -66,6 +66,8 @@ int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_t
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RFIM_SUPPORT))
return -EOPNOTSUPP;
+ lockdep_assert_held(&mvm->mutex);
+
/* in case no table is passed, use the default one */
if (!rfi_table) {
memcpy(cmd.table, iwl_rfi_table, sizeof(cmd.table));
@@ -75,9 +77,7 @@ int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_t
cmd.oem = 1;
}
- mutex_lock(&mvm->mutex);
ret = iwl_mvm_send_cmd(mvm, &hcmd);
- mutex_unlock(&mvm->mutex);
if (ret)
IWL_ERR(mvm, "Failed to send RFI config cmd %d\n", ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index c21736f80c29..af5a6dd81c41 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -272,10 +272,10 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
rx_status->chain_signal[2] = S8_MIN;
}
-static int iwl_mvm_rx_mgmt_crypto(struct ieee80211_sta *sta,
- struct ieee80211_hdr *hdr,
- struct iwl_rx_mpdu_desc *desc,
- u32 status)
+static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
+ struct ieee80211_hdr *hdr,
+ struct iwl_rx_mpdu_desc *desc,
+ u32 status)
{
struct iwl_mvm_sta *mvmsta;
struct iwl_mvm_vif *mvmvif;
@@ -285,6 +285,9 @@ static int iwl_mvm_rx_mgmt_crypto(struct ieee80211_sta *sta,
u32 len = le16_to_cpu(desc->mpdu_len);
const u8 *frame = (void *)hdr;
+ if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) == IWL_RX_MPDU_STATUS_SEC_NONE)
+ return 0;
+
/*
* For non-beacon, we don't really care. But beacons may
* be filtered out, and we thus need the firmware's replay
@@ -356,6 +359,10 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
IWL_RX_MPDU_STATUS_SEC_UNKNOWN && !mvm->monitor_on)
return -1;
+ if (unlikely(ieee80211_is_mgmt(hdr->frame_control) &&
+ !ieee80211_has_protected(hdr->frame_control)))
+ return iwl_mvm_rx_mgmt_prot(sta, hdr, desc, status);
+
if (!ieee80211_has_protected(hdr->frame_control) ||
(status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
IWL_RX_MPDU_STATUS_SEC_NONE)
@@ -411,7 +418,7 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
stats->flag |= RX_FLAG_DECRYPTED;
return 0;
case RX_MPDU_RES_STATUS_SEC_CMAC_GMAC_ENC:
- return iwl_mvm_rx_mgmt_crypto(sta, hdr, desc, status);
+ break;
default:
/*
* Sometimes we can get frames that were not decrypted
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 8fba190e84cf..cecc32e7dbe8 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#include "iwl-trans.h"
#include "iwl-fh.h"
@@ -75,15 +75,6 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
const struct fw_img *fw)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
- u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
- CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
- u32_encode_bits(250,
- CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
- CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
- u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
- CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
- u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
struct iwl_context_info_gen3 *ctxt_info_gen3;
struct iwl_prph_scratch *prph_scratch;
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
@@ -217,26 +208,6 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
CSR_AUTO_FUNC_BOOT_ENA);
- /*
- * To workaround hardware latency issues during the boot process,
- * initialize the LTR to ~250 usec (see ltr_val above).
- * The firmware initializes this again later (to a smaller value).
- */
- if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
- trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
- !trans->trans_cfg->integrated) {
- iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val);
- } else if (trans->trans_cfg->integrated &&
- trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
- iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL);
- iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val);
- }
-
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1);
- else
- iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT);
-
return 0;
err_free_ctxt_info:
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index d1bb273d6b6d..74ce31fdf45e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#include "iwl-trans.h"
#include "iwl-fh.h"
@@ -240,7 +240,6 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
/* kick FW self load */
iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr);
- iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
/* Context info will be released upon alive or failure to get one */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index ffaf973dae94..558a0b2ef0fc 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -592,6 +592,7 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x4DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
IWL_DEV_INFO(0x4DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x4DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x4DF0, 0x6074, iwl_ax201_cfg_qu_hr, NULL),
/* So with HR */
IWL_DEV_INFO(0x2725, 0x0090, iwlax211_2ax_cfg_so_gf_a0, NULL),
@@ -1040,7 +1041,31 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_so_a0_hr_a0, iwl_ax201_name)
+ iwl_cfg_so_a0_hr_a0, iwl_ax201_name),
+
+/* So-F with Hr */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+ IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_cfg_so_a0_hr_a0, iwl_ax203_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_cfg_so_a0_hr_a0, iwl_ax101_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_cfg_so_a0_hr_a0, iwl_ax201_name),
+
+/* So-F with Gf */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name),
#endif /* CONFIG_IWLMVM */
};
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 497ef3405da3..94ffc1ae484d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -266,6 +266,34 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
mutex_unlock(&trans_pcie->mutex);
}
+static void iwl_pcie_set_ltr(struct iwl_trans *trans)
+{
+ u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
+ u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+ CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
+ u32_encode_bits(250,
+ CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
+ CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
+ u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+ CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
+ u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
+
+ /*
+ * To workaround hardware latency issues during the boot process,
+ * initialize the LTR to ~250 usec (see ltr_val above).
+ * The firmware initializes this again later (to a smaller value).
+ */
+ if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
+ trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
+ !trans->trans_cfg->integrated) {
+ iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val);
+ } else if (trans->trans_cfg->integrated &&
+ trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
+ iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL);
+ iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val);
+ }
+}
+
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
const struct fw_img *fw, bool run_in_rfkill)
{
@@ -332,6 +360,13 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
if (ret)
goto out;
+ iwl_pcie_set_ltr(trans);
+
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1);
+ else
+ iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
+
/* re-check RF-Kill state since we may have missed the interrupt */
hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
if (hw_rfkill && !run_in_rfkill)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 381e8f90b6f2..7ae32491b5da 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -928,6 +928,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
u32 cmd_pos;
const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
+ unsigned long flags;
if (WARN(!trans->wide_cmd_header &&
group_id > IWL_ALWAYS_LONG_GROUP,
@@ -1011,10 +1012,10 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
goto free_dup_buf;
}
- spin_lock_bh(&txq->lock);
+ spin_lock_irqsave(&txq->lock, flags);
if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
- spin_unlock_bh(&txq->lock);
+ spin_unlock_irqrestore(&txq->lock, flags);
IWL_ERR(trans, "No space in command queue\n");
iwl_op_mode_cmd_queue_full(trans->op_mode);
@@ -1174,7 +1175,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
unlock_reg:
spin_unlock(&trans_pcie->reg_lock);
out:
- spin_unlock_bh(&txq->lock);
+ spin_unlock_irqrestore(&txq->lock, flags);
free_dup_buf:
if (idx < 0)
kfree(dup_buf);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
index 18980bb32dee..6dad7f6ab09d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
@@ -135,10 +135,10 @@
#define MT_WTBLON_TOP_BASE 0x34000
#define MT_WTBLON_TOP(ofs) (MT_WTBLON_TOP_BASE + (ofs))
-#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(0x0)
+#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(0x200)
#define MT_WTBLON_TOP_WDUCR_GROUP GENMASK(2, 0)
-#define MT_WTBL_UPDATE MT_WTBLON_TOP(0x030)
+#define MT_WTBL_UPDATE MT_WTBLON_TOP(0x230)
#define MT_WTBL_UPDATE_WLAN_IDX GENMASK(9, 0)
#define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(12)
#define MT_WTBL_UPDATE_BUSY BIT(31)
diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c
index c878097f0dda..1df959532c7d 100644
--- a/drivers/net/wireless/virt_wifi.c
+++ b/drivers/net/wireless/virt_wifi.c
@@ -12,6 +12,7 @@
#include <net/cfg80211.h>
#include <net/rtnetlink.h>
#include <linux/etherdevice.h>
+#include <linux/math64.h>
#include <linux/module.h>
static struct wiphy *common_wiphy;
@@ -168,11 +169,11 @@ static void virt_wifi_scan_result(struct work_struct *work)
scan_result.work);
struct wiphy *wiphy = priv_to_wiphy(priv);
struct cfg80211_scan_info scan_info = { .aborted = false };
+ u64 tsf = div_u64(ktime_get_boottime_ns(), 1000);
informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz,
CFG80211_BSS_FTYPE_PRESP,
- fake_router_bssid,
- ktime_get_boottime_ns(),
+ fake_router_bssid, tsf,
WLAN_CAPABILITY_ESS, 0,
(void *)&ssid, sizeof(ssid),
DBM_TO_MBM(-50), GFP_KERNEL);
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index dcc1dd96911a..adb26aff481d 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -205,7 +205,7 @@ static void populate_properties(const void *blob,
*pprev = NULL;
}
-static bool populate_node(const void *blob,
+static int populate_node(const void *blob,
int offset,
void **mem,
struct device_node *dad,
@@ -214,24 +214,24 @@ static bool populate_node(const void *blob,
{
struct device_node *np;
const char *pathp;
- unsigned int l, allocl;
+ int len;
- pathp = fdt_get_name(blob, offset, &l);
+ pathp = fdt_get_name(blob, offset, &len);
if (!pathp) {
*pnp = NULL;
- return false;
+ return len;
}
- allocl = ++l;
+ len++;
- np = unflatten_dt_alloc(mem, sizeof(struct device_node) + allocl,
+ np = unflatten_dt_alloc(mem, sizeof(struct device_node) + len,
__alignof__(struct device_node));
if (!dryrun) {
char *fn;
of_node_init(np);
np->full_name = fn = ((char *)np) + sizeof(*np);
- memcpy(fn, pathp, l);
+ memcpy(fn, pathp, len);
if (dad != NULL) {
np->parent = dad;
@@ -295,6 +295,7 @@ static int unflatten_dt_nodes(const void *blob,
struct device_node *nps[FDT_MAX_DEPTH];
void *base = mem;
bool dryrun = !base;
+ int ret;
if (nodepp)
*nodepp = NULL;
@@ -322,9 +323,10 @@ static int unflatten_dt_nodes(const void *blob,
!of_fdt_device_is_available(blob, offset))
continue;
- if (!populate_node(blob, offset, &mem, nps[depth],
- &nps[depth+1], dryrun))
- return mem - base;
+ ret = populate_node(blob, offset, &mem, nps[depth],
+ &nps[depth+1], dryrun);
+ if (ret < 0)
+ return ret;
if (!dryrun && nodepp && !*nodepp)
*nodepp = nps[depth+1];
@@ -372,6 +374,10 @@ void *__unflatten_device_tree(const void *blob,
{
int size;
void *mem;
+ int ret;
+
+ if (mynodes)
+ *mynodes = NULL;
pr_debug(" -> unflatten_device_tree()\n");
@@ -392,7 +398,7 @@ void *__unflatten_device_tree(const void *blob,
/* First pass, scan for size */
size = unflatten_dt_nodes(blob, NULL, dad, NULL);
- if (size < 0)
+ if (size <= 0)
return NULL;
size = ALIGN(size, 4);
@@ -410,12 +416,16 @@ void *__unflatten_device_tree(const void *blob,
pr_debug(" unflattening %p...\n", mem);
/* Second pass, do actual unflattening */
- unflatten_dt_nodes(blob, mem, dad, mynodes);
+ ret = unflatten_dt_nodes(blob, mem, dad, mynodes);
+
if (be32_to_cpup(mem + size) != 0xdeadbeef)
pr_warn("End of tree marker overwritten: %08x\n",
be32_to_cpup(mem + size));
- if (detached && mynodes) {
+ if (ret <= 0)
+ return NULL;
+
+ if (detached && mynodes && *mynodes) {
of_node_set_flag(*mynodes, OF_DETACHED);
pr_debug("unflattened tree is detached\n");
}
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index d9e6a324de0a..d717efbd637d 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -8,6 +8,8 @@
* Copyright (C) 1996-2005 Paul Mackerras.
*/
+#define FDT_ALIGN_SIZE 8
+
/**
* struct alias_prop - Alias property in 'aliases' node
* @link: List node to link the structure in aliases_lookup list
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 50bbe0edf538..23effe5e50ec 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -57,7 +57,7 @@ struct fragment {
* struct overlay_changeset
* @id: changeset identifier
* @ovcs_list: list on which we are located
- * @fdt: FDT that was unflattened to create @overlay_tree
+ * @fdt: base of memory allocated to hold aligned FDT that was unflattened to create @overlay_tree
* @overlay_tree: expanded device tree that contains the fragment nodes
* @count: count of fragment structures
* @fragments: fragment nodes in the overlay expanded device tree
@@ -719,8 +719,8 @@ static struct device_node *find_target(struct device_node *info_node)
/**
* init_overlay_changeset() - initialize overlay changeset from overlay tree
* @ovcs: Overlay changeset to build
- * @fdt: the FDT that was unflattened to create @tree
- * @tree: Contains all the overlay fragments and overlay fixup nodes
+ * @fdt: base of memory allocated to hold aligned FDT that was unflattened to create @tree
+ * @tree: Contains the overlay fragments and overlay fixup nodes
*
* Initialize @ovcs. Populate @ovcs->fragments with node information from
* the top level of @tree. The relevant top level nodes are the fragment
@@ -873,7 +873,7 @@ static void free_overlay_changeset(struct overlay_changeset *ovcs)
* internal documentation
*
* of_overlay_apply() - Create and apply an overlay changeset
- * @fdt: the FDT that was unflattened to create @tree
+ * @fdt: base of memory allocated to hold the aligned FDT
* @tree: Expanded overlay device tree
* @ovcs_id: Pointer to overlay changeset id
*
@@ -953,7 +953,9 @@ static int of_overlay_apply(const void *fdt, struct device_node *tree,
/*
* after overlay_notify(), ovcs->overlay_tree related pointers may have
* leaked to drivers, so can not kfree() tree, aka ovcs->overlay_tree;
- * and can not free fdt, aka ovcs->fdt
+ * and can not free memory containing aligned fdt. The aligned fdt
+ * is contained within the memory at ovcs->fdt, possibly at an offset
+ * from ovcs->fdt.
*/
ret = overlay_notify(ovcs, OF_OVERLAY_PRE_APPLY);
if (ret) {
@@ -1014,10 +1016,11 @@ out:
int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
int *ovcs_id)
{
- const void *new_fdt;
+ void *new_fdt;
+ void *new_fdt_align;
int ret;
u32 size;
- struct device_node *overlay_root;
+ struct device_node *overlay_root = NULL;
*ovcs_id = 0;
ret = 0;
@@ -1036,11 +1039,14 @@ int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
* Must create permanent copy of FDT because of_fdt_unflatten_tree()
* will create pointers to the passed in FDT in the unflattened tree.
*/
- new_fdt = kmemdup(overlay_fdt, size, GFP_KERNEL);
+ new_fdt = kmalloc(size + FDT_ALIGN_SIZE, GFP_KERNEL);
if (!new_fdt)
return -ENOMEM;
- of_fdt_unflatten_tree(new_fdt, NULL, &overlay_root);
+ new_fdt_align = PTR_ALIGN(new_fdt, FDT_ALIGN_SIZE);
+ memcpy(new_fdt_align, overlay_fdt, size);
+
+ of_fdt_unflatten_tree(new_fdt_align, NULL, &overlay_root);
if (!overlay_root) {
pr_err("unable to unflatten overlay_fdt\n");
ret = -EINVAL;
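The of_overlay_fdt_apply() change above replaces kmemdup() with an allocation padded by FDT_ALIGN_SIZE, aligns the pointer with PTR_ALIGN(), and copies the overlay FDT into the aligned region while keeping the raw pointer around for freeing. A minimal userspace sketch of that over-allocate-and-align pattern:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define FDT_ALIGN_SIZE 8

/* Round p up to the next multiple of 'align' (a power of two),
 * similar in spirit to the kernel's PTR_ALIGN(). */
static void *ptr_align(void *p, uintptr_t align)
{
	return (void *)(((uintptr_t)p + align - 1) & ~(align - 1));
}

/* Returns the raw allocation (to be passed to free()); *aligned_out points
 * at an FDT_ALIGN_SIZE-aligned copy of src inside it. */
static void *copy_fdt_aligned(const void *src, size_t size, void **aligned_out)
{
	void *raw = malloc(size + FDT_ALIGN_SIZE);

	if (!raw)
		return NULL;
	*aligned_out = ptr_align(raw, FDT_ALIGN_SIZE);
	memcpy(*aligned_out, src, size);
	return raw;
}

int main(void)
{
	const char fdt[16] = "example";
	void *aligned, *raw = copy_fdt_aligned(fdt, sizeof(fdt), &aligned);

	free(raw);
	return 0;
}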
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 5036a362f52e..78427c85bef3 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -1262,7 +1262,16 @@ DEFINE_SIMPLE_PROP(pinctrl7, "pinctrl-7", NULL)
DEFINE_SIMPLE_PROP(pinctrl8, "pinctrl-8", NULL)
DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
-DEFINE_SUFFIX_PROP(gpios, "-gpios", "#gpio-cells")
+
+static struct device_node *parse_gpios(struct device_node *np,
+ const char *prop_name, int index)
+{
+ if (!strcmp_suffix(prop_name, ",nr-gpios"))
+ return NULL;
+
+ return parse_suffix_prop_cells(np, prop_name, index, "-gpios",
+ "#gpio-cells");
+}
static struct device_node *parse_iommu_maps(struct device_node *np,
const char *prop_name, int index)
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index eb100627c186..819a20acaa93 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/platform_device.h>
+#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
@@ -1408,7 +1409,8 @@ static void attach_node_and_children(struct device_node *np)
static int __init unittest_data_add(void)
{
void *unittest_data;
- struct device_node *unittest_data_node, *np;
+ void *unittest_data_align;
+ struct device_node *unittest_data_node = NULL, *np;
/*
* __dtb_testcases_begin[] and __dtb_testcases_end[] are magically
* created by cmd_dt_S_dtb in scripts/Makefile.lib
@@ -1417,21 +1419,29 @@ static int __init unittest_data_add(void)
extern uint8_t __dtb_testcases_end[];
const int size = __dtb_testcases_end - __dtb_testcases_begin;
int rc;
+ void *ret;
if (!size) {
- pr_warn("%s: No testcase data to attach; not running tests\n",
- __func__);
+ pr_warn("%s: testcases is empty\n", __func__);
return -ENODATA;
}
/* creating copy */
- unittest_data = kmemdup(__dtb_testcases_begin, size, GFP_KERNEL);
+ unittest_data = kmalloc(size + FDT_ALIGN_SIZE, GFP_KERNEL);
if (!unittest_data)
return -ENOMEM;
- of_fdt_unflatten_tree(unittest_data, NULL, &unittest_data_node);
+ unittest_data_align = PTR_ALIGN(unittest_data, FDT_ALIGN_SIZE);
+ memcpy(unittest_data_align, __dtb_testcases_begin, size);
+
+ ret = of_fdt_unflatten_tree(unittest_data_align, NULL, &unittest_data_node);
+ if (!ret) {
+ pr_warn("%s: unflatten testcases tree failed\n", __func__);
+ kfree(unittest_data);
+ return -ENODATA;
+ }
if (!unittest_data_node) {
- pr_warn("%s: No tree to attach; not running tests\n", __func__);
+ pr_warn("%s: testcases tree is empty\n", __func__);
kfree(unittest_data);
return -ENODATA;
}
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 8085782cd8f9..9f3361c13ded 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -1357,6 +1357,7 @@ static int intel_pinctrl_add_padgroups_by_gpps(struct intel_pinctrl *pctrl,
gpps[i].gpio_base = 0;
break;
case INTEL_GPIO_BASE_NOMAP:
+ break;
default:
break;
}
@@ -1393,6 +1394,7 @@ static int intel_pinctrl_add_padgroups_by_size(struct intel_pinctrl *pctrl,
gpps[i].size = min(gpp_size, npins);
npins -= gpps[i].size;
+ gpps[i].gpio_base = gpps[i].base;
gpps[i].padown_num = padown_num;
/*
@@ -1491,8 +1493,13 @@ static int intel_pinctrl_probe(struct platform_device *pdev,
if (IS_ERR(regs))
return PTR_ERR(regs);
- /* Determine community features based on the revision */
+ /*
+ * Determine community features based on the revision.
+ * A value of all ones means the device is not present.
+ */
value = readl(regs + REVID);
+ if (value == ~0u)
+ return -ENODEV;
if (((value & REVID_MASK) >> REVID_SHIFT) >= 0x94) {
community->features |= PINCTRL_FEATURE_DEBOUNCE;
community->features |= PINCTRL_FEATURE_1K_PD;
diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c
index f35edb0eac40..c12fa57ebd12 100644
--- a/drivers/pinctrl/pinctrl-microchip-sgpio.c
+++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c
@@ -572,7 +572,7 @@ static void microchip_sgpio_irq_settype(struct irq_data *data,
/* Type value spread over 2 registers sets: low, high bit */
sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER, addr.bit,
BIT(addr.port), (!!(type & 0x1)) << addr.port);
- sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER + SGPIO_MAX_BITS, addr.bit,
+ sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER, SGPIO_MAX_BITS + addr.bit,
BIT(addr.port), (!!(type & 0x2)) << addr.port);
if (type == SGPIO_INT_TRG_LEVEL)
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index aa1a1c850d05..53a0badc6b03 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -3727,12 +3727,15 @@ static int __maybe_unused rockchip_pinctrl_suspend(struct device *dev)
static int __maybe_unused rockchip_pinctrl_resume(struct device *dev)
{
struct rockchip_pinctrl *info = dev_get_drvdata(dev);
- int ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX,
- rk3288_grf_gpio6c_iomux |
- GPIO6C6_SEL_WRITE_ENABLE);
+ int ret;
- if (ret)
- return ret;
+ if (info->ctrl->type == RK3288) {
+ ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX,
+ rk3288_grf_gpio6c_iomux |
+ GPIO6C6_SEL_WRITE_ENABLE);
+ if (ret)
+ return ret;
+ }
return pinctrl_force_default(info->pctl_dev);
}
diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
index 369ee20a7ea9..2f19ab4db720 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
@@ -392,7 +392,7 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
unsigned long *configs, unsigned int nconfs)
{
struct lpi_pinctrl *pctrl = dev_get_drvdata(pctldev->dev);
- unsigned int param, arg, pullup, strength;
+ unsigned int param, arg, pullup = LPI_GPIO_BIAS_DISABLE, strength = 2;
bool value, output_enabled = false;
const struct lpi_pingroup *g;
unsigned long sval;
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280.c b/drivers/pinctrl/qcom/pinctrl-sc7280.c
index 8daccd530285..9d41abfca37e 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7280.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7280.c
@@ -1439,14 +1439,14 @@ static const struct msm_pingroup sc7280_groups[] = {
[172] = PINGROUP(172, qdss, _, _, _, _, _, _, _, _),
[173] = PINGROUP(173, qdss, _, _, _, _, _, _, _, _),
[174] = PINGROUP(174, qdss, _, _, _, _, _, _, _, _),
- [175] = UFS_RESET(ufs_reset, 0x1be000),
- [176] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x1b3000, 15, 0),
- [177] = SDC_QDSD_PINGROUP(sdc1_clk, 0x1b3000, 13, 6),
- [178] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x1b3000, 11, 3),
- [179] = SDC_QDSD_PINGROUP(sdc1_data, 0x1b3000, 9, 0),
- [180] = SDC_QDSD_PINGROUP(sdc2_clk, 0x1b4000, 14, 6),
- [181] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x1b4000, 11, 3),
- [182] = SDC_QDSD_PINGROUP(sdc2_data, 0x1b4000, 9, 0),
+ [175] = UFS_RESET(ufs_reset, 0xbe000),
+ [176] = SDC_QDSD_PINGROUP(sdc1_rclk, 0xb3004, 0, 6),
+ [177] = SDC_QDSD_PINGROUP(sdc1_clk, 0xb3000, 13, 6),
+ [178] = SDC_QDSD_PINGROUP(sdc1_cmd, 0xb3000, 11, 3),
+ [179] = SDC_QDSD_PINGROUP(sdc1_data, 0xb3000, 9, 0),
+ [180] = SDC_QDSD_PINGROUP(sdc2_clk, 0xb4000, 14, 6),
+ [181] = SDC_QDSD_PINGROUP(sdc2_cmd, 0xb4000, 11, 3),
+ [182] = SDC_QDSD_PINGROUP(sdc2_data, 0xb4000, 9, 0),
};
static const struct msm_pinctrl_soc_data sc7280_pinctrl = {
diff --git a/drivers/pinctrl/qcom/pinctrl-sdx55.c b/drivers/pinctrl/qcom/pinctrl-sdx55.c
index 2b5b0e2b03ad..5aaf57b40407 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdx55.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdx55.c
@@ -423,7 +423,7 @@ static const char * const gpio_groups[] = {
static const char * const qdss_stm_groups[] = {
"gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", "gpio12", "gpio13",
- "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19" "gpio20", "gpio21", "gpio22",
+ "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
"gpio23", "gpio44", "gpio45", "gpio52", "gpio53", "gpio56", "gpio57", "gpio61", "gpio62",
"gpio63", "gpio64", "gpio65", "gpio66",
};
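The sdx55 fix above adds the comma that was missing between "gpio19" and "gpio20"; without it the compiler silently concatenates the two literals into a single array entry. A tiny demonstration of that pitfall:

#include <stdio.h>

int main(void)
{
	/* The missing comma makes the compiler merge the two literals into
	 * one entry, silently shrinking the array by one. */
	static const char *const bad[]  = { "gpio19" "gpio20", "gpio21" };
	static const char *const good[] = { "gpio19", "gpio20", "gpio21" };

	printf("bad has %zu entries, good has %zu\n",
	       sizeof(bad) / sizeof(bad[0]), sizeof(good) / sizeof(good[0]));
	return 0;
}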
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index 57cc92891a57..078648a9201b 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -483,11 +483,16 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
goto wakeup;
/*
- * Switch events will wake the device and report the new switch
- * position to the input subsystem.
+ * Some devices send (duplicate) tablet-mode events when moved
+ * around even though the mode has not changed; and they do this
+ * even when suspended.
+ * Update the switch state in case it changed and then return
+ * without waking up to avoid spurious wakeups.
*/
- if (priv->switches && (event == 0xcc || event == 0xcd))
- goto wakeup;
+ if (event == 0xcc || event == 0xcd) {
+ report_tablet_mode_event(priv->switches, event);
+ return;
+ }
/* Wake up on 5-button array events only. */
if (event == 0xc0 || !priv->array)
@@ -501,9 +506,6 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
wakeup:
pm_wakeup_hard_event(&device->dev);
- if (report_tablet_mode_event(priv->switches, event))
- return;
-
return;
}
diff --git a/drivers/regulator/bd9571mwv-regulator.c b/drivers/regulator/bd9571mwv-regulator.c
index 7b0cd08db446..ba020a45f238 100644
--- a/drivers/regulator/bd9571mwv-regulator.c
+++ b/drivers/regulator/bd9571mwv-regulator.c
@@ -125,7 +125,7 @@ static const struct regulator_ops vid_ops = {
static const struct regulator_desc regulators[] = {
BD9571MWV_REG("VD09", "vd09", VD09, avs_ops, 0, 0x7f,
- 0x80, 600000, 10000, 0x3c),
+ 0x6f, 600000, 10000, 0x3c),
BD9571MWV_REG("VD18", "vd18", VD18, vid_ops, BD9571MWV_VD18_VID, 0xf,
16, 1625000, 25000, 0),
BD9571MWV_REG("VD25", "vd25", VD25, vid_ops, BD9571MWV_VD25_VID, 0xf,
@@ -134,7 +134,7 @@ static const struct regulator_desc regulators[] = {
11, 2800000, 100000, 0),
BD9571MWV_REG("DVFS", "dvfs", DVFS, reg_ops,
BD9571MWV_DVFS_MONIVDAC, 0x7f,
- 0x80, 600000, 10000, 0x3c),
+ 0x6f, 600000, 10000, 0x3c),
};
#ifdef CONFIG_PM_SLEEP
@@ -174,7 +174,7 @@ static ssize_t backup_mode_show(struct device *dev,
{
struct bd9571mwv_reg *bdreg = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", bdreg->bkup_mode_enabled ? "on" : "off");
+ return sysfs_emit(buf, "%s\n", bdreg->bkup_mode_enabled ? "on" : "off");
}
static ssize_t backup_mode_store(struct device *dev,
@@ -301,7 +301,7 @@ static int bd9571mwv_regulator_probe(struct platform_device *pdev)
&config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register %s regulator\n",
- pdev->name);
+ regulators[i].name);
return PTR_ERR(rdev);
}
}
diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
index 2667919d76b3..dcb380e868df 100644
--- a/drivers/remoteproc/pru_rproc.c
+++ b/drivers/remoteproc/pru_rproc.c
@@ -450,6 +450,24 @@ static void *pru_i_da_to_va(struct pru_rproc *pru, u32 da, size_t len)
if (len == 0)
return NULL;
+ /*
+ * GNU binutils do not support multiple address spaces. The GNU
+ * linker's default linker script places IRAM at an arbitrary high
+ * offset, in order to differentiate it from DRAM. Hence we need to
+ * strip the artificial offset in the IRAM addresses coming from the
+ * ELF file.
+ *
+ * The TI proprietary linker would never set those higher IRAM address
+ * bits anyway. PRU architecture limits the program counter to 16-bit
+ * word-address range. This in turn corresponds to 18-bit IRAM
+ * byte-address range for ELF.
+ *
+ * Two more bits are added just in case to make the final 20-bit mask.
+ * Idea is to have a safeguard in case TI decides to add banking
+ * in future SoCs.
+ */
+ da &= 0xfffff;
+
if (da >= PRU_IRAM_DA &&
da + len <= PRU_IRAM_DA + pru->mem_regions[PRU_IOMEM_IRAM].size) {
offset = da - PRU_IRAM_DA;
@@ -585,7 +603,7 @@ pru_rproc_load_elf_segments(struct rproc *rproc, const struct firmware *fw)
break;
}
- if (pru->data->is_k3 && is_iram) {
+ if (pru->data->is_k3) {
ret = pru_rproc_memcpy(ptr, elf_data + phdr->p_offset,
filesz);
if (ret) {
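The pru_rproc.c comment above explains why the ELF instruction addresses carry an artificial high offset; the code then masks it off before range-checking against the IRAM region. A standalone sketch of that mask-then-bounds-check step (the region size here is illustrative):

#include <stdint.h>
#include <stddef.h>

#define IRAM_SIZE    0x8000u   /* illustrative region size */
#define IRAM_DA_MASK 0xfffffu  /* keep the low 20 bits, as in the patch */

/* Translate an ELF device address into an offset inside IRAM, or return -1
 * if the (masked) range does not fit in the region. */
static long iram_da_to_offset(uint32_t da, size_t len)
{
	da &= IRAM_DA_MASK;	/* strip the linker's artificial high offset */

	if ((size_t)da + len <= IRAM_SIZE)
		return (long)da;
	return -1;
}

int main(void)
{
	return iram_da_to_offset(0xf0000100u, 64) == 0x100 ? 0 : 1;
}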
diff --git a/drivers/remoteproc/qcom_pil_info.c b/drivers/remoteproc/qcom_pil_info.c
index 5521c4437ffa..7c007dd7b200 100644
--- a/drivers/remoteproc/qcom_pil_info.c
+++ b/drivers/remoteproc/qcom_pil_info.c
@@ -56,7 +56,7 @@ static int qcom_pil_info_init(void)
memset_io(base, 0, resource_size(&imem));
_reloc.base = base;
- _reloc.num_entries = resource_size(&imem) / PIL_RELOC_ENTRY_SIZE;
+ _reloc.num_entries = (u32)resource_size(&imem) / PIL_RELOC_ENTRY_SIZE;
return 0;
}
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index f140eafc0d6a..61831f2fdb30 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2372,6 +2372,24 @@ static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
}
/**
+ * ibmvfc_event_is_free - Check if event is free or not
+ * @evt: ibmvfc event struct
+ *
+ * Returns:
+ * true / false
+ **/
+static bool ibmvfc_event_is_free(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_event *loop_evt;
+
+ list_for_each_entry(loop_evt, &evt->queue->free, queue_list)
+ if (loop_evt == evt)
+ return true;
+
+ return false;
+}
+
+/**
* ibmvfc_wait_for_ops - Wait for ops to complete
* @vhost: ibmvfc host struct
* @device: device to match (starget or sdev)
@@ -2385,35 +2403,58 @@ static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
{
struct ibmvfc_event *evt;
DECLARE_COMPLETION_ONSTACK(comp);
- int wait;
+ int wait, i, q_index, q_size;
unsigned long flags;
signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
+ struct ibmvfc_queue *queues;
ENTER;
+ if (vhost->mq_enabled && vhost->using_channels) {
+ queues = vhost->scsi_scrqs.scrqs;
+ q_size = vhost->scsi_scrqs.active_queues;
+ } else {
+ queues = &vhost->crq;
+ q_size = 1;
+ }
+
do {
wait = 0;
- spin_lock_irqsave(&vhost->crq.l_lock, flags);
- list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
- if (match(evt, device)) {
- evt->eh_comp = &comp;
- wait++;
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ for (q_index = 0; q_index < q_size; q_index++) {
+ spin_lock(&queues[q_index].l_lock);
+ for (i = 0; i < queues[q_index].evt_pool.size; i++) {
+ evt = &queues[q_index].evt_pool.events[i];
+ if (!ibmvfc_event_is_free(evt)) {
+ if (match(evt, device)) {
+ evt->eh_comp = &comp;
+ wait++;
+ }
+ }
}
+ spin_unlock(&queues[q_index].l_lock);
}
- spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (wait) {
timeout = wait_for_completion_timeout(&comp, timeout);
if (!timeout) {
wait = 0;
- spin_lock_irqsave(&vhost->crq.l_lock, flags);
- list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
- if (match(evt, device)) {
- evt->eh_comp = NULL;
- wait++;
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ for (q_index = 0; q_index < q_size; q_index++) {
+ spin_lock(&queues[q_index].l_lock);
+ for (i = 0; i < queues[q_index].evt_pool.size; i++) {
+ evt = &queues[q_index].evt_pool.events[i];
+ if (!ibmvfc_event_is_free(evt)) {
+ if (match(evt, device)) {
+ evt->eh_comp = NULL;
+ wait++;
+ }
+ }
}
+ spin_unlock(&queues[q_index].l_lock);
}
- spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (wait)
dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
LEAVE;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index ac066f86bb14..ac0eef975f17 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -7806,14 +7806,18 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->pend_os_device_add_sz++;
ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
GFP_KERNEL);
- if (!ioc->pend_os_device_add)
+ if (!ioc->pend_os_device_add) {
+ r = -ENOMEM;
goto out_free_resources;
+ }
ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
ioc->device_remove_in_progress =
kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
- if (!ioc->device_remove_in_progress)
+ if (!ioc->device_remove_in_progress) {
+ r = -ENOMEM;
goto out_free_resources;
+ }
ioc->fwfault_debug = mpt3sas_fwfault_debug;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 47ad64b06623..69c5b5ee2169 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1675,6 +1675,7 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
if (!qedi->global_queues[i]) {
QEDI_ERR(&qedi->dbg_ctx,
"Unable to allocation global queue %d.\n", i);
+ status = -ENOMEM;
goto mem_alloc_failure;
}
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index c48daf52725d..480e7d2dcf3e 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -3222,8 +3222,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
(cmd->sess && cmd->sess->deleted)) {
cmd->state = QLA_TGT_STATE_PROCESSED;
- res = 0;
- goto free;
+ return 0;
}
ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
@@ -3234,8 +3233,9 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
&full_req_cnt);
- if (unlikely(res != 0))
- goto free;
+ if (unlikely(res != 0)) {
+ return res;
+ }
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
@@ -3255,8 +3255,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
vha->flags.online, qla2x00_reset_active(vha),
cmd->reset_count, qpair->chip_reset);
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
- res = 0;
- goto free;
+ return 0;
}
/* Does F/W have an IOCBs for this request */
@@ -3359,8 +3358,6 @@ out_unmap_unlock:
qlt_unmap_sg(vha, cmd);
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
-free:
- vha->hw->tgt.tgt_ops->free_cmd(cmd);
return res;
}
EXPORT_SYMBOL(qlt_xmit_response);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index b55fc768a2a7..8b4890cdd4ca 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -644,7 +644,6 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd);
- struct scsi_qla_host *vha = cmd->vha;
if (cmd->aborted) {
/* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
@@ -657,7 +656,6 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
cmd->se_cmd.transport_state,
cmd->se_cmd.t_state,
cmd->se_cmd.se_cmd_flags);
- vha->hw->tgt.tgt_ops->free_cmd(cmd);
return 0;
}
@@ -685,7 +683,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd);
- struct scsi_qla_host *vha = cmd->vha;
int xmit_type = QLA_TGT_XMIT_STATUS;
if (cmd->aborted) {
@@ -699,7 +696,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
cmd, kref_read(&cmd->se_cmd.cmd_kref),
cmd->se_cmd.transport_state, cmd->se_cmd.t_state,
cmd->se_cmd.se_cmd_flags);
- vha->hw->tgt.tgt_ops->free_cmd(cmd);
return 0;
}
cmd->bufflen = se_cmd->data_length;
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 91074fd97f64..f4bf62b007a0 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2475,6 +2475,7 @@ static void iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag)
*/
mutex_lock(&conn_mutex);
conn->transport->stop_conn(conn, flag);
+ conn->state = ISCSI_CONN_DOWN;
mutex_unlock(&conn_mutex);
}
@@ -2901,6 +2902,13 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
default:
err = transport->set_param(conn, ev->u.set_param.param,
data, ev->u.set_param.len);
+ if ((conn->state == ISCSI_CONN_BOUND) ||
+ (conn->state == ISCSI_CONN_UP)) {
+ err = transport->set_param(conn, ev->u.set_param.param,
+ data, ev->u.set_param.len);
+ } else {
+ return -ENOTCONN;
+ }
}
return err;
@@ -2960,6 +2968,7 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
mutex_lock(&conn->ep_mutex);
conn->ep = NULL;
mutex_unlock(&conn->ep_mutex);
+ conn->state = ISCSI_CONN_DOWN;
}
transport->ep_disconnect(ep);
@@ -3727,6 +3736,8 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
ev->r.retcode = transport->bind_conn(session, conn,
ev->u.b_conn.transport_eph,
ev->u.b_conn.is_leading);
+ if (!ev->r.retcode)
+ conn->state = ISCSI_CONN_BOUND;
mutex_unlock(&conn_mutex);
if (ev->r.retcode || !transport->ep_connect)
@@ -3966,7 +3977,8 @@ iscsi_conn_attr(local_ipaddr, ISCSI_PARAM_LOCAL_IPADDR);
static const char *const connection_state_names[] = {
[ISCSI_CONN_UP] = "up",
[ISCSI_CONN_DOWN] = "down",
- [ISCSI_CONN_FAILED] = "failed"
+ [ISCSI_CONN_FAILED] = "failed",
+ [ISCSI_CONN_BOUND] = "bound"
};
static ssize_t show_conn_state(struct device *dev,
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index a1b9be1d105a..fde4edd83c14 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -186,7 +186,7 @@ struct qm_eqcr_entry {
__be32 tag;
struct qm_fd fd;
u8 __reserved3[32];
-} __packed;
+} __packed __aligned(8);
#define QM_EQCR_VERB_VBIT 0x80
#define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */
#define QM_EQCR_VERB_CMD_ENQUEUE 0x01
diff --git a/drivers/soc/litex/litex_soc_ctrl.c b/drivers/soc/litex/litex_soc_ctrl.c
index 6268bfa7f0d6..c3e379a990f2 100644
--- a/drivers/soc/litex/litex_soc_ctrl.c
+++ b/drivers/soc/litex/litex_soc_ctrl.c
@@ -13,7 +13,6 @@
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/module.h>
-#include <linux/errno.h>
#include <linux/io.h>
#include <linux/reboot.h>
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
index f42954e2c98e..1fd29f93ff6d 100644
--- a/drivers/soc/qcom/qcom-geni-se.c
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -3,7 +3,6 @@
#include <linux/acpi.h>
#include <linux/clk.h>
-#include <linux/console.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
@@ -92,14 +91,11 @@ struct geni_wrapper {
struct device *dev;
void __iomem *base;
struct clk_bulk_data ahb_clks[NUM_AHB_CLKS];
- struct geni_icc_path to_core;
};
static const char * const icc_path_names[] = {"qup-core", "qup-config",
"qup-memory"};
-static struct geni_wrapper *earlycon_wrapper;
-
#define QUP_HW_VER_REG 0x4
/* Common SE registers */
@@ -843,44 +839,11 @@ int geni_icc_disable(struct geni_se *se)
}
EXPORT_SYMBOL(geni_icc_disable);
-void geni_remove_earlycon_icc_vote(void)
-{
- struct platform_device *pdev;
- struct geni_wrapper *wrapper;
- struct device_node *parent;
- struct device_node *child;
-
- if (!earlycon_wrapper)
- return;
-
- wrapper = earlycon_wrapper;
- parent = of_get_next_parent(wrapper->dev->of_node);
- for_each_child_of_node(parent, child) {
- if (!of_device_is_compatible(child, "qcom,geni-se-qup"))
- continue;
-
- pdev = of_find_device_by_node(child);
- if (!pdev)
- continue;
-
- wrapper = platform_get_drvdata(pdev);
- icc_put(wrapper->to_core.path);
- wrapper->to_core.path = NULL;
-
- }
- of_node_put(parent);
-
- earlycon_wrapper = NULL;
-}
-EXPORT_SYMBOL(geni_remove_earlycon_icc_vote);
-
static int geni_se_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct geni_wrapper *wrapper;
- struct console __maybe_unused *bcon;
- bool __maybe_unused has_earlycon = false;
int ret;
wrapper = devm_kzalloc(dev, sizeof(*wrapper), GFP_KERNEL);
@@ -903,43 +866,6 @@ static int geni_se_probe(struct platform_device *pdev)
}
}
-#ifdef CONFIG_SERIAL_EARLYCON
- for_each_console(bcon) {
- if (!strcmp(bcon->name, "qcom_geni")) {
- has_earlycon = true;
- break;
- }
- }
- if (!has_earlycon)
- goto exit;
-
- wrapper->to_core.path = devm_of_icc_get(dev, "qup-core");
- if (IS_ERR(wrapper->to_core.path))
- return PTR_ERR(wrapper->to_core.path);
- /*
- * Put minmal BW request on core clocks on behalf of early console.
- * The vote will be removed earlycon exit function.
- *
- * Note: We are putting vote on each QUP wrapper instead only to which
- * earlycon is connected because QUP core clock of different wrapper
- * share same voltage domain. If core1 is put to 0, then core2 will
- * also run at 0, if not voted. Default ICC vote will be removed ASA
- * we touch any of the core clock.
- * core1 = core2 = max(core1, core2)
- */
- ret = icc_set_bw(wrapper->to_core.path, GENI_DEFAULT_BW,
- GENI_DEFAULT_BW);
- if (ret) {
- dev_err(&pdev->dev, "%s: ICC BW voting failed for core: %d\n",
- __func__, ret);
- return ret;
- }
-
- if (of_get_compatible_child(pdev->dev.of_node, "qcom,geni-debug-uart"))
- earlycon_wrapper = wrapper;
- of_node_put(pdev->dev.of_node);
-exit:
-#endif
dev_set_drvdata(dev, wrapper);
dev_dbg(dev, "GENI SE Driver probed\n");
return devm_of_platform_populate(dev);
diff --git a/drivers/soc/ti/omap_prm.c b/drivers/soc/ti/omap_prm.c
index bf1468e5bccb..51143a68a889 100644
--- a/drivers/soc/ti/omap_prm.c
+++ b/drivers/soc/ti/omap_prm.c
@@ -332,7 +332,7 @@ static const struct omap_prm_data dra7_prm_data[] = {
{
.name = "l3init", .base = 0x4ae07300,
.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
- .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01,
.clkdm_name = "pcie"
},
{
@@ -830,8 +830,12 @@ static int omap_reset_deassert(struct reset_controller_dev *rcdev,
reset->prm->data->name, id);
exit:
- if (reset->clkdm)
+ if (reset->clkdm) {
+ /* At least dra7 iva needs a delay before clkdm idle */
+ if (has_rstst)
+ udelay(1);
pdata->clkdm_allow_idle(reset->clkdm);
+ }
return ret;
}
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index b84f00b8d18b..4cabaf21c1ca 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -1105,7 +1105,7 @@ struct rtllib_network {
bool bWithAironetIE;
bool bCkipSupported;
bool bCcxRmEnable;
- u16 CcxRmState[2];
+ u8 CcxRmState[2];
bool bMBssidValid;
u8 MBssidMask;
u8 MBssid[ETH_ALEN];
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 66c135321da4..15bbb63ca130 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -1967,7 +1967,7 @@ static void rtllib_parse_mife_generic(struct rtllib_device *ieee,
info_element->data[2] == 0x96 &&
info_element->data[3] == 0x01) {
if (info_element->len == 6) {
- memcpy(network->CcxRmState, &info_element[4], 2);
+ memcpy(network->CcxRmState, &info_element->data[4], 2);
if (network->CcxRmState[0] != 0)
network->bCcxRmEnable = true;
else
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 3cbc074992bc..9ee797b8cb7e 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -882,7 +882,6 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
if (!bio) {
new_bio:
nr_vecs = bio_max_segs(nr_pages);
- nr_pages -= nr_vecs;
/*
* Calls bio_kmalloc() and sets bio->bi_end_io()
*/
@@ -939,6 +938,14 @@ new_bio:
return 0;
fail:
+ if (bio)
+ bio_put(bio);
+ while (req->bio) {
+ bio = req->bio;
+ req->bio = bio->bi_next;
+ bio_put(bio);
+ }
+ req->biotail = NULL;
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 291649f02821..0d85b55ea823 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -1177,12 +1177,6 @@ static inline void qcom_geni_serial_enable_early_read(struct geni_se *se,
struct console *con) { }
#endif
-static int qcom_geni_serial_earlycon_exit(struct console *con)
-{
- geni_remove_earlycon_icc_vote();
- return 0;
-}
-
static struct qcom_geni_private_data earlycon_private_data;
static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev,
@@ -1233,7 +1227,6 @@ static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev,
writel(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
dev->con->write = qcom_geni_serial_earlycon_write;
- dev->con->exit = qcom_geni_serial_earlycon_exit;
dev->con->setup = NULL;
qcom_geni_serial_enable_early_read(&se, dev->con);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 39ddb5585ded..3fda1ec961d7 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -147,17 +147,29 @@ static inline int acm_set_control(struct acm *acm, int control)
#define acm_send_break(acm, ms) \
acm_ctrl_msg(acm, USB_CDC_REQ_SEND_BREAK, ms, NULL, 0)
-static void acm_kill_urbs(struct acm *acm)
+static void acm_poison_urbs(struct acm *acm)
{
int i;
- usb_kill_urb(acm->ctrlurb);
+ usb_poison_urb(acm->ctrlurb);
for (i = 0; i < ACM_NW; i++)
- usb_kill_urb(acm->wb[i].urb);
+ usb_poison_urb(acm->wb[i].urb);
for (i = 0; i < acm->rx_buflimit; i++)
- usb_kill_urb(acm->read_urbs[i]);
+ usb_poison_urb(acm->read_urbs[i]);
+}
+
+static void acm_unpoison_urbs(struct acm *acm)
+{
+ int i;
+
+ for (i = 0; i < acm->rx_buflimit; i++)
+ usb_unpoison_urb(acm->read_urbs[i]);
+ for (i = 0; i < ACM_NW; i++)
+ usb_unpoison_urb(acm->wb[i].urb);
+ usb_unpoison_urb(acm->ctrlurb);
}
+
/*
* Write buffer management.
* All of these assume proper locks taken by the caller.
@@ -226,9 +238,10 @@ static int acm_start_wb(struct acm *acm, struct acm_wb *wb)
rc = usb_submit_urb(wb->urb, GFP_ATOMIC);
if (rc < 0) {
- dev_err(&acm->data->dev,
- "%s - usb_submit_urb(write bulk) failed: %d\n",
- __func__, rc);
+ if (rc != -EPERM)
+ dev_err(&acm->data->dev,
+ "%s - usb_submit_urb(write bulk) failed: %d\n",
+ __func__, rc);
acm_write_done(acm, wb);
}
return rc;
@@ -313,8 +326,10 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
acm->iocount.dsr++;
if (difference & ACM_CTRL_DCD)
acm->iocount.dcd++;
- if (newctrl & ACM_CTRL_BRK)
+ if (newctrl & ACM_CTRL_BRK) {
acm->iocount.brk++;
+ tty_insert_flip_char(&acm->port, 0, TTY_BREAK);
+ }
if (newctrl & ACM_CTRL_RI)
acm->iocount.rng++;
if (newctrl & ACM_CTRL_FRAMING)
@@ -480,11 +495,6 @@ static void acm_read_bulk_callback(struct urb *urb)
dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n",
rb->index, urb->actual_length, status);
- if (!acm->dev) {
- dev_dbg(&acm->data->dev, "%s - disconnected\n", __func__);
- return;
- }
-
switch (status) {
case 0:
usb_mark_last_busy(acm->dev);
@@ -649,7 +659,8 @@ static void acm_port_dtr_rts(struct tty_port *port, int raise)
res = acm_set_control(acm, val);
if (res && (acm->ctrl_caps & USB_CDC_CAP_LINE))
- dev_err(&acm->control->dev, "failed to set dtr/rts\n");
+ /* This is broken in too many devices to spam the logs */
+ dev_dbg(&acm->control->dev, "failed to set dtr/rts\n");
}
static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
@@ -731,6 +742,7 @@ static void acm_port_shutdown(struct tty_port *port)
* Need to grab write_lock to prevent race with resume, but no need to
* hold it due to the tty-port initialised flag.
*/
+ acm_poison_urbs(acm);
spin_lock_irq(&acm->write_lock);
spin_unlock_irq(&acm->write_lock);
@@ -747,7 +759,8 @@ static void acm_port_shutdown(struct tty_port *port)
usb_autopm_put_interface_async(acm->control);
}
- acm_kill_urbs(acm);
+ acm_unpoison_urbs(acm);
+
}
static void acm_tty_cleanup(struct tty_struct *tty)
@@ -1296,13 +1309,6 @@ skip_normal_probe:
if (!combined_interfaces && intf != control_interface)
return -ENODEV;
- if (!combined_interfaces && usb_interface_claimed(data_interface)) {
- /* valid in this context */
- dev_dbg(&intf->dev, "The data interface isn't available\n");
- return -EBUSY;
- }
-
-
if (data_interface->cur_altsetting->desc.bNumEndpoints < 2 ||
control_interface->cur_altsetting->desc.bNumEndpoints == 0)
return -EINVAL;
@@ -1323,8 +1329,8 @@ made_compressed_probe:
dev_dbg(&intf->dev, "interfaces are valid\n");
acm = kzalloc(sizeof(struct acm), GFP_KERNEL);
- if (acm == NULL)
- goto alloc_fail;
+ if (!acm)
+ return -ENOMEM;
tty_port_init(&acm->port);
acm->port.ops = &acm_port_ops;
@@ -1341,7 +1347,7 @@ made_compressed_probe:
minor = acm_alloc_minor(acm);
if (minor < 0)
- goto alloc_fail1;
+ goto err_put_port;
acm->minor = minor;
acm->dev = usb_dev;
@@ -1372,15 +1378,15 @@ made_compressed_probe:
buf = usb_alloc_coherent(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
if (!buf)
- goto alloc_fail1;
+ goto err_put_port;
acm->ctrl_buffer = buf;
if (acm_write_buffers_alloc(acm) < 0)
- goto alloc_fail2;
+ goto err_free_ctrl_buffer;
acm->ctrlurb = usb_alloc_urb(0, GFP_KERNEL);
if (!acm->ctrlurb)
- goto alloc_fail3;
+ goto err_free_write_buffers;
for (i = 0; i < num_rx_buf; i++) {
struct acm_rb *rb = &(acm->read_buffers[i]);
@@ -1389,13 +1395,13 @@ made_compressed_probe:
rb->base = usb_alloc_coherent(acm->dev, readsize, GFP_KERNEL,
&rb->dma);
if (!rb->base)
- goto alloc_fail4;
+ goto err_free_read_urbs;
rb->index = i;
rb->instance = acm;
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb)
- goto alloc_fail4;
+ goto err_free_read_urbs;
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
urb->transfer_dma = rb->dma;
@@ -1416,8 +1422,8 @@ made_compressed_probe:
struct acm_wb *snd = &(acm->wb[i]);
snd->urb = usb_alloc_urb(0, GFP_KERNEL);
- if (snd->urb == NULL)
- goto alloc_fail5;
+ if (!snd->urb)
+ goto err_free_write_urbs;
if (usb_endpoint_xfer_int(epwrite))
usb_fill_int_urb(snd->urb, usb_dev, acm->out,
@@ -1435,7 +1441,7 @@ made_compressed_probe:
i = device_create_file(&intf->dev, &dev_attr_bmCapabilities);
if (i < 0)
- goto alloc_fail5;
+ goto err_free_write_urbs;
if (h.usb_cdc_country_functional_desc) { /* export the country data */
struct usb_cdc_country_functional_desc * cfd =
@@ -1480,20 +1486,21 @@ skip_countries:
acm->nb_index = 0;
acm->nb_size = 0;
- dev_info(&intf->dev, "ttyACM%d: USB ACM device\n", minor);
-
acm->line.dwDTERate = cpu_to_le32(9600);
acm->line.bDataBits = 8;
acm_set_line(acm, &acm->line);
- usb_driver_claim_interface(&acm_driver, data_interface, acm);
- usb_set_intfdata(data_interface, acm);
+ if (!acm->combined_interfaces) {
+ rv = usb_driver_claim_interface(&acm_driver, data_interface, acm);
+ if (rv)
+ goto err_remove_files;
+ }
tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor,
&control_interface->dev);
if (IS_ERR(tty_dev)) {
rv = PTR_ERR(tty_dev);
- goto alloc_fail6;
+ goto err_release_data_interface;
}
if (quirks & CLEAR_HALT_CONDITIONS) {
@@ -1501,32 +1508,39 @@ skip_countries:
usb_clear_halt(usb_dev, acm->out);
}
+ dev_info(&intf->dev, "ttyACM%d: USB ACM device\n", minor);
+
return 0;
-alloc_fail6:
+
+err_release_data_interface:
+ if (!acm->combined_interfaces) {
+ /* Clear driver data so that disconnect() returns early. */
+ usb_set_intfdata(data_interface, NULL);
+ usb_driver_release_interface(&acm_driver, data_interface);
+ }
+err_remove_files:
if (acm->country_codes) {
device_remove_file(&acm->control->dev,
&dev_attr_wCountryCodes);
device_remove_file(&acm->control->dev,
&dev_attr_iCountryCodeRelDate);
- kfree(acm->country_codes);
}
device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
-alloc_fail5:
- usb_set_intfdata(intf, NULL);
+err_free_write_urbs:
for (i = 0; i < ACM_NW; i++)
usb_free_urb(acm->wb[i].urb);
-alloc_fail4:
+err_free_read_urbs:
for (i = 0; i < num_rx_buf; i++)
usb_free_urb(acm->read_urbs[i]);
acm_read_buffers_free(acm);
usb_free_urb(acm->ctrlurb);
-alloc_fail3:
+err_free_write_buffers:
acm_write_buffers_free(acm);
-alloc_fail2:
+err_free_ctrl_buffer:
usb_free_coherent(usb_dev, ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
-alloc_fail1:
+err_put_port:
tty_port_put(&acm->port);
-alloc_fail:
+
return rv;
}
@@ -1540,8 +1554,14 @@ static void acm_disconnect(struct usb_interface *intf)
if (!acm)
return;
- mutex_lock(&acm->mutex);
acm->disconnected = true;
+ /*
+ * There is a circular dependency: acm_softint() can resubmit
+ * the URBs in error handling, so we need to block any
+ * submission right away.
+ */
+ acm_poison_urbs(acm);
+ mutex_lock(&acm->mutex);
if (acm->country_codes) {
device_remove_file(&acm->control->dev,
&dev_attr_wCountryCodes);
@@ -1560,7 +1580,6 @@ static void acm_disconnect(struct usb_interface *intf)
tty_kref_put(tty);
}
- acm_kill_urbs(acm);
cancel_delayed_work_sync(&acm->dwork);
tty_unregister_device(acm_tty_driver, acm->minor);
@@ -1602,7 +1621,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
if (cnt)
return 0;
- acm_kill_urbs(acm);
+ acm_poison_urbs(acm);
cancel_delayed_work_sync(&acm->dwork);
acm->urbs_in_error_delay = 0;
@@ -1615,6 +1634,7 @@ static int acm_resume(struct usb_interface *intf)
struct urb *urb;
int rv = 0;
+ acm_unpoison_urbs(acm);
spin_lock_irq(&acm->write_lock);
if (--acm->susp_count)
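
For context on the cdc-acm changes above: usb_poison_urb() not only cancels an in-flight URB like usb_kill_urb(), it also makes any further usb_submit_urb() on that URB fail with -EPERM until usb_unpoison_urb() is called, which is why acm_start_wb() now stays quiet for that error code and why disconnect/suspend poison the URBs before anything else. A minimal sketch of the pattern, assuming only a struct urb pointer as in the driver above (not code from this series):

	#include <linux/usb.h>

	/* Quiesce: after this, resubmission attempts on the URB return -EPERM. */
	static void example_quiesce(struct urb *ctrlurb)
	{
		usb_poison_urb(ctrlurb);
	}

	/* Resume: allow the URB to be submitted again. */
	static void example_resume(struct urb *ctrlurb)
	{
		usb_unpoison_urb(ctrlurb);
	}
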
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 6ade3daf7858..76ac5d6555ae 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -498,6 +498,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* DJI CineSSD */
{ USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
+ /* Fibocom L850-GL LTE Modem */
+ { USB_DEVICE(0x2cb7, 0x0007), .driver_info =
+ USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+
/* INTEL VALUE SSD */
{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index fc3269f5faf1..1a9789ec5847 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -4322,7 +4322,8 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
if (hsotg->op_state == OTG_STATE_B_PERIPHERAL)
goto unlock;
- if (hsotg->params.power_down > DWC2_POWER_DOWN_PARAM_PARTIAL)
+ if (hsotg->params.power_down != DWC2_POWER_DOWN_PARAM_PARTIAL ||
+ hsotg->flags.b.port_connect_status == 0)
goto skip_power_saving;
/*
@@ -5398,7 +5399,7 @@ int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg)
dwc2_writel(hsotg, hprt0, HPRT0);
/* Wait for the HPRT0.PrtSusp register field to be set */
- if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 3000))
+ if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 5000))
dev_warn(hsotg->dev, "Suspend wasn't generated\n");
/*
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 3d3918a8d5fb..4c5c6972124a 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -120,6 +120,8 @@ static const struct property_entry dwc3_pci_intel_properties[] = {
static const struct property_entry dwc3_pci_mrfld_properties[] = {
PROPERTY_ENTRY_STRING("dr_mode", "otg"),
PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
+ PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
{}
};
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index fcaf04483ad0..3de291ab951a 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -244,6 +244,9 @@ static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom)
struct device *dev = qcom->dev;
int ret;
+ if (has_acpi_companion(dev))
+ return 0;
+
qcom->icc_path_ddr = of_icc_get(dev, "usb-ddr");
if (IS_ERR(qcom->icc_path_ddr)) {
dev_err(dev, "failed to get usb-ddr path: %ld\n",
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 4a337f348651..c7ef218e7a8c 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -791,10 +791,6 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
reg &= ~DWC3_DALEPENA_EP(dep->number);
dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
- dep->stream_capable = false;
- dep->type = 0;
- dep->flags = 0;
-
/* Clear out the ep descriptors for non-ep0 */
if (dep->number > 1) {
dep->endpoint.comp_desc = NULL;
@@ -803,6 +799,10 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
dwc3_remove_requests(dwc, dep);
+ dep->stream_capable = false;
+ dep->type = 0;
+ dep->flags = 0;
+
return 0;
}
@@ -2083,7 +2083,7 @@ static void __dwc3_gadget_set_speed(struct dwc3 *dwc)
u32 reg;
speed = dwc->gadget_max_speed;
- if (speed > dwc->maximum_speed)
+ if (speed == USB_SPEED_UNKNOWN || speed > dwc->maximum_speed)
speed = dwc->maximum_speed;
if (speed == USB_SPEED_SUPER_PLUS &&
@@ -2523,6 +2523,7 @@ static void dwc3_gadget_set_ssp_rate(struct usb_gadget *g,
unsigned long flags;
spin_lock_irqsave(&dwc->lock, flags);
+ dwc->gadget_max_speed = USB_SPEED_SUPER_PLUS;
dwc->gadget_ssp_rate = rate;
spin_unlock_irqrestore(&dwc->lock, flags);
}
diff --git a/drivers/usb/gadget/udc/amd5536udc_pci.c b/drivers/usb/gadget/udc/amd5536udc_pci.c
index 8d387e0e4d91..c80f9bd51b75 100644
--- a/drivers/usb/gadget/udc/amd5536udc_pci.c
+++ b/drivers/usb/gadget/udc/amd5536udc_pci.c
@@ -153,6 +153,11 @@ static int udc_pci_probe(
pci_set_master(pdev);
pci_try_set_mwi(pdev);
+ dev->phys_addr = resource;
+ dev->irq = pdev->irq;
+ dev->pdev = pdev;
+ dev->dev = &pdev->dev;
+
/* init dma pools */
if (use_dma) {
retval = init_dma_pools(dev);
@@ -160,11 +165,6 @@ static int udc_pci_probe(
goto err_dma;
}
- dev->phys_addr = resource;
- dev->irq = pdev->irq;
- dev->pdev = pdev;
- dev->dev = &pdev->dev;
-
/* general probing */
if (udc_probe(dev)) {
retval = -ENODEV;
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index fe010cc61f19..2f27dc0d9c6b 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -397,6 +397,13 @@ static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
if (mtk->lpm_support)
xhci->quirks |= XHCI_LPM_SUPPORT;
+
+ /*
+ * MTK xHCI 0.96: PSA is 1 by default even if it doesn't support streams,
+ * and it's 3 when it does.
+ */
+ if (xhci->hci_version < 0x100 && HCC_MAX_PSA(xhci->hcc_params) == 4)
+ xhci->quirks |= XHCI_BROKEN_STREAMS;
}
/* called during probe() after chip reset completes */
@@ -548,7 +555,8 @@ static int xhci_mtk_probe(struct platform_device *pdev)
if (ret)
goto put_usb3_hcd;
- if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
+ if (HCC_MAX_PSA(xhci->hcc_params) >= 4 &&
+ !(xhci->quirks & XHCI_BROKEN_STREAMS))
xhci->shared_hcd->can_do_streams = 1;
ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 1cd87729ba60..fc0457db62e1 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2004,10 +2004,14 @@ static void musb_pm_runtime_check_session(struct musb *musb)
MUSB_DEVCTL_HR;
switch (devctl & ~s) {
case MUSB_QUIRK_B_DISCONNECT_99:
- musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n");
- schedule_delayed_work(&musb->irq_work,
- msecs_to_jiffies(1000));
- break;
+ if (musb->quirk_retries && !musb->flush_irq_work) {
+ musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n");
+ schedule_delayed_work(&musb->irq_work,
+ msecs_to_jiffies(1000));
+ musb->quirk_retries--;
+ break;
+ }
+ fallthrough;
case MUSB_QUIRK_B_INVALID_VBUS_91:
if (musb->quirk_retries && !musb->flush_irq_work) {
musb_dbg(musb,
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 3209b5ddd30c..a20a8380ca0c 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -594,6 +594,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
pr_err("invalid port number %d\n", wIndex);
goto error;
}
+ if (wValue >= 32)
+ goto error;
if (hcd->speed == HCD_USB3) {
if ((vhci_hcd->port_status[rhport] &
USB_SS_PORT_STAT_POWER) != 0) {
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index 08f742fd2409..b6cc53ba980c 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -4,9 +4,13 @@
#ifndef __MLX5_VDPA_H__
#define __MLX5_VDPA_H__
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
#include <linux/vdpa.h>
#include <linux/mlx5/driver.h>
+#define MLX5V_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
+
struct mlx5_vdpa_direct_mr {
u64 start;
u64 end;
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index d300f799efcd..3908ff28eec0 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -219,6 +219,11 @@ static void destroy_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_m
mlx5_vdpa_destroy_mkey(mvdev, &mkey->mkey);
}
+static struct device *get_dma_device(struct mlx5_vdpa_dev *mvdev)
+{
+ return &mvdev->mdev->pdev->dev;
+}
+
static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr,
struct vhost_iotlb *iotlb)
{
@@ -234,7 +239,7 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
u64 pa;
u64 paend;
struct scatterlist *sg;
- struct device *dma = mvdev->mdev->device;
+ struct device *dma = get_dma_device(mvdev);
for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
map; map = vhost_iotlb_itree_next(map, start, mr->end - 1)) {
@@ -291,7 +296,7 @@ err_map:
static void unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
- struct device *dma = mvdev->mdev->device;
+ struct device *dma = get_dma_device(mvdev);
destroy_direct_mr(mvdev, mr);
dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
diff --git a/drivers/vdpa/mlx5/core/resources.c b/drivers/vdpa/mlx5/core/resources.c
index 96e6421c5d1c..6521cbd0f5c2 100644
--- a/drivers/vdpa/mlx5/core/resources.c
+++ b/drivers/vdpa/mlx5/core/resources.c
@@ -246,7 +246,8 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
if (err)
goto err_key;
- kick_addr = pci_resource_start(mdev->pdev, 0) + offset;
+ kick_addr = mdev->bar_addr + offset;
+
res->kick_addr = ioremap(kick_addr, PAGE_SIZE);
if (!res->kick_addr) {
err = -ENOMEM;
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 71397fdafa6a..4d2809c7d4e3 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -820,7 +820,7 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->fwqp.mqp.qpn);
MLX5_SET(virtio_q, vq_ctx, queue_size, mvq->num_ent);
MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0,
- !!(ndev->mvdev.actual_features & VIRTIO_F_VERSION_1));
+ !!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1)));
MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
@@ -1169,6 +1169,7 @@ static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *m
return;
}
mvq->avail_idx = attr.available_index;
+ mvq->used_idx = attr.used_index;
}
static void suspend_vqs(struct mlx5_vdpa_net *ndev)
@@ -1426,6 +1427,7 @@ static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
return -EINVAL;
}
+ mvq->used_idx = state->avail_index;
mvq->avail_idx = state->avail_index;
return 0;
}
@@ -1443,7 +1445,11 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
* that cares about emulating the index after vq is stopped.
*/
if (!mvq->initialized) {
- state->avail_index = mvq->avail_idx;
+ /* Firmware returns a wrong value for the available index.
+ * Since both values should be identical, we take the value of
+ * used_idx which is reported correctly.
+ */
+ state->avail_index = mvq->used_idx;
return 0;
}
@@ -1452,7 +1458,7 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n");
return err;
}
- state->avail_index = attr.available_index;
+ state->avail_index = attr.used_index;
return 0;
}
@@ -1540,21 +1546,11 @@ static void teardown_virtqueues(struct mlx5_vdpa_net *ndev)
}
}
-static void clear_virtqueues(struct mlx5_vdpa_net *ndev)
-{
- int i;
-
- for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) {
- ndev->vqs[i].avail_idx = 0;
- ndev->vqs[i].used_idx = 0;
- }
-}
-
/* TODO: cross-endian support */
static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
{
return virtio_legacy_is_little_endian() ||
- (mvdev->actual_features & (1ULL << VIRTIO_F_VERSION_1));
+ (mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1));
}
static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
@@ -1785,7 +1781,6 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
if (!status) {
mlx5_vdpa_info(mvdev, "performing device reset\n");
teardown_driver(ndev);
- clear_virtqueues(ndev);
mlx5_vdpa_destroy_mr(&ndev->mvdev);
ndev->mvdev.status = 0;
ndev->mvdev.mlx_features = 0;
@@ -1907,6 +1902,19 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
.free = mlx5_vdpa_free,
};
+static int query_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
+{
+ u16 hw_mtu;
+ int err;
+
+ err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
+ if (err)
+ return err;
+
+ *mtu = hw_mtu - MLX5V_ETH_HARD_MTU;
+ return 0;
+}
+
static int alloc_resources(struct mlx5_vdpa_net *ndev)
{
struct mlx5_vdpa_net_resources *res = &ndev->res;
@@ -1992,7 +2000,7 @@ static int mlx5v_probe(struct auxiliary_device *adev,
init_mvqs(ndev);
mutex_init(&ndev->reslock);
config = &ndev->config;
- err = mlx5_query_nic_vport_mtu(mdev, &ndev->mtu);
+ err = query_mtu(mdev, &ndev->mtu);
if (err)
goto err_mtu;
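
The create_virtqueue() and mlx5_vdpa_is_little_endian() hunks above fix a feature-bit test: actual_features is a 64-bit feature bitmap, while VIRTIO_F_VERSION_1 is a bit number (32 in the virtio spec), so masking with the raw constant checks bit 5 instead of bit 32. A small userspace sketch of the difference, with the constant value assumed from the spec:

	#include <stdint.h>
	#include <stdio.h>

	#define BIT_ULL(nr)		(1ULL << (nr))
	#define VIRTIO_F_VERSION_1	32	/* bit number, not a mask */

	int main(void)
	{
		uint64_t features = BIT_ULL(VIRTIO_F_VERSION_1);

		/* Wrong: 'features & 32' tests bit 5 of the bitmap. */
		printf("raw constant: %d\n", !!(features & VIRTIO_F_VERSION_1));
		/* Right: BIT_ULL(32) tests the actual VERSION_1 feature bit. */
		printf("BIT_ULL:      %d\n", !!(features & BIT_ULL(VIRTIO_F_VERSION_1)));
		return 0;
	}
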
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index ac3c1dd3edef..4abddbebd4b2 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -42,6 +42,6 @@ config VFIO_PCI_IGD
config VFIO_PCI_NVLINK2
def_bool y
- depends on VFIO_PCI && PPC_POWERNV
+ depends on VFIO_PCI && PPC_POWERNV && SPAPR_TCE_IOMMU
help
VFIO PCI support for P9 Witherspoon machine with NVIDIA V100 GPUs
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index be444407664a..45cbfd4879a5 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -739,6 +739,12 @@ out:
ret = vfio_lock_acct(dma, lock_acct, false);
unpin_out:
+ if (batch->size == 1 && !batch->offset) {
+ /* May be a VM_PFNMAP pfn, which the batch can't remember. */
+ put_pfn(pfn, dma->prot);
+ batch->size = 0;
+ }
+
if (ret < 0) {
if (pinned && !rsvd) {
for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 44a5cd2f54cc..3406067985b1 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -1333,6 +1333,9 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1;
+ if (!ops->cursor)
+ return;
+
ops->cursor(vc, info, mode, get_color(vc, info, c, 1),
get_color(vc, info, c, 0));
}
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index c8b0ae676809..4dc9077dd2ac 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -1031,7 +1031,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
if (!pdev) {
pr_err("Unable to find PCI Hyper-V video\n");
- kfree(info->apertures);
return -ENODEV;
}
@@ -1129,7 +1128,6 @@ getmem_done:
} else {
pci_dev_put(pdev);
}
- kfree(info->apertures);
return 0;
@@ -1141,7 +1139,6 @@ err2:
err1:
if (!gen2vm)
pci_dev_put(pdev);
- kfree(info->apertures);
return -ENOMEM;
}
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 41645fe6ad48..ea0efd290c37 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -50,11 +50,11 @@ config XEN_BALLOON_MEMORY_HOTPLUG
SUBSYSTEM=="memory", ACTION=="add", RUN+="/bin/sh -c '[ -f /sys$devpath/state ] && echo online > /sys$devpath/state'"
-config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
+config XEN_MEMORY_HOTPLUG_LIMIT
int "Hotplugged memory limit (in GiB) for a PV guest"
default 512
depends on XEN_HAVE_PVMMU
- depends on XEN_BALLOON_MEMORY_HOTPLUG
+ depends on MEMORY_HOTPLUG
help
Maximum amount of memory (in GiB) that a PV guest can be
expanded to when using memory hotplug.
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 8236e2364eeb..7bbfd58958bc 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -110,7 +110,7 @@ struct irq_info {
unsigned short eoi_cpu; /* EOI must happen on this cpu-1 */
unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */
u64 eoi_time; /* Time in jiffies when to EOI. */
- spinlock_t lock;
+ raw_spinlock_t lock;
union {
unsigned short virq;
@@ -312,7 +312,7 @@ static int xen_irq_info_common_setup(struct irq_info *info,
info->evtchn = evtchn;
info->cpu = cpu;
info->mask_reason = EVT_MASK_REASON_EXPLICIT;
- spin_lock_init(&info->lock);
+ raw_spin_lock_init(&info->lock);
ret = set_evtchn_to_irq(evtchn, irq);
if (ret < 0)
@@ -472,28 +472,28 @@ static void do_mask(struct irq_info *info, u8 reason)
{
unsigned long flags;
- spin_lock_irqsave(&info->lock, flags);
+ raw_spin_lock_irqsave(&info->lock, flags);
if (!info->mask_reason)
mask_evtchn(info->evtchn);
info->mask_reason |= reason;
- spin_unlock_irqrestore(&info->lock, flags);
+ raw_spin_unlock_irqrestore(&info->lock, flags);
}
static void do_unmask(struct irq_info *info, u8 reason)
{
unsigned long flags;
- spin_lock_irqsave(&info->lock, flags);
+ raw_spin_lock_irqsave(&info->lock, flags);
info->mask_reason &= ~reason;
if (!info->mask_reason)
unmask_evtchn(info->evtchn);
- spin_unlock_irqrestore(&info->lock, flags);
+ raw_spin_unlock_irqrestore(&info->lock, flags);
}
#ifdef CONFIG_X86
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 92ed7d5df677..09d6f7229db9 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -275,6 +275,8 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
bio.bi_opf = dio_bio_write_op(iocb);
task_io_account_write(ret);
}
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ bio.bi_opf |= REQ_NOWAIT;
if (iocb->ki_flags & IOCB_HIPRI)
bio_set_polled(&bio, iocb);
@@ -428,6 +430,8 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
bio->bi_opf = dio_bio_write_op(iocb);
task_io_account_write(bio->bi_iter.bi_size);
}
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ bio->bi_opf |= REQ_NOWAIT;
dio->size += bio->bi_iter.bi_size;
pos += bio->bi_iter.bi_size;
@@ -1240,13 +1244,13 @@ int bdev_disk_changed(struct block_device *bdev, bool invalidate)
lockdep_assert_held(&bdev->bd_mutex);
- clear_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
-
rescan:
ret = blk_drop_partitions(bdev);
if (ret)
return ret;
+ clear_bit(GD_NEED_PART_SCAN, &disk->state);
+
/*
* Historically we only set the capacity to zero for devices that
* support partitions (independent of actually having partitions created).
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index b634c42115ea..b4fb997eda16 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -7,10 +7,12 @@ subdir-ccflags-y += -Wmissing-format-attribute
subdir-ccflags-y += -Wmissing-prototypes
subdir-ccflags-y += -Wold-style-definition
subdir-ccflags-y += -Wmissing-include-dirs
-subdir-ccflags-y += $(call cc-option, -Wunused-but-set-variable)
-subdir-ccflags-y += $(call cc-option, -Wunused-const-variable)
-subdir-ccflags-y += $(call cc-option, -Wpacked-not-aligned)
-subdir-ccflags-y += $(call cc-option, -Wstringop-truncation)
+condflags := \
+ $(call cc-option, -Wunused-but-set-variable) \
+ $(call cc-option, -Wunused-const-variable) \
+ $(call cc-option, -Wpacked-not-aligned) \
+ $(call cc-option, -Wstringop-truncation)
+subdir-ccflags-y += $(condflags)
# The following turn off the warnings enabled by -Wextra
subdir-ccflags-y += -Wno-missing-field-initializers
subdir-ccflags-y += -Wno-sign-compare
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 3a9c1e046ebe..d05f73530af7 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -81,6 +81,9 @@ int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info)
struct btrfs_dev_replace_item *ptr;
u64 src_devid;
+ if (!dev_root)
+ return 0;
+
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 41b718cfea40..289f1f09481d 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2387,8 +2387,9 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
} else {
set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
fs_info->dev_root = root;
- btrfs_init_devices_late(fs_info);
}
+ /* Initialize fs_info for all devices in any case */
+ btrfs_init_devices_late(fs_info);
/* If IGNOREDATACSUMS is set don't bother reading the csum root. */
if (!btrfs_test_opt(fs_info, IGNOREDATACSUMS)) {
@@ -3009,6 +3010,21 @@ int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
}
}
+ /*
+ * btrfs_find_orphan_roots() is responsible for finding all the dead
+ * roots (with 0 refs), flag them with BTRFS_ROOT_DEAD_TREE and load
+ * them into the fs_info->fs_roots_radix tree. This must be done before
+ * calling btrfs_orphan_cleanup() on the tree root. If we don't do it
+ * first, then btrfs_orphan_cleanup() will delete a dead root's orphan
+ * item before the root's tree is deleted - this means that if we unmount
+ * or crash before the deletion completes, on the next mount we will not
+ * delete what remains of the tree because the orphan item does not
+ * exist anymore, which is what tells us we have a pending deletion.
+ */
+ ret = btrfs_find_orphan_roots(fs_info);
+ if (ret)
+ goto out;
+
ret = btrfs_cleanup_fs_roots(fs_info);
if (ret)
goto out;
@@ -3068,7 +3084,6 @@ int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
}
}
- ret = btrfs_find_orphan_roots(fs_info);
out:
return ret;
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 7cdf65be3707..a520775949a0 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3099,11 +3099,13 @@ void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start,
* @bio_offset: offset to the beginning of the bio (in bytes)
* @page: page where is the data to be verified
* @pgoff: offset inside the page
+ * @start: logical offset in the file
*
* The length of such a check is always one sector size.
*/
static int check_data_csum(struct inode *inode, struct btrfs_io_bio *io_bio,
- u32 bio_offset, struct page *page, u32 pgoff)
+ u32 bio_offset, struct page *page, u32 pgoff,
+ u64 start)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
@@ -3130,8 +3132,8 @@ static int check_data_csum(struct inode *inode, struct btrfs_io_bio *io_bio,
kunmap_atomic(kaddr);
return 0;
zeroit:
- btrfs_print_data_csum_error(BTRFS_I(inode), page_offset(page) + pgoff,
- csum, csum_expected, io_bio->mirror_num);
+ btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected,
+ io_bio->mirror_num);
if (io_bio->device)
btrfs_dev_stat_inc_and_print(io_bio->device,
BTRFS_DEV_STAT_CORRUPTION_ERRS);
@@ -3184,7 +3186,8 @@ int btrfs_verify_data_csum(struct btrfs_io_bio *io_bio, u32 bio_offset,
pg_off += sectorsize, bio_offset += sectorsize) {
int ret;
- ret = check_data_csum(inode, io_bio, bio_offset, page, pg_off);
+ ret = check_data_csum(inode, io_bio, bio_offset, page, pg_off,
+ page_offset(page) + pg_off);
if (ret < 0)
return -EIO;
}
@@ -7910,7 +7913,8 @@ static blk_status_t btrfs_check_read_dio_bio(struct inode *inode,
ASSERT(pgoff < PAGE_SIZE);
if (uptodate &&
(!csum || !check_data_csum(inode, io_bio,
- bio_offset, bvec.bv_page, pgoff))) {
+ bio_offset, bvec.bv_page,
+ pgoff, start))) {
clean_io_failure(fs_info, failure_tree, io_tree,
start, bvec.bv_page,
btrfs_ino(BTRFS_I(inode)),
@@ -8169,10 +8173,6 @@ static blk_qc_t btrfs_submit_direct(struct inode *inode, struct iomap *iomap,
bio->bi_end_io = btrfs_end_dio_bio;
btrfs_io_bio(bio)->logical = file_offset;
- WARN_ON_ONCE(write && btrfs_is_zoned(fs_info) &&
- fs_info->max_zone_append_size &&
- bio_op(bio) != REQ_OP_ZONE_APPEND);
-
if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
status = extract_ordered_extent(BTRFS_I(inode), bio,
file_offset);
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 14ff388fd3bd..f0b9ef13153a 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -226,7 +226,6 @@ static void __del_qgroup_rb(struct btrfs_fs_info *fs_info,
{
struct btrfs_qgroup_list *list;
- btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
list_del(&qgroup->dirty);
while (!list_empty(&qgroup->groups)) {
list = list_first_entry(&qgroup->groups,
@@ -243,7 +242,6 @@ static void __del_qgroup_rb(struct btrfs_fs_info *fs_info,
list_del(&list->next_member);
kfree(list);
}
- kfree(qgroup);
}
/* must be called with qgroup_lock held */
@@ -569,6 +567,8 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
qgroup = rb_entry(n, struct btrfs_qgroup, node);
rb_erase(n, &fs_info->qgroup_tree);
__del_qgroup_rb(fs_info, qgroup);
+ btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
+ kfree(qgroup);
}
/*
* We call btrfs_free_qgroup_config() when unmounting
@@ -1578,6 +1578,14 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
spin_lock(&fs_info->qgroup_lock);
del_qgroup_rb(fs_info, qgroupid);
spin_unlock(&fs_info->qgroup_lock);
+
+ /*
+ * Remove the qgroup from sysfs now without holding the qgroup_lock
+ * spinlock, since the sysfs_remove_group() function needs to take
+ * the mutex kernfs_mutex through kernfs_remove_by_name_ns().
+ */
+ btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
+ kfree(qgroup);
out:
mutex_unlock(&fs_info->qgroup_ioctl_lock);
return ret;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index bc3b33efddc5..1c6810bbaf8b 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -7448,6 +7448,9 @@ static int btrfs_device_init_dev_stats(struct btrfs_device *device,
int item_size;
int i, ret, slot;
+ if (!device->fs_info->dev_root)
+ return 0;
+
key.objectid = BTRFS_DEV_STATS_OBJECTID;
key.type = BTRFS_PERSISTENT_ITEM_KEY;
key.offset = device->devid;
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index fe03cbdae959..bf52e9326ebe 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -18,6 +18,7 @@ config CIFS
select CRYPTO_AES
select CRYPTO_LIB_DES
select KEYS
+ select DNS_RESOLVER
help
This is the client VFS module for the SMB3 family of NAS protocols,
(including support for the most recent, most secure dialect SMB3.1.1)
@@ -112,7 +113,6 @@ config CIFS_WEAK_PW_HASH
config CIFS_UPCALL
bool "Kerberos/SPNEGO advanced session setup"
depends on CIFS
- select DNS_RESOLVER
help
Enables an upcall mechanism for CIFS which accesses userspace helper
utilities to provide SPNEGO packaged (RFC 4178) Kerberos tickets
@@ -179,7 +179,6 @@ config CIFS_DEBUG_DUMP_KEYS
config CIFS_DFS_UPCALL
bool "DFS feature support"
depends on CIFS
- select DNS_RESOLVER
help
Distributed File System (DFS) support is used to access shares
transparently in an enterprise name space, even if the share
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile
index 5213b20843b5..3ee3b7de4ded 100644
--- a/fs/cifs/Makefile
+++ b/fs/cifs/Makefile
@@ -10,13 +10,14 @@ cifs-y := trace.o cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o \
cifs_unicode.o nterr.o cifsencrypt.o \
readdir.o ioctl.o sess.o export.o smb1ops.o unc.o winucase.o \
smb2ops.o smb2maperror.o smb2transport.o \
- smb2misc.o smb2pdu.o smb2inode.o smb2file.o cifsacl.o fs_context.o
+ smb2misc.o smb2pdu.o smb2inode.o smb2file.o cifsacl.o fs_context.o \
+ dns_resolve.o
cifs-$(CONFIG_CIFS_XATTR) += xattr.o
cifs-$(CONFIG_CIFS_UPCALL) += cifs_spnego.o
-cifs-$(CONFIG_CIFS_DFS_UPCALL) += dns_resolve.o cifs_dfs_ref.o dfs_cache.o
+cifs-$(CONFIG_CIFS_DFS_UPCALL) += cifs_dfs_ref.o dfs_cache.o
cifs-$(CONFIG_CIFS_SWN_UPCALL) += netlink.o cifs_swn.o
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 2be22a5c690f..d178cf85e926 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -1130,8 +1130,7 @@ static int set_chmod_dacl(struct cifs_acl *pdacl, struct cifs_acl *pndacl,
}
/* If it's any one of the ACE we're replacing, skip! */
- if (!mode_from_sid &&
- ((compare_sids(&pntace->sid, &sid_unix_NFS_mode) == 0) ||
+ if (((compare_sids(&pntace->sid, &sid_unix_NFS_mode) == 0) ||
(compare_sids(&pntace->sid, pownersid) == 0) ||
(compare_sids(&pntace->sid, pgrpsid) == 0) ||
(compare_sids(&pntace->sid, &sid_everyone) == 0) ||
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 099ad9f3660b..5ddd20b62484 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -476,7 +476,8 @@ static int cifs_show_devname(struct seq_file *m, struct dentry *root)
seq_puts(m, "none");
else {
convert_delimiter(devname, '/');
- seq_puts(m, devname);
+ /* escape all spaces in share names */
+ seq_escape(m, devname, " \t");
kfree(devname);
}
return 0;
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 31fc8695abd6..ec824ab8c5ca 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -919,8 +919,8 @@ struct cifs_ses {
bool binding:1; /* are we binding the session? */
__u16 session_flags;
__u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
- __u8 smb3encryptionkey[SMB3_SIGN_KEY_SIZE];
- __u8 smb3decryptionkey[SMB3_SIGN_KEY_SIZE];
+ __u8 smb3encryptionkey[SMB3_ENC_DEC_KEY_SIZE];
+ __u8 smb3decryptionkey[SMB3_ENC_DEC_KEY_SIZE];
__u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
__u8 binding_preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
@@ -1283,8 +1283,6 @@ struct cifs_aio_ctx {
bool direct_io;
};
-struct cifs_readdata;
-
/* asynchronous read support */
struct cifs_readdata {
struct kref refcount;
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index 64fe5a47b5e8..9adc74bd9f8f 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -147,6 +147,11 @@
*/
#define SMB3_SIGN_KEY_SIZE (16)
+/*
+ * Size of the smb3 encryption/decryption keys
+ */
+#define SMB3_ENC_DEC_KEY_SIZE (32)
+
#define CIFS_CLIENT_CHALLENGE_SIZE (8)
#define CIFS_SERVER_CHALLENGE_SIZE (8)
#define CIFS_HMAC_MD5_HASH_SIZE (16)
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index eec8a2052da2..24668eb006c6 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -87,7 +87,6 @@ static void cifs_prune_tlinks(struct work_struct *work);
*
* This should be called with server->srv_mutex held.
*/
-#ifdef CONFIG_CIFS_DFS_UPCALL
static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
{
int rc;
@@ -124,6 +123,7 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
return !rc ? -1 : 0;
}
+#ifdef CONFIG_CIFS_DFS_UPCALL
/* These functions must be called with server->srv_mutex held */
static void reconn_set_next_dfs_target(struct TCP_Server_Info *server,
struct cifs_sb_info *cifs_sb,
@@ -321,14 +321,29 @@ cifs_reconnect(struct TCP_Server_Info *server)
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
+ if (cifs_sb && cifs_sb->origin_fullpath)
/*
* Set up next DFS target server (if any) for reconnect. If DFS
* feature is disabled, then we will retry last server we
* connected to before.
*/
reconn_set_next_dfs_target(server, cifs_sb, &tgt_list, &tgt_it);
+ else {
+#endif
+ /*
+ * Resolve the hostname again to make sure that the IP address is up to date.
+ */
+ rc = reconn_set_ipaddr_from_hostname(server);
+ if (rc) {
+ cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
+ __func__, rc);
+ }
+
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ }
#endif
+
#ifdef CONFIG_CIFS_SWN_UPCALL
}
#endif
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 26de4329d161..042e24aad410 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -165,6 +165,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
goto posix_open_ret;
}
} else {
+ cifs_revalidate_mapping(*pinode);
cifs_fattr_to_inode(*pinode, &fattr);
}
diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
index 99a1951a01ec..d9a990c99121 100644
--- a/fs/cifs/smb2glob.h
+++ b/fs/cifs/smb2glob.h
@@ -58,6 +58,7 @@
#define SMB2_HMACSHA256_SIZE (32)
#define SMB2_CMACAES_SIZE (16)
#define SMB3_SIGNKEY_SIZE (16)
+#define SMB3_GCM128_CRYPTKEY_SIZE (16)
#define SMB3_GCM256_CRYPTKEY_SIZE (32)
/* Maximum buffer size value we can send with 1 credit */
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index b50164e2c88d..aac384f69f74 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -754,8 +754,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
}
}
spin_unlock(&cifs_tcp_ses_lock);
- cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
- return false;
+ cifs_dbg(FYI, "No file id matched, oplock break ignored\n");
+ return true;
}
void
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 9bae7e8deb09..f703204fb185 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -2038,6 +2038,7 @@ smb2_duplicate_extents(const unsigned int xid,
{
int rc;
unsigned int ret_data_len;
+ struct inode *inode;
struct duplicate_extents_to_file dup_ext_buf;
struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);
@@ -2054,10 +2055,21 @@ smb2_duplicate_extents(const unsigned int xid,
cifs_dbg(FYI, "Duplicate extents: src off %lld dst off %lld len %lld\n",
src_off, dest_off, len);
- rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
- if (rc)
- goto duplicate_extents_out;
+ inode = d_inode(trgtfile->dentry);
+ if (inode->i_size < dest_off + len) {
+ rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
+ if (rc)
+ goto duplicate_extents_out;
+ /*
+ * Although we could also set a plausible allocation size (i_blocks)
+ * here in addition to setting the file size, in reflink it is
+ * likely that the target file is sparse. Its allocation size will
+ * be queried on the next revalidate, but it is important to make
+ * sure that the file's cached size is updated immediately.
+ */
+ cifs_setsize(inode, dest_off + len);
+ }
rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
trgtfile->fid.volatile_fid,
FSCTL_DUPLICATE_EXTENTS_TO_FILE,
@@ -4158,7 +4170,7 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
if (ses->Suid == ses_id) {
ses_enc_key = enc ? ses->smb3encryptionkey :
ses->smb3decryptionkey;
- memcpy(key, ses_enc_key, SMB3_SIGN_KEY_SIZE);
+ memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE);
spin_unlock(&cifs_tcp_ses_lock);
return 0;
}
@@ -4185,7 +4197,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
int rc = 0;
struct scatterlist *sg;
u8 sign[SMB2_SIGNATURE_SIZE] = {};
- u8 key[SMB3_SIGN_KEY_SIZE];
+ u8 key[SMB3_ENC_DEC_KEY_SIZE];
struct aead_request *req;
char *iv;
unsigned int iv_len;
@@ -4209,10 +4221,11 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
tfm = enc ? server->secmech.ccmaesencrypt :
server->secmech.ccmaesdecrypt;
- if (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)
+ if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
+ (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
rc = crypto_aead_setkey(tfm, key, SMB3_GCM256_CRYPTKEY_SIZE);
else
- rc = crypto_aead_setkey(tfm, key, SMB3_SIGN_KEY_SIZE);
+ rc = crypto_aead_setkey(tfm, key, SMB3_GCM128_CRYPTKEY_SIZE);
if (rc) {
cifs_server_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index ebccd71cc60a..e6fa76ab70be 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -298,7 +298,8 @@ static int generate_key(struct cifs_ses *ses, struct kvec label,
{
unsigned char zero = 0x0;
__u8 i[4] = {0, 0, 0, 1};
- __u8 L[4] = {0, 0, 0, 128};
+ __u8 L128[4] = {0, 0, 0, 128};
+ __u8 L256[4] = {0, 0, 1, 0};
int rc = 0;
unsigned char prfhash[SMB2_HMACSHA256_SIZE];
unsigned char *hashptr = prfhash;
@@ -354,8 +355,14 @@ static int generate_key(struct cifs_ses *ses, struct kvec label,
goto smb3signkey_ret;
}
- rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash,
- L, 4);
+ if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
+ (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) {
+ rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash,
+ L256, 4);
+ } else {
+ rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash,
+ L128, 4);
+ }
if (rc) {
cifs_server_dbg(VFS, "%s: Could not update with L\n", __func__);
goto smb3signkey_ret;
@@ -390,6 +397,9 @@ generate_smb3signingkey(struct cifs_ses *ses,
const struct derivation_triplet *ptriplet)
{
int rc;
+#ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS
+ struct TCP_Server_Info *server = ses->server;
+#endif
/*
* All channels use the same encryption/decryption keys but
@@ -422,11 +432,11 @@ generate_smb3signingkey(struct cifs_ses *ses,
rc = generate_key(ses, ptriplet->encryption.label,
ptriplet->encryption.context,
ses->smb3encryptionkey,
- SMB3_SIGN_KEY_SIZE);
+ SMB3_ENC_DEC_KEY_SIZE);
rc = generate_key(ses, ptriplet->decryption.label,
ptriplet->decryption.context,
ses->smb3decryptionkey,
- SMB3_SIGN_KEY_SIZE);
+ SMB3_ENC_DEC_KEY_SIZE);
if (rc)
return rc;
}
@@ -442,14 +452,23 @@ generate_smb3signingkey(struct cifs_ses *ses,
*/
cifs_dbg(VFS, "Session Id %*ph\n", (int)sizeof(ses->Suid),
&ses->Suid);
+ cifs_dbg(VFS, "Cipher type %d\n", server->cipher_type);
cifs_dbg(VFS, "Session Key %*ph\n",
SMB2_NTLMV2_SESSKEY_SIZE, ses->auth_key.response);
cifs_dbg(VFS, "Signing Key %*ph\n",
SMB3_SIGN_KEY_SIZE, ses->smb3signingkey);
- cifs_dbg(VFS, "ServerIn Key %*ph\n",
- SMB3_SIGN_KEY_SIZE, ses->smb3encryptionkey);
- cifs_dbg(VFS, "ServerOut Key %*ph\n",
- SMB3_SIGN_KEY_SIZE, ses->smb3decryptionkey);
+ if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
+ (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) {
+ cifs_dbg(VFS, "ServerIn Key %*ph\n",
+ SMB3_GCM256_CRYPTKEY_SIZE, ses->smb3encryptionkey);
+ cifs_dbg(VFS, "ServerOut Key %*ph\n",
+ SMB3_GCM256_CRYPTKEY_SIZE, ses->smb3decryptionkey);
+ } else {
+ cifs_dbg(VFS, "ServerIn Key %*ph\n",
+ SMB3_GCM128_CRYPTKEY_SIZE, ses->smb3encryptionkey);
+ cifs_dbg(VFS, "ServerOut Key %*ph\n",
+ SMB3_GCM128_CRYPTKEY_SIZE, ses->smb3decryptionkey);
+ }
#endif
return rc;
}
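
In generate_key() above, the four-byte L arrays carry the requested key length, in bits, as a 32-bit big-endian value fed into the HMAC-SHA256 based key derivation: L128 = {0, 0, 0, 128} encodes 128 bits for the existing 16-byte keys, and the new L256 = {0, 0, 1, 0} encodes 256 bits for the AES-256 cipher types. A small illustrative decode of those byte arrays (userspace C, not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	/* Interpret 4 bytes as a big-endian 32-bit value, like the KDF "L" field. */
	static uint32_t be32(const uint8_t b[4])
	{
		return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
		       ((uint32_t)b[2] << 8) | (uint32_t)b[3];
	}

	int main(void)
	{
		const uint8_t L128[4] = { 0, 0, 0, 128 };
		const uint8_t L256[4] = { 0, 0, 1, 0 };

		printf("L128 = %u bits\n", be32(L128));	/* 128 */
		printf("L256 = %u bits\n", be32(L256));	/* 256 */
		return 0;
	}
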
diff --git a/fs/file.c b/fs/file.c
index f3a4bac2cbe9..f633348029a5 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -629,17 +629,30 @@ int close_fd(unsigned fd)
}
EXPORT_SYMBOL(close_fd); /* for ksys_close() */
+/**
+ * last_fd - return last valid index into fd table
+ * @fdt: fd table to find the last valid index in
+ *
+ * Context: Either rcu read lock or files_lock must be held.
+ *
+ * Returns: Last valid index into fdtable.
+ */
+static inline unsigned last_fd(struct fdtable *fdt)
+{
+ return fdt->max_fds - 1;
+}
+
static inline void __range_cloexec(struct files_struct *cur_fds,
unsigned int fd, unsigned int max_fd)
{
struct fdtable *fdt;
- if (fd > max_fd)
- return;
-
+ /* make sure we're using the correct maximum value */
spin_lock(&cur_fds->file_lock);
fdt = files_fdtable(cur_fds);
- bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
+ max_fd = min(last_fd(fdt), max_fd);
+ if (fd <= max_fd)
+ bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
spin_unlock(&cur_fds->file_lock);
}
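
The fs/file.c change above clamps the caller-supplied max_fd to the last valid index in the fdtable before calling bitmap_set(), so a close_range(..., CLOSE_RANGE_CLOEXEC) call with a huge upper bound cannot set bits past the end of the close_on_exec bitmap. A minimal userspace sketch of the same clamp-then-set idea; the helper names here are illustrative, not kernel API:

	#include <stdio.h>

	#define MIN(a, b)	((a) < (b) ? (a) : (b))

	/* Set bits [fd, max_fd] in a bitmap of nbits bits, never past nbits - 1. */
	static void range_set(unsigned char *bitmap, unsigned int nbits,
			      unsigned int fd, unsigned int max_fd)
	{
		max_fd = MIN(nbits - 1, max_fd);	/* clamp to last valid index */
		for (unsigned int i = fd; i <= max_fd; i++)
			bitmap[i / 8] |= 1u << (i % 8);
	}

	int main(void)
	{
		unsigned char map[4] = { 0 };	/* 32-bit bitmap */

		range_set(map, 32, 3, ~0u);	/* caller passes an oversized max_fd */
		printf("%02x %02x %02x %02x\n", map[0], map[1], map[2], map[3]);
		return 0;
	}
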
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 97076d3f562f..8fb9602d79b4 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -162,8 +162,10 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
int error;
error = init_threads(sdp);
- if (error)
+ if (error) {
+ gfs2_withdraw_delayed(sdp);
return error;
+ }
j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
if (gfs2_withdrawn(sdp)) {
@@ -750,11 +752,13 @@ void gfs2_freeze_func(struct work_struct *work)
static int gfs2_freeze(struct super_block *sb)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
- int error = 0;
+ int error;
mutex_lock(&sdp->sd_freeze_mutex);
- if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN)
+ if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN) {
+ error = -EBUSY;
goto out;
+ }
for (;;) {
if (gfs2_withdrawn(sdp)) {
@@ -795,10 +799,10 @@ static int gfs2_unfreeze(struct super_block *sb)
struct gfs2_sbd *sdp = sb->s_fs_info;
mutex_lock(&sdp->sd_freeze_mutex);
- if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
+ if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
!gfs2_holder_initialized(&sdp->sd_freeze_gh)) {
mutex_unlock(&sdp->sd_freeze_mutex);
- return 0;
+ return -EINVAL;
}
gfs2_freeze_unlock(&sdp->sd_freeze_gh);
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 29e407762626..743a005a5c64 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -144,7 +144,7 @@ static char *follow_link(char *link)
char *name, *resolved, *end;
int n;
- name = __getname();
+ name = kmalloc(PATH_MAX, GFP_KERNEL);
if (!name) {
n = -ENOMEM;
goto out_free;
@@ -173,12 +173,11 @@ static char *follow_link(char *link)
goto out_free;
}
- __putname(name);
- kfree(link);
+ kfree(name);
return resolved;
out_free:
- __putname(name);
+ kfree(name);
return ERR_PTR(n);
}
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 3dc10bfd8c3b..4eba531bea5a 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -16,7 +16,6 @@
#include <linux/rculist_nulls.h>
#include <linux/cpu.h>
#include <linux/tracehook.h>
-#include <linux/freezer.h>
#include "../kernel/sched/sched.h"
#include "io-wq.h"
@@ -388,11 +387,9 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
static bool io_flush_signals(void)
{
- if (unlikely(test_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL))) {
+ if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL))) {
__set_current_state(TASK_RUNNING);
- if (current->task_works)
- task_work_run();
- clear_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL);
+ tracehook_notify_signal();
return true;
}
return false;
@@ -418,6 +415,7 @@ static void io_worker_handle_work(struct io_worker *worker)
{
struct io_wqe *wqe = worker->wqe;
struct io_wq *wq = wqe->wq;
+ bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);
do {
struct io_wq_work *work;
@@ -447,6 +445,9 @@ get_next:
unsigned int hash = io_get_work_hash(work);
next_hashed = wq_next_work(work);
+
+ if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
+ work->flags |= IO_WQ_WORK_CANCEL;
wq->do_work(work);
io_assign_current_work(worker, NULL);
@@ -487,7 +488,7 @@ static int io_wqe_worker(void *data)
worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
io_wqe_inc_running(worker);
- sprintf(buf, "iou-wrk-%d", wq->task_pid);
+ snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task_pid);
set_task_comm(current, buf);
while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
@@ -505,10 +506,15 @@ loop:
if (io_flush_signals())
continue;
ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
- if (try_to_freeze() || ret)
- continue;
- if (fatal_signal_pending(current))
+ if (signal_pending(current)) {
+ struct ksignal ksig;
+
+ if (!get_signal(&ksig))
+ continue;
break;
+ }
+ if (ret)
+ continue;
/* timed out, exit unless we're the fixed worker */
if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
!(worker->flags & IO_WORKER_F_FIXED))
@@ -709,16 +715,20 @@ static int io_wq_manager(void *data)
char buf[TASK_COMM_LEN];
int node;
- sprintf(buf, "iou-mgr-%d", wq->task_pid);
+ snprintf(buf, sizeof(buf), "iou-mgr-%d", wq->task_pid);
set_task_comm(current, buf);
do {
set_current_state(TASK_INTERRUPTIBLE);
io_wq_check_workers(wq);
schedule_timeout(HZ);
- try_to_freeze();
- if (fatal_signal_pending(current))
+ if (signal_pending(current)) {
+ struct ksignal ksig;
+
+ if (!get_signal(&ksig))
+ continue;
set_bit(IO_WQ_BIT_EXIT, &wq->state);
+ }
} while (!test_bit(IO_WQ_BIT_EXIT, &wq->state));
io_wq_check_workers(wq);
@@ -1065,7 +1075,11 @@ static void io_wq_destroy(struct io_wq *wq)
for_each_node(node) {
struct io_wqe *wqe = wq->wqes[node];
- WARN_ON_ONCE(!wq_list_empty(&wqe->work_list));
+ struct io_cb_cancel_data match = {
+ .fn = io_wq_work_match_all,
+ .cancel_all = true,
+ };
+ io_wqe_cancel_pending_work(wqe, &match);
kfree(wqe);
}
io_wq_put_hash(wq->hash);
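
The io-wq changes above also swap sprintf() for snprintf() when building worker names: set_task_comm() copies into a fixed TASK_COMM_LEN buffer, and snprintf() guarantees the local buf[] stays in bounds and NUL-terminated even for the largest possible pid value. A userspace illustration of the truncation behaviour, with TASK_COMM_LEN assumed to be 16 as in the kernel:

	#include <stdio.h>

	#define TASK_COMM_LEN 16	/* size of the buffer the name is built in */

	int main(void)
	{
		char buf[TASK_COMM_LEN];
		int pid = 1234567890;	/* worst case for a 32-bit value */

		/* "iou-wrk-" plus ten digits needs 18 chars + NUL; only 16 fit. */
		int n = snprintf(buf, sizeof(buf), "iou-wrk-%d", pid);

		printf("wanted %d chars, stored \"%s\"\n", n, buf);
		return 0;
	}
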
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 543551d70327..bd14327c8e7e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -78,7 +78,6 @@
#include <linux/task_work.h>
#include <linux/pagemap.h>
#include <linux/io_uring.h>
-#include <linux/freezer.h>
#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
@@ -698,6 +697,7 @@ enum {
REQ_F_NO_FILE_TABLE_BIT,
REQ_F_LTIMEOUT_ACTIVE_BIT,
REQ_F_COMPLETE_INLINE_BIT,
+ REQ_F_REISSUE_BIT,
/* not a real bit, just to check we're not overflowing the space */
__REQ_F_LAST_BIT,
@@ -741,6 +741,8 @@ enum {
REQ_F_LTIMEOUT_ACTIVE = BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
/* completion is deferred through io_comp_state */
REQ_F_COMPLETE_INLINE = BIT(REQ_F_COMPLETE_INLINE_BIT),
+ /* caller should reissue async */
+ REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT),
};
struct async_poll {
@@ -1095,8 +1097,6 @@ static bool io_match_task(struct io_kiocb *head,
io_for_each_link(req, head) {
if (req->flags & REQ_F_INFLIGHT)
return true;
- if (req->task->files == files)
- return true;
}
return false;
}
@@ -1216,7 +1216,7 @@ static void io_prep_async_work(struct io_kiocb *req)
if (req->flags & REQ_F_ISREG) {
if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
io_wq_hash_work(&req->work, file_inode(req->file));
- } else {
+ } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
if (def->unbound_nonreg_file)
req->work.flags |= IO_WQ_WORK_UNBOUND;
}
@@ -1239,16 +1239,16 @@ static void io_queue_async_work(struct io_kiocb *req)
BUG_ON(!tctx);
BUG_ON(!tctx->io_wq);
- trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
- &req->work, req->flags);
/* init ->work of the whole link before punting */
io_prep_async_link(req);
+ trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
+ &req->work, req->flags);
io_wq_enqueue(tctx->io_wq, &req->work);
if (link)
io_queue_linked_timeout(link);
}
-static void io_kill_timeout(struct io_kiocb *req)
+static void io_kill_timeout(struct io_kiocb *req, int status)
{
struct io_timeout_data *io = req->async_data;
int ret;
@@ -1258,31 +1258,11 @@ static void io_kill_timeout(struct io_kiocb *req)
atomic_set(&req->ctx->cq_timeouts,
atomic_read(&req->ctx->cq_timeouts) + 1);
list_del_init(&req->timeout.list);
- io_cqring_fill_event(req, 0);
+ io_cqring_fill_event(req, status);
io_put_req_deferred(req, 1);
}
}
-/*
- * Returns true if we found and killed one or more timeouts
- */
-static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
- struct files_struct *files)
-{
- struct io_kiocb *req, *tmp;
- int canceled = 0;
-
- spin_lock_irq(&ctx->completion_lock);
- list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
- if (io_match_task(req, tsk, files)) {
- io_kill_timeout(req);
- canceled++;
- }
- }
- spin_unlock_irq(&ctx->completion_lock);
- return canceled != 0;
-}
-
static void __io_queue_deferred(struct io_ring_ctx *ctx)
{
do {
@@ -1327,7 +1307,7 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
break;
list_del_init(&req->timeout.list);
- io_kill_timeout(req);
+ io_kill_timeout(req, 0);
} while (!list_empty(&ctx->timeout_list));
ctx->cq_last_tm_flush = seq;
@@ -2499,6 +2479,11 @@ static bool io_rw_should_reissue(struct io_kiocb *req)
return false;
return true;
}
+#else
+static bool io_rw_should_reissue(struct io_kiocb *req)
+{
+ return false;
+}
#endif
static bool io_rw_reissue(struct io_kiocb *req)
@@ -2524,13 +2509,14 @@ static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
{
int cflags = 0;
- if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_reissue(req))
+ if (req->rw.kiocb.ki_flags & IOCB_WRITE)
+ kiocb_end_write(req);
+ if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_should_reissue(req)) {
+ req->flags |= REQ_F_REISSUE;
return;
+ }
if (res != req->result)
req_set_fail_links(req);
-
- if (req->rw.kiocb.ki_flags & IOCB_WRITE)
- kiocb_end_write(req);
if (req->flags & REQ_F_BUFFER_SELECTED)
cflags = io_put_rw_kbuf(req);
__io_req_complete(req, issue_flags, res, cflags);
@@ -2776,6 +2762,7 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
struct io_async_rw *io = req->async_data;
+ bool check_reissue = kiocb->ki_complete == io_complete_rw;
/* add previously done IO, if any */
if (io && io->bytes_done > 0) {
@@ -2791,6 +2778,18 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
__io_complete_rw(req, ret, 0, issue_flags);
else
io_rw_done(kiocb, ret);
+
+ if (check_reissue && req->flags & REQ_F_REISSUE) {
+ req->flags &= ~REQ_F_REISSUE;
+ if (!io_rw_reissue(req)) {
+ int cflags = 0;
+
+ req_set_fail_links(req);
+ if (req->flags & REQ_F_BUFFER_SELECTED)
+ cflags = io_put_rw_kbuf(req);
+ __io_req_complete(req, issue_flags, ret, cflags);
+ }
+ }
}
static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
@@ -3307,11 +3306,8 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
ret = io_iter_do_read(req, iter);
- if (ret == -EIOCBQUEUED) {
- if (req->async_data)
- iov_iter_revert(iter, io_size - iov_iter_count(iter));
- goto out_free;
- } else if (ret == -EAGAIN) {
+ if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
+ req->flags &= ~REQ_F_REISSUE;
/* IOPOLL retry should happen for io-wq threads */
if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
goto done;
@@ -3321,6 +3317,8 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
/* some cases will consume bytes even on error returns */
iov_iter_revert(iter, io_size - iov_iter_count(iter));
ret = 0;
+ } else if (ret == -EIOCBQUEUED) {
+ goto out_free;
} else if (ret <= 0 || ret == io_size || !force_nonblock ||
(req->flags & REQ_F_NOWAIT) || !(req->flags & REQ_F_ISREG)) {
/* read all, failed, already did sync or don't want to retry */
@@ -3433,6 +3431,11 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
else
ret2 = -EINVAL;
+ if (req->flags & REQ_F_REISSUE) {
+ req->flags &= ~REQ_F_REISSUE;
+ ret2 = -EAGAIN;
+ }
+
/*
* Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
* retry them without IOCB_NOWAIT.
@@ -3442,8 +3445,6 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
/* no retry on NONBLOCK nor RWF_NOWAIT */
if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
goto done;
- if (ret2 == -EIOCBQUEUED && req->async_data)
- iov_iter_revert(iter, io_size - iov_iter_count(iter));
if (!force_nonblock || ret2 != -EAGAIN) {
/* IOPOLL retry should happen for io-wq threads */
if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
@@ -3978,6 +3979,7 @@ static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
static int io_provide_buffers_prep(struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
+ unsigned long size;
struct io_provide_buf *p = &req->pbuf;
u64 tmp;
@@ -3991,7 +3993,8 @@ static int io_provide_buffers_prep(struct io_kiocb *req,
p->addr = READ_ONCE(sqe->addr);
p->len = READ_ONCE(sqe->len);
- if (!access_ok(u64_to_user_ptr(p->addr), (p->len * p->nbufs)))
+ size = (unsigned long)p->len * p->nbufs;
+ if (!access_ok(u64_to_user_ptr(p->addr), size))
return -EFAULT;
p->bgid = READ_ONCE(sqe->buf_group);
@@ -4820,7 +4823,6 @@ static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
ret = -ENOMEM;
goto out;
}
- io = req->async_data;
memcpy(req->async_data, &__io, sizeof(__io));
return -EAGAIN;
}
@@ -5583,7 +5585,8 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
data->mode = io_translate_timeout_mode(flags);
hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
- io_req_track_inflight(req);
+ if (is_timeout_link)
+ io_req_track_inflight(req);
return 0;
}
@@ -6479,8 +6482,6 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
ret = io_init_req(ctx, req, sqe);
if (unlikely(ret)) {
fail_req:
- io_put_req(req);
- io_req_complete(req, ret);
if (link->head) {
/* fail even hard links since we don't submit */
link->head->flags |= REQ_F_FAIL_LINK;
@@ -6488,6 +6489,8 @@ fail_req:
io_req_complete(link->head, -ECANCELED);
link->head = NULL;
}
+ io_put_req(req);
+ io_req_complete(req, ret);
return ret;
}
ret = io_req_prep(req, sqe);
@@ -6740,7 +6743,7 @@ static int io_sq_thread(void *data)
char buf[TASK_COMM_LEN];
DEFINE_WAIT(wait);
- sprintf(buf, "iou-sqp-%d", sqd->task_pid);
+ snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
set_task_comm(current, buf);
current->pf_io_worker = NULL;
@@ -6755,17 +6758,25 @@ static int io_sq_thread(void *data)
int ret;
bool cap_entries, sqt_spin, needs_sched;
- if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state)) {
+ if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
+ signal_pending(current)) {
+ bool did_sig = false;
+
mutex_unlock(&sqd->lock);
+ if (signal_pending(current)) {
+ struct ksignal ksig;
+
+ did_sig = get_signal(&ksig);
+ }
cond_resched();
mutex_lock(&sqd->lock);
+ if (did_sig)
+ break;
io_run_task_work();
io_run_task_work_head(&sqd->park_task_work);
timeout = jiffies + sqd->sq_thread_idle;
continue;
}
- if (fatal_signal_pending(current))
- break;
sqt_spin = false;
cap_entries = !list_is_singular(&sqd->ctx_list);
list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
@@ -6808,7 +6819,6 @@ static int io_sq_thread(void *data)
mutex_unlock(&sqd->lock);
schedule();
- try_to_freeze();
mutex_lock(&sqd->lock);
list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
io_ring_clear_wakeup_flag(ctx);
@@ -6873,7 +6883,7 @@ static int io_run_task_work_sig(void)
return 1;
if (!signal_pending(current))
return 0;
- if (test_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL))
+ if (test_thread_flag(TIF_NOTIFY_SIGNAL))
return -ERESTARTSYS;
return -EINTR;
}
@@ -8563,6 +8573,14 @@ static void io_ring_exit_work(struct work_struct *work)
struct io_tctx_node *node;
int ret;
+ /* prevent SQPOLL from submitting new requests */
+ if (ctx->sq_data) {
+ io_sq_thread_park(ctx->sq_data);
+ list_del_init(&ctx->sqd_list);
+ io_sqd_update_thread_idle(ctx->sq_data);
+ io_sq_thread_unpark(ctx->sq_data);
+ }
+
/*
* If we're doing polled IO and end up having requests being
* submitted async (out-of-line), then completions can come in while
@@ -8599,6 +8617,28 @@ static void io_ring_exit_work(struct work_struct *work)
io_ring_ctx_free(ctx);
}
+/* Returns true if we found and killed one or more timeouts */
+static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
+ struct files_struct *files)
+{
+ struct io_kiocb *req, *tmp;
+ int canceled = 0;
+
+ spin_lock_irq(&ctx->completion_lock);
+ list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
+ if (io_match_task(req, tsk, files)) {
+ io_kill_timeout(req, -ECANCELED);
+ canceled++;
+ }
+ }
+ if (canceled != 0)
+ io_commit_cqring(ctx);
+ spin_unlock_irq(&ctx->completion_lock);
+ if (canceled != 0)
+ io_cqring_ev_posted(ctx);
+ return canceled != 0;
+}
+
static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
{
unsigned long index;
@@ -8614,14 +8654,6 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
io_unregister_personality(ctx, index);
mutex_unlock(&ctx->uring_lock);
- /* prevent SQPOLL from submitting new requests */
- if (ctx->sq_data) {
- io_sq_thread_park(ctx->sq_data);
- list_del_init(&ctx->sqd_list);
- io_sqd_update_thread_idle(ctx->sq_data);
- io_sq_thread_unpark(ctx->sq_data);
- }
-
io_kill_timeouts(ctx, NULL, NULL);
io_poll_remove_all(ctx, NULL, NULL);
@@ -8998,6 +9030,8 @@ void __io_uring_task_cancel(void)
/* make sure overflow events are dropped */
atomic_inc(&tctx->in_idle);
+ __io_uring_files_cancel(NULL);
+
do {
/* read completions before cancelations */
inflight = tctx_inflight(tctx);
diff --git a/fs/namei.c b/fs/namei.c
index 216f16e74351..48a2f288e802 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -579,6 +579,8 @@ static void set_nameidata(struct nameidata *p, int dfd, struct filename *name)
p->stack = p->internal;
p->dfd = dfd;
p->name = name;
+ p->path.mnt = NULL;
+ p->path.dentry = NULL;
p->total_link_count = old ? old->total_link_count : 0;
p->saved = old;
current->nameidata = p;
@@ -652,6 +654,8 @@ static void terminate_walk(struct nameidata *nd)
rcu_read_unlock();
}
nd->depth = 0;
+ nd->path.mnt = NULL;
+ nd->path.dentry = NULL;
}
/* path_put is needed afterwards regardless of success or failure */
@@ -2322,8 +2326,6 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
}
nd->root.mnt = NULL;
- nd->path.mnt = NULL;
- nd->path.dentry = NULL;
/* Absolute pathname -- fetch the root (LOOKUP_IN_ROOT uses nd->dfd). */
if (*s == '/' && !(flags & LOOKUP_IN_ROOT)) {
@@ -2419,16 +2421,16 @@ static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path
while (!(err = link_path_walk(s, nd)) &&
(s = lookup_last(nd)) != NULL)
;
+ if (!err && unlikely(nd->flags & LOOKUP_MOUNTPOINT)) {
+ err = handle_lookup_down(nd);
+ nd->flags &= ~LOOKUP_JUMPED; // no d_weak_revalidate(), please...
+ }
if (!err)
err = complete_walk(nd);
if (!err && nd->flags & LOOKUP_DIRECTORY)
if (!d_can_lookup(nd->path.dentry))
err = -ENOTDIR;
- if (!err && unlikely(nd->flags & LOOKUP_MOUNTPOINT)) {
- err = handle_lookup_down(nd);
- nd->flags &= ~LOOKUP_JUMPED; // no d_weak_revalidate(), please...
- }
if (!err) {
*path = nd->path;
nd->path.mnt = NULL;
diff --git a/fs/reiserfs/xattr.h b/fs/reiserfs/xattr.h
index 9b3b06da568c..e47fde1182de 100644
--- a/fs/reiserfs/xattr.h
+++ b/fs/reiserfs/xattr.h
@@ -44,7 +44,7 @@ void reiserfs_security_free(struct reiserfs_security_handle *sec);
static inline int reiserfs_xattrs_initialized(struct super_block *sb)
{
- return REISERFS_SB(sb)->priv_root != NULL;
+ return REISERFS_SB(sb)->priv_root && REISERFS_SB(sb)->xattr_root;
}
#define xattr_size(size) ((size) + sizeof(struct reiserfs_xattr_header))
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 02a716a0af5d..f28b097c658f 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -233,6 +233,7 @@ struct acpi_pnp_type {
struct acpi_device_pnp {
acpi_bus_id bus_id; /* Object name */
+ int instance_no; /* Instance number of this object */
struct acpi_pnp_type type; /* ID type */
acpi_bus_address bus_address; /* _ADR */
char *unique_id; /* _UID */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index fcdaab723916..3bdcfc4401b7 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -222,10 +222,14 @@ void __iomem *__acpi_map_table(unsigned long phys, unsigned long size);
void __acpi_unmap_table(void __iomem *map, unsigned long size);
int early_acpi_boot_init(void);
int acpi_boot_init (void);
+void acpi_boot_table_prepare (void);
void acpi_boot_table_init (void);
int acpi_mps_check (void);
int acpi_numa_init (void);
+int acpi_locate_initial_tables (void);
+void acpi_reserve_initial_tables (void);
+void acpi_table_init_complete (void);
int acpi_table_init (void);
int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
int __init acpi_table_parse_entries(char *id, unsigned long table_size,
@@ -814,9 +818,12 @@ static inline int acpi_boot_init(void)
return 0;
}
+static inline void acpi_boot_table_prepare(void)
+{
+}
+
static inline void acpi_boot_table_init(void)
{
- return;
}
static inline int acpi_mps_check(void)
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index 47482049f640..40dd6afbfd81 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -480,7 +480,6 @@ struct virtchnl_rss_key {
u16 vsi_id;
u16 key_len;
u8 key[1]; /* RSS hash key, packed bytes */
- u8 pad[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
@@ -489,7 +488,6 @@ struct virtchnl_rss_lut {
u16 vsi_id;
u16 lut_entries;
u8 lut[1]; /* RSS lookup table */
- u8 pad[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index bc6bc8383b43..158aefae1030 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -85,8 +85,6 @@ typedef __u32 __bitwise req_flags_t;
#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12))
/* account into disk and partition IO statistics */
#define RQF_IO_STAT ((__force req_flags_t)(1 << 13))
-/* request came from our alloc pool */
-#define RQF_ALLOCED ((__force req_flags_t)(1 << 14))
/* runtime pm request */
#define RQF_PM ((__force req_flags_t)(1 << 15))
/* on IO scheduler merge hash */
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 9fdd839b418c..c9b7a876b0c8 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -40,6 +40,7 @@ struct bpf_local_storage;
struct bpf_local_storage_map;
struct kobject;
struct mem_cgroup;
+struct module;
struct bpf_func_state;
extern struct idr btf_idr;
@@ -645,6 +646,7 @@ struct bpf_trampoline {
/* Executable image of trampoline */
struct bpf_tramp_image *cur_image;
u64 selector;
+ struct module *mod;
};
struct bpf_attach_target_info {
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 7f4ac87c0b32..5c641f930caf 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -253,7 +253,11 @@ struct target_type {
#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)
/*
- * Indicates that a target supports host-managed zoned block devices.
+ * Indicates support for zoned block devices:
+ * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned
+ * block devices but does not support combining different zoned models.
+ * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple
+ * devices with different zoned models.
*/
#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_ZONED_HM 0x00000040
@@ -275,6 +279,15 @@ struct target_type {
#define DM_TARGET_PASSES_CRYPTO 0x00000100
#define dm_target_passes_crypto(type) ((type)->features & DM_TARGET_PASSES_CRYPTO)
+#ifdef CONFIG_BLK_DEV_ZONED
+#define DM_TARGET_MIXED_ZONED_MODEL 0x00000200
+#define dm_target_supports_mixed_zoned_model(type) \
+ ((type)->features & DM_TARGET_MIXED_ZONED_MODEL)
+#else
+#define DM_TARGET_MIXED_ZONED_MODEL 0x00000000
+#define dm_target_supports_mixed_zoned_model(type) (false)
+#endif
+
struct dm_target {
struct dm_table *table;
struct target_type *type;
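
The DM_TARGET_MIXED_ZONED_MODEL comment above describes a capability check rather than new mechanics. A minimal sketch of how a table-validation path might consult it (the helper name and the models_differ flag are hypothetical, not part of this patch):

    #include <linux/device-mapper.h>

    /* Hypothetical check: a table that mixes zoned models is only
     * acceptable if the target advertises DM_TARGET_MIXED_ZONED_MODEL.
     */
    static bool example_zoned_model_ok(struct dm_target *ti, bool models_differ)
    {
            if (!models_differ)
                    return true;
            return dm_target_supports_mixed_zoned_model(ti->type);
    }
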
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 5c631a298994..4290e2fa3117 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -87,9 +87,7 @@ u32 ethtool_op_get_link(struct net_device *dev);
int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti);
-/**
- * struct ethtool_link_ext_state_info - link extended state and substate.
- */
+/* Link extended state and substate. */
struct ethtool_link_ext_state_info {
enum ethtool_link_ext_state link_ext_state;
union {
@@ -129,7 +127,6 @@ struct ethtool_link_ksettings {
__ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
} link_modes;
u32 lanes;
- enum ethtool_link_mode_bit_indices link_mode;
};
/**
@@ -292,6 +289,9 @@ struct ethtool_pause_stats {
* do not attach ext_substate attribute to netlink message). If link_ext_state
* and link_ext_substate are unknown, return -ENODATA. If not implemented,
* link_ext_state and link_ext_substate will not be sent to userspace.
+ * @get_eeprom_len: Read range of EEPROM addresses for validation of
+ * @get_eeprom and @set_eeprom requests.
+ * Returns 0 if device does not support EEPROM access.
* @get_eeprom: Read data from the device EEPROM.
* Should fill in the magic field. Don't need to check len for zero
* or wraparound. Fill in the data argument with the eeprom values
@@ -384,6 +384,8 @@ struct ethtool_pause_stats {
* @get_module_eeprom: Get the eeprom information from the plug-in module
* @get_eee: Get Energy-Efficient (EEE) supported and status.
* @set_eee: Set EEE status (enable/disable) as well as LPI timers.
+ * @get_tunable: Read the value of a driver / device tunable.
+ * @set_tunable: Set the value of a driver / device tunable.
* @get_per_queue_coalesce: Get interrupt coalescing parameters per queue.
* It must check that the given queue number is valid. If neither a RX nor
* a TX queue has this number, return -EINVAL. If only a RX queue or a TX
@@ -549,8 +551,8 @@ struct phy_tdr_config;
* @get_sset_count: Get number of strings that @get_strings will write.
* @get_strings: Return a set of strings that describe the requested objects
* @get_stats: Return extended statistics about the PHY device.
- * @start_cable_test - Start a cable test
- * @start_cable_test_tdr - Start a Time Domain Reflectometry cable test
+ * @start_cable_test: Start a cable test
+ * @start_cable_test_tdr: Start a Time Domain Reflectometry cable test
*
* All operations are optional (i.e. the function pointer may be set to %NULL)
* and callers must take this into account. Callers must hold the RTNL lock.
@@ -574,6 +576,15 @@ struct ethtool_phy_ops {
void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops);
/**
+ * ethtool_params_from_link_mode - Derive link parameters from a given link mode
+ * @link_ksettings: Link parameters to be derived from the link mode
+ * @link_mode: Link mode
+ */
+void
+ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings,
+ enum ethtool_link_mode_bit_indices link_mode);
+
+/**
* ethtool_sprintf - Write formatted string to ethtool string data
* @data: Pointer to start of string to update
* @fmt: Format of string to write
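
As a usage sketch for the new ethtool_params_from_link_mode() helper, a driver that knows the single negotiated link mode could derive speed/duplex/lanes from it; the chosen link-mode bit below is only an example value:

    #include <linux/ethtool.h>

    /* Sketch: populate link_ksettings from one known link mode.
     * ETHTOOL_LINK_MODE_25000baseCR_Full_BIT is an arbitrary example.
     */
    static void example_fill_ksettings(struct ethtool_link_ksettings *ks)
    {
            ethtool_params_from_link_mode(ks, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT);
    }
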
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index fd183fb9c20f..0c19010da77f 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -271,6 +271,29 @@ static inline void devm_extcon_unregister_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb) { }
+static inline int extcon_register_notifier_all(struct extcon_dev *edev,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int extcon_unregister_notifier_all(struct extcon_dev *edev,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int devm_extcon_register_notifier_all(struct device *dev,
+ struct extcon_dev *edev,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline void devm_extcon_unregister_notifier_all(struct device *dev,
+ struct extcon_dev *edev,
+ struct notifier_block *nb) { }
+
static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
{
return ERR_PTR(-ENODEV);
diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h
index ebc295647581..19781b0f6429 100644
--- a/include/linux/firmware/intel/stratix10-svc-client.h
+++ b/include/linux/firmware/intel/stratix10-svc-client.h
@@ -56,7 +56,7 @@
* COMMAND_RECONFIG_FLAG_PARTIAL:
* Set to FPGA configuration type (full or partial).
*/
-#define COMMAND_RECONFIG_FLAG_PARTIAL 1
+#define COMMAND_RECONFIG_FLAG_PARTIAL 0
/*
* Timeout settings for service clients:
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index ce59a6a6a008..9eb77c87a83b 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -320,7 +320,14 @@ static inline struct host1x_device *to_host1x_device(struct device *dev)
int host1x_device_init(struct host1x_device *device);
int host1x_device_exit(struct host1x_device *device);
-int host1x_client_register(struct host1x_client *client);
+int __host1x_client_register(struct host1x_client *client,
+ struct lock_class_key *key);
+#define host1x_client_register(class) \
+ ({ \
+ static struct lock_class_key __key; \
+ __host1x_client_register(class, &__key); \
+ })
+
int host1x_client_unregister(struct host1x_client *client);
int host1x_client_suspend(struct host1x_client *client);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 432290b58a0b..1599deee0456 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -437,11 +437,11 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 reserved_at_60[0x18];
u8 log_max_ft_num[0x8];
- u8 reserved_at_80[0x18];
+ u8 reserved_at_80[0x10];
+ u8 log_max_flow_counter[0x8];
u8 log_max_destination[0x8];
- u8 log_max_flow_counter[0x8];
- u8 reserved_at_a8[0x10];
+ u8 reserved_at_a0[0x18];
u8 log_max_flow[0x8];
u8 reserved_at_c0[0x40];
@@ -8847,6 +8847,8 @@ struct mlx5_ifc_pplm_reg_bits {
u8 fec_override_admin_100g_2x[0x10];
u8 fec_override_admin_50g_1x[0x10];
+
+ u8 reserved_at_140[0x140];
};
struct mlx5_ifc_ppcnt_reg_bits {
@@ -10215,7 +10217,7 @@ struct mlx5_ifc_pbmc_reg_bits {
struct mlx5_ifc_bufferx_reg_bits buffer[10];
- u8 reserved_at_2e0[0x40];
+ u8 reserved_at_2e0[0x80];
};
struct mlx5_ifc_qtct_reg_bits {
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 0cd631a19727..515cff77a4f4 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -185,7 +185,7 @@ extern void mutex_lock_io(struct mutex *lock);
# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
-# define mutex_lock_io_nested(lock, subclass) mutex_lock(lock)
+# define mutex_lock_io_nested(lock, subclass) mutex_lock_io(lock)
#endif
/*
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index ec2ad4b0fe14..c4fdb4463f7d 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -460,7 +460,5 @@ void geni_icc_set_tag(struct geni_se *se, u32 tag);
int geni_icc_enable(struct geni_se *se);
int geni_icc_disable(struct geni_se *se);
-
-void geni_remove_earlycon_icc_vote(void);
#endif
#endif
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index f78e90a04a69..e242bf3d2b4a 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -403,7 +403,6 @@ static inline void sk_psock_cork_free(struct sk_psock *psock)
static inline void sk_psock_restore_proto(struct sock *sk,
struct sk_psock *psock)
{
- sk->sk_prot->unhash = psock->saved_unhash;
if (psock->psock_update_sk_prot)
psock->psock_update_sk_prot(sk, true);
}
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 6b5fcfa1e555..b465f8f3e554 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -62,15 +62,21 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
return -EINVAL;
}
+ skb_reset_mac_header(skb);
+
if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
- u16 start = __virtio16_to_cpu(little_endian, hdr->csum_start);
- u16 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
+ u32 start = __virtio16_to_cpu(little_endian, hdr->csum_start);
+ u32 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
+ u32 needed = start + max_t(u32, thlen, off + sizeof(__sum16));
+
+ if (!pskb_may_pull(skb, needed))
+ return -EINVAL;
if (!skb_partial_csum_set(skb, start, off))
return -EINVAL;
p_off = skb_transport_offset(skb) + thlen;
- if (p_off > skb_headlen(skb))
+ if (!pskb_may_pull(skb, p_off))
return -EINVAL;
} else {
/* gso packets without NEEDS_CSUM do not set transport_offset.
@@ -100,14 +106,14 @@ retry:
}
p_off = keys.control.thoff + thlen;
- if (p_off > skb_headlen(skb) ||
+ if (!pskb_may_pull(skb, p_off) ||
keys.basic.ip_proto != ip_proto)
return -EINVAL;
skb_set_transport_header(skb, keys.control.thoff);
} else if (gso_type) {
p_off = thlen;
- if (p_off > skb_headlen(skb))
+ if (!pskb_may_pull(skb, p_off))
return -EINVAL;
}
}
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 92c0160b3352..a91e3d90df8a 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -229,9 +229,10 @@ static inline int xa_err(void *entry)
*
* This structure is used either directly or via the XA_LIMIT() macro
* to communicate the range of IDs that are valid for allocation.
- * Two common ranges are predefined for you:
+ * Three common ranges are predefined for you:
* * xa_limit_32b - [0 - UINT_MAX]
* * xa_limit_31b - [0 - INT_MAX]
+ * * xa_limit_16b - [0 - USHRT_MAX]
*/
struct xa_limit {
u32 max;
@@ -242,6 +243,7 @@ struct xa_limit {
#define xa_limit_32b XA_LIMIT(0, UINT_MAX)
#define xa_limit_31b XA_LIMIT(0, INT_MAX)
+#define xa_limit_16b XA_LIMIT(0, USHRT_MAX)
typedef unsigned __bitwise xa_mark_t;
#define XA_MARK_0 ((__force xa_mark_t)0U)
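
The new xa_limit_16b is used exactly like the existing limits; a minimal sketch of allocating a 16-bit ID (the array name is hypothetical):

    #include <linux/xarray.h>

    static DEFINE_XARRAY_ALLOC(example_ids);        /* hypothetical ID space */

    /* Sketch: allocate an ID constrained to [0, USHRT_MAX]. */
    static int example_alloc_id(void *entry, u32 *id)
    {
            return xa_alloc(&example_ids, id, entry, xa_limit_16b, GFP_KERNEL);
    }
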
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 2bf3092ae7ec..086b291e9530 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -170,12 +170,7 @@ void tcf_idr_insert_many(struct tc_action *actions[]);
void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
struct tc_action **a, int bind);
-int __tcf_idr_release(struct tc_action *a, bool bind, bool strict);
-
-static inline int tcf_idr_release(struct tc_action *a, bool bind)
-{
- return __tcf_idr_release(a, bind, false);
-}
+int tcf_idr_release(struct tc_action *a, bool bind);
int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops);
int tcf_unregister_action(struct tc_action_ops *a,
@@ -185,7 +180,7 @@ int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
int nr_actions, struct tcf_result *res);
int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
struct nlattr *est, char *name, int ovr, int bind,
- struct tc_action *actions[], size_t *attr_size,
+ struct tc_action *actions[], int init_res[], size_t *attr_size,
bool rtnl_held, struct netlink_ext_ack *extack);
struct tc_action_ops *tc_action_load_ops(char *name, struct nlattr *nla,
bool rtnl_held,
@@ -193,7 +188,8 @@ struct tc_action_ops *tc_action_load_ops(char *name, struct nlattr *nla,
struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
struct nlattr *nla, struct nlattr *est,
char *name, int ovr, int bind,
- struct tc_action_ops *ops, bool rtnl_held,
+ struct tc_action_ops *a_o, int *init_res,
+ bool rtnl_held,
struct netlink_ext_ack *extack);
int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind,
int ref, bool terse);
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 59f45b1e9dac..e816b6a3ef2b 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -72,7 +72,9 @@ struct netns_xfrm {
#if IS_ENABLED(CONFIG_IPV6)
struct dst_ops xfrm6_dst_ops;
#endif
- spinlock_t xfrm_state_lock;
+ spinlock_t xfrm_state_lock;
+ seqcount_spinlock_t xfrm_state_hash_generation;
+
spinlock_t xfrm_policy_lock;
struct mutex xfrm_cfg_mutex;
};
diff --git a/include/net/red.h b/include/net/red.h
index 0b39eff1d50a..be11dbd26492 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -171,9 +171,9 @@ static inline void red_set_vars(struct red_vars *v)
static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog,
u8 Scell_log, u8 *stab)
{
- if (fls(qth_min) + Wlog > 32)
+ if (fls(qth_min) + Wlog >= 32)
return false;
- if (fls(qth_max) + Wlog > 32)
+ if (fls(qth_max) + Wlog >= 32)
return false;
if (Scell_log >= 32)
return false;
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 4da61c950e93..479f60ef54c0 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -147,8 +147,8 @@ struct rtnl_af_ops {
int (*validate_link_af)(const struct net_device *dev,
const struct nlattr *attr);
int (*set_link_af)(struct net_device *dev,
- const struct nlattr *attr);
-
+ const struct nlattr *attr,
+ struct netlink_ext_ack *extack);
int (*fill_stats_af)(struct sk_buff *skb,
const struct net_device *dev);
size_t (*get_stats_af_size)(const struct net_device *dev);
diff --git a/include/net/sock.h b/include/net/sock.h
index 8b4155e756c2..cadcc12cc316 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -934,9 +934,13 @@ static inline void sk_acceptq_added(struct sock *sk)
WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1);
}
+/* Note: If you think the test should be:
+ * return READ_ONCE(sk->sk_ack_backlog) >= READ_ONCE(sk->sk_max_ack_backlog);
+ * Then please take a look at commit 64a146513f8f ("[NET]: Revert incorrect accept queue backlog changes.")
+ */
static inline bool sk_acceptq_is_full(const struct sock *sk)
{
- return READ_ONCE(sk->sk_ack_backlog) >= READ_ONCE(sk->sk_max_ack_backlog);
+ return READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog);
}
/*
@@ -2224,6 +2228,15 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
sk_mem_charge(sk, skb->truesize);
}
+static inline void skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
+{
+ if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
+ skb_orphan(skb);
+ skb->destructor = sock_efree;
+ skb->sk = sk;
+ }
+}
+
void sk_reset_timer(struct sock *sk, struct timer_list *timer,
unsigned long expires);
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index b2a06f10b62c..c58a6d4eb610 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1097,7 +1097,7 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir,
return __xfrm_policy_check(sk, ndir, skb, family);
return (!net->xfrm.policy_count[dir] && !secpath_exists(skb)) ||
- (skb_dst(skb)->flags & DST_NOPOLICY) ||
+ (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) ||
__xfrm_policy_check(sk, ndir, skb, family);
}
@@ -1557,7 +1557,7 @@ int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
int xfrm_trans_queue(struct sk_buff *skb,
int (*finish)(struct net *, struct sock *,
struct sk_buff *));
-int xfrm_output_resume(struct sk_buff *skb, int err);
+int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err);
int xfrm_output(struct sock *sk, struct sk_buff *skb);
#if IS_ENABLED(CONFIG_NET_PKTGEN)
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 8a26a2ffa952..fc5a39839b4b 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -193,6 +193,7 @@ enum iscsi_connection_state {
ISCSI_CONN_UP = 0,
ISCSI_CONN_DOWN,
ISCSI_CONN_FAILED,
+ ISCSI_CONN_BOUND,
};
struct iscsi_cls_conn {
diff --git a/include/uapi/linux/blkpg.h b/include/uapi/linux/blkpg.h
index ac6474e4f29d..d0a64ee97c6d 100644
--- a/include/uapi/linux/blkpg.h
+++ b/include/uapi/linux/blkpg.h
@@ -2,29 +2,6 @@
#ifndef _UAPI__LINUX_BLKPG_H
#define _UAPI__LINUX_BLKPG_H
-/*
- * Partition table and disk geometry handling
- *
- * A single ioctl with lots of subfunctions:
- *
- * Device number stuff:
- * get_whole_disk() (given the device number of a partition,
- * find the device number of the encompassing disk)
- * get_all_partitions() (given the device number of a disk, return the
- * device numbers of all its known partitions)
- *
- * Partition stuff:
- * add_partition()
- * delete_partition()
- * test_partition_in_use() (also for test_disk_in_use)
- *
- * Geometry stuff:
- * get_geometry()
- * set_geometry()
- * get_bios_drivedata()
- *
- * For today, only the partition stuff - aeb, 990515
- */
#include <linux/compiler.h>
#include <linux/ioctl.h>
@@ -52,9 +29,8 @@ struct blkpg_partition {
long long start; /* starting offset in bytes */
long long length; /* length in bytes */
int pno; /* partition number */
- char devname[BLKPG_DEVNAMELTH]; /* partition name, like sda5 or c0d1p2,
- to be used in kernel messages */
- char volname[BLKPG_VOLNAMELTH]; /* volume label */
+ char devname[BLKPG_DEVNAMELTH]; /* unused / ignored */
+ char volname[BLKPG_VOLNAMELTH]; /* unused / ignored */
};
#endif /* _UAPI__LINUX_BLKPG_H */
diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h
index f75238ac6dce..c7535352fef6 100644
--- a/include/uapi/linux/can.h
+++ b/include/uapi/linux/can.h
@@ -113,7 +113,7 @@ struct can_frame {
*/
__u8 len;
__u8 can_dlc; /* deprecated */
- };
+ } __attribute__((packed)); /* disable padding added in some ABIs */
__u8 __pad; /* padding */
__u8 __res0; /* reserved / padding */
__u8 len8_dlc; /* optional DLC for 8 byte payload length (9 .. 15) */
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 868b513d4f54..f91e079e3108 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -26,6 +26,14 @@
* have the same layout for 32-bit and 64-bit userland.
*/
+/* Note on reserved space.
+ * Reserved fields must not be accessed directly by user space because
+ * they may be replaced by a different field in the future. They must
+ * be initialized to zero before making the request, e.g. via memset
+ * of the entire structure or implicitly by not being set in a structure
+ * initializer.
+ */
+
/**
* struct ethtool_cmd - DEPRECATED, link control and status
* This structure is DEPRECATED, please use struct ethtool_link_settings.
@@ -67,6 +75,7 @@
* and other link features that the link partner advertised
* through autonegotiation; 0 if unknown or not applicable.
* Read-only.
+ * @reserved: Reserved for future use; see the note on reserved space.
*
* The link speed in Mbps is split between @speed and @speed_hi. Use
* the ethtool_cmd_speed() and ethtool_cmd_speed_set() functions to
@@ -155,6 +164,7 @@ static inline __u32 ethtool_cmd_speed(const struct ethtool_cmd *ep)
* @bus_info: Device bus address. This should match the dev_name()
* string for the underlying bus device, if there is one. May be
* an empty string.
+ * @reserved2: Reserved for future use; see the note on reserved space.
* @n_priv_flags: Number of flags valid for %ETHTOOL_GPFLAGS and
* %ETHTOOL_SPFLAGS commands; also the number of strings in the
* %ETH_SS_PRIV_FLAGS set
@@ -356,6 +366,7 @@ struct ethtool_eeprom {
* @tx_lpi_timer: Time in microseconds the interface delays prior to asserting
* its tx lpi (after reaching 'idle' state). Effective only when eee
* was negotiated and tx_lpi_enabled was set.
+ * @reserved: Reserved for future use; see the note on reserved space.
*/
struct ethtool_eee {
__u32 cmd;
@@ -374,6 +385,7 @@ struct ethtool_eee {
* @cmd: %ETHTOOL_GMODULEINFO
* @type: Standard the module information conforms to %ETH_MODULE_SFF_xxxx
* @eeprom_len: Length of the eeprom
+ * @reserved: Reserved for future use; see the note on reserved space.
*
* This structure is used to return the information to
* properly size memory for a subsequent call to %ETHTOOL_GMODULEEEPROM.
@@ -579,9 +591,7 @@ struct ethtool_pauseparam {
__u32 tx_pause;
};
-/**
- * enum ethtool_link_ext_state - link extended state
- */
+/* Link extended state */
enum ethtool_link_ext_state {
ETHTOOL_LINK_EXT_STATE_AUTONEG,
ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
@@ -595,10 +605,7 @@ enum ethtool_link_ext_state {
ETHTOOL_LINK_EXT_STATE_OVERHEAT,
};
-/**
- * enum ethtool_link_ext_substate_autoneg - more information in addition to
- * ETHTOOL_LINK_EXT_STATE_AUTONEG.
- */
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_AUTONEG. */
enum ethtool_link_ext_substate_autoneg {
ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED = 1,
ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED,
@@ -608,9 +615,7 @@ enum ethtool_link_ext_substate_autoneg {
ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD,
};
-/**
- * enum ethtool_link_ext_substate_link_training - more information in addition to
- * ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE.
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE.
*/
enum ethtool_link_ext_substate_link_training {
ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED = 1,
@@ -619,9 +624,7 @@ enum ethtool_link_ext_substate_link_training {
ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT,
};
-/**
- * enum ethtool_link_ext_substate_logical_mismatch - more information in addition
- * to ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH.
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH.
*/
enum ethtool_link_ext_substate_link_logical_mismatch {
ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK = 1,
@@ -631,19 +634,14 @@ enum ethtool_link_ext_substate_link_logical_mismatch {
ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED,
};
-/**
- * enum ethtool_link_ext_substate_bad_signal_integrity - more information in
- * addition to ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY.
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY.
*/
enum ethtool_link_ext_substate_bad_signal_integrity {
ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS = 1,
ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE,
};
-/**
- * enum ethtool_link_ext_substate_cable_issue - more information in
- * addition to ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE.
- */
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE. */
enum ethtool_link_ext_substate_cable_issue {
ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE = 1,
ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE,
@@ -661,6 +659,7 @@ enum ethtool_link_ext_substate_cable_issue {
* now deprecated
* @ETH_SS_FEATURES: Device feature names
* @ETH_SS_RSS_HASH_FUNCS: RSS hash function names
+ * @ETH_SS_TUNABLES: tunable names
* @ETH_SS_PHY_STATS: Statistic names, for use with %ETHTOOL_GPHYSTATS
* @ETH_SS_PHY_TUNABLES: PHY tunable names
* @ETH_SS_LINK_MODES: link mode names
@@ -670,6 +669,8 @@ enum ethtool_link_ext_substate_cable_issue {
* @ETH_SS_TS_TX_TYPES: timestamping Tx types
* @ETH_SS_TS_RX_FILTERS: timestamping Rx filters
* @ETH_SS_UDP_TUNNEL_TYPES: UDP tunnel types
+ *
+ * @ETH_SS_COUNT: number of defined string sets
*/
enum ethtool_stringset {
ETH_SS_TEST = 0,
@@ -715,6 +716,7 @@ struct ethtool_gstrings {
/**
* struct ethtool_sset_info - string set information
* @cmd: Command number = %ETHTOOL_GSSET_INFO
+ * @reserved: Reserved for future use; see the note on reserved space.
* @sset_mask: On entry, a bitmask of string sets to query, with bits
* numbered according to &enum ethtool_stringset. On return, a
* bitmask of those string sets queried that are supported.
@@ -759,6 +761,7 @@ enum ethtool_test_flags {
* @flags: A bitmask of flags from &enum ethtool_test_flags. Some
* flags may be set by the user on entry; others may be set by
* the driver on return.
+ * @reserved: Reserved for future use; see the note on reserved space.
* @len: On return, the number of test results
* @data: Array of test results
*
@@ -959,6 +962,7 @@ union ethtool_flow_union {
* @vlan_etype: VLAN EtherType
* @vlan_tci: VLAN tag control information
* @data: user defined data
+ * @padding: Reserved for future use; see the note on reserved space.
*
* Note, @vlan_etype, @vlan_tci, and @data are only valid if %FLOW_EXT
* is set in &struct ethtool_rx_flow_spec @flow_type.
@@ -1134,7 +1138,8 @@ struct ethtool_rxfh_indir {
* hardware hash key.
* @hfunc: Defines the current RSS hash function used by HW (or to be set to).
* Valid values are one of the %ETH_RSS_HASH_*.
- * @rsvd: Reserved for future extensions.
+ * @rsvd8: Reserved for future use; see the note on reserved space.
+ * @rsvd32: Reserved for future use; see the note on reserved space.
* @rss_config: RX ring/queue index for each hash value i.e., indirection table
* of @indir_size __u32 elements, followed by hash key of @key_size
* bytes.
@@ -1302,7 +1307,9 @@ struct ethtool_sfeatures {
* @so_timestamping: bit mask of the sum of the supported SO_TIMESTAMPING flags
* @phc_index: device index of the associated PHC, or -1 if there is none
* @tx_types: bit mask of the supported hwtstamp_tx_types enumeration values
+ * @tx_reserved: Reserved for future use; see the note on reserved space.
* @rx_filters: bit mask of the supported hwtstamp_rx_filters enumeration values
+ * @rx_reserved: Reserved for future use; see the note on reserved space.
*
* The bits in the 'tx_types' and 'rx_filters' fields correspond to
* the 'hwtstamp_tx_types' and 'hwtstamp_rx_filters' enumeration values,
@@ -1981,6 +1988,11 @@ enum ethtool_reset_flags {
* autonegotiation; 0 if unknown or not applicable. Read-only.
* @transceiver: Used to distinguish different possible PHY types,
* reported consistently by PHYLIB. Read-only.
+ * @master_slave_cfg: Master/slave port mode.
+ * @master_slave_state: Master/slave port state.
+ * @reserved: Reserved for future use; see the note on reserved space.
+ * @reserved1: Reserved for future use; see the note on reserved space.
+ * @link_mode_masks: Variable length bitmaps.
*
* If autonegotiation is disabled, the speed and @duplex represent the
* fixed link mode and are writable if the driver supports multiple
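
The note on reserved space added above amounts to a simple rule for userspace: zero the whole request structure before filling it in. A minimal sketch (the ioctl plumbing is omitted; only the initialization matters here):

    #include <string.h>
    #include <linux/ethtool.h>

    /* Sketch: memset ensures all reserved fields start out as zero,
     * as required by the note on reserved space.
     */
    static void example_prepare_request(struct ethtool_link_settings *req)
    {
            memset(req, 0, sizeof(*req));
            req->cmd = ETHTOOL_GLINKSETTINGS;
    }
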
diff --git a/include/uapi/linux/rfkill.h b/include/uapi/linux/rfkill.h
index 03e8af87b364..9b77cfc42efa 100644
--- a/include/uapi/linux/rfkill.h
+++ b/include/uapi/linux/rfkill.h
@@ -86,34 +86,90 @@ enum rfkill_hard_block_reasons {
* @op: operation code
* @hard: hard state (0/1)
* @soft: soft state (0/1)
+ *
+ * Structure used for userspace communication on /dev/rfkill,
+ * used for events from the kernel and control to the kernel.
+ */
+struct rfkill_event {
+ __u32 idx;
+ __u8 type;
+ __u8 op;
+ __u8 soft;
+ __u8 hard;
+} __attribute__((packed));
+
+/**
+ * struct rfkill_event_ext - events for userspace on /dev/rfkill
+ * @idx: index of dev rfkill
+ * @type: type of the rfkill struct
+ * @op: operation code
+ * @hard: hard state (0/1)
+ * @soft: soft state (0/1)
* @hard_block_reasons: valid if hard is set. One or several reasons from
* &enum rfkill_hard_block_reasons.
*
* Structure used for userspace communication on /dev/rfkill,
* used for events from the kernel and control to the kernel.
+ *
+ * See the extensibility docs below.
*/
-struct rfkill_event {
+struct rfkill_event_ext {
__u32 idx;
__u8 type;
__u8 op;
__u8 soft;
__u8 hard;
+
+ /*
+ * older kernels will accept/send only up to this point,
+ * and if extended further up to any chunk marked below
+ */
+
__u8 hard_block_reasons;
} __attribute__((packed));
-/*
- * We are planning to be backward and forward compatible with changes
- * to the event struct, by adding new, optional, members at the end.
- * When reading an event (whether the kernel from userspace or vice
- * versa) we need to accept anything that's at least as large as the
- * version 1 event size, but might be able to accept other sizes in
- * the future.
+/**
+ * DOC: Extensibility
+ *
+ * Originally, we had planned to allow backward and forward compatible
+ * changes by just adding fields at the end of the structure that are
+ * then not reported on older kernels on read(), and not written to by
+ * older kernels on write(), with the kernel reporting the size it did
+ * accept as the result.
+ *
+ * This would have allowed userspace to detect on read() and write()
+ * which kernel structure version it was dealing with, and if it was just
+ * recompiled it would have gotten the new fields, but obviously not
+ * accessed them, and things should've continued to work.
+ *
+ * Unfortunately, while actually exercising this mechanism to add the
+ * hard block reasons field, we found that userspace (notably systemd)
+ * did all kinds of fun things not in line with this scheme:
+ *
+ * 1. treat the (expected) short writes as an error;
+ * 2. ask to read sizeof(struct rfkill_event) but then compare the
+ * actual return value to RFKILL_EVENT_SIZE_V1 and treat any
+ * mismatch as an error.
+ *
+ * As a consequence, just recompiling with a new struct version caused
+ * things to no longer work correctly on old and new kernels.
+ *
+ * Hence, we've rolled back &struct rfkill_event to the original version
+ * and added &struct rfkill_event_ext. This effectively reverts to the
+ * old behaviour for all userspace, unless it explicitly opts in to the
+ * rules outlined here by using the new &struct rfkill_event_ext.
+ *
+ * Userspace using &struct rfkill_event_ext must adhere to the following
+ * rules:
*
- * One exception is the kernel -- we already have two event sizes in
- * that we've made the 'hard' member optional since our only option
- * is to ignore it anyway.
+ * 1. accept short writes, optionally using them to detect that it's
+ * running on an older kernel;
+ * 2. accept short reads, knowing that this means it's running on an
+ * older kernel;
+ * 3. treat reads that are as long as requested as acceptable, not
+ * checking against RFKILL_EVENT_SIZE_V1 or such.
*/
-#define RFKILL_EVENT_SIZE_V1 8
+#define RFKILL_EVENT_SIZE_V1 sizeof(struct rfkill_event)
/* ioctl for turning off rfkill-input (if present) */
#define RFKILL_IOC_MAGIC 'R'
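
To make the extensibility rules concrete, userspace opting in to struct rfkill_event_ext would read /dev/rfkill roughly as below, treating a short read as a sign of an older kernel rather than an error (a hedged sketch, not a complete event loop):

    #include <unistd.h>
    #include <linux/rfkill.h>

    /* Sketch: accept any read of at least the v1 size (rules 2 and 3). */
    static int example_read_event(int fd, struct rfkill_event_ext *ev)
    {
            ssize_t len = read(fd, ev, sizeof(*ev));

            if (len < (ssize_t)RFKILL_EVENT_SIZE_V1)
                    return -1;                      /* error or malformed event */
            if (len < (ssize_t)sizeof(*ev))
                    ev->hard_block_reasons = 0;     /* older kernel: field absent */
            return 0;
    }
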
diff --git a/kernel/bpf/disasm.c b/kernel/bpf/disasm.c
index dad821c8ecd0..bbfc6bb79240 100644
--- a/kernel/bpf/disasm.c
+++ b/kernel/bpf/disasm.c
@@ -91,7 +91,7 @@ static const char *const bpf_atomic_alu_string[16] = {
[BPF_ADD >> 4] = "add",
[BPF_AND >> 4] = "and",
[BPF_OR >> 4] = "or",
- [BPF_XOR >> 4] = "or",
+ [BPF_XOR >> 4] = "xor",
};
static const char *const bpf_ldst_string[] = {
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 1576ff331ee4..d2de2abec35b 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -543,11 +543,11 @@ int bpf_obj_get_user(const char __user *pathname, int flags)
return PTR_ERR(raw);
if (type == BPF_TYPE_PROG)
- ret = bpf_prog_new_fd(raw);
+ ret = (f_flags != O_RDWR) ? -EINVAL : bpf_prog_new_fd(raw);
else if (type == BPF_TYPE_MAP)
ret = bpf_map_new_fd(raw, f_flags);
else if (type == BPF_TYPE_LINK)
- ret = bpf_link_new_fd(raw);
+ ret = (f_flags != O_RDWR) ? -EINVAL : bpf_link_new_fd(raw);
else
return -ENOENT;
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index be35bfb7fb13..6fbc2abe9c91 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -517,9 +517,17 @@ const struct bpf_func_proto bpf_get_stack_proto = {
BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
u32, size, u64, flags)
{
- struct pt_regs *regs = task_pt_regs(task);
+ struct pt_regs *regs;
+ long res;
- return __bpf_get_stack(regs, task, NULL, buf, size, flags);
+ if (!try_get_task_stack(task))
+ return -EFAULT;
+
+ regs = task_pt_regs(task);
+ res = __bpf_get_stack(regs, task, NULL, buf, size, flags);
+ put_task_stack(task);
+
+ return res;
}
BTF_ID_LIST_SINGLE(bpf_get_task_stack_btf_ids, struct, task_struct)
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 1f3a4be4b175..4aa8b52adf25 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -9,6 +9,7 @@
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>
+#include <linux/module.h>
/* dummy _ops. The verifier will operate on target program's ops. */
const struct bpf_verifier_ops bpf_extension_verifier_ops = {
@@ -87,6 +88,26 @@ out:
return tr;
}
+static int bpf_trampoline_module_get(struct bpf_trampoline *tr)
+{
+ struct module *mod;
+ int err = 0;
+
+ preempt_disable();
+ mod = __module_text_address((unsigned long) tr->func.addr);
+ if (mod && !try_module_get(mod))
+ err = -ENOENT;
+ preempt_enable();
+ tr->mod = mod;
+ return err;
+}
+
+static void bpf_trampoline_module_put(struct bpf_trampoline *tr)
+{
+ module_put(tr->mod);
+ tr->mod = NULL;
+}
+
static int is_ftrace_location(void *ip)
{
long addr;
@@ -108,6 +129,9 @@ static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
ret = unregister_ftrace_direct((long)ip, (long)old_addr);
else
ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);
+
+ if (!ret)
+ bpf_trampoline_module_put(tr);
return ret;
}
@@ -134,10 +158,16 @@ static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
return ret;
tr->func.ftrace_managed = ret;
+ if (bpf_trampoline_module_get(tr))
+ return -ENOENT;
+
if (tr->func.ftrace_managed)
ret = register_ftrace_direct((long)ip, (long)new_addr);
else
ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
+
+ if (ret)
+ bpf_trampoline_module_put(tr);
return ret;
}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 852541a435ef..f63b27574b3a 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -12724,6 +12724,11 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
u32 btf_id, member_idx;
const char *mname;
+ if (!prog->gpl_compatible) {
+ verbose(env, "struct ops programs must have a GPL compatible license\n");
+ return -EINVAL;
+ }
+
btf_id = prog->aux->attach_btf_id;
st_ops = bpf_struct_ops_find(btf_id);
if (!st_ops) {
diff --git a/kernel/fork.c b/kernel/fork.c
index 50209691f21a..85ca68f4b01e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1950,8 +1950,14 @@ static __latent_entropy struct task_struct *copy_process(
p = dup_task_struct(current, node);
if (!p)
goto fork_out;
- if (args->io_thread)
+ if (args->io_thread) {
+ /*
+ * Mark us an IO worker, and block any signal that isn't
+ * fatal or STOP
+ */
p->flags |= PF_IO_WORKER;
+ siginitsetinv(&p->blocked, sigmask(SIGKILL)|sigmask(SIGSTOP));
+ }
/*
* This _must_ happen before we call free_task(), i.e. before we jump
@@ -2443,14 +2449,8 @@ struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node)
.stack_size = (unsigned long)arg,
.io_thread = 1,
};
- struct task_struct *tsk;
- tsk = copy_process(NULL, 0, node, &args);
- if (!IS_ERR(tsk)) {
- sigfillset(&tsk->blocked);
- sigdelsetmask(&tsk->blocked, sigmask(SIGKILL));
- }
- return tsk;
+ return copy_process(NULL, 0, node, &args);
}
/*
diff --git a/kernel/freezer.c b/kernel/freezer.c
index 1a2d57d1327c..dc520f01f99d 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -134,7 +134,7 @@ bool freeze_task(struct task_struct *p)
return false;
}
- if (!(p->flags & (PF_KTHREAD | PF_IO_WORKER)))
+ if (!(p->flags & PF_KTHREAD))
fake_signal_wake_up(p);
else
wake_up_state(p, TASK_INTERRUPTIBLE);
diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c
index 1358fa4abfa8..0f4530b3a8cd 100644
--- a/kernel/power/energy_model.c
+++ b/kernel/power/energy_model.c
@@ -98,7 +98,7 @@ static int __init em_debug_init(void)
return 0;
}
-core_initcall(em_debug_init);
+fs_initcall(em_debug_init);
#else /* CONFIG_DEBUG_FS */
static void em_debug_create_pd(struct device *dev) {}
static void em_debug_remove_pd(struct device *dev) {}
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 821cf1723814..61db50f7ca86 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -375,7 +375,7 @@ static int ptrace_attach(struct task_struct *task, long request,
audit_ptrace(task);
retval = -EPERM;
- if (unlikely(task->flags & (PF_KTHREAD | PF_IO_WORKER)))
+ if (unlikely(task->flags & PF_KTHREAD))
goto out;
if (same_thread_group(task, current))
goto out;
diff --git a/kernel/signal.c b/kernel/signal.c
index f2a1b898da29..f2718350bf4b 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -91,7 +91,7 @@ static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
return true;
/* Only allow kernel generated signals to this kthread */
- if (unlikely((t->flags & (PF_KTHREAD | PF_IO_WORKER)) &&
+ if (unlikely((t->flags & PF_KTHREAD) &&
(handler == SIG_KTHREAD_KERNEL) && !force))
return true;
@@ -288,8 +288,7 @@ bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
- if (unlikely(fatal_signal_pending(task) ||
- (task->flags & (PF_EXITING | PF_IO_WORKER))))
+ if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
return false;
if (mask & JOBCTL_STOP_SIGMASK)
@@ -834,9 +833,6 @@ static int check_kill_permission(int sig, struct kernel_siginfo *info,
if (!valid_signal(sig))
return -EINVAL;
- /* PF_IO_WORKER threads don't take any signals */
- if (t->flags & PF_IO_WORKER)
- return -ESRCH;
if (!si_fromuser(info))
return 0;
@@ -1100,7 +1096,7 @@ static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struc
/*
* Skip useless siginfo allocation for SIGKILL and kernel threads.
*/
- if ((sig == SIGKILL) || (t->flags & (PF_KTHREAD | PF_IO_WORKER)))
+ if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
goto out_set;
/*
@@ -2772,13 +2768,21 @@ relock:
}
/*
+ * PF_IO_WORKER threads will catch and exit on fatal signals
+ * themselves. They have cleanup that must be performed, so
+ * we cannot call do_exit() on their behalf.
+ */
+ if (current->flags & PF_IO_WORKER)
+ goto out;
+
+ /*
* Death signals, no core dump.
*/
do_group_exit(ksig->info.si_signo);
/* NOTREACHED */
}
spin_unlock_irq(&sighand->siglock);
-
+out:
ksig->sig = signr;
if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b7e29db127fa..3ba52d4e1314 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3231,7 +3231,8 @@ ftrace_allocate_pages(unsigned long num_to_init)
pg = start_pg;
while (pg) {
order = get_count_order(pg->size / ENTRIES_PER_PAGE);
- free_pages((unsigned long)pg->records, order);
+ if (order >= 0)
+ free_pages((unsigned long)pg->records, order);
start_pg = pg->next;
kfree(pg);
pg = start_pg;
@@ -6451,7 +6452,8 @@ void ftrace_release_mod(struct module *mod)
clear_mod_from_hashes(pg);
order = get_count_order(pg->size / ENTRIES_PER_PAGE);
- free_pages((unsigned long)pg->records, order);
+ if (order >= 0)
+ free_pages((unsigned long)pg->records, order);
tmp_page = pg->next;
kfree(pg);
ftrace_number_of_pages -= 1 << order;
@@ -6811,7 +6813,8 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
if (!pg->index) {
*last_pg = pg->next;
order = get_count_order(pg->size / ENTRIES_PER_PAGE);
- free_pages((unsigned long)pg->records, order);
+ if (order >= 0)
+ free_pages((unsigned long)pg->records, order);
ftrace_number_of_pages -= 1 << order;
ftrace_number_of_groups--;
kfree(pg);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index eccb4e1187cc..5c777627212f 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2984,7 +2984,8 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
size = nr_entries * sizeof(unsigned long);
event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
- sizeof(*entry) + size, trace_ctx);
+ (sizeof(*entry) - sizeof(entry->caller)) + size,
+ trace_ctx);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 71109065bd8e..107bc38b1945 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -278,9 +278,10 @@ void touch_all_softlockup_watchdogs(void)
* update as well, the only side effect might be a cycle delay for
* the softlockup check.
*/
- for_each_cpu(cpu, &watchdog_allowed_mask)
+ for_each_cpu(cpu, &watchdog_allowed_mask) {
per_cpu(watchdog_touch_ts, cpu) = SOFTLOCKUP_RESET;
- wq_watchdog_touch(-1);
+ wq_watchdog_touch(cpu);
+ }
}
void touch_softlockup_watchdog_sync(void)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0d150da252e8..79f2319543ce 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1412,7 +1412,6 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
*/
lockdep_assert_irqs_disabled();
- debug_work_activate(work);
/* if draining, only works from the same workqueue are allowed */
if (unlikely(wq->flags & __WQ_DRAINING) &&
@@ -1494,6 +1493,7 @@ retry:
worklist = &pwq->delayed_works;
}
+ debug_work_activate(work);
insert_work(pwq, work, worklist, work_flags);
out:
@@ -5787,22 +5787,17 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
continue;
/* get the latest of pool and touched timestamps */
+ if (pool->cpu >= 0)
+ touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu));
+ else
+ touched = READ_ONCE(wq_watchdog_touched);
pool_ts = READ_ONCE(pool->watchdog_ts);
- touched = READ_ONCE(wq_watchdog_touched);
if (time_after(pool_ts, touched))
ts = pool_ts;
else
ts = touched;
- if (pool->cpu >= 0) {
- unsigned long cpu_touched =
- READ_ONCE(per_cpu(wq_watchdog_touched_cpu,
- pool->cpu));
- if (time_after(cpu_touched, ts))
- ts = cpu_touched;
- }
-
/* did we stall? */
if (time_after(jiffies, ts + thresh)) {
lockup_detected = true;
@@ -5826,8 +5821,8 @@ notrace void wq_watchdog_touch(int cpu)
{
if (cpu >= 0)
per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
- else
- wq_watchdog_touched = jiffies;
+
+ wq_watchdog_touched = jiffies;
}
static void wq_watchdog_set_thresh(unsigned long thresh)
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 8294f43f4981..8b1c318189ce 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -1530,24 +1530,24 @@ static noinline void check_store_range(struct xarray *xa)
#ifdef CONFIG_XARRAY_MULTI
static void check_split_1(struct xarray *xa, unsigned long index,
- unsigned int order)
+ unsigned int order, unsigned int new_order)
{
- XA_STATE(xas, xa, index);
- void *entry;
- unsigned int i = 0;
+ XA_STATE_ORDER(xas, xa, index, new_order);
+ unsigned int i;
xa_store_order(xa, index, order, xa, GFP_KERNEL);
xas_split_alloc(&xas, xa, order, GFP_KERNEL);
xas_lock(&xas);
xas_split(&xas, xa, order);
+ for (i = 0; i < (1 << order); i += (1 << new_order))
+ __xa_store(xa, index + i, xa_mk_index(index + i), 0);
xas_unlock(&xas);
- xa_for_each(xa, index, entry) {
- XA_BUG_ON(xa, entry != xa);
- i++;
+ for (i = 0; i < (1 << order); i++) {
+ unsigned int val = index + (i & ~((1 << new_order) - 1));
+ XA_BUG_ON(xa, xa_load(xa, index + i) != xa_mk_index(val));
}
- XA_BUG_ON(xa, i != 1 << order);
xa_set_mark(xa, index, XA_MARK_0);
XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
@@ -1557,14 +1557,16 @@ static void check_split_1(struct xarray *xa, unsigned long index,
static noinline void check_split(struct xarray *xa)
{
- unsigned int order;
+ unsigned int order, new_order;
XA_BUG_ON(xa, !xa_empty(xa));
for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) {
- check_split_1(xa, 0, order);
- check_split_1(xa, 1UL << order, order);
- check_split_1(xa, 3UL << order, order);
+ for (new_order = 0; new_order < order; new_order++) {
+ check_split_1(xa, 0, order, new_order);
+ check_split_1(xa, 1UL << order, order, new_order);
+ check_split_1(xa, 3UL << order, order, new_order);
+ }
}
}
#else
diff --git a/lib/xarray.c b/lib/xarray.c
index 5fa51614802a..f5d8f54907b4 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -987,7 +987,7 @@ static void node_set_marks(struct xa_node *node, unsigned int offset,
* xas_split_alloc() - Allocate memory for splitting an entry.
* @xas: XArray operation state.
* @entry: New entry which will be stored in the array.
- * @order: New entry order.
+ * @order: Current entry order.
* @gfp: Memory allocation flags.
*
* This function should be called before calling xas_split().
@@ -1011,7 +1011,7 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
do {
unsigned int i;
- void *sibling;
+ void *sibling = NULL;
struct xa_node *node;
node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
@@ -1021,7 +1021,7 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
for (i = 0; i < XA_CHUNK_SIZE; i++) {
if ((i & mask) == 0) {
RCU_INIT_POINTER(node->slots[i], entry);
- sibling = xa_mk_sibling(0);
+ sibling = xa_mk_sibling(i);
} else {
RCU_INIT_POINTER(node->slots[i], sibling);
}
@@ -1041,9 +1041,10 @@ EXPORT_SYMBOL_GPL(xas_split_alloc);
* xas_split() - Split a multi-index entry into smaller entries.
* @xas: XArray operation state.
* @entry: New entry to store in the array.
- * @order: New entry order.
+ * @order: Current entry order.
*
- * The value in the entry is copied to all the replacement entries.
+ * The size of the new entries is set in @xas. The value in @entry is
+ * copied to all the replacement entries.
*
* Context: Any context. The caller should hold the xa_lock.
*/
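
Read together with the updated test above, the calling convention is: @order names the current (large) order of the existing entry, while the new, smaller order comes from the xa_state. A condensed sketch mirroring check_split_1(), with the orders hard-coded for illustration and allocation-failure handling elided:

    #include <linux/xarray.h>

    /* Sketch: split an existing order-9 entry at @index into order-0 entries.
     * xas_split_alloc() failure handling is omitted for brevity.
     */
    static void example_split(struct xarray *xa, unsigned long index)
    {
            void *entry = xa_load(xa, index);       /* current multi-order entry */
            XA_STATE_ORDER(xas, xa, index, 0);      /* new (smaller) order */

            xas_split_alloc(&xas, entry, 9, GFP_KERNEL);    /* 9 = current order */
            xas_lock(&xas);
            xas_split(&xas, entry, 9);
            xas_unlock(&xas);
    }
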
diff --git a/mm/memory.c b/mm/memory.c
index 5efa07fb6cdc..550405fc3b5e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -166,7 +166,7 @@ static int __init init_zero_pfn(void)
zero_pfn = page_to_pfn(ZERO_PAGE(0));
return 0;
}
-core_initcall(init_zero_pfn);
+early_initcall(init_zero_pfn);
void mm_trace_rss_stat(struct mm_struct *mm, int member, long count)
{
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index f8761281aab0..434b4f042909 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -890,6 +890,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
hlist_for_each_entry(vlan, &orig_node->vlan_list, list) {
tt_vlan->vid = htons(vlan->vid);
tt_vlan->crc = htonl(vlan->tt.crc);
+ tt_vlan->reserved = 0;
tt_vlan++;
}
@@ -973,6 +974,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
tt_vlan->vid = htons(vlan->vid);
tt_vlan->crc = htonl(vlan->tt.crc);
+ tt_vlan->reserved = 0;
tt_vlan++;
}
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 0e5c37be4a2b..909b9e684e04 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -86,6 +86,8 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
MODULE_ALIAS("can-proto-2");
+#define BCM_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)
+
/*
* easy access to the first 64 bit of can(fd)_frame payload. cp->data is
* 64 bit aligned so the offset has to be multiples of 8 which is ensured
@@ -1292,7 +1294,7 @@ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
/* no bound device as default => check msg_name */
DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
- if (msg->msg_namelen < CAN_REQUIRED_SIZE(*addr, can_ifindex))
+ if (msg->msg_namelen < BCM_MIN_NAMELEN)
return -EINVAL;
if (addr->can_family != AF_CAN)
@@ -1534,7 +1536,7 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
struct net *net = sock_net(sk);
int ret = 0;
- if (len < CAN_REQUIRED_SIZE(*addr, can_ifindex))
+ if (len < BCM_MIN_NAMELEN)
return -EINVAL;
lock_sock(sk);
@@ -1616,8 +1618,8 @@ static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
sock_recv_ts_and_drops(msg, sk, skb);
if (msg->msg_name) {
- __sockaddr_check_size(sizeof(struct sockaddr_can));
- msg->msg_namelen = sizeof(struct sockaddr_can);
+ __sockaddr_check_size(BCM_MIN_NAMELEN);
+ msg->msg_namelen = BCM_MIN_NAMELEN;
memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
}
diff --git a/net/can/isotp.c b/net/can/isotp.c
index 15ea1234d457..9f94ad3caee9 100644
--- a/net/can/isotp.c
+++ b/net/can/isotp.c
@@ -77,6 +77,8 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
MODULE_ALIAS("can-proto-6");
+#define ISOTP_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_addr.tp)
+
#define SINGLE_MASK(id) (((id) & CAN_EFF_FLAG) ? \
(CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
(CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
@@ -986,7 +988,8 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
sock_recv_timestamp(msg, sk, skb);
if (msg->msg_name) {
- msg->msg_namelen = sizeof(struct sockaddr_can);
+ __sockaddr_check_size(ISOTP_MIN_NAMELEN);
+ msg->msg_namelen = ISOTP_MIN_NAMELEN;
memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
}
@@ -1056,7 +1059,7 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
int notify_enetdown = 0;
int do_rx_reg = 1;
- if (len < CAN_REQUIRED_SIZE(struct sockaddr_can, can_addr.tp))
+ if (len < ISOTP_MIN_NAMELEN)
return -EINVAL;
/* do not register frame reception for functional addressing */
@@ -1152,13 +1155,13 @@ static int isotp_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
if (peer)
return -EOPNOTSUPP;
- memset(addr, 0, sizeof(*addr));
+ memset(addr, 0, ISOTP_MIN_NAMELEN);
addr->can_family = AF_CAN;
addr->can_ifindex = so->ifindex;
addr->can_addr.tp.rx_id = so->rxid;
addr->can_addr.tp.tx_id = so->txid;
- return sizeof(*addr);
+ return ISOTP_MIN_NAMELEN;
}
static int isotp_setsockopt(struct socket *sock, int level, int optname,
diff --git a/net/can/raw.c b/net/can/raw.c
index 37b47a39a3ed..139d9471ddcf 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -60,6 +60,8 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
MODULE_ALIAS("can-proto-1");
+#define RAW_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)
+
#define MASK_ALL 0
/* A raw socket has a list of can_filters attached to it, each receiving
@@ -394,7 +396,7 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
int err = 0;
int notify_enetdown = 0;
- if (len < CAN_REQUIRED_SIZE(*addr, can_ifindex))
+ if (len < RAW_MIN_NAMELEN)
return -EINVAL;
if (addr->can_family != AF_CAN)
return -EINVAL;
@@ -475,11 +477,11 @@ static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
if (peer)
return -EOPNOTSUPP;
- memset(addr, 0, sizeof(*addr));
+ memset(addr, 0, RAW_MIN_NAMELEN);
addr->can_family = AF_CAN;
addr->can_ifindex = ro->ifindex;
- return sizeof(*addr);
+ return RAW_MIN_NAMELEN;
}
static int raw_setsockopt(struct socket *sock, int level, int optname,
@@ -739,7 +741,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
if (msg->msg_name) {
DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
- if (msg->msg_namelen < CAN_REQUIRED_SIZE(*addr, can_ifindex))
+ if (msg->msg_namelen < RAW_MIN_NAMELEN)
return -EINVAL;
if (addr->can_family != AF_CAN)
@@ -832,8 +834,8 @@ static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
sock_recv_ts_and_drops(msg, sk, skb);
if (msg->msg_name) {
- __sockaddr_check_size(sizeof(struct sockaddr_can));
- msg->msg_namelen = sizeof(struct sockaddr_can);
+ __sockaddr_check_size(RAW_MIN_NAMELEN);
+ msg->msg_namelen = RAW_MIN_NAMELEN;
memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
}
diff --git a/net/core/dev.c b/net/core/dev.c
index 33ff4a944109..cc5df273f766 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -7041,7 +7041,7 @@ static int napi_thread_wait(struct napi_struct *napi)
set_current_state(TASK_INTERRUPTIBLE);
- while (!kthread_should_stop() && !napi_disable_pending(napi)) {
+ while (!kthread_should_stop()) {
/* Testing SCHED_THREADED bit here to make sure the current
* kthread owns this napi and could poll on this napi.
* Testing SCHED bit is not enough because SCHED bit might be
@@ -7059,6 +7059,7 @@ static int napi_thread_wait(struct napi_struct *napi)
set_current_state(TASK_INTERRUPTIBLE);
}
__set_current_state(TASK_RUNNING);
+
return -1;
}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index e2982b3970b8..8379719d1dce 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1379,7 +1379,7 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
* we can reinject the packet there.
*/
n2 = NULL;
- if (dst) {
+ if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
n2 = dst_neigh_lookup_skb(dst, skb);
if (n2)
n1 = n2;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 9f1f55785a6f..714d5fa38546 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2872,7 +2872,7 @@ static int do_setlink(const struct sk_buff *skb,
BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af))));
- err = af_ops->set_link_af(dev, af);
+ err = af_ops->set_link_af(dev, af, extack);
if (err < 0) {
rcu_read_unlock();
goto errout;
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 92a83c02562a..43ce17a6a585 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -586,6 +586,7 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb
if (unlikely(!msg))
return -EAGAIN;
sk_msg_init(msg);
+ skb_set_owner_r(skb, sk);
return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
}
@@ -884,7 +885,6 @@ static void sk_psock_tls_verdict_apply(struct sk_buff *skb, struct sock *sk, int
{
switch (verdict) {
case __SK_REDIRECT:
- skb_set_owner_r(skb, sk);
sk_psock_skb_redirect(skb);
break;
case __SK_PASS:
@@ -902,10 +902,6 @@ int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
rcu_read_lock();
prog = READ_ONCE(psock->progs.stream_verdict);
if (likely(prog)) {
- /* We skip full set_owner_r here because if we do a SK_PASS
- * or SK_DROP we can skip skb memory accounting and use the
- * TLS context.
- */
skb->sk = psock->sk;
skb_dst_drop(skb);
skb_bpf_redirect_clear(skb);
@@ -995,13 +991,14 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
kfree_skb(skb);
goto out;
}
- skb_set_owner_r(skb, sk);
prog = READ_ONCE(psock->progs.stream_verdict);
if (likely(prog)) {
+ skb->sk = sk;
skb_dst_drop(skb);
skb_bpf_redirect_clear(skb);
ret = bpf_prog_run_pin_on_cpu(prog, skb);
ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
+ skb->sk = NULL;
}
sk_psock_verdict_apply(psock, skb, ret);
out:
@@ -1115,15 +1112,16 @@ static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
kfree_skb(skb);
goto out;
}
- skb_set_owner_r(skb, sk);
prog = READ_ONCE(psock->progs.stream_verdict);
if (!prog)
prog = READ_ONCE(psock->progs.skb_verdict);
if (likely(prog)) {
+ skb->sk = sk;
skb_dst_drop(skb);
skb_bpf_redirect_clear(skb);
ret = bpf_prog_run_pin_on_cpu(prog, skb);
ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
+ skb->sk = NULL;
}
sk_psock_verdict_apply(psock, skb, ret);
out:
diff --git a/net/core/sock.c b/net/core/sock.c
index cc31b601ae10..5ec90f99e102 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2132,16 +2132,10 @@ void skb_orphan_partial(struct sk_buff *skb)
if (skb_is_tcp_pure_ack(skb))
return;
- if (can_skb_orphan_partial(skb)) {
- struct sock *sk = skb->sk;
-
- if (refcount_inc_not_zero(&sk->sk_refcnt)) {
- WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
- skb->destructor = sock_efree;
- }
- } else {
+ if (can_skb_orphan_partial(skb))
+ skb_set_owner_sk_safe(skb, skb->sk);
+ else
skb_orphan(skb);
- }
}
EXPORT_SYMBOL(skb_orphan_partial);
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 05354976c1fc..858276e72c68 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -350,7 +350,8 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
page = virt_to_head_page(data);
- napi_direct &= !xdp_return_frame_no_direct();
+ if (napi_direct && xdp_return_frame_no_direct())
+ napi_direct = false;
page_pool_put_full_page(xa->page_pool, page, napi_direct);
rcu_read_unlock();
break;
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index d142eb2b288b..3c3e56a1f34d 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -795,8 +795,14 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
list_for_each_entry(dp, &dst->ports, list) {
err = dsa_port_setup(dp);
- if (err)
+ if (err) {
+ dsa_port_devlink_teardown(dp);
+ dp->type = DSA_PORT_TYPE_UNUSED;
+ err = dsa_port_devlink_setup(dp);
+ if (err)
+ goto teardown;
continue;
+ }
}
return 0;
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index 4b5da89dc27a..32963276452f 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -107,7 +107,7 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
bool unset_vlan_filtering = br_vlan_enabled(info->br);
struct dsa_switch_tree *dst = ds->dst;
struct netlink_ext_ack extack = {0};
- int err, i;
+ int err, port;
if (dst->index == info->tree_index && ds->index == info->sw_index &&
ds->ops->port_bridge_join)
@@ -124,13 +124,16 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
* it. That is a good thing, because that lets us handle it and also
* handle the case where the switch's vlan_filtering setting is global
* (not per port). When that happens, the correct moment to trigger the
- * vlan_filtering callback is only when the last port left this bridge.
+ * vlan_filtering callback is only when the last port leaves the last
+ * VLAN-aware bridge.
*/
if (unset_vlan_filtering && ds->vlan_filtering_is_global) {
- for (i = 0; i < ds->num_ports; i++) {
- if (i == info->port)
- continue;
- if (dsa_to_port(ds, i)->bridge_dev == info->br) {
+ for (port = 0; port < ds->num_ports; port++) {
+ struct net_device *bridge_dev;
+
+ bridge_dev = dsa_to_port(ds, port)->bridge_dev;
+
+ if (bridge_dev && br_vlan_enabled(bridge_dev)) {
unset_vlan_filtering = false;
break;
}
diff --git a/net/ethtool/common.c b/net/ethtool/common.c
index c6a383dfd6c2..f9dcbad84788 100644
--- a/net/ethtool/common.c
+++ b/net/ethtool/common.c
@@ -273,6 +273,7 @@ const struct link_mode_info link_mode_params[] = {
__DEFINE_LINK_MODE_PARAMS(10000, KR, Full),
[ETHTOOL_LINK_MODE_10000baseR_FEC_BIT] = {
.speed = SPEED_10000,
+ .lanes = 1,
.duplex = DUPLEX_FULL,
},
__DEFINE_LINK_MODE_PARAMS(20000, MLD2, Full),
@@ -562,3 +563,19 @@ void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops)
rtnl_unlock();
}
EXPORT_SYMBOL_GPL(ethtool_set_ethtool_phy_ops);
+
+void
+ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings,
+ enum ethtool_link_mode_bit_indices link_mode)
+{
+ const struct link_mode_info *link_info;
+
+ if (WARN_ON_ONCE(link_mode >= __ETHTOOL_LINK_MODE_MASK_NBITS))
+ return;
+
+ link_info = &link_mode_params[link_mode];
+ link_ksettings->base.speed = link_info->speed;
+ link_ksettings->lanes = link_info->lanes;
+ link_ksettings->base.duplex = link_info->duplex;
+}
+EXPORT_SYMBOL_GPL(ethtool_params_from_link_mode);
diff --git a/net/ethtool/eee.c b/net/ethtool/eee.c
index 901b7de941ab..e10bfcc07853 100644
--- a/net/ethtool/eee.c
+++ b/net/ethtool/eee.c
@@ -169,8 +169,8 @@ int ethnl_set_eee(struct sk_buff *skb, struct genl_info *info)
ethnl_update_bool32(&eee.eee_enabled, tb[ETHTOOL_A_EEE_ENABLED], &mod);
ethnl_update_bool32(&eee.tx_lpi_enabled,
tb[ETHTOOL_A_EEE_TX_LPI_ENABLED], &mod);
- ethnl_update_bool32(&eee.tx_lpi_timer, tb[ETHTOOL_A_EEE_TX_LPI_TIMER],
- &mod);
+ ethnl_update_u32(&eee.tx_lpi_timer, tb[ETHTOOL_A_EEE_TX_LPI_TIMER],
+ &mod);
ret = 0;
if (!mod)
goto out_ops;
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 26b3e7086075..a9f67574148f 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -426,29 +426,13 @@ struct ethtool_link_usettings {
int __ethtool_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *link_ksettings)
{
- const struct link_mode_info *link_info;
- int err;
-
ASSERT_RTNL();
if (!dev->ethtool_ops->get_link_ksettings)
return -EOPNOTSUPP;
memset(link_ksettings, 0, sizeof(*link_ksettings));
-
- link_ksettings->link_mode = -1;
- err = dev->ethtool_ops->get_link_ksettings(dev, link_ksettings);
- if (err)
- return err;
-
- if (link_ksettings->link_mode != -1) {
- link_info = &link_mode_params[link_ksettings->link_mode];
- link_ksettings->base.speed = link_info->speed;
- link_ksettings->lanes = link_info->lanes;
- link_ksettings->base.duplex = link_info->duplex;
- }
-
- return 0;
+ return dev->ethtool_ops->get_link_ksettings(dev, link_ksettings);
}
EXPORT_SYMBOL(__ethtool_get_link_ksettings);
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index 7444ec6e298e..bfcdc75fc01e 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -217,6 +217,7 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
if (master) {
skb->dev = master->dev;
+ skb_reset_mac_header(skb);
hsr_forward_skb(skb, master);
} else {
atomic_long_inc(&dev->tx_dropped);
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index ed82a470b6e1..b218e4594009 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -555,12 +555,6 @@ void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
{
struct hsr_frame_info frame;
- if (skb_mac_header(skb) != skb->data) {
- WARN_ONCE(1, "%s:%d: Malformed frame (port_src %s)\n",
- __FILE__, __LINE__, port->dev->name);
- goto out_drop;
- }
-
if (fill_frame_info(&frame, skb, port) < 0)
goto out_drop;
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index 9c640d670ffe..0c1b0770c59e 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -551,9 +551,7 @@ ieee802154_llsec_parse_key_id(struct genl_info *info,
desc->mode = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE]);
if (desc->mode == IEEE802154_SCF_KEY_IMPLICIT) {
- if (!info->attrs[IEEE802154_ATTR_PAN_ID] &&
- !(info->attrs[IEEE802154_ATTR_SHORT_ADDR] ||
- info->attrs[IEEE802154_ATTR_HW_ADDR]))
+ if (!info->attrs[IEEE802154_ATTR_PAN_ID])
return -EINVAL;
desc->device_addr.pan_id = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_PAN_ID]);
@@ -562,6 +560,9 @@ ieee802154_llsec_parse_key_id(struct genl_info *info,
desc->device_addr.mode = IEEE802154_ADDR_SHORT;
desc->device_addr.short_addr = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_SHORT_ADDR]);
} else {
+ if (!info->attrs[IEEE802154_ATTR_HW_ADDR])
+ return -EINVAL;
+
desc->device_addr.mode = IEEE802154_ADDR_LONG;
desc->device_addr.extended_addr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
}
diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
index 7c5a1aa5adb4..05f6bd89a7dd 100644
--- a/net/ieee802154/nl802154.c
+++ b/net/ieee802154/nl802154.c
@@ -820,8 +820,13 @@ nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags,
goto nla_put_failure;
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+ goto out;
+
if (nl802154_get_llsec_params(msg, rdev, wpan_dev) < 0)
goto nla_put_failure;
+
+out:
#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
genlmsg_end(msg, hdr);
@@ -1384,6 +1389,9 @@ static int nl802154_set_llsec_params(struct sk_buff *skb,
u32 changed = 0;
int ret;
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+ return -EOPNOTSUPP;
+
if (info->attrs[NL802154_ATTR_SEC_ENABLED]) {
u8 enabled;
@@ -1490,6 +1498,11 @@ nl802154_dump_llsec_key(struct sk_buff *skb, struct netlink_callback *cb)
if (err)
return err;
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
+ err = skb->len;
+ goto out_err;
+ }
+
if (!wpan_dev->netdev) {
err = -EINVAL;
goto out_err;
@@ -1544,7 +1557,11 @@ static int nl802154_add_llsec_key(struct sk_buff *skb, struct genl_info *info)
struct ieee802154_llsec_key_id id = { };
u32 commands[NL802154_CMD_FRAME_NR_IDS / 32] = { };
- if (nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+ return -EOPNOTSUPP;
+
+ if (!info->attrs[NL802154_ATTR_SEC_KEY] ||
+ nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
return -EINVAL;
if (!attrs[NL802154_KEY_ATTR_USAGE_FRAMES] ||
@@ -1592,7 +1609,11 @@ static int nl802154_del_llsec_key(struct sk_buff *skb, struct genl_info *info)
struct nlattr *attrs[NL802154_KEY_ATTR_MAX + 1];
struct ieee802154_llsec_key_id id;
- if (nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+ return -EOPNOTSUPP;
+
+ if (!info->attrs[NL802154_ATTR_SEC_KEY] ||
+ nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
return -EINVAL;
if (ieee802154_llsec_parse_key_id(attrs[NL802154_KEY_ATTR_ID], &id) < 0)
@@ -1656,6 +1677,11 @@ nl802154_dump_llsec_dev(struct sk_buff *skb, struct netlink_callback *cb)
if (err)
return err;
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
+ err = skb->len;
+ goto out_err;
+ }
+
if (!wpan_dev->netdev) {
err = -EINVAL;
goto out_err;
@@ -1742,6 +1768,9 @@ static int nl802154_add_llsec_dev(struct sk_buff *skb, struct genl_info *info)
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
struct ieee802154_llsec_device dev_desc;
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+ return -EOPNOTSUPP;
+
if (ieee802154_llsec_parse_device(info->attrs[NL802154_ATTR_SEC_DEVICE],
&dev_desc) < 0)
return -EINVAL;
@@ -1757,7 +1786,11 @@ static int nl802154_del_llsec_dev(struct sk_buff *skb, struct genl_info *info)
struct nlattr *attrs[NL802154_DEV_ATTR_MAX + 1];
__le64 extended_addr;
- if (nla_parse_nested_deprecated(attrs, NL802154_DEV_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVICE], nl802154_dev_policy, info->extack))
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+ return -EOPNOTSUPP;
+
+ if (!info->attrs[NL802154_ATTR_SEC_DEVICE] ||
+ nla_parse_nested_deprecated(attrs, NL802154_DEV_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVICE], nl802154_dev_policy, info->extack))
return -EINVAL;
if (!attrs[NL802154_DEV_ATTR_EXTENDED_ADDR])
@@ -1825,6 +1858,11 @@ nl802154_dump_llsec_devkey(struct sk_buff *skb, struct netlink_callback *cb)
if (err)
return err;
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
+ err = skb->len;
+ goto out_err;
+ }
+
if (!wpan_dev->netdev) {
err = -EINVAL;
goto out_err;
@@ -1882,6 +1920,9 @@ static int nl802154_add_llsec_devkey(struct sk_buff *skb, struct genl_info *info
struct ieee802154_llsec_device_key key;
__le64 extended_addr;
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+ return -EOPNOTSUPP;
+
if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] ||
nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack) < 0)
return -EINVAL;
@@ -1913,7 +1954,11 @@ static int nl802154_del_llsec_devkey(struct sk_buff *skb, struct genl_info *info
struct ieee802154_llsec_device_key key;
__le64 extended_addr;
- if (nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack))
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+ return -EOPNOTSUPP;
+
+ if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] ||
+ nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack))
return -EINVAL;
if (!attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR])
@@ -1986,6 +2031,11 @@ nl802154_dump_llsec_seclevel(struct sk_buff *skb, struct netlink_callback *cb)
if (err)
return err;
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
+ err = skb->len;
+ goto out_err;
+ }
+
if (!wpan_dev->netdev) {
err = -EINVAL;
goto out_err;
@@ -2070,6 +2120,9 @@ static int nl802154_add_llsec_seclevel(struct sk_buff *skb,
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
struct ieee802154_llsec_seclevel sl;
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+ return -EOPNOTSUPP;
+
if (llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL],
&sl) < 0)
return -EINVAL;
@@ -2085,6 +2138,9 @@ static int nl802154_del_llsec_seclevel(struct sk_buff *skb,
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
struct ieee802154_llsec_seclevel sl;
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+ return -EOPNOTSUPP;
+
if (!info->attrs[NL802154_ATTR_SEC_LEVEL] ||
llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL],
&sl) < 0)
@@ -2098,11 +2154,7 @@ static int nl802154_del_llsec_seclevel(struct sk_buff *skb,
#define NL802154_FLAG_NEED_NETDEV 0x02
#define NL802154_FLAG_NEED_RTNL 0x04
#define NL802154_FLAG_CHECK_NETDEV_UP 0x08
-#define NL802154_FLAG_NEED_NETDEV_UP (NL802154_FLAG_NEED_NETDEV |\
- NL802154_FLAG_CHECK_NETDEV_UP)
#define NL802154_FLAG_NEED_WPAN_DEV 0x10
-#define NL802154_FLAG_NEED_WPAN_DEV_UP (NL802154_FLAG_NEED_WPAN_DEV |\
- NL802154_FLAG_CHECK_NETDEV_UP)
static int nl802154_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
struct genl_info *info)
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index d99e1be94019..36ed85bf2ad5 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -141,7 +141,7 @@ static void ah_output_done(struct crypto_async_request *base, int err)
}
kfree(AH_SKB_CB(skb)->tmp);
- xfrm_output_resume(skb, err);
+ xfrm_output_resume(skb->sk, skb, err);
}
static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 75f67994fc85..2e35f68da40a 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1978,7 +1978,8 @@ static int inet_validate_link_af(const struct net_device *dev,
return 0;
}
-static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla)
+static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla,
+ struct netlink_ext_ack *extack)
{
struct in_device *in_dev = __in_dev_get_rcu(dev);
struct nlattr *a, *tb[IFLA_INET_MAX+1];
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 1ae920b93f39..dd1c752ea122 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -279,7 +279,7 @@ static void esp_output_done(struct crypto_async_request *base, int err)
x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
esp_output_tail_tcp(x, skb);
else
- xfrm_output_resume(skb, err);
+ xfrm_output_resume(skb->sk, skb, err);
}
}
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index 601f5fbfc63f..33687cf58286 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -217,10 +217,12 @@ static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
!(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
- esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
+ esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
+ NETIF_F_SCTP_CRC);
else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
!(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
- esp_features = features & ~NETIF_F_CSUM_MASK;
+ esp_features = features & ~(NETIF_F_CSUM_MASK |
+ NETIF_F_SCTP_CRC);
xo->flags |= XFRM_GSO_SEGMENT;
@@ -312,8 +314,17 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
ip_hdr(skb)->tot_len = htons(skb->len);
ip_send_check(ip_hdr(skb));
- if (hw_offload)
+ if (hw_offload) {
+ if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
+ return -ENOMEM;
+
+ xo = xfrm_offload(skb);
+ if (!xo)
+ return -EINVAL;
+
+ xo->flags |= XFRM_XMIT;
return 0;
+ }
err = esp_output_tail(x, skb, &esp);
if (err)
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index eb207089ece0..31c6c6d99d5e 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -218,7 +218,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
}
if (dst->flags & DST_XFRM_QUEUE)
- goto queued;
+ goto xmit;
if (!vti_state_check(dst->xfrm, parms->iph.daddr, parms->iph.saddr)) {
dev->stats.tx_carrier_errors++;
@@ -238,6 +238,8 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
if (skb->len > mtu) {
skb_dst_update_pmtu_no_confirm(skb, mtu);
if (skb->protocol == htons(ETH_P_IP)) {
+ if (!(ip_hdr(skb)->frag_off & htons(IP_DF)))
+ goto xmit;
icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));
} else {
@@ -251,7 +253,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
goto tx_error;
}
-queued:
+xmit:
skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
skb_dst_set(skb, dst);
skb->dev = skb_dst(skb)->dev;
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index 3d622a0d0753..4f49c12dae53 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -507,6 +507,12 @@ int tcp_bpf_update_proto(struct sock *sk, bool restore)
if (restore) {
if (inet_csk_has_ulp(sk)) {
+ /* TLS does not have an unhash proto in SW cases,
+ * but we need to ensure we stop using the sock_map
+ * unhash routine because the associated psock is being
+ * removed. So use the original unhash handler.
+ */
+ WRITE_ONCE(sk->sk_prot->unhash, psock->saved_unhash);
tcp_update_ulp(sk, psock->sk_proto, psock->saved_write_space);
} else {
sk->sk_write_space = psock->saved_write_space;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index bfcc7f1a8a7f..15f5504adf5b 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2788,6 +2788,10 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
val = up->gso_size;
break;
+ case UDP_GRO:
+ val = up->gro_enabled;
+ break;
+
/* The following two cannot be changed on UDP sockets, the return is
* always 0 (which corresponds to the full checksum coverage of UDP). */
case UDPLITE_SEND_CSCOV:
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 120073ffb666..dbb5bb9269bb 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -5672,7 +5672,8 @@ static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
return 0;
}
-static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
+static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token,
+ struct netlink_ext_ack *extack)
{
struct inet6_ifaddr *ifp;
struct net_device *dev = idev->dev;
@@ -5683,12 +5684,29 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
if (!token)
return -EINVAL;
- if (dev->flags & (IFF_LOOPBACK | IFF_NOARP))
+
+ if (dev->flags & IFF_LOOPBACK) {
+ NL_SET_ERR_MSG_MOD(extack, "Device is loopback");
return -EINVAL;
- if (!ipv6_accept_ra(idev))
+ }
+
+ if (dev->flags & IFF_NOARP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Device does not do neighbour discovery");
+ return -EINVAL;
+ }
+
+ if (!ipv6_accept_ra(idev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Router advertisement is disabled on device");
return -EINVAL;
- if (idev->cnf.rtr_solicits == 0)
+ }
+
+ if (idev->cnf.rtr_solicits == 0) {
+ NL_SET_ERR_MSG(extack,
+ "Router solicitation is disabled on device");
return -EINVAL;
+ }
write_lock_bh(&idev->lock);
@@ -5796,7 +5814,8 @@ static int inet6_validate_link_af(const struct net_device *dev,
return 0;
}
-static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
+static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla,
+ struct netlink_ext_ack *extack)
{
struct inet6_dev *idev = __in6_dev_get(dev);
struct nlattr *tb[IFLA_INET6_MAX + 1];
@@ -5809,7 +5828,8 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
BUG();
if (tb[IFLA_INET6_TOKEN]) {
- err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]));
+ err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]),
+ extack);
if (err)
return err;
}
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 440080da805b..080ee7f44c64 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -316,7 +316,7 @@ static void ah6_output_done(struct crypto_async_request *base, int err)
}
kfree(AH_SKB_CB(skb)->tmp);
- xfrm_output_resume(skb, err);
+ xfrm_output_resume(skb->sk, skb, err);
}
static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 153ad103ba74..727d791ed5e6 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -314,7 +314,7 @@ static void esp_output_done(struct crypto_async_request *base, int err)
x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
esp_output_tail_tcp(x, skb);
else
- xfrm_output_resume(skb, err);
+ xfrm_output_resume(skb->sk, skb, err);
}
}
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index 1ca516fb30e1..4af56affaafd 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -254,9 +254,11 @@ static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
skb->encap_hdr_csum = 1;
if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev)
- esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
+ esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
+ NETIF_F_SCTP_CRC);
else if (!(features & NETIF_F_HW_ESP_TX_CSUM))
- esp_features = features & ~NETIF_F_CSUM_MASK;
+ esp_features = features & ~(NETIF_F_CSUM_MASK |
+ NETIF_F_SCTP_CRC);
xo->flags |= XFRM_GSO_SEGMENT;
@@ -346,8 +348,17 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features
ipv6_hdr(skb)->payload_len = htons(len);
- if (hw_offload)
+ if (hw_offload) {
+ if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
+ return -ENOMEM;
+
+ xo = xfrm_offload(skb);
+ if (!xo)
+ return -EINVAL;
+
+ xo->flags |= XFRM_XMIT;
return 0;
+ }
err = esp6_output_tail(x, skb, &esp);
if (err)
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 856e46ad0895..2d048e21abbb 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -493,7 +493,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
}
if (dst->flags & DST_XFRM_QUEUE)
- goto queued;
+ goto xmit;
x = dst->xfrm;
if (!vti6_state_check(x, &t->parms.raddr, &t->parms.laddr))
@@ -522,6 +522,8 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
} else {
+ if (!(ip_hdr(skb)->frag_off & htons(IP_DF)))
+ goto xmit;
icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));
}
@@ -530,7 +532,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
goto tx_err_dst_release;
}
-queued:
+xmit:
skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
skb_dst_set(skb, dst);
skb->dev = skb_dst(skb)->dev;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 1f56d9aae589..bf3646b57c68 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -298,7 +298,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
*/
v4addr = LOOPBACK4_IPV6;
if (!(addr_type & IPV6_ADDR_MULTICAST) &&
- !sock_net(sk)->ipv6.sysctl.ip_nonlocal_bind) {
+ !ipv6_can_nonlocal_bind(sock_net(sk), inet)) {
err = -EADDRNOTAVAIL;
if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr,
dev, 0)) {
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 28801ae80548..a22822bdbf39 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -5206,9 +5206,11 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
* nexthops have been replaced by first new, the rest should
* be added to it.
*/
- cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
- NLM_F_REPLACE);
- cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
+ if (cfg->fc_nlinfo.nlh) {
+ cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
+ NLM_F_REPLACE);
+ cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
+ }
nhn++;
}
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index a0a11624a5be..7a99892e5aba 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1788,8 +1788,10 @@ static int ieee80211_change_station(struct wiphy *wiphy,
}
if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
- sta->sdata->u.vlan.sta)
+ sta->sdata->u.vlan.sta) {
+ ieee80211_clear_fast_rx(sta);
RCU_INIT_POINTER(sta->sdata->u.vlan.sta, NULL);
+ }
if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
ieee80211_vif_dec_num_mcast(sta->sdata);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index ce4e3855fec1..96f487fc0071 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -4707,7 +4707,10 @@ static void ieee80211_sta_conn_mon_timer(struct timer_list *t)
timeout = sta->rx_stats.last_rx;
timeout += IEEE80211_CONNECTION_IDLE_TIME;
- if (time_is_before_jiffies(timeout)) {
+ /* If timeout is after now, then update timer to fire at
+ * the later date, but do not actually probe at this time.
+ */
+ if (time_is_after_jiffies(timeout)) {
mod_timer(&ifmgd->conn_mon_timer, round_jiffies_up(timeout));
return;
}
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 5d06de61047a..3b3bcefbf657 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -3573,7 +3573,7 @@ begin:
test_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags))
goto out;
- if (vif->txqs_stopped[ieee80211_ac_from_tid(txq->tid)]) {
+ if (vif->txqs_stopped[txq->ac]) {
set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags);
goto out;
}
diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c
index 585d33144c33..55550ead2ced 100644
--- a/net/mac802154/llsec.c
+++ b/net/mac802154/llsec.c
@@ -152,7 +152,7 @@ err_tfm0:
crypto_free_sync_skcipher(key->tfm0);
err_tfm:
for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
- if (key->tfm[i])
+ if (!IS_ERR_OR_NULL(key->tfm[i]))
crypto_free_aead(key->tfm[i]);
kfree_sensitive(key);
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index e894345d10c1..8009b3f8e4c1 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -11,7 +11,6 @@
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/atomic.h>
-#include <linux/igmp.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
@@ -20,7 +19,6 @@
#include <net/tcp_states.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/transp_v6.h>
-#include <net/addrconf.h>
#endif
#include <net/mptcp.h>
#include <net/xfrm.h>
@@ -2871,6 +2869,48 @@ static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname,
return ret;
}
+static bool mptcp_unsupported(int level, int optname)
+{
+ if (level == SOL_IP) {
+ switch (optname) {
+ case IP_ADD_MEMBERSHIP:
+ case IP_ADD_SOURCE_MEMBERSHIP:
+ case IP_DROP_MEMBERSHIP:
+ case IP_DROP_SOURCE_MEMBERSHIP:
+ case IP_BLOCK_SOURCE:
+ case IP_UNBLOCK_SOURCE:
+ case MCAST_JOIN_GROUP:
+ case MCAST_LEAVE_GROUP:
+ case MCAST_JOIN_SOURCE_GROUP:
+ case MCAST_LEAVE_SOURCE_GROUP:
+ case MCAST_BLOCK_SOURCE:
+ case MCAST_UNBLOCK_SOURCE:
+ case MCAST_MSFILTER:
+ return true;
+ }
+ return false;
+ }
+ if (level == SOL_IPV6) {
+ switch (optname) {
+ case IPV6_ADDRFORM:
+ case IPV6_ADD_MEMBERSHIP:
+ case IPV6_DROP_MEMBERSHIP:
+ case IPV6_JOIN_ANYCAST:
+ case IPV6_LEAVE_ANYCAST:
+ case MCAST_JOIN_GROUP:
+ case MCAST_LEAVE_GROUP:
+ case MCAST_JOIN_SOURCE_GROUP:
+ case MCAST_LEAVE_SOURCE_GROUP:
+ case MCAST_BLOCK_SOURCE:
+ case MCAST_UNBLOCK_SOURCE:
+ case MCAST_MSFILTER:
+ return true;
+ }
+ return false;
+ }
+ return false;
+}
+
static int mptcp_setsockopt(struct sock *sk, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
@@ -2879,6 +2919,9 @@ static int mptcp_setsockopt(struct sock *sk, int level, int optname,
pr_debug("msk=%p", msk);
+ if (mptcp_unsupported(level, optname))
+ return -ENOPROTOOPT;
+
if (level == SOL_SOCKET)
return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen);
@@ -3409,34 +3452,10 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
return mask;
}
-static int mptcp_release(struct socket *sock)
-{
- struct mptcp_subflow_context *subflow;
- struct sock *sk = sock->sk;
- struct mptcp_sock *msk;
-
- if (!sk)
- return 0;
-
- lock_sock(sk);
-
- msk = mptcp_sk(sk);
-
- mptcp_for_each_subflow(msk, subflow) {
- struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
-
- ip_mc_drop_socket(ssk);
- }
-
- release_sock(sk);
-
- return inet_release(sock);
-}
-
static const struct proto_ops mptcp_stream_ops = {
.family = PF_INET,
.owner = THIS_MODULE,
- .release = mptcp_release,
+ .release = inet_release,
.bind = mptcp_bind,
.connect = mptcp_stream_connect,
.socketpair = sock_no_socketpair,
@@ -3528,35 +3547,10 @@ void __init mptcp_proto_init(void)
}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
-static int mptcp6_release(struct socket *sock)
-{
- struct mptcp_subflow_context *subflow;
- struct mptcp_sock *msk;
- struct sock *sk = sock->sk;
-
- if (!sk)
- return 0;
-
- lock_sock(sk);
-
- msk = mptcp_sk(sk);
-
- mptcp_for_each_subflow(msk, subflow) {
- struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
-
- ip_mc_drop_socket(ssk);
- ipv6_sock_mc_close(ssk);
- ipv6_sock_ac_close(ssk);
- }
-
- release_sock(sk);
- return inet6_release(sock);
-}
-
static const struct proto_ops mptcp_v6_stream_ops = {
.family = PF_INET6,
.owner = THIS_MODULE,
- .release = mptcp6_release,
+ .release = inet6_release,
.bind = mptcp_bind,
.connect = mptcp_stream_connect,
.socketpair = sock_no_socketpair,
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index a9cb355324d1..ffff8da707b8 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -105,13 +105,20 @@ static void ncsi_channel_monitor(struct timer_list *t)
monitor_state = nc->monitor.state;
spin_unlock_irqrestore(&nc->lock, flags);
- if (!enabled || chained) {
- ncsi_stop_channel_monitor(nc);
- return;
- }
+ if (!enabled)
+ return; /* expected race disabling timer */
+ if (WARN_ON_ONCE(chained))
+ goto bad_state;
+
if (state != NCSI_CHANNEL_INACTIVE &&
state != NCSI_CHANNEL_ACTIVE) {
- ncsi_stop_channel_monitor(nc);
+bad_state:
+ netdev_warn(ndp->ndev.dev,
+ "Bad NCSI monitor state channel %d 0x%x %s queue\n",
+ nc->id, state, chained ? "on" : "off");
+ spin_lock_irqsave(&nc->lock, flags);
+ nc->monitor.enabled = false;
+ spin_unlock_irqrestore(&nc->lock, flags);
return;
}
@@ -136,10 +143,9 @@ static void ncsi_channel_monitor(struct timer_list *t)
ncsi_report_link(ndp, true);
ndp->flags |= NCSI_DEV_RESHUFFLE;
- ncsi_stop_channel_monitor(nc);
-
ncm = &nc->modes[NCSI_MODE_LINK];
spin_lock_irqsave(&nc->lock, flags);
+ nc->monitor.enabled = false;
nc->state = NCSI_CHANNEL_INVISIBLE;
ncm->data[2] &= ~0x1;
spin_unlock_irqrestore(&nc->lock, flags);
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index d257ed3b732a..a3b46f888803 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -108,11 +108,13 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
llcp_sock->service_name_len,
GFP_KERNEL);
if (!llcp_sock->service_name) {
+ nfc_llcp_local_put(llcp_sock->local);
ret = -ENOMEM;
goto put_dev;
}
llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
if (llcp_sock->ssap == LLCP_SAP_MAX) {
+ nfc_llcp_local_put(llcp_sock->local);
kfree(llcp_sock->service_name);
llcp_sock->service_name = NULL;
ret = -EADDRINUSE;
@@ -671,6 +673,10 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
ret = -EISCONN;
goto error;
}
+ if (sk->sk_state == LLCP_CONNECTING) {
+ ret = -EINPROGRESS;
+ goto error;
+ }
dev = nfc_get_device(addr->dev_idx);
if (dev == NULL) {
@@ -702,6 +708,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
llcp_sock->local = nfc_llcp_local_get(local);
llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
if (llcp_sock->ssap == LLCP_SAP_MAX) {
+ nfc_llcp_local_put(llcp_sock->local);
ret = -ENOMEM;
goto put_dev;
}
@@ -743,9 +750,12 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
sock_unlink:
nfc_llcp_sock_unlink(&local->connecting_sockets, sk);
+ kfree(llcp_sock->service_name);
+ llcp_sock->service_name = NULL;
sock_llcp_release:
nfc_llcp_put_ssap(local, llcp_sock->ssap);
+ nfc_llcp_local_put(llcp_sock->local);
put_dev:
nfc_put_device(dev);
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index c29b0ef1fc27..cadb6a29b285 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -2032,10 +2032,10 @@ static int ovs_ct_limit_del_zone_limit(struct nlattr *nla_zone_limit,
static int ovs_ct_limit_get_default_limit(struct ovs_ct_limit_info *info,
struct sk_buff *reply)
{
- struct ovs_zone_limit zone_limit;
-
- zone_limit.zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE;
- zone_limit.limit = info->default_limit;
+ struct ovs_zone_limit zone_limit = {
+ .zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE,
+ .limit = info->default_limit,
+ };
return nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit);
}
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 4b46c69e14ab..c0477bec09bd 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -272,7 +272,10 @@ static int qrtr_tx_wait(struct qrtr_node *node, int dest_node, int dest_port,
flow = kzalloc(sizeof(*flow), GFP_KERNEL);
if (flow) {
init_waitqueue_head(&flow->resume_tx);
- radix_tree_insert(&node->qrtr_tx_flow, key, flow);
+ if (radix_tree_insert(&node->qrtr_tx_flow, key, flow)) {
+ kfree(flow);
+ flow = NULL;
+ }
}
}
mutex_unlock(&node->qrtr_tx_lock);
diff --git a/net/rds/message.c b/net/rds/message.c
index 071a261fdaab..4fc66ff0f1ec 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -180,6 +180,7 @@ void rds_message_put(struct rds_message *rm)
rds_message_purge(rm);
kfree(rm);
+ rm = NULL;
}
}
EXPORT_SYMBOL_GPL(rds_message_put);
@@ -347,8 +348,9 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
rm->data.op_nents = DIV_ROUND_UP(total_len, PAGE_SIZE);
rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
if (IS_ERR(rm->data.op_sg)) {
+ void *err = ERR_CAST(rm->data.op_sg);
rds_message_put(rm);
- return ERR_CAST(rm->data.op_sg);
+ return err;
}
for (i = 0; i < rm->data.op_nents; ++i) {
diff --git a/net/rds/send.c b/net/rds/send.c
index 53444397de66..ee7214ea0fdb 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -665,7 +665,7 @@ static void rds_send_remove_from_sock(struct list_head *messages, int status)
unlock_and_drop:
spin_unlock_irqrestore(&rm->m_rs_lock, flags);
rds_message_put(rm);
- if (was_on_sock)
+ if (was_on_sock && rm)
rds_message_put(rm);
}
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 68d6ef9e59fc..ac15a944573f 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -69,7 +69,7 @@ struct rfkill {
struct rfkill_int_event {
struct list_head list;
- struct rfkill_event ev;
+ struct rfkill_event_ext ev;
};
struct rfkill_data {
@@ -253,7 +253,8 @@ static void rfkill_global_led_trigger_unregister(void)
}
#endif /* CONFIG_RFKILL_LEDS */
-static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
+static void rfkill_fill_event(struct rfkill_event_ext *ev,
+ struct rfkill *rfkill,
enum rfkill_operation op)
{
unsigned long flags;
@@ -1237,7 +1238,7 @@ static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
struct rfkill *rfkill;
- struct rfkill_event ev;
+ struct rfkill_event_ext ev;
int ret;
/* we don't need the 'hard' variable but accept it */
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index b919826939e0..f6d5755d669e 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -158,7 +158,7 @@ static int __tcf_action_put(struct tc_action *p, bool bind)
return 0;
}
-int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
+static int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
{
int ret = 0;
@@ -184,7 +184,18 @@ int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
return ret;
}
-EXPORT_SYMBOL(__tcf_idr_release);
+
+int tcf_idr_release(struct tc_action *a, bool bind)
+{
+ const struct tc_action_ops *ops = a->ops;
+ int ret;
+
+ ret = __tcf_idr_release(a, bind, false);
+ if (ret == ACT_P_DELETED)
+ module_put(ops->owner);
+ return ret;
+}
+EXPORT_SYMBOL(tcf_idr_release);
static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
{
@@ -493,6 +504,7 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
}
p->idrinfo = idrinfo;
+ __module_get(ops->owner);
p->ops = ops;
*a = p;
return 0;
@@ -992,7 +1004,8 @@ struct tc_action_ops *tc_action_load_ops(char *name, struct nlattr *nla,
struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
struct nlattr *nla, struct nlattr *est,
char *name, int ovr, int bind,
- struct tc_action_ops *a_o, bool rtnl_held,
+ struct tc_action_ops *a_o, int *init_res,
+ bool rtnl_held,
struct netlink_ext_ack *extack)
{
struct nla_bitfield32 flags = { 0, 0 };
@@ -1028,6 +1041,7 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
}
if (err < 0)
goto err_out;
+ *init_res = err;
if (!name && tb[TCA_ACT_COOKIE])
tcf_set_action_cookie(&a->act_cookie, cookie);
@@ -1035,13 +1049,6 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
if (!name)
a->hw_stats = hw_stats;
- /* module count goes up only when brand new policy is created
- * if it exists and is only bound to in a_o->init() then
- * ACT_P_CREATED is not returned (a zero is).
- */
- if (err != ACT_P_CREATED)
- module_put(a_o->owner);
-
return a;
err_out:
@@ -1056,7 +1063,7 @@ err_out:
int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
struct nlattr *est, char *name, int ovr, int bind,
- struct tc_action *actions[], size_t *attr_size,
+ struct tc_action *actions[], int init_res[], size_t *attr_size,
bool rtnl_held, struct netlink_ext_ack *extack)
{
struct tc_action_ops *ops[TCA_ACT_MAX_PRIO] = {};
@@ -1084,7 +1091,8 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind,
- ops[i - 1], rtnl_held, extack);
+ ops[i - 1], &init_res[i - 1], rtnl_held,
+ extack);
if (IS_ERR(act)) {
err = PTR_ERR(act);
goto err;
@@ -1100,7 +1108,8 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
tcf_idr_insert_many(actions);
*attr_size = tcf_action_full_attrs_size(sz);
- return i - 1;
+ err = i - 1;
+ goto err_mod;
err:
tcf_action_destroy(actions, bind);
@@ -1497,12 +1506,13 @@ static int tcf_action_add(struct net *net, struct nlattr *nla,
struct netlink_ext_ack *extack)
{
size_t attr_size = 0;
- int loop, ret;
+ int loop, ret, i;
struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
+ int init_res[TCA_ACT_MAX_PRIO] = {};
for (loop = 0; loop < 10; loop++) {
ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0,
- actions, &attr_size, true, extack);
+ actions, init_res, &attr_size, true, extack);
if (ret != -EAGAIN)
break;
}
@@ -1510,8 +1520,12 @@ static int tcf_action_add(struct net *net, struct nlattr *nla,
if (ret < 0)
return ret;
ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
- if (ovr)
- tcf_action_put_many(actions);
+
+ /* only put existing actions */
+ for (i = 0; i < TCA_ACT_MAX_PRIO; i++)
+ if (init_res[i] == ACT_P_CREATED)
+ actions[i] = NULL;
+ tcf_action_put_many(actions);
return ret;
}
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index d3db70865d66..40fbea626dfd 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -646,7 +646,7 @@ static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
struct net_device *dev = block_cb->indr.dev;
struct Qdisc *sch = block_cb->indr.sch;
struct netlink_ext_ack extack = {};
- struct flow_block_offload bo;
+ struct flow_block_offload bo = {};
tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
block_cb->indr.binder_type,
@@ -3040,6 +3040,7 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
{
#ifdef CONFIG_NET_CLS_ACT
{
+ int init_res[TCA_ACT_MAX_PRIO] = {};
struct tc_action *act;
size_t attr_size = 0;
@@ -3051,12 +3052,11 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
return PTR_ERR(a_o);
act = tcf_action_init_1(net, tp, tb[exts->police],
rate_tlv, "police", ovr,
- TCA_ACT_BIND, a_o, rtnl_held,
- extack);
- if (IS_ERR(act)) {
- module_put(a_o->owner);
+ TCA_ACT_BIND, a_o, init_res,
+ rtnl_held, extack);
+ module_put(a_o->owner);
+ if (IS_ERR(act))
return PTR_ERR(act);
- }
act->type = exts->type = TCA_OLD_COMPAT;
exts->actions[0] = act;
@@ -3067,8 +3067,8 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
err = tcf_action_init(net, tp, tb[exts->action],
rate_tlv, NULL, ovr, TCA_ACT_BIND,
- exts->actions, &attr_size,
- rtnl_held, extack);
+ exts->actions, init_res,
+ &attr_size, rtnl_held, extack);
if (err < 0)
return err;
exts->nr_actions = err;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 62e12cb41a3e..081c11d5717c 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1675,9 +1675,10 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg,
cl->parent->common.classid,
NULL);
if (q->offload) {
- if (new_q)
+ if (new_q) {
htb_set_lockdep_class_child(new_q);
- htb_parent_to_leaf_offload(sch, dev_queue, new_q);
+ htb_parent_to_leaf_offload(sch, dev_queue, new_q);
+ }
}
}
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 2f1f0a378408..6af6b95bdb67 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -134,6 +134,9 @@ teql_destroy(struct Qdisc *sch)
struct teql_sched_data *dat = qdisc_priv(sch);
struct teql_master *master = dat->m;
+ if (!master)
+ return;
+
prev = master->slaves;
if (prev) {
do {
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index c3e89c776e66..bd08807c9e44 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -664,8 +664,8 @@ static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp)
if (!(type & IPV6_ADDR_UNICAST))
return 0;
- return sp->inet.freebind || net->ipv6.sysctl.ip_nonlocal_bind ||
- ipv6_chk_addr(net, in6, NULL, 0);
+ return ipv6_can_nonlocal_bind(net, &sp->inet) ||
+ ipv6_chk_addr(net, in6, NULL, 0);
}
/* This function checks if the address is a valid address to be used for
@@ -954,8 +954,7 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
net = sock_net(&opt->inet.sk);
rcu_read_lock();
dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id);
- if (!dev || !(opt->inet.freebind ||
- net->ipv6.sysctl.ip_nonlocal_bind ||
+ if (!dev || !(ipv6_can_nonlocal_bind(net, &opt->inet) ||
ipv6_chk_addr(net, &addr->v6.sin6_addr,
dev, 0))) {
rcu_read_unlock();
diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
index 76b8428c94a7..e5c43d4d5a75 100644
--- a/net/tipc/crypto.c
+++ b/net/tipc/crypto.c
@@ -1943,12 +1943,13 @@ static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
goto rcv;
if (tipc_aead_clone(&tmp, aead) < 0)
goto rcv;
+ WARN_ON(!refcount_inc_not_zero(&tmp->refcnt));
if (tipc_crypto_key_attach(rx, tmp, ehdr->tx_key, false) < 0) {
tipc_aead_free(&tmp->rcu);
goto rcv;
}
tipc_aead_put(aead);
- aead = tipc_aead_get((struct tipc_aead __force __rcu *)tmp);
+ aead = tmp;
}
if (unlikely(err)) {
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 3f927949bb23..a130195af188 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -89,7 +89,7 @@
* - A spin lock to protect the registry of kernel/driver users (reg.c)
* - A global spin_lock (tipc_port_lock), which only task is to ensure
* consistency where more than one port is involved in an operation,
- * i.e., whe a port is part of a linked list of ports.
+ * i.e., when a port is part of a linked list of ports.
* There are two such lists; 'port_list', which is used for management,
* and 'wait_list', which is used to queue ports during congestion.
*
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 707d0dc71fad..8217905348f4 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1739,7 +1739,7 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
}
/* tipc_node_xmit_skb(): send single buffer to destination
- * Buffers sent via this functon are generally TIPC_SYSTEM_IMPORTANCE
+ * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
* messages, which will not be rejected
* The only exception is datagram messages rerouted after secondary
* lookup, which are rare and safe to dispose of anyway.
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index f21162aa0cf7..58935cd0d068 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1262,7 +1262,7 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
spin_lock_bh(&inputq->lock);
if (skb_peek(arrvq) == skb) {
skb_queue_splice_tail_init(&tmpq, inputq);
- kfree_skb(__skb_dequeue(arrvq));
+ __skb_dequeue(arrvq);
}
spin_unlock_bh(&inputq->lock);
__skb_queue_purge(&tmpq);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 034af85f79d8..b1df42e4f1eb 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -5,7 +5,7 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#include <linux/if.h>
@@ -229,9 +229,13 @@ static int validate_beacon_head(const struct nlattr *attr,
unsigned int len = nla_len(attr);
const struct element *elem;
const struct ieee80211_mgmt *mgmt = (void *)data;
- bool s1g_bcn = ieee80211_is_s1g_beacon(mgmt->frame_control);
unsigned int fixedlen, hdrlen;
+ bool s1g_bcn;
+ if (len < offsetofend(typeof(*mgmt), frame_control))
+ goto err;
+
+ s1g_bcn = ieee80211_is_s1g_beacon(mgmt->frame_control);
if (s1g_bcn) {
fixedlen = offsetof(struct ieee80211_ext,
u.s1g_beacon.variable);
@@ -5485,7 +5489,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
rdev, info->attrs[NL80211_ATTR_UNSOL_BCAST_PROBE_RESP],
&params);
if (err)
- return err;
+ goto out;
}
nl80211_calculate_ap_params(&params);
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 019952d4fc7d..758eb7d2a706 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -2352,14 +2352,16 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
return NULL;
if (ext) {
- struct ieee80211_s1g_bcn_compat_ie *compat;
- u8 *ie;
+ const struct ieee80211_s1g_bcn_compat_ie *compat;
+ const struct element *elem;
- ie = (void *)cfg80211_find_ie(WLAN_EID_S1G_BCN_COMPAT,
- variable, ielen);
- if (!ie)
+ elem = cfg80211_find_elem(WLAN_EID_S1G_BCN_COMPAT,
+ variable, ielen);
+ if (!elem)
+ return NULL;
+ if (elem->datalen < sizeof(*compat))
return NULL;
- compat = (void *)(ie + 2);
+ compat = (void *)elem->data;
bssid = ext->u.s1g_beacon.sa;
capability = le16_to_cpu(compat->compat_info);
beacon_int = le16_to_cpu(compat->beacon_int);
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 07756ca5e3b5..08a70b4f090c 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -529,7 +529,7 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
cfg80211_sme_free(wdev);
}
- if (WARN_ON(wdev->conn))
+ if (wdev->conn)
return -EINPROGRESS;
wdev->conn = kzalloc(sizeof(*wdev->conn), GFP_KERNEL);
diff --git a/net/xfrm/xfrm_compat.c b/net/xfrm/xfrm_compat.c
index d8e8a11ca845..a20aec9d7393 100644
--- a/net/xfrm/xfrm_compat.c
+++ b/net/xfrm/xfrm_compat.c
@@ -216,7 +216,7 @@ static struct nlmsghdr *xfrm_nlmsg_put_compat(struct sk_buff *skb,
case XFRM_MSG_GETSADINFO:
case XFRM_MSG_GETSPDINFO:
default:
- WARN_ONCE(1, "unsupported nlmsg_type %d", nlh_src->nlmsg_type);
+ pr_warn_once("unsupported nlmsg_type %d\n", nlh_src->nlmsg_type);
return ERR_PTR(-EOPNOTSUPP);
}
@@ -277,7 +277,7 @@ static int xfrm_xlate64_attr(struct sk_buff *dst, const struct nlattr *src)
return xfrm_nla_cpy(dst, src, nla_len(src));
default:
BUILD_BUG_ON(XFRMA_MAX != XFRMA_IF_ID);
- WARN_ONCE(1, "unsupported nla_type %d", src->nla_type);
+ pr_warn_once("unsupported nla_type %d\n", src->nla_type);
return -EOPNOTSUPP;
}
}
@@ -315,8 +315,10 @@ static int xfrm_alloc_compat(struct sk_buff *skb, const struct nlmsghdr *nlh_src
struct sk_buff *new = NULL;
int err;
- if (WARN_ON_ONCE(type >= ARRAY_SIZE(xfrm_msg_min)))
+ if (type >= ARRAY_SIZE(xfrm_msg_min)) {
+ pr_warn_once("unsupported nlmsg_type %d\n", nlh_src->nlmsg_type);
return -EOPNOTSUPP;
+ }
if (skb_shinfo(skb)->frag_list == NULL) {
new = alloc_skb(skb->len + skb_tailroom(skb), GFP_ATOMIC);
@@ -378,6 +380,10 @@ static int xfrm_attr_cpy32(void *dst, size_t *pos, const struct nlattr *src,
struct nlmsghdr *nlmsg = dst;
struct nlattr *nla;
+ /* xfrm_user_rcv_msg_compat() relies on the fact that 32-bit messages
+ * have the same len or shorter than 64-bit ones.
+ * 32-bit translation that is bigger than 64-bit original is unexpected.
+ */
if (WARN_ON_ONCE(copy_len > payload))
copy_len = payload;
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index edf11893dbe8..6d6917b68856 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -134,8 +134,6 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
return skb;
}
- xo->flags |= XFRM_XMIT;
-
if (skb_is_gso(skb) && unlikely(x->xso.dev != dev)) {
struct sk_buff *segs;
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
index 495b1f5c979b..8831f5a9e992 100644
--- a/net/xfrm/xfrm_interface.c
+++ b/net/xfrm/xfrm_interface.c
@@ -306,6 +306,8 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
} else {
+ if (!(ip_hdr(skb)->frag_off & htons(IP_DF)))
+ goto xmit;
icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));
}
@@ -314,6 +316,7 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
return -EMSGSIZE;
}
+xmit:
xfrmi_scrub_packet(skb, !net_eq(xi->net, dev_net(dev)));
skb_dst_set(skb, dst);
skb->dev = tdev;
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index a7ab19353313..e4cb0ff4dcf4 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -503,22 +503,22 @@ out:
return err;
}
-int xfrm_output_resume(struct sk_buff *skb, int err)
+int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err)
{
struct net *net = xs_net(skb_dst(skb)->xfrm);
while (likely((err = xfrm_output_one(skb, err)) == 0)) {
nf_reset_ct(skb);
- err = skb_dst(skb)->ops->local_out(net, skb->sk, skb);
+ err = skb_dst(skb)->ops->local_out(net, sk, skb);
if (unlikely(err != 1))
goto out;
if (!skb_dst(skb)->xfrm)
- return dst_output(net, skb->sk, skb);
+ return dst_output(net, sk, skb);
err = nf_hook(skb_dst(skb)->ops->family,
- NF_INET_POST_ROUTING, net, skb->sk, skb,
+ NF_INET_POST_ROUTING, net, sk, skb,
NULL, skb_dst(skb)->dev, xfrm_output2);
if (unlikely(err != 1))
goto out;
@@ -534,7 +534,7 @@ EXPORT_SYMBOL_GPL(xfrm_output_resume);
static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
- return xfrm_output_resume(skb, 1);
+ return xfrm_output_resume(sk, skb, 1);
}
static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb)
@@ -660,6 +660,12 @@ static int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
int err;
+ if (x->outer_mode.encap == XFRM_MODE_BEET &&
+ ip_is_fragment(ip_hdr(skb))) {
+ net_warn_ratelimited("BEET mode doesn't support inner IPv4 fragments\n");
+ return -EAFNOSUPPORT;
+ }
+
err = xfrm4_tunnel_check_size(skb);
if (err)
return err;
@@ -705,8 +711,15 @@ out:
static int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
+ unsigned int ptr = 0;
int err;
+ if (x->outer_mode.encap == XFRM_MODE_BEET &&
+ ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL, NULL) >= 0) {
+ net_warn_ratelimited("BEET mode doesn't support inner IPv6 fragments\n");
+ return -EAFNOSUPPORT;
+ }
+
err = xfrm6_tunnel_check_size(skb);
if (err)
return err;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index d01ca1a18418..4496f7efa220 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -44,7 +44,6 @@ static void xfrm_state_gc_task(struct work_struct *work);
*/
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
-static __read_mostly seqcount_t xfrm_state_hash_generation = SEQCNT_ZERO(xfrm_state_hash_generation);
static struct kmem_cache *xfrm_state_cache __ro_after_init;
static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
@@ -140,7 +139,7 @@ static void xfrm_hash_resize(struct work_struct *work)
}
spin_lock_bh(&net->xfrm.xfrm_state_lock);
- write_seqcount_begin(&xfrm_state_hash_generation);
+ write_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
@@ -156,7 +155,7 @@ static void xfrm_hash_resize(struct work_struct *work)
rcu_assign_pointer(net->xfrm.state_byspi, nspi);
net->xfrm.state_hmask = nhashmask;
- write_seqcount_end(&xfrm_state_hash_generation);
+ write_seqcount_end(&net->xfrm.xfrm_state_hash_generation);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
osize = (ohashmask + 1) * sizeof(struct hlist_head);
@@ -1063,7 +1062,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
to_put = NULL;
- sequence = read_seqcount_begin(&xfrm_state_hash_generation);
+ sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
rcu_read_lock();
h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
@@ -1176,7 +1175,7 @@ out:
if (to_put)
xfrm_state_put(to_put);
- if (read_seqcount_retry(&xfrm_state_hash_generation, sequence)) {
+ if (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence)) {
*err = -EAGAIN;
if (x) {
xfrm_state_put(x);
@@ -2666,6 +2665,8 @@ int __net_init xfrm_state_init(struct net *net)
net->xfrm.state_num = 0;
INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
spin_lock_init(&net->xfrm.xfrm_state_lock);
+ seqcount_spinlock_init(&net->xfrm.xfrm_state_hash_generation,
+ &net->xfrm.xfrm_state_lock);
return 0;
out_byspi:
diff --git a/scripts/module.lds.S b/scripts/module.lds.S
index 168cd27e6122..2c52535f9b56 100644
--- a/scripts/module.lds.S
+++ b/scripts/module.lds.S
@@ -20,6 +20,7 @@ SECTIONS {
__patchable_function_entries : { *(__patchable_function_entries) }
+#ifdef CONFIG_LTO_CLANG
/*
* With CONFIG_LTO_CLANG, LLD always enables -fdata-sections and
* -ffunction-sections, which increases the size of the final module.
@@ -41,6 +42,7 @@ SECTIONS {
}
.text : { *(.text .text.[0-9a-zA-Z_]*) }
+#endif
}
/* bring in arch-specific sections */
diff --git a/security/integrity/iint.c b/security/integrity/iint.c
index 1d20003243c3..0ba01847e836 100644
--- a/security/integrity/iint.c
+++ b/security/integrity/iint.c
@@ -98,6 +98,14 @@ struct integrity_iint_cache *integrity_inode_get(struct inode *inode)
struct rb_node *node, *parent = NULL;
struct integrity_iint_cache *iint, *test_iint;
+ /*
+ * The integrity LSM's "iint_cache" is initialized at security_init()
+ * only if "integrity" is included in the ordered list of LSMs
+ * enabled on the boot command line.
+ */
+ if (!iint_cache)
+ panic("%s: lsm=integrity required.\n", __func__);
+
iint = integrity_iint_find(inode);
if (iint)
return iint;
diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c
index 6dcb6aa4db7f..75df32906055 100644
--- a/security/selinux/ss/avtab.c
+++ b/security/selinux/ss/avtab.c
@@ -109,7 +109,7 @@ static int avtab_insert(struct avtab *h, struct avtab_key *key, struct avtab_dat
struct avtab_node *prev, *cur, *newnode;
u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
- if (!h)
+ if (!h || !h->nslot)
return -EINVAL;
hvalue = avtab_hash(key, h->mask);
@@ -154,7 +154,7 @@ avtab_insert_nonunique(struct avtab *h, struct avtab_key *key, struct avtab_datu
struct avtab_node *prev, *cur;
u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
- if (!h)
+ if (!h || !h->nslot)
return NULL;
hvalue = avtab_hash(key, h->mask);
for (prev = NULL, cur = h->htable[hvalue];
@@ -184,7 +184,7 @@ struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *key)
struct avtab_node *cur;
u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
- if (!h)
+ if (!h || !h->nslot)
return NULL;
hvalue = avtab_hash(key, h->mask);
@@ -220,7 +220,7 @@ avtab_search_node(struct avtab *h, struct avtab_key *key)
struct avtab_node *cur;
u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
- if (!h)
+ if (!h || !h->nslot)
return NULL;
hvalue = avtab_hash(key, h->mask);
@@ -295,6 +295,7 @@ void avtab_destroy(struct avtab *h)
}
kvfree(h->htable);
h->htable = NULL;
+ h->nel = 0;
h->nslot = 0;
h->mask = 0;
}
@@ -303,88 +304,52 @@ void avtab_init(struct avtab *h)
{
h->htable = NULL;
h->nel = 0;
+ h->nslot = 0;
+ h->mask = 0;
}
-int avtab_alloc(struct avtab *h, u32 nrules)
+static int avtab_alloc_common(struct avtab *h, u32 nslot)
{
- u32 mask = 0;
- u32 shift = 0;
- u32 work = nrules;
- u32 nslot = 0;
-
- if (nrules == 0)
- goto avtab_alloc_out;
-
- while (work) {
- work = work >> 1;
- shift++;
- }
- if (shift > 2)
- shift = shift - 2;
- nslot = 1 << shift;
- if (nslot > MAX_AVTAB_HASH_BUCKETS)
- nslot = MAX_AVTAB_HASH_BUCKETS;
- mask = nslot - 1;
+ if (!nslot)
+ return 0;
h->htable = kvcalloc(nslot, sizeof(void *), GFP_KERNEL);
if (!h->htable)
return -ENOMEM;
- avtab_alloc_out:
- h->nel = 0;
h->nslot = nslot;
- h->mask = mask;
- pr_debug("SELinux: %d avtab hash slots, %d rules.\n",
- h->nslot, nrules);
+ h->mask = nslot - 1;
return 0;
}
-int avtab_duplicate(struct avtab *new, struct avtab *orig)
+int avtab_alloc(struct avtab *h, u32 nrules)
{
- int i;
- struct avtab_node *node, *tmp, *tail;
-
- memset(new, 0, sizeof(*new));
+ int rc;
+ u32 nslot = 0;
- new->htable = kvcalloc(orig->nslot, sizeof(void *), GFP_KERNEL);
- if (!new->htable)
- return -ENOMEM;
- new->nslot = orig->nslot;
- new->mask = orig->mask;
-
- for (i = 0; i < orig->nslot; i++) {
- tail = NULL;
- for (node = orig->htable[i]; node; node = node->next) {
- tmp = kmem_cache_zalloc(avtab_node_cachep, GFP_KERNEL);
- if (!tmp)
- goto error;
- tmp->key = node->key;
- if (tmp->key.specified & AVTAB_XPERMS) {
- tmp->datum.u.xperms =
- kmem_cache_zalloc(avtab_xperms_cachep,
- GFP_KERNEL);
- if (!tmp->datum.u.xperms) {
- kmem_cache_free(avtab_node_cachep, tmp);
- goto error;
- }
- tmp->datum.u.xperms = node->datum.u.xperms;
- } else
- tmp->datum.u.data = node->datum.u.data;
-
- if (tail)
- tail->next = tmp;
- else
- new->htable[i] = tmp;
-
- tail = tmp;
- new->nel++;
+ if (nrules != 0) {
+ u32 shift = 1;
+ u32 work = nrules >> 3;
+ while (work) {
+ work >>= 1;
+ shift++;
}
+ nslot = 1 << shift;
+ if (nslot > MAX_AVTAB_HASH_BUCKETS)
+ nslot = MAX_AVTAB_HASH_BUCKETS;
+
+ rc = avtab_alloc_common(h, nslot);
+ if (rc)
+ return rc;
}
+ pr_debug("SELinux: %d avtab hash slots, %d rules.\n", nslot, nrules);
return 0;
-error:
- avtab_destroy(new);
- return -ENOMEM;
+}
+
+int avtab_alloc_dup(struct avtab *new, const struct avtab *orig)
+{
+ return avtab_alloc_common(new, orig->nslot);
}
void avtab_hash_eval(struct avtab *h, char *tag)
diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h
index 4c4445ca9118..f2eeb36265d1 100644
--- a/security/selinux/ss/avtab.h
+++ b/security/selinux/ss/avtab.h
@@ -89,7 +89,7 @@ struct avtab {
void avtab_init(struct avtab *h);
int avtab_alloc(struct avtab *, u32);
-int avtab_duplicate(struct avtab *new, struct avtab *orig);
+int avtab_alloc_dup(struct avtab *new, const struct avtab *orig);
struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *k);
void avtab_destroy(struct avtab *h);
void avtab_hash_eval(struct avtab *h, char *tag);
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
index 0b32f3ab025e..1ef74c085f2b 100644
--- a/security/selinux/ss/conditional.c
+++ b/security/selinux/ss/conditional.c
@@ -605,7 +605,6 @@ static int cond_dup_av_list(struct cond_av_list *new,
struct cond_av_list *orig,
struct avtab *avtab)
{
- struct avtab_node *avnode;
u32 i;
memset(new, 0, sizeof(*new));
@@ -615,10 +614,11 @@ static int cond_dup_av_list(struct cond_av_list *new,
return -ENOMEM;
for (i = 0; i < orig->len; i++) {
- avnode = avtab_search_node(avtab, &orig->nodes[i]->key);
- if (WARN_ON(!avnode))
- return -EINVAL;
- new->nodes[i] = avnode;
+ new->nodes[i] = avtab_insert_nonunique(avtab,
+ &orig->nodes[i]->key,
+ &orig->nodes[i]->datum);
+ if (!new->nodes[i])
+ return -ENOMEM;
new->len++;
}
@@ -630,7 +630,7 @@ static int duplicate_policydb_cond_list(struct policydb *newp,
{
int rc, i, j;
- rc = avtab_duplicate(&newp->te_cond_avtab, &origp->te_cond_avtab);
+ rc = avtab_alloc_dup(&newp->te_cond_avtab, &origp->te_cond_avtab);
if (rc)
return rc;
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index d91e41d47777..301633145040 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -1552,6 +1552,7 @@ static int security_context_to_sid_core(struct selinux_state *state,
if (!str)
goto out;
}
+retry:
rcu_read_lock();
policy = rcu_dereference(state->policy);
policydb = &policy->policydb;
@@ -1565,6 +1566,15 @@ static int security_context_to_sid_core(struct selinux_state *state,
} else if (rc)
goto out_unlock;
rc = sidtab_context_to_sid(sidtab, &context, sid);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ if (context.str) {
+ str = context.str;
+ context.str = NULL;
+ }
+ context_destroy(&context);
+ goto retry;
+ }
context_destroy(&context);
out_unlock:
rcu_read_unlock();
@@ -1714,7 +1724,7 @@ static int security_compute_sid(struct selinux_state *state,
struct selinux_policy *policy;
struct policydb *policydb;
struct sidtab *sidtab;
- struct class_datum *cladatum = NULL;
+ struct class_datum *cladatum;
struct context *scontext, *tcontext, newcontext;
struct sidtab_entry *sentry, *tentry;
struct avtab_key avkey;
@@ -1736,6 +1746,8 @@ static int security_compute_sid(struct selinux_state *state,
goto out;
}
+retry:
+ cladatum = NULL;
context_init(&newcontext);
rcu_read_lock();
@@ -1880,6 +1892,11 @@ static int security_compute_sid(struct selinux_state *state,
}
/* Obtain the sid for the context. */
rc = sidtab_context_to_sid(sidtab, &newcontext, out_sid);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ context_destroy(&newcontext);
+ goto retry;
+ }
out_unlock:
rcu_read_unlock();
context_destroy(&newcontext);
@@ -2192,6 +2209,7 @@ void selinux_policy_commit(struct selinux_state *state,
struct selinux_load_state *load_state)
{
struct selinux_policy *oldpolicy, *newpolicy = load_state->policy;
+ unsigned long flags;
u32 seqno;
oldpolicy = rcu_dereference_protected(state->policy,
@@ -2213,7 +2231,13 @@ void selinux_policy_commit(struct selinux_state *state,
seqno = newpolicy->latest_granting;
/* Install the new policy. */
- rcu_assign_pointer(state->policy, newpolicy);
+ if (oldpolicy) {
+ sidtab_freeze_begin(oldpolicy->sidtab, &flags);
+ rcu_assign_pointer(state->policy, newpolicy);
+ sidtab_freeze_end(oldpolicy->sidtab, &flags);
+ } else {
+ rcu_assign_pointer(state->policy, newpolicy);
+ }
/* Load the policycaps from the new policy */
security_load_policycaps(state, newpolicy);
@@ -2357,13 +2381,15 @@ int security_port_sid(struct selinux_state *state,
struct policydb *policydb;
struct sidtab *sidtab;
struct ocontext *c;
- int rc = 0;
+ int rc;
if (!selinux_initialized(state)) {
*out_sid = SECINITSID_PORT;
return 0;
}
+retry:
+ rc = 0;
rcu_read_lock();
policy = rcu_dereference(state->policy);
policydb = &policy->policydb;
@@ -2382,6 +2408,10 @@ int security_port_sid(struct selinux_state *state,
if (!c->sid[0]) {
rc = sidtab_context_to_sid(sidtab, &c->context[0],
&c->sid[0]);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ goto retry;
+ }
if (rc)
goto out;
}
@@ -2408,13 +2438,15 @@ int security_ib_pkey_sid(struct selinux_state *state,
struct policydb *policydb;
struct sidtab *sidtab;
struct ocontext *c;
- int rc = 0;
+ int rc;
if (!selinux_initialized(state)) {
*out_sid = SECINITSID_UNLABELED;
return 0;
}
+retry:
+ rc = 0;
rcu_read_lock();
policy = rcu_dereference(state->policy);
policydb = &policy->policydb;
@@ -2435,6 +2467,10 @@ int security_ib_pkey_sid(struct selinux_state *state,
rc = sidtab_context_to_sid(sidtab,
&c->context[0],
&c->sid[0]);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ goto retry;
+ }
if (rc)
goto out;
}
@@ -2460,13 +2496,15 @@ int security_ib_endport_sid(struct selinux_state *state,
struct policydb *policydb;
struct sidtab *sidtab;
struct ocontext *c;
- int rc = 0;
+ int rc;
if (!selinux_initialized(state)) {
*out_sid = SECINITSID_UNLABELED;
return 0;
}
+retry:
+ rc = 0;
rcu_read_lock();
policy = rcu_dereference(state->policy);
policydb = &policy->policydb;
@@ -2487,6 +2525,10 @@ int security_ib_endport_sid(struct selinux_state *state,
if (!c->sid[0]) {
rc = sidtab_context_to_sid(sidtab, &c->context[0],
&c->sid[0]);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ goto retry;
+ }
if (rc)
goto out;
}
@@ -2510,7 +2552,7 @@ int security_netif_sid(struct selinux_state *state,
struct selinux_policy *policy;
struct policydb *policydb;
struct sidtab *sidtab;
- int rc = 0;
+ int rc;
struct ocontext *c;
if (!selinux_initialized(state)) {
@@ -2518,6 +2560,8 @@ int security_netif_sid(struct selinux_state *state,
return 0;
}
+retry:
+ rc = 0;
rcu_read_lock();
policy = rcu_dereference(state->policy);
policydb = &policy->policydb;
@@ -2534,10 +2578,18 @@ int security_netif_sid(struct selinux_state *state,
if (!c->sid[0] || !c->sid[1]) {
rc = sidtab_context_to_sid(sidtab, &c->context[0],
&c->sid[0]);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ goto retry;
+ }
if (rc)
goto out;
rc = sidtab_context_to_sid(sidtab, &c->context[1],
&c->sid[1]);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ goto retry;
+ }
if (rc)
goto out;
}
@@ -2587,6 +2639,7 @@ int security_node_sid(struct selinux_state *state,
return 0;
}
+retry:
rcu_read_lock();
policy = rcu_dereference(state->policy);
policydb = &policy->policydb;
@@ -2635,6 +2688,10 @@ int security_node_sid(struct selinux_state *state,
rc = sidtab_context_to_sid(sidtab,
&c->context[0],
&c->sid[0]);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ goto retry;
+ }
if (rc)
goto out;
}
@@ -2676,18 +2733,24 @@ int security_get_user_sids(struct selinux_state *state,
struct sidtab *sidtab;
struct context *fromcon, usercon;
u32 *mysids = NULL, *mysids2, sid;
- u32 mynel = 0, maxnel = SIDS_NEL;
+ u32 i, j, mynel, maxnel = SIDS_NEL;
struct user_datum *user;
struct role_datum *role;
struct ebitmap_node *rnode, *tnode;
- int rc = 0, i, j;
+ int rc;
*sids = NULL;
*nel = 0;
if (!selinux_initialized(state))
- goto out;
+ return 0;
+
+ mysids = kcalloc(maxnel, sizeof(*mysids), GFP_KERNEL);
+ if (!mysids)
+ return -ENOMEM;
+retry:
+ mynel = 0;
rcu_read_lock();
policy = rcu_dereference(state->policy);
policydb = &policy->policydb;
@@ -2707,11 +2770,6 @@ int security_get_user_sids(struct selinux_state *state,
usercon.user = user->value;
- rc = -ENOMEM;
- mysids = kcalloc(maxnel, sizeof(*mysids), GFP_ATOMIC);
- if (!mysids)
- goto out_unlock;
-
ebitmap_for_each_positive_bit(&user->roles, rnode, i) {
role = policydb->role_val_to_struct[i];
usercon.role = i + 1;
@@ -2723,6 +2781,10 @@ int security_get_user_sids(struct selinux_state *state,
continue;
rc = sidtab_context_to_sid(sidtab, &usercon, &sid);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ goto retry;
+ }
if (rc)
goto out_unlock;
if (mynel < maxnel) {
@@ -2745,14 +2807,14 @@ out_unlock:
rcu_read_unlock();
if (rc || !mynel) {
kfree(mysids);
- goto out;
+ return rc;
}
rc = -ENOMEM;
mysids2 = kcalloc(mynel, sizeof(*mysids2), GFP_KERNEL);
if (!mysids2) {
kfree(mysids);
- goto out;
+ return rc;
}
for (i = 0, j = 0; i < mynel; i++) {
struct av_decision dummy_avd;
@@ -2765,12 +2827,10 @@ out_unlock:
mysids2[j++] = mysids[i];
cond_resched();
}
- rc = 0;
kfree(mysids);
*sids = mysids2;
*nel = j;
-out:
- return rc;
+ return 0;
}
/**
@@ -2783,6 +2843,9 @@ out:
* Obtain a SID to use for a file in a filesystem that
* cannot support xattr or use a fixed labeling behavior like
* transition SIDs or task SIDs.
+ *
+ * WARNING: This function may return -ESTALE, indicating that the caller
+ * must retry the operation after re-acquiring the policy pointer!
*/
static inline int __security_genfs_sid(struct selinux_policy *policy,
const char *fstype,
@@ -2861,11 +2924,13 @@ int security_genfs_sid(struct selinux_state *state,
return 0;
}
- rcu_read_lock();
- policy = rcu_dereference(state->policy);
- retval = __security_genfs_sid(policy,
- fstype, path, orig_sclass, sid);
- rcu_read_unlock();
+ do {
+ rcu_read_lock();
+ policy = rcu_dereference(state->policy);
+ retval = __security_genfs_sid(policy, fstype, path,
+ orig_sclass, sid);
+ rcu_read_unlock();
+ } while (retval == -ESTALE);
return retval;
}
@@ -2888,7 +2953,7 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb)
struct selinux_policy *policy;
struct policydb *policydb;
struct sidtab *sidtab;
- int rc = 0;
+ int rc;
struct ocontext *c;
struct superblock_security_struct *sbsec = sb->s_security;
const char *fstype = sb->s_type->name;
@@ -2899,6 +2964,8 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb)
return 0;
}
+retry:
+ rc = 0;
rcu_read_lock();
policy = rcu_dereference(state->policy);
policydb = &policy->policydb;
@@ -2916,6 +2983,10 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb)
if (!c->sid[0]) {
rc = sidtab_context_to_sid(sidtab, &c->context[0],
&c->sid[0]);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ goto retry;
+ }
if (rc)
goto out;
}
@@ -2923,6 +2994,10 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb)
} else {
rc = __security_genfs_sid(policy, fstype, "/",
SECCLASS_DIR, &sbsec->sid);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ goto retry;
+ }
if (rc) {
sbsec->behavior = SECURITY_FS_USE_NONE;
rc = 0;
@@ -3132,12 +3207,13 @@ int security_sid_mls_copy(struct selinux_state *state,
u32 len;
int rc;
- rc = 0;
if (!selinux_initialized(state)) {
*new_sid = sid;
- goto out;
+ return 0;
}
+retry:
+ rc = 0;
context_init(&newcon);
rcu_read_lock();
@@ -3196,10 +3272,14 @@ int security_sid_mls_copy(struct selinux_state *state,
}
}
rc = sidtab_context_to_sid(sidtab, &newcon, new_sid);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ context_destroy(&newcon);
+ goto retry;
+ }
out_unlock:
rcu_read_unlock();
context_destroy(&newcon);
-out:
return rc;
}
@@ -3792,6 +3872,8 @@ int security_netlbl_secattr_to_sid(struct selinux_state *state,
return 0;
}
+retry:
+ rc = 0;
rcu_read_lock();
policy = rcu_dereference(state->policy);
policydb = &policy->policydb;
@@ -3818,23 +3900,24 @@ int security_netlbl_secattr_to_sid(struct selinux_state *state,
goto out;
}
rc = -EIDRM;
- if (!mls_context_isvalid(policydb, &ctx_new))
- goto out_free;
+ if (!mls_context_isvalid(policydb, &ctx_new)) {
+ ebitmap_destroy(&ctx_new.range.level[0].cat);
+ goto out;
+ }
rc = sidtab_context_to_sid(sidtab, &ctx_new, sid);
+ ebitmap_destroy(&ctx_new.range.level[0].cat);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ goto retry;
+ }
if (rc)
- goto out_free;
+ goto out;
security_netlbl_cache_add(secattr, *sid);
-
- ebitmap_destroy(&ctx_new.range.level[0].cat);
} else
*sid = SECSID_NULL;
- rcu_read_unlock();
- return 0;
-out_free:
- ebitmap_destroy(&ctx_new.range.level[0].cat);
out:
rcu_read_unlock();
return rc;
diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c
index 5ee190bd30f5..656d50b09f76 100644
--- a/security/selinux/ss/sidtab.c
+++ b/security/selinux/ss/sidtab.c
@@ -39,6 +39,7 @@ int sidtab_init(struct sidtab *s)
for (i = 0; i < SECINITSID_NUM; i++)
s->isids[i].set = 0;
+ s->frozen = false;
s->count = 0;
s->convert = NULL;
hash_init(s->context_to_sid);
@@ -281,6 +282,15 @@ int sidtab_context_to_sid(struct sidtab *s, struct context *context,
if (*sid)
goto out_unlock;
+ if (unlikely(s->frozen)) {
+ /*
+ * This sidtab is now frozen - tell the caller to abort and
+ * get the new one.
+ */
+ rc = -ESTALE;
+ goto out_unlock;
+ }
+
count = s->count;
convert = s->convert;
@@ -474,6 +484,17 @@ void sidtab_cancel_convert(struct sidtab *s)
spin_unlock_irqrestore(&s->lock, flags);
}
+void sidtab_freeze_begin(struct sidtab *s, unsigned long *flags) __acquires(&s->lock)
+{
+ spin_lock_irqsave(&s->lock, *flags);
+ s->frozen = true;
+ s->convert = NULL;
+}
+void sidtab_freeze_end(struct sidtab *s, unsigned long *flags) __releases(&s->lock)
+{
+ spin_unlock_irqrestore(&s->lock, *flags);
+}
+
static void sidtab_destroy_entry(struct sidtab_entry *entry)
{
context_destroy(&entry->context);
diff --git a/security/selinux/ss/sidtab.h b/security/selinux/ss/sidtab.h
index 80c744d07ad6..4eff0e49dcb2 100644
--- a/security/selinux/ss/sidtab.h
+++ b/security/selinux/ss/sidtab.h
@@ -86,6 +86,7 @@ struct sidtab {
u32 count;
/* access only under spinlock */
struct sidtab_convert_params *convert;
+ bool frozen;
spinlock_t lock;
#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
@@ -125,6 +126,9 @@ int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params);
void sidtab_cancel_convert(struct sidtab *s);
+void sidtab_freeze_begin(struct sidtab *s, unsigned long *flags) __acquires(&s->lock);
+void sidtab_freeze_end(struct sidtab *s, unsigned long *flags) __releases(&s->lock);
+
int sidtab_context_to_sid(struct sidtab *s, struct context *context, u32 *sid);
void sidtab_destroy(struct sidtab *s);
diff --git a/security/tomoyo/network.c b/security/tomoyo/network.c
index 478f757ff843..8dc61335f65e 100644
--- a/security/tomoyo/network.c
+++ b/security/tomoyo/network.c
@@ -613,7 +613,7 @@ static int tomoyo_check_unix_address(struct sockaddr *addr,
static bool tomoyo_kernel_service(void)
{
/* Nothing to do if I am a kernel service. */
- return (current->flags & (PF_KTHREAD | PF_IO_WORKER)) == PF_KTHREAD;
+ return current->flags & PF_KTHREAD;
}
/**
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index 52637180af33..80b814b9922a 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -1571,6 +1571,14 @@ static int loopback_mixer_new(struct loopback *loopback, int notify)
return -ENOMEM;
kctl->id.device = dev;
kctl->id.subdevice = substr;
+
+ /* Add the control before copying the id so that
+ * the numid field of the id is set in the copy.
+ */
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
+ return err;
+
switch (idx) {
case ACTIVE_IDX:
setup->active_id = kctl->id;
@@ -1587,9 +1595,6 @@ static int loopback_mixer_new(struct loopback *loopback, int notify)
default:
break;
}
- err = snd_ctl_add(card, kctl);
- if (err < 0)
- return err;
}
}
}
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index b59b0f323d4e..79ade335c8a0 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -989,8 +989,12 @@ static int azx_prepare(struct device *dev)
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip;
+ if (!azx_is_pm_ready(card))
+ return 0;
+
chip = card->private_data;
chip->pm_prepared = 1;
+ snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
flush_work(&azx_bus(chip)->unsol_work);
@@ -1005,7 +1009,11 @@ static void azx_complete(struct device *dev)
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip;
+ if (!azx_is_pm_ready(card))
+ return;
+
chip = card->private_data;
+ snd_power_change_state(card, SNDRV_CTL_POWER_D0);
chip->pm_prepared = 0;
}
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index c20dad46a7c9..dfef9c17e140 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -944,6 +944,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x8402, "HP ProBook 645 G4", CXT_FIXUP_MUTE_LED_GPIO),
SND_PCI_QUIRK(0x103c, 0x8427, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),
+ SND_PCI_QUIRK(0x103c, 0x844f, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x8456, "HP Z2 G4 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x8457, "HP Z2 G4 mini", CXT_FIXUP_HP_MIC_NO_PRESENCE),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 316b9b4ccb32..a7544b77d3f7 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3927,6 +3927,15 @@ static void alc271_fixup_dmic(struct hda_codec *codec,
snd_hda_sequence_write(codec, verbs);
}
+/* Fix the speaker amp after resume, etc */
+static void alc269vb_fixup_aspire_e1_coef(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ if (action == HDA_FIXUP_ACT_INIT)
+ alc_update_coef_idx(codec, 0x0d, 0x6000, 0x6000);
+}
+
static void alc269_fixup_pcm_44k(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
@@ -5256,7 +5265,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
case 0x10ec0274:
case 0x10ec0294:
alc_process_coef_fw(codec, coef0274);
- msleep(80);
+ msleep(850);
val = alc_read_coef_idx(codec, 0x46);
is_ctia = (val & 0x00f0) == 0x00f0;
break;
@@ -5440,6 +5449,7 @@ static void alc_update_headset_jack_cb(struct hda_codec *codec,
struct hda_jack_callback *jack)
{
snd_hda_gen_hp_automute(codec, jack);
+ alc_update_headset_mode(codec);
}
static void alc_probe_headset_mode(struct hda_codec *codec)
@@ -6300,6 +6310,7 @@ enum {
ALC283_FIXUP_HEADSET_MIC,
ALC255_FIXUP_MIC_MUTE_LED,
ALC282_FIXUP_ASPIRE_V5_PINS,
+ ALC269VB_FIXUP_ASPIRE_E1_COEF,
ALC280_FIXUP_HP_GPIO4,
ALC286_FIXUP_HP_GPIO_LED,
ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY,
@@ -6978,6 +6989,10 @@ static const struct hda_fixup alc269_fixups[] = {
{ },
},
},
+ [ALC269VB_FIXUP_ASPIRE_E1_COEF] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc269vb_fixup_aspire_e1_coef,
+ },
[ALC280_FIXUP_HP_GPIO4] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc280_fixup_hp_gpio4,
@@ -7900,6 +7915,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
+ SND_PCI_QUIRK(0x1025, 0x0840, "Acer Aspire E1", ALC269VB_FIXUP_ASPIRE_E1_COEF),
SND_PCI_QUIRK(0x1025, 0x101c, "Acer Veriton N2510G", ALC269_FIXUP_LIFEBOOK),
SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC),
@@ -8057,6 +8073,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x87f2, "HP ProBook 640 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x87f4, "HP", ALC287_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
@@ -8393,6 +8410,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC283_FIXUP_HEADSET_MIC, .name = "alc283-headset"},
{.id = ALC255_FIXUP_MIC_MUTE_LED, .name = "alc255-dell-mute"},
{.id = ALC282_FIXUP_ASPIRE_V5_PINS, .name = "aspire-v5"},
+ {.id = ALC269VB_FIXUP_ASPIRE_E1_COEF, .name = "aspire-e1-coef"},
{.id = ALC280_FIXUP_HP_GPIO4, .name = "hp-gpio4"},
{.id = ALC286_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
{.id = ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY, .name = "hp-gpio2-hotkey"},
diff --git a/sound/soc/bcm/cygnus-ssp.c b/sound/soc/bcm/cygnus-ssp.c
index 6e634b448293..aa16a2375134 100644
--- a/sound/soc/bcm/cygnus-ssp.c
+++ b/sound/soc/bcm/cygnus-ssp.c
@@ -1348,8 +1348,10 @@ static int cygnus_ssp_probe(struct platform_device *pdev)
&cygnus_ssp_dai[active_port_count]);
/* negative is err, 0 is active and good, 1 is disabled */
- if (err < 0)
+ if (err < 0) {
+ of_node_put(child_node);
return err;
+ }
else if (!err) {
dev_dbg(dev, "Activating DAI: %s\n",
cygnus_ssp_dai[active_port_count].name);
diff --git a/sound/soc/codecs/lpass-rx-macro.c b/sound/soc/codecs/lpass-rx-macro.c
index 8c04b3b2c907..7878da89d8e0 100644
--- a/sound/soc/codecs/lpass-rx-macro.c
+++ b/sound/soc/codecs/lpass-rx-macro.c
@@ -3551,7 +3551,7 @@ static int rx_macro_probe(struct platform_device *pdev)
/* set MCLK and NPL rates */
clk_set_rate(rx->clks[2].clk, MCLK_FREQ);
- clk_set_rate(rx->clks[3].clk, MCLK_FREQ);
+ clk_set_rate(rx->clks[3].clk, 2 * MCLK_FREQ);
ret = clk_bulk_prepare_enable(RX_NUM_CLKS_MAX, rx->clks);
if (ret)
diff --git a/sound/soc/codecs/lpass-tx-macro.c b/sound/soc/codecs/lpass-tx-macro.c
index 36d7a6442cdb..e8c6c738bbaa 100644
--- a/sound/soc/codecs/lpass-tx-macro.c
+++ b/sound/soc/codecs/lpass-tx-macro.c
@@ -1811,7 +1811,7 @@ static int tx_macro_probe(struct platform_device *pdev)
/* set MCLK and NPL rates */
clk_set_rate(tx->clks[2].clk, MCLK_FREQ);
- clk_set_rate(tx->clks[3].clk, MCLK_FREQ);
+ clk_set_rate(tx->clks[3].clk, 2 * MCLK_FREQ);
ret = clk_bulk_prepare_enable(TX_NUM_CLKS_MAX, tx->clks);
if (ret)
diff --git a/sound/soc/codecs/max98373-i2c.c b/sound/soc/codecs/max98373-i2c.c
index 85f6865019d4..ddb6436835d7 100644
--- a/sound/soc/codecs/max98373-i2c.c
+++ b/sound/soc/codecs/max98373-i2c.c
@@ -446,6 +446,7 @@ static bool max98373_volatile_reg(struct device *dev, unsigned int reg)
case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK:
case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK:
case MAX98373_R20B6_BDE_CUR_STATE_READBACK:
+ case MAX98373_R20FF_GLOBAL_SHDN:
case MAX98373_R21FF_REV_ID:
return true;
default:
diff --git a/sound/soc/codecs/max98373-sdw.c b/sound/soc/codecs/max98373-sdw.c
index d8c47667a9ea..f3a12205cd48 100644
--- a/sound/soc/codecs/max98373-sdw.c
+++ b/sound/soc/codecs/max98373-sdw.c
@@ -220,6 +220,7 @@ static bool max98373_volatile_reg(struct device *dev, unsigned int reg)
case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK:
case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK:
case MAX98373_R20B6_BDE_CUR_STATE_READBACK:
+ case MAX98373_R20FF_GLOBAL_SHDN:
case MAX98373_R21FF_REV_ID:
/* SoundWire Control Port Registers */
case MAX98373_R0040_SCP_INIT_STAT_1 ... MAX98373_R0070_SCP_FRAME_CTLR:
diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c
index 746c829312b8..1346a98ce8a1 100644
--- a/sound/soc/codecs/max98373.c
+++ b/sound/soc/codecs/max98373.c
@@ -28,11 +28,13 @@ static int max98373_dac_event(struct snd_soc_dapm_widget *w,
regmap_update_bits(max98373->regmap,
MAX98373_R20FF_GLOBAL_SHDN,
MAX98373_GLOBAL_EN_MASK, 1);
+ usleep_range(30000, 31000);
break;
case SND_SOC_DAPM_POST_PMD:
regmap_update_bits(max98373->regmap,
MAX98373_R20FF_GLOBAL_SHDN,
MAX98373_GLOBAL_EN_MASK, 0);
+ usleep_range(30000, 31000);
max98373->tdm_mode = false;
break;
default:
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index df351519a3a6..cda9cd935d4f 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -707,7 +707,13 @@ int wm8960_configure_pll(struct snd_soc_component *component, int freq_in,
best_freq_out = -EINVAL;
*sysclk_idx = *dac_idx = *bclk_idx = -1;
- for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) {
+ /*
+ * Per the datasheet, the PLL performs best when f2 is between
+ * 90MHz and 100MHz; since the desired sysclk output is 11.2896MHz
+ * or 12.288MHz, sysclkdiv = 2 is the best choice.
+ * So search sysclk_divs from 2 down to 1 rather than from 1 to 2.
+ */
+ for (i = ARRAY_SIZE(sysclk_divs) - 1; i >= 0; --i) {
if (sysclk_divs[i] == -1)
continue;
for (j = 0; j < ARRAY_SIZE(dac_divs); ++j) {
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index 08056fa0a0fa..a857a624864f 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -519,11 +519,13 @@ static int fsl_esai_startup(struct snd_pcm_substream *substream,
ESAI_SAICR_SYNC, esai_priv->synchronous ?
ESAI_SAICR_SYNC : 0);
- /* Set a default slot number -- 2 */
+ /* Set slots count */
regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR,
- ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(2));
+ ESAI_xCCR_xDC_MASK,
+ ESAI_xCCR_xDC(esai_priv->slots));
regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR,
- ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(2));
+ ESAI_xCCR_xDC_MASK,
+ ESAI_xCCR_xDC(esai_priv->slots));
}
return 0;
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
index 9e9b05883557..4124aa2fc247 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
@@ -487,15 +487,15 @@ static struct snd_soc_dai_driver sst_platform_dai[] = {
.stream_name = "Headset Playback",
.channels_min = SST_STEREO,
.channels_max = SST_STEREO,
- .rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
},
.capture = {
.stream_name = "Headset Capture",
.channels_min = 1,
.channels_max = 2,
- .rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
},
},
{
@@ -505,8 +505,8 @@ static struct snd_soc_dai_driver sst_platform_dai[] = {
.stream_name = "Deepbuffer Playback",
.channels_min = SST_STEREO,
.channels_max = SST_STEREO,
- .rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
},
},
{
diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
index 6d8f7d9fd192..4a3d522f612b 100644
--- a/sound/soc/sof/core.c
+++ b/sound/soc/sof/core.c
@@ -399,7 +399,13 @@ int snd_sof_device_shutdown(struct device *dev)
{
struct snd_sof_dev *sdev = dev_get_drvdata(dev);
- return snd_sof_shutdown(sdev);
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE))
+ cancel_work_sync(&sdev->probe_work);
+
+ if (sdev->fw_state == SOF_FW_BOOT_COMPLETE)
+ return snd_sof_shutdown(sdev);
+
+ return 0;
}
EXPORT_SYMBOL(snd_sof_device_shutdown);
diff --git a/sound/soc/sof/intel/apl.c b/sound/soc/sof/intel/apl.c
index fc29b91b8932..c7ed2b3d6abc 100644
--- a/sound/soc/sof/intel/apl.c
+++ b/sound/soc/sof/intel/apl.c
@@ -27,9 +27,10 @@ static const struct snd_sof_debugfs_map apl_dsp_debugfs[] = {
/* apollolake ops */
const struct snd_sof_dsp_ops sof_apl_ops = {
- /* probe and remove */
+ /* probe/remove/shutdown */
.probe = hda_dsp_probe,
.remove = hda_dsp_remove,
+ .shutdown = hda_dsp_shutdown,
/* Register IO */
.write = sof_io_write,
diff --git a/sound/soc/sof/intel/cnl.c b/sound/soc/sof/intel/cnl.c
index e38db519f38d..821f25fbcf08 100644
--- a/sound/soc/sof/intel/cnl.c
+++ b/sound/soc/sof/intel/cnl.c
@@ -232,9 +232,10 @@ void cnl_ipc_dump(struct snd_sof_dev *sdev)
/* cannonlake ops */
const struct snd_sof_dsp_ops sof_cnl_ops = {
- /* probe and remove */
+ /* probe/remove/shutdown */
.probe = hda_dsp_probe,
.remove = hda_dsp_remove,
+ .shutdown = hda_dsp_shutdown,
/* Register IO */
.write = sof_io_write,
@@ -349,22 +350,6 @@ const struct sof_intel_dsp_desc cnl_chip_info = {
};
EXPORT_SYMBOL_NS(cnl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
-const struct sof_intel_dsp_desc ehl_chip_info = {
- /* Elkhartlake */
- .cores_num = 4,
- .init_core_mask = 1,
- .host_managed_cores_mask = BIT(0),
- .ipc_req = CNL_DSP_REG_HIPCIDR,
- .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
- .ipc_ack = CNL_DSP_REG_HIPCIDA,
- .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
- .ipc_ctl = CNL_DSP_REG_HIPCCTL,
- .rom_init_timeout = 300,
- .ssp_count = ICL_SSP_COUNT,
- .ssp_base_offset = CNL_SSP_BASE_OFFSET,
-};
-EXPORT_SYMBOL_NS(ehl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
-
const struct sof_intel_dsp_desc jsl_chip_info = {
/* Jasperlake */
.cores_num = 2,
diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c
index c3b757cf01a0..736a54beca23 100644
--- a/sound/soc/sof/intel/hda-dsp.c
+++ b/sound/soc/sof/intel/hda-dsp.c
@@ -226,10 +226,17 @@ bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev,
val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS);
- is_enable = (val & HDA_DSP_ADSPCS_CPA_MASK(core_mask)) &&
- (val & HDA_DSP_ADSPCS_SPA_MASK(core_mask)) &&
- !(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
- !(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
+#define MASK_IS_EQUAL(v, m, field) ({ \
+ u32 _m = field(m); \
+ ((v) & _m) == _m; \
+})
+
+ is_enable = MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_CPA_MASK) &&
+ MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_SPA_MASK) &&
+ !(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
+ !(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
+
+#undef MASK_IS_EQUAL
dev_dbg(sdev->dev, "DSP core(s) enabled? %d : core_mask %x\n",
is_enable, core_mask);
@@ -885,6 +892,12 @@ int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
}
+int hda_dsp_shutdown(struct snd_sof_dev *sdev)
+{
+ sdev->system_suspend_target = SOF_SUSPEND_S3;
+ return snd_sof_suspend(sdev->dev);
+}
+
int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev)
{
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
diff --git a/sound/soc/sof/intel/hda.h b/sound/soc/sof/intel/hda.h
index 7c7579daee7f..ae80725b0e33 100644
--- a/sound/soc/sof/intel/hda.h
+++ b/sound/soc/sof/intel/hda.h
@@ -517,6 +517,7 @@ int hda_dsp_resume(struct snd_sof_dev *sdev);
int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev);
int hda_dsp_runtime_resume(struct snd_sof_dev *sdev);
int hda_dsp_runtime_idle(struct snd_sof_dev *sdev);
+int hda_dsp_shutdown(struct snd_sof_dev *sdev);
int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev);
void hda_dsp_dump(struct snd_sof_dev *sdev, u32 flags);
void hda_ipc_dump(struct snd_sof_dev *sdev);
diff --git a/sound/soc/sof/intel/icl.c b/sound/soc/sof/intel/icl.c
index e9d5a0a58504..88a74be8a0c1 100644
--- a/sound/soc/sof/intel/icl.c
+++ b/sound/soc/sof/intel/icl.c
@@ -26,9 +26,10 @@ static const struct snd_sof_debugfs_map icl_dsp_debugfs[] = {
/* Icelake ops */
const struct snd_sof_dsp_ops sof_icl_ops = {
- /* probe and remove */
+ /* probe/remove/shutdown */
.probe = hda_dsp_probe,
.remove = hda_dsp_remove,
+ .shutdown = hda_dsp_shutdown,
/* Register IO */
.write = sof_io_write,
diff --git a/sound/soc/sof/intel/pci-tgl.c b/sound/soc/sof/intel/pci-tgl.c
index 485607471181..38bc353f7313 100644
--- a/sound/soc/sof/intel/pci-tgl.c
+++ b/sound/soc/sof/intel/pci-tgl.c
@@ -65,7 +65,7 @@ static const struct sof_dev_desc ehl_desc = {
.default_tplg_path = "intel/sof-tplg",
.default_fw_filename = "sof-ehl.ri",
.nocodec_tplg_filename = "sof-ehl-nocodec.tplg",
- .ops = &sof_cnl_ops,
+ .ops = &sof_tgl_ops,
};
static const struct sof_dev_desc adls_desc = {
diff --git a/sound/soc/sof/intel/tgl.c b/sound/soc/sof/intel/tgl.c
index 419f05ba1920..54ba1b88ba86 100644
--- a/sound/soc/sof/intel/tgl.c
+++ b/sound/soc/sof/intel/tgl.c
@@ -25,7 +25,7 @@ const struct snd_sof_dsp_ops sof_tgl_ops = {
/* probe/remove/shutdown */
.probe = hda_dsp_probe,
.remove = hda_dsp_remove,
- .shutdown = hda_dsp_remove,
+ .shutdown = hda_dsp_shutdown,
/* Register IO */
.write = sof_io_write,
@@ -156,6 +156,22 @@ const struct sof_intel_dsp_desc tglh_chip_info = {
};
EXPORT_SYMBOL_NS(tglh_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
+const struct sof_intel_dsp_desc ehl_chip_info = {
+ /* Elkhartlake */
+ .cores_num = 4,
+ .init_core_mask = 1,
+ .host_managed_cores_mask = BIT(0),
+ .ipc_req = CNL_DSP_REG_HIPCIDR,
+ .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
+ .ipc_ack = CNL_DSP_REG_HIPCIDA,
+ .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
+ .ipc_ctl = CNL_DSP_REG_HIPCCTL,
+ .rom_init_timeout = 300,
+ .ssp_count = ICL_SSP_COUNT,
+ .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+};
+EXPORT_SYMBOL_NS(ehl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
+
const struct sof_intel_dsp_desc adls_chip_info = {
/* Alderlake-S */
.cores_num = 2,
diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c
index 6c13cc84b3fb..2173991c13db 100644
--- a/sound/soc/sunxi/sun4i-codec.c
+++ b/sound/soc/sunxi/sun4i-codec.c
@@ -1364,6 +1364,7 @@ static struct snd_soc_card *sun4i_codec_create_card(struct device *dev)
return ERR_PTR(-ENOMEM);
card->dev = dev;
+ card->owner = THIS_MODULE;
card->name = "sun4i-codec";
card->dapm_widgets = sun4i_codec_card_dapm_widgets;
card->num_dapm_widgets = ARRAY_SIZE(sun4i_codec_card_dapm_widgets);
@@ -1396,6 +1397,7 @@ static struct snd_soc_card *sun6i_codec_create_card(struct device *dev)
return ERR_PTR(-ENOMEM);
card->dev = dev;
+ card->owner = THIS_MODULE;
card->name = "A31 Audio Codec";
card->dapm_widgets = sun6i_codec_card_dapm_widgets;
card->num_dapm_widgets = ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
@@ -1449,6 +1451,7 @@ static struct snd_soc_card *sun8i_a23_codec_create_card(struct device *dev)
return ERR_PTR(-ENOMEM);
card->dev = dev;
+ card->owner = THIS_MODULE;
card->name = "A23 Audio Codec";
card->dapm_widgets = sun6i_codec_card_dapm_widgets;
card->num_dapm_widgets = ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
@@ -1487,6 +1490,7 @@ static struct snd_soc_card *sun8i_h3_codec_create_card(struct device *dev)
return ERR_PTR(-ENOMEM);
card->dev = dev;
+ card->owner = THIS_MODULE;
card->name = "H3 Audio Codec";
card->dapm_widgets = sun6i_codec_card_dapm_widgets;
card->num_dapm_widgets = ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
@@ -1525,6 +1529,7 @@ static struct snd_soc_card *sun8i_v3s_codec_create_card(struct device *dev)
return ERR_PTR(-ENOMEM);
card->dev = dev;
+ card->owner = THIS_MODULE;
card->name = "V3s Audio Codec";
card->dapm_widgets = sun6i_codec_card_dapm_widgets;
card->num_dapm_widgets = ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index d3001fb18141..176437a441e6 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1521,6 +1521,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
case USB_ID(0x21b4, 0x0081): /* AudioQuest DragonFly */
case USB_ID(0x2912, 0x30c8): /* Audioengine D1 */
case USB_ID(0x413c, 0xa506): /* Dell AE515 sound bar */
+ case USB_ID(0x046d, 0x084c): /* Logitech ConferenceCam Connect */
return true;
}
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 8b281f722e5b..f6afee209620 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -1154,6 +1154,7 @@ struct kvm_x86_mce {
#define KVM_XEN_HVM_CONFIG_HYPERCALL_MSR (1 << 0)
#define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL (1 << 1)
#define KVM_XEN_HVM_CONFIG_SHARED_INFO (1 << 2)
+#define KVM_XEN_HVM_CONFIG_RUNSTATE (1 << 3)
struct kvm_xen_hvm_config {
__u32 flags;
@@ -1621,12 +1622,24 @@ struct kvm_xen_vcpu_attr {
union {
__u64 gpa;
__u64 pad[8];
+ struct {
+ __u64 state;
+ __u64 state_entry_time;
+ __u64 time_running;
+ __u64 time_runnable;
+ __u64 time_blocked;
+ __u64 time_offline;
+ } runstate;
} u;
};
/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */
#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO 0x0
#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO 0x1
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR 0x2
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT 0x3
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA 0x4
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST 0x5
/* Secure Encrypted Virtualization command */
enum sev_cmd_id {
diff --git a/tools/kvm/kvm_stat/kvm_stat.service b/tools/kvm/kvm_stat/kvm_stat.service
index 71aabaffe779..8f13b843d5b4 100644
--- a/tools/kvm/kvm_stat/kvm_stat.service
+++ b/tools/kvm/kvm_stat/kvm_stat.service
@@ -9,6 +9,7 @@ Type=simple
ExecStart=/usr/bin/kvm_stat -dtcz -s 10 -L /var/log/kvm_stat.csv
ExecReload=/bin/kill -HUP $MAINPID
Restart=always
+RestartSec=60s
SyslogIdentifier=kvm_stat
SyslogLevel=debug
diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c
index 8caaafe7e312..e7a8d847161f 100644
--- a/tools/lib/bpf/ringbuf.c
+++ b/tools/lib/bpf/ringbuf.c
@@ -227,7 +227,7 @@ static int ringbuf_process_ring(struct ring* r)
if ((len & BPF_RINGBUF_DISCARD_BIT) == 0) {
sample = (void *)len_ptr + BPF_RINGBUF_HDR_SZ;
err = r->sample_cb(r->ctx, sample, len);
- if (err) {
+ if (err < 0) {
/* update consumer pos and bail out */
smp_store_release(r->consumer_pos,
cons_pos);
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
index 95da0e19f4a5..cea62cc3e456 100644
--- a/tools/lib/bpf/xsk.c
+++ b/tools/lib/bpf/xsk.c
@@ -60,6 +60,8 @@ struct xsk_umem {
int fd;
int refcount;
struct list_head ctx_list;
+ bool rx_ring_setup_done;
+ bool tx_ring_setup_done;
};
struct xsk_ctx {
@@ -908,26 +910,30 @@ static struct xsk_ctx *xsk_get_ctx(struct xsk_umem *umem, int ifindex,
return NULL;
}
-static void xsk_put_ctx(struct xsk_ctx *ctx)
+static void xsk_put_ctx(struct xsk_ctx *ctx, bool unmap)
{
struct xsk_umem *umem = ctx->umem;
struct xdp_mmap_offsets off;
int err;
- if (--ctx->refcount == 0) {
- err = xsk_get_mmap_offsets(umem->fd, &off);
- if (!err) {
- munmap(ctx->fill->ring - off.fr.desc,
- off.fr.desc + umem->config.fill_size *
- sizeof(__u64));
- munmap(ctx->comp->ring - off.cr.desc,
- off.cr.desc + umem->config.comp_size *
- sizeof(__u64));
- }
+ if (--ctx->refcount)
+ return;
- list_del(&ctx->list);
- free(ctx);
- }
+ if (!unmap)
+ goto out_free;
+
+ err = xsk_get_mmap_offsets(umem->fd, &off);
+ if (err)
+ goto out_free;
+
+ munmap(ctx->fill->ring - off.fr.desc, off.fr.desc + umem->config.fill_size *
+ sizeof(__u64));
+ munmap(ctx->comp->ring - off.cr.desc, off.cr.desc + umem->config.comp_size *
+ sizeof(__u64));
+
+out_free:
+ list_del(&ctx->list);
+ free(ctx);
}
static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk,
@@ -962,8 +968,6 @@ static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk,
memcpy(ctx->ifname, ifname, IFNAMSIZ - 1);
ctx->ifname[IFNAMSIZ - 1] = '\0';
- umem->fill_save = NULL;
- umem->comp_save = NULL;
ctx->fill = fill;
ctx->comp = comp;
list_add(&ctx->list, &umem->ctx_list);
@@ -1019,6 +1023,8 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
struct xsk_socket *xsk;
struct xsk_ctx *ctx;
int err, ifindex;
+ bool unmap = umem->fill_save != fill;
+ bool rx_setup_done = false, tx_setup_done = false;
if (!umem || !xsk_ptr || !(rx || tx))
return -EFAULT;
@@ -1046,6 +1052,8 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
}
} else {
xsk->fd = umem->fd;
+ rx_setup_done = umem->rx_ring_setup_done;
+ tx_setup_done = umem->tx_ring_setup_done;
}
ctx = xsk_get_ctx(umem, ifindex, queue_id);
@@ -1065,7 +1073,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
xsk->ctx = ctx;
xsk->ctx->has_bpf_link = xsk_probe_bpf_link();
- if (rx) {
+ if (rx && !rx_setup_done) {
err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
&xsk->config.rx_size,
sizeof(xsk->config.rx_size));
@@ -1073,8 +1081,10 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
err = -errno;
goto out_put_ctx;
}
+ if (xsk->fd == umem->fd)
+ umem->rx_ring_setup_done = true;
}
- if (tx) {
+ if (tx && !tx_setup_done) {
err = setsockopt(xsk->fd, SOL_XDP, XDP_TX_RING,
&xsk->config.tx_size,
sizeof(xsk->config.tx_size));
@@ -1082,6 +1092,8 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
err = -errno;
goto out_put_ctx;
}
+ if (xsk->fd == umem->fd)
+ umem->tx_ring_setup_done = true;
}
err = xsk_get_mmap_offsets(xsk->fd, &off);
@@ -1160,6 +1172,8 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
}
*xsk_ptr = xsk;
+ umem->fill_save = NULL;
+ umem->comp_save = NULL;
return 0;
out_mmap_tx:
@@ -1171,7 +1185,7 @@ out_mmap_rx:
munmap(rx_map, off.rx.desc +
xsk->config.rx_size * sizeof(struct xdp_desc));
out_put_ctx:
- xsk_put_ctx(ctx);
+ xsk_put_ctx(ctx, unmap);
out_socket:
if (--umem->refcount)
close(xsk->fd);
@@ -1185,6 +1199,9 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
const struct xsk_socket_config *usr_config)
{
+ if (!umem)
+ return -EFAULT;
+
return xsk_socket__create_shared(xsk_ptr, ifname, queue_id, umem,
rx, tx, umem->fill_save,
umem->comp_save, usr_config);
@@ -1236,7 +1253,7 @@ void xsk_socket__delete(struct xsk_socket *xsk)
}
}
- xsk_put_ctx(ctx);
+ xsk_put_ctx(ctx, true);
umem->refcount--;
/* Do not close an fd that also has an associated umem connected
diff --git a/tools/perf/builtin-daemon.c b/tools/perf/builtin-daemon.c
index ace8772a4f03..7c4a9d424a64 100644
--- a/tools/perf/builtin-daemon.c
+++ b/tools/perf/builtin-daemon.c
@@ -402,35 +402,42 @@ static pid_t handle_signalfd(struct daemon *daemon)
int status;
pid_t pid;
+ /*
+ * Treat the signal fd data as a pure notification and check the
+ * state of all sessions: multiple signals can get coalesced in
+ * the kernel, so we may receive only a single SIGCHLD even if
+ * several were generated.
+ */
err = read(daemon->signal_fd, &si, sizeof(struct signalfd_siginfo));
- if (err != sizeof(struct signalfd_siginfo))
+ if (err != sizeof(struct signalfd_siginfo)) {
+ pr_err("failed to read signal fd\n");
return -1;
+ }
list_for_each_entry(session, &daemon->sessions, list) {
+ if (session->pid == -1)
+ continue;
- if (session->pid != (int) si.ssi_pid)
+ pid = waitpid(session->pid, &status, WNOHANG);
+ if (pid <= 0)
continue;
- pid = waitpid(session->pid, &status, 0);
- if (pid == session->pid) {
- if (WIFEXITED(status)) {
- pr_info("session '%s' exited, status=%d\n",
- session->name, WEXITSTATUS(status));
- } else if (WIFSIGNALED(status)) {
- pr_info("session '%s' killed (signal %d)\n",
- session->name, WTERMSIG(status));
- } else if (WIFSTOPPED(status)) {
- pr_info("session '%s' stopped (signal %d)\n",
- session->name, WSTOPSIG(status));
- } else {
- pr_info("session '%s' Unexpected status (0x%x)\n",
- session->name, status);
- }
+ if (WIFEXITED(status)) {
+ pr_info("session '%s' exited, status=%d\n",
+ session->name, WEXITSTATUS(status));
+ } else if (WIFSIGNALED(status)) {
+ pr_info("session '%s' killed (signal %d)\n",
+ session->name, WTERMSIG(status));
+ } else if (WIFSTOPPED(status)) {
+ pr_info("session '%s' stopped (signal %d)\n",
+ session->name, WSTOPSIG(status));
+ } else {
+ pr_info("session '%s' Unexpected status (0x%x)\n",
+ session->name, status);
}
session->state = KILL;
session->pid = -1;
- return pid;
}
return 0;
@@ -443,7 +450,6 @@ static int daemon_session__wait(struct daemon_session *session, struct daemon *d
.fd = daemon->signal_fd,
.events = POLLIN,
};
- pid_t wpid = 0, pid = session->pid;
time_t start;
start = time(NULL);
@@ -452,7 +458,7 @@ static int daemon_session__wait(struct daemon_session *session, struct daemon *d
int err = poll(&pollfd, 1, 1000);
if (err > 0) {
- wpid = handle_signalfd(daemon);
+ handle_signalfd(daemon);
} else if (err < 0) {
perror("failed: poll\n");
return -1;
@@ -460,7 +466,7 @@ static int daemon_session__wait(struct daemon_session *session, struct daemon *d
if (start + secs < time(NULL))
return -1;
- } while (wpid != pid);
+ } while (session->pid != -1);
return 0;
}
@@ -902,7 +908,9 @@ static void daemon_session__kill(struct daemon_session *session,
daemon_session__signal(session, SIGKILL);
break;
default:
- break;
+ pr_err("failed to wait for session %s\n",
+ session->name);
+ return;
}
how++;
@@ -955,7 +963,8 @@ static void daemon__kill(struct daemon *daemon)
daemon__signal(daemon, SIGKILL);
break;
default:
- break;
+ pr_err("failed to wait for sessions\n");
+ return;
}
how++;
@@ -1344,7 +1353,7 @@ out:
close(sock_fd);
if (conf_fd != -1)
close(conf_fd);
- if (conf_fd != -1)
+ if (signal_fd != -1)
close(signal_fd);
pr_info("daemon exited\n");
diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c
index f57e075b0ed2..c72adbd67386 100644
--- a/tools/perf/tests/bpf.c
+++ b/tools/perf/tests/bpf.c
@@ -86,7 +86,7 @@ static struct {
.msg_load_fail = "check your vmlinux setting?",
.target_func = &epoll_pwait_loop,
.expect_result = (NR_ITERS + 1) / 2,
- .pin = true,
+ .pin = true,
},
#ifdef HAVE_BPF_PROLOGUE
{
@@ -99,13 +99,6 @@ static struct {
.expect_result = (NR_ITERS + 1) / 4,
},
#endif
- {
- .prog_id = LLVM_TESTCASE_BPF_RELOCATION,
- .desc = "BPF relocation checker",
- .name = "[bpf_relocation_test]",
- .msg_compile_fail = "fix 'perf test LLVM' first",
- .msg_load_fail = "libbpf error when dealing with relocation",
- },
};
static int do_test(struct bpf_object *obj, int (*func)(void),
diff --git a/tools/perf/tests/shell/daemon.sh b/tools/perf/tests/shell/daemon.sh
index 5ad3ca8d681b..58984380b211 100755
--- a/tools/perf/tests/shell/daemon.sh
+++ b/tools/perf/tests/shell/daemon.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# daemon operations
# SPDX-License-Identifier: GPL-2.0
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 953f4afacd3b..5b6ccb90b397 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -298,10 +298,6 @@ static int auxtrace_queues__queue_buffer(struct auxtrace_queues *queues,
queue->set = true;
queue->tid = buffer->tid;
queue->cpu = buffer->cpu;
- } else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) {
- pr_err("auxtrace queue conflict: cpu %d, tid %d vs cpu %d, tid %d\n",
- queue->cpu, queue->tid, buffer->cpu, buffer->tid);
- return -EINVAL;
}
buffer->buffer_nr = queues->next_buffer_nr++;
diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
index 57d58c81a5f8..cdecda1ddd36 100644
--- a/tools/perf/util/bpf-event.c
+++ b/tools/perf/util/bpf-event.c
@@ -196,25 +196,32 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
}
if (info_linear->info_len < offsetof(struct bpf_prog_info, prog_tags)) {
+ free(info_linear);
pr_debug("%s: the kernel is too old, aborting\n", __func__);
return -2;
}
info = &info_linear->info;
+ if (!info->jited_ksyms) {
+ free(info_linear);
+ return -1;
+ }
/* number of ksyms, func_lengths, and tags should match */
sub_prog_cnt = info->nr_jited_ksyms;
if (sub_prog_cnt != info->nr_prog_tags ||
- sub_prog_cnt != info->nr_jited_func_lens)
+ sub_prog_cnt != info->nr_jited_func_lens) {
+ free(info_linear);
return -1;
+ }
/* check BTF func info support */
if (info->btf_id && info->nr_func_info && info->func_info_rec_size) {
/* btf func info number should be same as sub_prog_cnt */
if (sub_prog_cnt != info->nr_func_info) {
pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__);
- err = -1;
- goto out;
+ free(info_linear);
+ return -1;
}
if (btf__get_from_id(info->btf_id, &btf)) {
pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info->btf_id);
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 42c84adeb2fb..c0c0fab22cb8 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -356,6 +356,9 @@ __add_event(struct list_head *list, int *idx,
struct perf_cpu_map *cpus = pmu ? perf_cpu_map__get(pmu->cpus) :
cpu_list ? perf_cpu_map__new(cpu_list) : NULL;
+ if (pmu && attr->type == PERF_TYPE_RAW)
+ perf_pmu__warn_invalid_config(pmu, attr->config, name);
+
if (init_attr)
event_attr_init(attr);
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 44ef28302fc7..46fd0f998484 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -1812,3 +1812,36 @@ int perf_pmu__caps_parse(struct perf_pmu *pmu)
return nr_caps;
}
+
+void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
+ char *name)
+{
+ struct perf_pmu_format *format;
+ __u64 masks = 0, bits;
+ char buf[100];
+ unsigned int i;
+
+ list_for_each_entry(format, &pmu->format, list) {
+ if (format->value != PERF_PMU_FORMAT_VALUE_CONFIG)
+ continue;
+
+ for_each_set_bit(i, format->bits, PERF_PMU_FORMAT_BITS)
+ masks |= 1ULL << i;
+ }
+
+ /*
+ * Kernel doesn't export any valid format bits.
+ */
+ if (masks == 0)
+ return;
+
+ bits = config & ~masks;
+ if (bits == 0)
+ return;
+
+ bitmap_scnprintf((unsigned long *)&bits, sizeof(bits) * 8, buf, sizeof(buf));
+
+ pr_warning("WARNING: event '%s' not valid (bits %s of config "
+ "'%llx' not supported by kernel)!\n",
+ name ?: "N/A", buf, config);
+}
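perf_pmu__warn_invalid_config() ORs together the bit ranges of every "config" format attribute the PMU exports in sysfs, then warns if a raw event's config sets bits outside that union (the parse-events.c hunk above wires it up for PERF_TYPE_RAW events). A simplified, self-contained version of the same check, assuming a hypothetical PMU whose format files only describe config bits 0-15:

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical PMU: its sysfs format files expose event:0-7 and umask:8-15,
 * so only config bits 0..15 are meaningful.
 */
static uint64_t pmu_format_mask(void)
{
	uint64_t masks = 0;
	int bit;

	for (bit = 0; bit <= 15; bit++)		/* union of all format bit ranges */
		masks |= 1ULL << bit;
	return masks;
}

static void warn_invalid_config(const char *name, uint64_t config)
{
	uint64_t masks = pmu_format_mask();
	uint64_t bad = config & ~masks;

	if (!masks)	/* PMU exports no format info: nothing to validate */
		return;
	if (!bad)	/* every set bit belongs to some format field */
		return;

	fprintf(stderr, "event '%s': config bits 0x%llx not covered by any format field\n",
		name ? name : "N/A", (unsigned long long)bad);
}

int main(void)
{
	warn_invalid_config("cpu/r1234/", 0x1234);	/* silent: fits in bits 0-15 */
	warn_invalid_config("cpu/r123456/", 0x123456);	/* warns about bits above 15 */
	return 0;
}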
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 8164388478c6..160b0f561771 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -123,4 +123,7 @@ int perf_pmu__convert_scale(const char *scale, char **end, double *sval);
int perf_pmu__caps_parse(struct perf_pmu *pmu);
+void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
+ char *name);
+
#endif /* __PMU_H */
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index b698046ec2db..dff178103ce5 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -424,7 +424,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
while (!io.eof) {
static const char anonstr[] = "//anon";
- size_t size;
+ size_t size, aligned_size;
/* ensure null termination since stack will be reused. */
event->mmap2.filename[0] = '\0';
@@ -484,11 +484,12 @@ out:
}
size = strlen(event->mmap2.filename) + 1;
- size = PERF_ALIGN(size, sizeof(u64));
+ aligned_size = PERF_ALIGN(size, sizeof(u64));
event->mmap2.len -= event->mmap.start;
event->mmap2.header.size = (sizeof(event->mmap2) -
- (sizeof(event->mmap2.filename) - size));
- memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
+ (sizeof(event->mmap2.filename) - aligned_size));
+ memset(event->mmap2.filename + size, 0, machine->id_hdr_size +
+ (aligned_size - size));
event->mmap2.header.size += machine->id_hdr_size;
event->mmap2.pid = tgid;
event->mmap2.tid = pid;
@@ -758,7 +759,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
for (i = 0; i < n; i++) {
char *end;
pid_t _pid;
- bool kernel_thread;
+ bool kernel_thread = false;
_pid = strtol(dirent[i]->d_name, &end, 10);
if (*end)
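The mmap2 change in synthetic-events.c splits the filename length into the real string length (size) and its 8-byte aligned length (aligned_size): the event header size is computed from the aligned value, and the memset is widened so the padding bytes between size and aligned_size, previously left as stale stack contents, are zeroed along with the sample-id area. A tiny worked example with hypothetical numbers:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	size_t size = 13;				/* strlen("path/to/file") + 1 */
	size_t aligned_size = ALIGN_UP(size, sizeof(uint64_t));	/* 16 */
	size_t id_hdr_size = 8;				/* hypothetical sample-id size */

	/*
	 * Bytes [size, aligned_size) are alignment padding and must be zeroed
	 * together with the id header area appended after the filename.
	 */
	printf("size=%zu aligned=%zu zeroed after filename=%zu bytes\n",
	       size, aligned_size, (aligned_size - size) + id_hdr_size);
	return 0;
}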
diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c
index 3cc91ad048ea..43beb169631d 100644
--- a/tools/perf/util/vdso.c
+++ b/tools/perf/util/vdso.c
@@ -133,6 +133,8 @@ static struct dso *__machine__addnew_vdso(struct machine *machine, const char *s
if (dso != NULL) {
__dsos__add(&machine->dsos, dso);
dso__set_long_name(dso, long_name, false);
+ /* Put dso here because __dsos__add already got it */
+ dso__put(dso);
}
return dso;
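The vdso fix is a plain reference-count leak: the dso is created with one reference owned by __machine__addnew_vdso(), __dsos__add() takes its own, and the new dso__put() drops the creator's reference once the dso is safely on the machine's list. A hedged sketch of that get/put convention with a hypothetical refcounted object, not perf's struct dso:

#include <stdlib.h>

struct obj { int refcnt; };		/* hypothetical stand-in for struct dso */

static struct obj *obj_new(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (o)
		o->refcnt = 1;		/* creator starts with one reference */
	return o;
}

static void obj_get(struct obj *o) { o->refcnt++; }
static void obj_put(struct obj *o) { if (--o->refcnt == 0) free(o); }

static void list_add_obj(struct obj *o)
{
	obj_get(o);			/* the list holds its own reference */
	/* ... link o into the list ... */
}

static struct obj *addnew(void)
{
	struct obj *o = obj_new();

	if (o) {
		list_add_obj(o);	/* refcnt: 2 */
		obj_put(o);		/* drop the creator's reference: 1 */
	}
	return o;	/* kept alive by the list; callers take their own ref if needed */
}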
diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
index 3b796dd5e577..ca24f6839d50 100644
--- a/tools/testing/radix-tree/idr-test.c
+++ b/tools/testing/radix-tree/idr-test.c
@@ -296,21 +296,34 @@ static void *idr_throbber(void *arg)
return NULL;
}
+/*
+ * There are always either 1 or 2 objects in the IDR. If we find nothing,
+ * or we find something at an ID we didn't expect, that's a bug.
+ */
void idr_find_test_1(int anchor_id, int throbber_id)
{
pthread_t throbber;
time_t start = time(NULL);
- pthread_create(&throbber, NULL, idr_throbber, &throbber_id);
-
BUG_ON(idr_alloc(&find_idr, xa_mk_value(anchor_id), anchor_id,
anchor_id + 1, GFP_KERNEL) != anchor_id);
+ pthread_create(&throbber, NULL, idr_throbber, &throbber_id);
+
+ rcu_read_lock();
do {
int id = 0;
void *entry = idr_get_next(&find_idr, &id);
- BUG_ON(entry != xa_mk_value(id));
+ rcu_read_unlock();
+ if ((id != anchor_id && id != throbber_id) ||
+ entry != xa_mk_value(id)) {
+ printf("%s(%d, %d): %p at %d\n", __func__, anchor_id,
+ throbber_id, entry, id);
+ abort();
+ }
+ rcu_read_lock();
} while (time(NULL) < start + 11);
+ rcu_read_unlock();
pthread_join(throbber, NULL);
@@ -577,6 +590,7 @@ void ida_tests(void)
int __weak main(void)
{
+ rcu_register_thread();
radix_tree_init();
idr_checks();
ida_tests();
@@ -584,5 +598,6 @@ int __weak main(void)
rcu_barrier();
if (nr_allocated)
printf("nr_allocated = %d\n", nr_allocated);
+ rcu_unregister_thread();
return 0;
}
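The idr-test changes do three related things: the anchor entry is allocated before the throbber thread starts (so a lookup can never legitimately come back empty), every idr_get_next() call is now bracketed by rcu_read_lock()/rcu_read_unlock(), and main() registers itself with RCU, matching the liburcu-style emulation the radix-tree test harness uses. The reader-side pattern looks roughly like this (a sketch, not the test code):

#include <urcu.h>

static void *lookup_loop(void *arg)
{
	int iterations = 1000;

	(void)arg;
	rcu_register_thread();		/* every thread doing RCU reads must register */

	while (iterations--) {
		rcu_read_lock();
		/* idr_get_next()/radix-tree lookups are only valid inside here */
		rcu_read_unlock();	/* drop the read lock before blocking */
	}

	rcu_unregister_thread();
	return NULL;
}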
diff --git a/tools/testing/radix-tree/linux/compiler_types.h b/tools/testing/radix-tree/linux/compiler_types.h
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/tools/testing/radix-tree/linux/compiler_types.h
+++ /dev/null
diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c
index 9eae0fb5a67d..e00520cc6349 100644
--- a/tools/testing/radix-tree/multiorder.c
+++ b/tools/testing/radix-tree/multiorder.c
@@ -224,7 +224,9 @@ void multiorder_checks(void)
int __weak main(void)
{
+ rcu_register_thread();
radix_tree_init();
multiorder_checks();
+ rcu_unregister_thread();
return 0;
}
diff --git a/tools/testing/radix-tree/xarray.c b/tools/testing/radix-tree/xarray.c
index e61e43efe463..f20e12cbbfd4 100644
--- a/tools/testing/radix-tree/xarray.c
+++ b/tools/testing/radix-tree/xarray.c
@@ -25,11 +25,13 @@ void xarray_tests(void)
int __weak main(void)
{
+ rcu_register_thread();
radix_tree_init();
xarray_tests();
radix_tree_cpu_dead(1);
rcu_barrier();
if (nr_allocated)
printf("nr_allocated = %d\n", nr_allocated);
+ rcu_unregister_thread();
return 0;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
index 37c5494a0381..e25917f04602 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
@@ -6,6 +6,7 @@
#include <test_progs.h>
#include "bpf_dctcp.skel.h"
#include "bpf_cubic.skel.h"
+#include "bpf_tcp_nogpl.skel.h"
#define min(a, b) ((a) < (b) ? (a) : (b))
@@ -227,10 +228,53 @@ static void test_dctcp(void)
bpf_dctcp__destroy(dctcp_skel);
}
+static char *err_str;
+static bool found;
+
+static int libbpf_debug_print(enum libbpf_print_level level,
+ const char *format, va_list args)
+{
+ char *log_buf;
+
+ if (level != LIBBPF_WARN ||
+ strcmp(format, "libbpf: \n%s\n")) {
+ vprintf(format, args);
+ return 0;
+ }
+
+ log_buf = va_arg(args, char *);
+ if (!log_buf)
+ goto out;
+ if (err_str && strstr(log_buf, err_str) != NULL)
+ found = true;
+out:
+ printf(format, log_buf);
+ return 0;
+}
+
+static void test_invalid_license(void)
+{
+ libbpf_print_fn_t old_print_fn;
+ struct bpf_tcp_nogpl *skel;
+
+ err_str = "struct ops programs must have a GPL compatible license";
+ found = false;
+ old_print_fn = libbpf_set_print(libbpf_debug_print);
+
+ skel = bpf_tcp_nogpl__open_and_load();
+ ASSERT_NULL(skel, "bpf_tcp_nogpl");
+ ASSERT_EQ(found, true, "expected_err_msg");
+
+ bpf_tcp_nogpl__destroy(skel);
+ libbpf_set_print(old_print_fn);
+}
+
void test_bpf_tcp_ca(void)
{
if (test__start_subtest("dctcp"))
test_dctcp();
if (test__start_subtest("cubic"))
test_cubic();
+ if (test__start_subtest("invalid_license"))
+ test_invalid_license();
}
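test_invalid_license() installs a temporary libbpf print callback, tries to load a struct_ops program whose license string is not GPL-compatible, and asserts both that the load fails (ASSERT_NULL) and that the expected error text was emitted (ASSERT_EQ on the found flag). A simplified sketch of the interception pattern; unlike the test, which matches libbpf's exact "libbpf: \n%s\n" log-buffer format, this version just formats every message and scans it:

#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <bpf/libbpf.h>

static const char *wanted;		/* substring expected in libbpf's output */
static bool seen;

static int capture_print(enum libbpf_print_level level,
			 const char *format, va_list args)
{
	char buf[4096];

	(void)level;
	vsnprintf(buf, sizeof(buf), format, args);
	if (wanted && strstr(buf, wanted))
		seen = true;
	return fprintf(stderr, "%s", buf);	/* still pass the message through */
}

/*
 * Usage sketch:
 *	wanted = "GPL compatible license";
 *	old = libbpf_set_print(capture_print);
 *	... attempt the load, expect it to fail ...
 *	libbpf_set_print(old);
 *	check that 'seen' is true
 */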
diff --git a/tools/testing/selftests/bpf/progs/bpf_tcp_nogpl.c b/tools/testing/selftests/bpf/progs/bpf_tcp_nogpl.c
new file mode 100644
index 000000000000..2ecd833dcd41
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_tcp_nogpl.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <linux/types.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_tcp_helpers.h"
+
+char _license[] SEC("license") = "X";
+
+void BPF_STRUCT_OPS(nogpltcp_init, struct sock *sk)
+{
+}
+
+SEC(".struct_ops")
+struct tcp_congestion_ops bpf_nogpltcp = {
+ .init = (void *)nogpltcp_init,
+ .name = "bpf_nogpltcp",
+};
diff --git a/tools/testing/selftests/kvm/hardware_disable_test.c b/tools/testing/selftests/kvm/hardware_disable_test.c
index 2f2eeb8a1d86..5aadf84c91c0 100644
--- a/tools/testing/selftests/kvm/hardware_disable_test.c
+++ b/tools/testing/selftests/kvm/hardware_disable_test.c
@@ -108,7 +108,7 @@ static void run_test(uint32_t run)
kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
vm_create_irqchip(vm);
- fprintf(stderr, "%s: [%d] start vcpus\n", __func__, run);
+ pr_debug("%s: [%d] start vcpus\n", __func__, run);
for (i = 0; i < VCPU_NUM; ++i) {
vm_vcpu_add_default(vm, i, guest_code);
payloads[i].vm = vm;
@@ -124,7 +124,7 @@ static void run_test(uint32_t run)
check_set_affinity(throw_away, &cpu_set);
}
}
- fprintf(stderr, "%s: [%d] all threads launched\n", __func__, run);
+ pr_debug("%s: [%d] all threads launched\n", __func__, run);
sem_post(sem);
for (i = 0; i < VCPU_NUM; ++i)
check_join(threads[i], &b);
@@ -147,16 +147,16 @@ int main(int argc, char **argv)
if (pid == 0)
run_test(i); /* This function always exits */
- fprintf(stderr, "%s: [%d] waiting semaphore\n", __func__, i);
+ pr_debug("%s: [%d] waiting semaphore\n", __func__, i);
sem_wait(sem);
r = (rand() % DELAY_US_MAX) + 1;
- fprintf(stderr, "%s: [%d] waiting %dus\n", __func__, i, r);
+ pr_debug("%s: [%d] waiting %dus\n", __func__, i, r);
usleep(r);
r = waitpid(pid, &s, WNOHANG);
TEST_ASSERT(r != pid,
"%s: [%d] child exited unexpectedly status: [%d]",
__func__, i, s);
- fprintf(stderr, "%s: [%d] killing child\n", __func__, i);
+ pr_debug("%s: [%d] killing child\n", __func__, i);
kill(pid, SIGKILL);
}
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_clock.c b/tools/testing/selftests/kvm/x86_64/hyperv_clock.c
index ffbc4555c6e2..7f1d2765572c 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_clock.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_clock.c
@@ -80,19 +80,24 @@ static inline void check_tsc_msr_rdtsc(void)
GUEST_ASSERT(delta_ns * 100 < (t2 - t1) * 100);
}
+static inline u64 get_tscpage_ts(struct ms_hyperv_tsc_page *tsc_page)
+{
+ return mul_u64_u64_shr64(rdtsc(), tsc_page->tsc_scale) + tsc_page->tsc_offset;
+}
+
static inline void check_tsc_msr_tsc_page(struct ms_hyperv_tsc_page *tsc_page)
{
u64 r1, r2, t1, t2;
/* Compare TSC page clocksource with HV_X64_MSR_TIME_REF_COUNT */
- t1 = mul_u64_u64_shr64(rdtsc(), tsc_page->tsc_scale) + tsc_page->tsc_offset;
+ t1 = get_tscpage_ts(tsc_page);
r1 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
/* 10 ms tolerance */
GUEST_ASSERT(r1 >= t1 && r1 - t1 < 100000);
nop_loop();
- t2 = mul_u64_u64_shr64(rdtsc(), tsc_page->tsc_scale) + tsc_page->tsc_offset;
+ t2 = get_tscpage_ts(tsc_page);
r2 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
GUEST_ASSERT(r2 >= t1 && r2 - t2 < 100000);
}
@@ -130,7 +135,11 @@ static void guest_main(struct ms_hyperv_tsc_page *tsc_page, vm_paddr_t tsc_page_
tsc_offset = tsc_page->tsc_offset;
/* Call KVM_SET_CLOCK from userspace, check that TSC page was updated */
+
GUEST_SYNC(7);
+ /* Sanity check TSC page timestamp, it should be close to 0 */
+ GUEST_ASSERT(get_tscpage_ts(tsc_page) < 100000);
+
GUEST_ASSERT(tsc_page->tsc_offset != tsc_offset);
nop_loop();
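get_tscpage_ts() is the standard Hyper-V reference TSC page conversion, reference_time = ((tsc * tsc_scale) >> 64) + tsc_offset, in 100 ns units, and the new assertion after GUEST_SYNC(7) checks that the value read right after userspace calls KVM_SET_CLOCK is under 100000, i.e. within 10 ms of zero. A sketch of the 64x64-to-128-bit multiply-and-shift using the compiler's unsigned __int128, which is effectively what mul_u64_u64_shr64() computes:

#include <stdint.h>

/* reference_time = ((tsc * scale) >> 64) + offset, per the Hyper-V TSC page */
static inline uint64_t tsc_page_read(uint64_t tsc, uint64_t scale, int64_t offset)
{
	return (uint64_t)(((unsigned __int128)tsc * scale) >> 64) + offset;
}

/* e.g.: uint64_t now = tsc_page_read(rdtsc(), page->tsc_scale, page->tsc_offset); */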
diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
index 0ccb1dda099a..eb307ca37bfa 100755
--- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
+++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
@@ -657,10 +657,21 @@ test_ecn_decap()
{
# In accordance with INET_ECN_decapsulate()
__test_ecn_decap 00 00 0x00
+ __test_ecn_decap 00 01 0x00
+ __test_ecn_decap 00 02 0x00
+ # 00 03 is tested in test_ecn_decap_error()
+ __test_ecn_decap 01 00 0x01
__test_ecn_decap 01 01 0x01
- __test_ecn_decap 02 01 0x01
+ __test_ecn_decap 01 02 0x01
__test_ecn_decap 01 03 0x03
+ __test_ecn_decap 02 00 0x02
+ __test_ecn_decap 02 01 0x01
+ __test_ecn_decap 02 02 0x02
__test_ecn_decap 02 03 0x03
+ __test_ecn_decap 03 00 0x03
+ __test_ecn_decap 03 01 0x03
+ __test_ecn_decap 03 02 0x03
+ __test_ecn_decap 03 03 0x03
test_ecn_decap_error
}
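The expanded matrix covers every outer/inner ECN combination handled by INET_ECN_decapsulate() (RFC 6040): an inner CE is never downgraded, an ECT inner becomes CE when the outer is CE, an ECT(0) inner under an ECT(1) outer is promoted to ECT(1), a Not-ECT inner keeps its marking under any non-CE outer, and the Not-ECT inner with CE outer combination is the drop case left to test_ecn_decap_error(). A hedged C sketch of that table (arguments follow the test's inner-then-outer order):

#include <stdio.h>

enum { NOT_ECT = 0, ECT_1 = 1, ECT_0 = 2, CE = 3 };

/*
 * Returns the inner ECN codepoint after decapsulation, or -1 for "drop"
 * (Not-ECT inner with CE outer), mirroring the matrix the test exercises.
 */
static int ecn_decapsulate(int inner, int outer)
{
	if (inner == CE)
		return CE;				/* inner CE is never downgraded */
	if (outer == CE)
		return inner == NOT_ECT ? -1 : CE;	/* propagate congestion, or drop */
	if (inner == ECT_0 && outer == ECT_1)
		return ECT_1;				/* RFC 6040 ECT(1) propagation */
	return inner;					/* otherwise keep the inner marking */
}

int main(void)
{
	int inner, outer;

	for (inner = 0; inner <= 3; inner++)
		for (outer = 0; outer <= 3; outer++)
			printf("inner %02x outer %02x -> %d\n",
			       inner, outer, ecn_decapsulate(inner, outer));
	return 0;
}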